diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/sentencepiece.pc.in b/cc-multilingual-main/cc_net/third_party/sentencepiece/sentencepiece.pc.in
new file mode 100644
index 0000000000000000000000000000000000000000..ac7fef68c3958a51e125442be0278ee34c6daec2
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/sentencepiece.pc.in
@@ -0,0 +1,10 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: @PROJECT_NAME@
+Description: Unsupervised text tokenizer and detokenizer for Neural Network-based text generation.
+Version: @PROJECT_VERSION@
+Libs: -L${libdir} -lsentencepiece -lsentencepiece_train @libprotobuf_lite@ @pkgconfiglibs@
+Cflags: -I${includedir} @pkgconfigcflags@
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/CMakeLists.txt b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d00ecba36b9edda027da912dbcd1de806e45b410
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/CMakeLists.txt
@@ -0,0 +1,4 @@
+include_directories(absl/strings darts_clone esaxx protobuf-lite)
+
+
+
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/LICENSE b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..28203b3b2d937ad240a79425d6ffe15001d2a298
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/LICENSE
@@ -0,0 +1,10 @@
+Copyright (c) 2008-2011, Susumu Yata
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+- Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
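The next file vendors the Darts-clone double-array trie header. As a reader aid, here is a minimal usage sketch of the `Darts::DoubleArray` interface declared below (hypothetical keys and output, not part of the diff; it only exercises build(), exactMatchSearch(), and commonPrefixSearch() as documented in the header):

```cpp
#include <cstddef>
#include <iostream>

#include "darts.h"  // the header added by this diff

int main() {
  // Keys must be sorted, unique, zero-terminated strings (see build() docs).
  const char *keys[] = { "app", "apple", "banana" };
  Darts::DoubleArray dic;
  dic.build(3, keys);  // no values given: key i gets its index i as value

  // Exact match: value is the key index, length the matched length.
  Darts::DoubleArray::result_pair_type result;
  dic.exactMatchSearch("apple", result);
  std::cout << result.value << " " << result.length << "\n";  // 1 5

  // Common-prefix search: every stored key that is a prefix of the query.
  Darts::DoubleArray::result_pair_type results[8];
  std::size_t num = dic.commonPrefixSearch("apples", results, 8);
  std::cout << num << "\n";  // 2 ("app" and "apple")
  return 0;
}
```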
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/darts.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/darts.h
new file mode 100644
index 0000000000000000000000000000000000000000..8892aaaa01d47bfd8fe1834f8aa087e4e8341390
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/darts.h
@@ -0,0 +1,1926 @@
+#ifndef DARTS_H_
+#define DARTS_H_
+
+#include <cstdio>
+#include <exception>
+#include <new>
+
+#define DARTS_VERSION "0.32"
+
+// DARTS_THROW() throws a <Darts::Exception> whose message starts with the
+// file name and the line number. For example, DARTS_THROW("error message") at
+// line 123 of "darts.h" throws a <Darts::Exception> which has a pointer to
+// "darts.h:123: exception: error message". The message is available by using
+// what() as well as that of <std::exception>.
+#define DARTS_INT_TO_STR(value) #value
+#define DARTS_LINE_TO_STR(line) DARTS_INT_TO_STR(line)
+#define DARTS_LINE_STR DARTS_LINE_TO_STR(__LINE__)
+#define DARTS_THROW(msg) throw Darts::Details::Exception( \
+  __FILE__ ":" DARTS_LINE_STR ": exception: " msg)
+
+namespace Darts {
+
+// The following namespace hides the internal types and classes.
+namespace Details {
+
+// This header assumes that <int> and <unsigned int> are 32-bit integer types.
+//
+// Darts-clone keeps values associated with keys. The type of the values is
+// <value_type>. Note that the values must be positive integers because the
+// most significant bit (MSB) of each value is used to represent whether the
+// corresponding unit is a leaf or not. Also, the keys are represented by
+// sequences of <char_type>s. <uchar_type> is the unsigned type of <char_type>.
+typedef char char_type;
+typedef unsigned char uchar_type;
+typedef int value_type;
+
+// The main structure of Darts-clone is an array of <unit_type>s, and the
+// unit type is actually a wrapper of <id_type>.
+typedef unsigned int id_type;
+
+// <progress_func_type> is the type of callback functions for reporting the
+// progress of building a dictionary. See also build() of <DoubleArray>.
+// The 1st argument receives the progress value and the 2nd argument receives
+// the maximum progress value. A usage example is to show the progress
+// percentage, 100.0 * (the 1st argument) / (the 2nd argument).
+typedef int (*progress_func_type)(std::size_t, std::size_t);
+
+// <DoubleArrayUnit> is the type of double-array units and it is a wrapper of
+// <id_type> in practice.
+class DoubleArrayUnit {
+ public:
+  DoubleArrayUnit() : unit_() {}
+
+  // has_leaf() returns whether a leaf unit is immediately derived from the
+  // unit (true) or not (false).
+  bool has_leaf() const {
+    return ((unit_ >> 8) & 1) == 1;
+  }
+  // value() returns the value stored in the unit, and thus value() is
+  // available when and only when the unit is a leaf unit.
+  value_type value() const {
+    return static_cast<value_type>(unit_ & ((1U << 31) - 1));
+  }
+
+  // label() returns the label associated with the unit. Note that a leaf unit
+  // always returns an invalid label. For this feature, leaf unit's label()
+  // returns an <id_type> that has the MSB of 1.
+  id_type label() const {
+    return unit_ & ((1U << 31) | 0xFF);
+  }
+  // offset() returns the offset from the unit to its derived units.
+  id_type offset() const {
+    return (unit_ >> 10) << ((unit_ & (1U << 9)) >> 6);
+  }
+
+ private:
+  id_type unit_;
+
+  // Copyable.
+};
+
+// Darts-clone throws an <Exception> for memory allocation failure, invalid
+// arguments or a too large offset. The last case means that there are too many
+// keys in the given set of keys. Note that the `msg' of <Exception> must be a
+// constant or static string because an <Exception> keeps only a pointer to
+// that string.
+class Exception : public std::exception {
+ public:
+  explicit Exception(const char *msg = NULL) throw() : msg_(msg) {}
+  Exception(const Exception &rhs) throw() : msg_(rhs.msg_) {}
+  virtual ~Exception() throw() {}
+
+  // overrides what() of <std::exception>.
+  virtual const char *what() const throw() {
+    return (msg_ != NULL) ? msg_ : "";
+  }
+
+ private:
+  const char *msg_;
+
+  // Disallows operator=.
+  Exception &operator=(const Exception &);
+};
+
+} // namespace Details
+
+// <DoubleArrayImpl> is the interface of Darts-clone. Note that other
+// classes should not be accessed from outside.
+//
+// <DoubleArrayImpl> has 4 template arguments but only the 3rd one is used as
+// the type of values. Note that the given <T> is used only from outside, and
+// the internal value type is not changed from <Details::value_type>.
+// In build(), given values are casted from <T> to <Details::value_type>
+// by using static_cast. On the other hand, values are casted from
+// <Details::value_type> to <T> in searching dictionaries.
+template <typename, typename, typename T, typename>
+class DoubleArrayImpl {
+ public:
+  // Even if this <value_type> is changed, the internal value type is still
+  // <Details::value_type>. Other types, such as 64-bit integer types
+  // and floating-point number types, should not be used.
+  typedef T value_type;
+  // A key is represented by a sequence of <key_type>s. For example,
+  // exactMatchSearch() takes a <const key_type *>.
+  typedef Details::char_type key_type;
+  // In searching dictionaries, the values associated with the matched keys are
+  // stored into or returned as <result_type>s.
+  typedef value_type result_type;
+
+  // <result_pair_type> enables applications to get the lengths of the matched
+  // keys in addition to the values.
+  struct result_pair_type {
+    value_type value;
+    std::size_t length;
+  };
+
+  // The constructor initializes member variables with 0 and NULLs.
+  DoubleArrayImpl() : size_(0), array_(NULL), buf_(NULL) {}
+  // The destructor frees memory allocated for units and then initializes
+  // member variables with 0 and NULLs.
+  virtual ~DoubleArrayImpl() {
+    clear();
+  }
+
+  // <DoubleArrayImpl> has 2 kinds of set_result()s. The 1st set_result() is to
+  // set a value to a <value_type>. The 2nd set_result() is to set a value and
+  // a length to a <result_pair_type>. By using set_result()s, search methods
+  // can return the 2 kinds of results in the same way.
+  // Why the set_result()s are non-static? It is for compatibility.
+  //
+  // The 1st set_result() takes a length as the 3rd argument but it is not
+  // used. If a compiler does a good job, codes for getting the length may be
+  // removed.
+  void set_result(value_type *result, value_type value, std::size_t) const {
+    *result = value;
+  }
+  // The 2nd set_result() uses both `value' and `length'.
+  void set_result(result_pair_type *result,
+      value_type value, std::size_t length) const {
+    result->value = value;
+    result->length = length;
+  }
+
+  // set_array() calls clear() in order to free memory allocated to the old
+  // array and then sets a new array. This function is useful to set a memory-
+  // mapped array. Note that the array set by set_array() is not freed in
+  // clear() and the destructor of <DoubleArrayImpl>.
+  // set_array() can also set the size of the new array but the size is not
+  // used in search methods. So it works well even if the 2nd argument is 0 or
+  // omitted. Remember that size() and total_size() returns 0 in such a case.
+  void set_array(const void *ptr, std::size_t size = 0) {
+    clear();
+    array_ = static_cast<const unit_type *>(ptr);
+    size_ = size;
+  }
+  // array() returns a pointer to the array of units.
+  const void *array() const {
+    return array_;
+  }
+
+  // clear() frees memory allocated to units and then initializes member
+  // variables with 0 and NULLs.
+  // Note that clear() does not free memory if the
+  // array of units was set by set_array(). In such a case, `array_' is not
+  // NULL and `buf_' is NULL.
+  void clear() {
+    size_ = 0;
+    array_ = NULL;
+    if (buf_ != NULL) {
+      delete[] buf_;
+      buf_ = NULL;
+    }
+  }
+
+  // unit_size() returns the size of each unit. The size must be 4 bytes.
+  std::size_t unit_size() const {
+    return sizeof(unit_type);
+  }
+  // size() returns the number of units. It can be 0 if set_array() is used.
+  std::size_t size() const {
+    return size_;
+  }
+  // total_size() returns the number of bytes allocated to the array of units.
+  // It can be 0 if set_array() is used.
+  std::size_t total_size() const {
+    return unit_size() * size();
+  }
+  // nonzero_size() exists for compatibility. It always returns the number of
+  // units because it takes long time to count the number of non-zero units.
+  std::size_t nonzero_size() const {
+    return size();
+  }
+
+  // build() constructs a dictionary from given key-value pairs. If `lengths'
+  // is NULL, `keys' is handled as an array of zero-terminated strings. If
+  // `values' is NULL, the index in `keys' is associated with each key, i.e.
+  // the ith key has (i - 1) as its value.
+  // Note that the key-value pairs must be arranged in key order and the values
+  // must not be negative. Also, if there are duplicate keys, only the first
+  // pair will be stored in the resultant dictionary.
+  // `progress_func' is a pointer to a callback function. If it is not NULL,
+  // it will be called in build() so that the caller can check the progress of
+  // dictionary construction. For details, please see the definition of
+  // <progress_func_type>.
+  // The return value of build() is 0, and it indicates the success of the
+  // operation. Otherwise, build() throws a <Details::Exception>, which is a
+  // derived class of <std::exception>.
+  // build() uses another construction algorithm if `values' is not NULL. In
+  // this case, Darts-clone uses a Directed Acyclic Word Graph (DAWG) instead
+  // of a trie because a DAWG is likely to be more compact than a trie.
+  int build(std::size_t num_keys, const key_type * const *keys,
+      const std::size_t *lengths = NULL, const value_type *values = NULL,
+      Details::progress_func_type progress_func = NULL);
+
+  // open() reads an array of units from the specified file. And if it goes
+  // well, the old array will be freed and replaced with the new array read
+  // from the file. `offset' specifies the number of bytes to be skipped before
+  // reading an array. `size' specifies the number of bytes to be read from the
+  // file. If the `size' is 0, the whole file will be read.
+  // open() returns 0 iff the operation succeeds. Otherwise, it returns a
+  // non-zero value or throws a <Details::Exception>. The exception is thrown
+  // when and only when a memory allocation fails.
+  int open(const char *file_name, const char *mode = "rb",
+      std::size_t offset = 0, std::size_t size = 0);
+  // save() writes the array of units into the specified file. `offset'
+  // specifies the number of bytes to be skipped before writing the array.
+  // open() returns 0 iff the operation succeeds. Otherwise, it returns a
+  // non-zero value.
+  int save(const char *file_name, const char *mode = "wb",
+      std::size_t offset = 0) const;
+
+  // The 1st exactMatchSearch() tests whether the given key exists or not, and
+  // if it exists, its value and length are set to `result'. Otherwise, the
+  // value and the length of `result' are set to -1 and 0 respectively.
+  // Note that if `length' is 0, `key' is handled as a zero-terminated string.
+  // `node_pos' specifies the start position of matching. This argument enables
+  // the combination of exactMatchSearch() and traverse(). For example, if you
+  // want to test "xyzA", "xyzBC", and "xyzDE", you can use traverse() to get
+  // the node position corresponding to "xyz" and then you can use
+  // exactMatchSearch() to test "A", "BC", and "DE" from that position.
+  // Note that the length of `result' indicates the length from the `node_pos'.
+  // In the above example, the lengths are { 1, 2, 2 }, not { 4, 5, 5 }.
+  template <class U>
+  void exactMatchSearch(const key_type *key, U &result,
+      std::size_t length = 0, std::size_t node_pos = 0) const {
+    result = exactMatchSearch<U>(key, length, node_pos);
+  }
+  // The 2nd exactMatchSearch() returns a result instead of updating the 2nd
+  // argument. So, the following exactMatchSearch() has only 3 arguments.
+  template <class U>
+  inline U exactMatchSearch(const key_type *key, std::size_t length = 0,
+      std::size_t node_pos = 0) const;
+
+  // commonPrefixSearch() searches for keys which match a prefix of the given
+  // string. If `length' is 0, `key' is handled as a zero-terminated string.
+  // The values and the lengths of at most `max_num_results' matched keys are
+  // stored in `results'. commonPrefixSearch() returns the number of matched
+  // keys. Note that the return value can be larger than `max_num_results' if
+  // there are more than `max_num_results' matches. If you want to get all the
+  // results, allocate more spaces and call commonPrefixSearch() again.
+  // `node_pos' works as well as in exactMatchSearch().
+  template <class U>
+  inline std::size_t commonPrefixSearch(const key_type *key, U *results,
+      std::size_t max_num_results, std::size_t length = 0,
+      std::size_t node_pos = 0) const;
+
+  // In Darts-clone, a dictionary is a deterministic finite-state automaton
+  // (DFA) and traverse() tests transitions on the DFA. The initial state is
+  // `node_pos' and traverse() chooses transitions labeled key[key_pos],
+  // key[key_pos + 1], ... in order. If there is not a transition labeled
+  // key[key_pos + i], traverse() terminates the transitions at that state and
+  // returns -2. Otherwise, traverse() ends without a termination and returns
+  // -1 or a nonnegative value, -1 indicates that the final state was not an
+  // accept state. When a nonnegative value is returned, it is the value
+  // associated with the final accept state. That is, traverse() returns the
+  // value associated with the given key if it exists. Note that traverse()
+  // updates `node_pos' and `key_pos' after each transition.
+  inline value_type traverse(const key_type *key, std::size_t &node_pos,
+      std::size_t &key_pos, std::size_t length = 0) const;
+
+ private:
+  typedef Details::uchar_type uchar_type;
+  typedef Details::id_type id_type;
+  typedef Details::DoubleArrayUnit unit_type;
+
+  std::size_t size_;
+  const unit_type *array_;
+  unit_type *buf_;
+
+  // Disallows copy and assignment.
+  DoubleArrayImpl(const DoubleArrayImpl &);
+  DoubleArrayImpl &operator=(const DoubleArrayImpl &);
+};
+
+// <DoubleArray> is the typical instance of <DoubleArrayImpl>. It uses <int>
+// as the type of values and it is suitable for most cases.
+typedef DoubleArrayImpl<void, void, int, void> DoubleArray;
+
+// The interface section ends here. For using Darts-clone, there is no need
+// to read the remaining section, which gives the implementation of
+// Darts-clone.
+
+//
+// Member functions of DoubleArrayImpl (except build()).
+//
+
+template <typename A, typename B, typename T, typename C>
+int DoubleArrayImpl<A, B, T, C>::open(const char *file_name,
+    const char *mode, std::size_t offset, std::size_t size) {
+#ifdef _MSC_VER
+  std::FILE *file;
+  if (::fopen_s(&file, file_name, mode) != 0) {
+    return -1;
+  }
+#else
+  std::FILE *file = std::fopen(file_name, mode);
+  if (file == NULL) {
+    return -1;
+  }
+#endif
+
+  if (size == 0) {
+    if (std::fseek(file, 0, SEEK_END) != 0) {
+      std::fclose(file);
+      return -1;
+    }
+    size = std::ftell(file) - offset;
+  }
+
+  size /= unit_size();
+  if (size < 256 || (size & 0xFF) != 0) {
+    std::fclose(file);
+    return -1;
+  }
+
+  if (std::fseek(file, offset, SEEK_SET) != 0) {
+    std::fclose(file);
+    return -1;
+  }
+
+  unit_type units[256];
+  if (std::fread(units, unit_size(), 256, file) != 256) {
+    std::fclose(file);
+    return -1;
+  }
+
+  if (units[0].label() != '\0' || units[0].has_leaf() ||
+      units[0].offset() == 0 || units[0].offset() >= 512) {
+    std::fclose(file);
+    return -1;
+  }
+  for (id_type i = 1; i < 256; ++i) {
+    if (units[i].label() <= 0xFF && units[i].offset() >= size) {
+      std::fclose(file);
+      return -1;
+    }
+  }
+
+  unit_type *buf;
+  try {
+    buf = new unit_type[size];
+    for (id_type i = 0; i < 256; ++i) {
+      buf[i] = units[i];
+    }
+  } catch (const std::bad_alloc &) {
+    std::fclose(file);
+    DARTS_THROW("failed to open double-array: std::bad_alloc");
+  }
+
+  if (size > 256) {
+    if (std::fread(buf + 256, unit_size(), size - 256, file) != size - 256) {
+      std::fclose(file);
+      delete[] buf;
+      return -1;
+    }
+  }
+  std::fclose(file);
+
+  clear();
+
+  size_ = size;
+  array_ = buf;
+  buf_ = buf;
+  return 0;
+}
+
+template <typename A, typename B, typename T, typename C>
+int DoubleArrayImpl<A, B, T, C>::save(const char *file_name,
+    const char *mode, std::size_t) const {
+  if (size() == 0) {
+    return -1;
+  }
+
+#ifdef _MSC_VER
+  std::FILE *file;
+  if (::fopen_s(&file, file_name, mode) != 0) {
+    return -1;
+  }
+#else
+  std::FILE *file = std::fopen(file_name, mode);
+  if (file == NULL) {
+    return -1;
+  }
+#endif
+
+  if (std::fwrite(array_, unit_size(), size(), file) != size()) {
+    std::fclose(file);
+    return -1;
+  }
+  std::fclose(file);
+  return 0;
+}
+
+template <typename A, typename B, typename T, typename C>
+template <typename U>
+inline U DoubleArrayImpl<A, B, T, C>::exactMatchSearch(const key_type *key,
+    std::size_t length, std::size_t node_pos) const {
+  U result;
+  set_result(&result, static_cast<value_type>(-1), 0);
+
+  unit_type unit = array_[node_pos];
+  if (length != 0) {
+    for (std::size_t i = 0; i < length; ++i) {
+      node_pos ^= unit.offset() ^ static_cast<uchar_type>(key[i]);
+      unit = array_[node_pos];
+      if (unit.label() != static_cast<uchar_type>(key[i])) {
+        return result;
+      }
+    }
+  } else {
+    for ( ; key[length] != '\0'; ++length) {
+      node_pos ^= unit.offset() ^ static_cast<uchar_type>(key[length]);
+      unit = array_[node_pos];
+      if (unit.label() != static_cast<uchar_type>(key[length])) {
+        return result;
+      }
+    }
+  }
+
+  if (!unit.has_leaf()) {
+    return result;
+  }
+  unit = array_[node_pos ^ unit.offset()];
+  set_result(&result, static_cast<value_type>(unit.value()), length);
+  return result;
+}
+
+template <typename A, typename B, typename T, typename C>
+template <typename U>
+inline std::size_t DoubleArrayImpl<A, B, T, C>::commonPrefixSearch(
+    const key_type *key, U *results, std::size_t max_num_results,
+    std::size_t length, std::size_t node_pos) const {
+  std::size_t num_results = 0;
+
+  unit_type unit = array_[node_pos];
+  node_pos ^= unit.offset();
+  if (length != 0) {
+    for (std::size_t i = 0; i < length; ++i) {
+      node_pos ^= static_cast<uchar_type>(key[i]);
+      unit = array_[node_pos];
+      if (unit.label() != static_cast<uchar_type>(key[i])) {
+        return num_results;
+      }
+
+      node_pos ^= unit.offset();
+      if (unit.has_leaf()) {
+        if (num_results < max_num_results) {
+          set_result(&results[num_results], static_cast<value_type>(
+              array_[node_pos].value()), i + 1);
+        }
+        ++num_results;
+      }
+    }
+  } else {
+    for ( ; key[length] != '\0'; ++length) {
+      node_pos ^= static_cast<uchar_type>(key[length]);
+      unit = array_[node_pos];
+      if (unit.label() != static_cast<uchar_type>(key[length])) {
+        return num_results;
+      }
+
+      node_pos ^= unit.offset();
+      if (unit.has_leaf()) {
+        if (num_results < max_num_results) {
+          set_result(&results[num_results], static_cast<value_type>(
+              array_[node_pos].value()), length + 1);
+        }
+        ++num_results;
+      }
+    }
+  }
+
+  return num_results;
+}
+
+template <typename A, typename B, typename T, typename C>
+inline typename DoubleArrayImpl<A, B, T, C>::value_type
+DoubleArrayImpl<A, B, T, C>::traverse(const key_type *key,
+    std::size_t &node_pos, std::size_t &key_pos, std::size_t length) const {
+  id_type id = static_cast<id_type>(node_pos);
+  unit_type unit = array_[id];
+
+  if (length != 0) {
+    for ( ; key_pos < length; ++key_pos) {
+      id ^= unit.offset() ^ static_cast<uchar_type>(key[key_pos]);
+      unit = array_[id];
+      if (unit.label() != static_cast<uchar_type>(key[key_pos])) {
+        return static_cast<value_type>(-2);
+      }
+      node_pos = id;
+    }
+  } else {
+    for ( ; key[key_pos] != '\0'; ++key_pos) {
+      id ^= unit.offset() ^ static_cast<uchar_type>(key[key_pos]);
+      unit = array_[id];
+      if (unit.label() != static_cast<uchar_type>(key[key_pos])) {
+        return static_cast<value_type>(-2);
+      }
+      node_pos = id;
+    }
+  }
+
+  if (!unit.has_leaf()) {
+    return static_cast<value_type>(-1);
+  }
+  unit = array_[id ^ unit.offset()];
+  return static_cast<value_type>(unit.value());
+}
+
+namespace Details {
+
+//
+// Memory management of array.
+//
+
+template <typename T>
+class AutoArray {
+ public:
+  explicit AutoArray(T *array = NULL) : array_(array) {}
+  ~AutoArray() {
+    clear();
+  }
+
+  const T &operator[](std::size_t id) const {
+    return array_[id];
+  }
+  T &operator[](std::size_t id) {
+    return array_[id];
+  }
+
+  bool empty() const {
+    return array_ == NULL;
+  }
+
+  void clear() {
+    if (array_ != NULL) {
+      delete[] array_;
+      array_ = NULL;
+    }
+  }
+  void swap(AutoArray *array) {
+    T *temp = array_;
+    array_ = array->array_;
+    array->array_ = temp;
+  }
+  void reset(T *array = NULL) {
+    AutoArray(array).swap(this);
+  }
+
+ private:
+  T *array_;
+
+  // Disallows copy and assignment.
+  AutoArray(const AutoArray &);
+  AutoArray &operator=(const AutoArray &);
+};
+
+//
+// Memory management of resizable array.
+//
+
+template <typename T>
+class AutoPool {
+ public:
+  AutoPool() : buf_(), size_(0), capacity_(0) {}
+  ~AutoPool() { clear(); }
+
+  const T &operator[](std::size_t id) const {
+    return *(reinterpret_cast<const T *>(&buf_[0]) + id);
+  }
+  T &operator[](std::size_t id) {
+    return *(reinterpret_cast<T *>(&buf_[0]) + id);
+  }
+
+  bool empty() const {
+    return size_ == 0;
+  }
+  std::size_t size() const {
+    return size_;
+  }
+
+  void clear() {
+    resize(0);
+    buf_.clear();
+    size_ = 0;
+    capacity_ = 0;
+  }
+
+  void push_back(const T &value) {
+    append(value);
+  }
+  void pop_back() {
+    (*this)[--size_].~T();
+  }
+
+  void append() {
+    if (size_ == capacity_)
+      resize_buf(size_ + 1);
+    new(&(*this)[size_++]) T;
+  }
+  void append(const T &value) {
+    if (size_ == capacity_)
+      resize_buf(size_ + 1);
+    new(&(*this)[size_++]) T(value);
+  }
+
+  void resize(std::size_t size) {
+    while (size_ > size) {
+      (*this)[--size_].~T();
+    }
+    if (size > capacity_) {
+      resize_buf(size);
+    }
+    while (size_ < size) {
+      new(&(*this)[size_++]) T;
+    }
+  }
+  void resize(std::size_t size, const T &value) {
+    while (size_ > size) {
+      (*this)[--size_].~T();
+    }
+    if (size > capacity_) {
+      resize_buf(size);
+    }
+    while (size_ < size) {
+      new(&(*this)[size_++]) T(value);
+    }
+  }
+
+  void reserve(std::size_t size) {
+    if (size > capacity_) {
+      resize_buf(size);
+    }
+  }
+
+ private:
+  AutoArray<char> buf_;
+  std::size_t size_;
+  std::size_t capacity_;
+
+  // Disallows copy and assignment.
+  AutoPool(const AutoPool &);
+  AutoPool &operator=(const AutoPool &);
+
+  void resize_buf(std::size_t size);
+};
+
+template <typename T>
+void AutoPool<T>::resize_buf(std::size_t size) {
+  std::size_t capacity;
+  if (size >= capacity_ * 2) {
+    capacity = size;
+  } else {
+    capacity = 1;
+    while (capacity < size) {
+      capacity <<= 1;
+    }
+  }
+
+  AutoArray<char> buf;
+  try {
+    buf.reset(new char[sizeof(T) * capacity]);
+  } catch (const std::bad_alloc &) {
+    DARTS_THROW("failed to resize pool: std::bad_alloc");
+  }
+
+  if (size_ > 0) {
+    T *src = reinterpret_cast<T *>(&buf_[0]);
+    T *dest = reinterpret_cast<T *>(&buf[0]);
+    for (std::size_t i = 0; i < size_; ++i) {
+      new(&dest[i]) T(src[i]);
+      src[i].~T();
+    }
+  }
+
+  buf_.swap(&buf);
+  capacity_ = capacity;
+}
+
+//
+// Memory management of stack.
+//
+
+template <typename T>
+class AutoStack {
+ public:
+  AutoStack() : pool_() {}
+  ~AutoStack() {
+    clear();
+  }
+
+  const T &top() const {
+    return pool_[size() - 1];
+  }
+  T &top() {
+    return pool_[size() - 1];
+  }
+
+  bool empty() const {
+    return pool_.empty();
+  }
+  std::size_t size() const {
+    return pool_.size();
+  }
+
+  void push(const T &value) {
+    pool_.push_back(value);
+  }
+  void pop() {
+    pool_.pop_back();
+  }
+
+  void clear() {
+    pool_.clear();
+  }
+
+ private:
+  AutoPool<T> pool_;
+
+  // Disallows copy and assignment.
+  AutoStack(const AutoStack &);
+  AutoStack &operator=(const AutoStack &);
+};
+
+//
+// Succinct bit vector.
+//
+
+class BitVector {
+ public:
+  BitVector() : units_(), ranks_(), num_ones_(0), size_(0) {}
+  ~BitVector() {
+    clear();
+  }
+
+  bool operator[](std::size_t id) const {
+    return (units_[id / UNIT_SIZE] >> (id % UNIT_SIZE) & 1) == 1;
+  }
+
+  id_type rank(std::size_t id) const {
+    std::size_t unit_id = id / UNIT_SIZE;
+    return ranks_[unit_id] + pop_count(units_[unit_id]
+        & (~0U >> (UNIT_SIZE - (id % UNIT_SIZE) - 1)));
+  }
+
+  void set(std::size_t id, bool bit) {
+    if (bit) {
+      units_[id / UNIT_SIZE] |= 1U << (id % UNIT_SIZE);
+    } else {
+      units_[id / UNIT_SIZE] &= ~(1U << (id % UNIT_SIZE));
+    }
+  }
+
+  bool empty() const {
+    return units_.empty();
+  }
+  std::size_t num_ones() const {
+    return num_ones_;
+  }
+  std::size_t size() const {
+    return size_;
+  }
+
+  void append() {
+    if ((size_ % UNIT_SIZE) == 0) {
+      units_.append(0);
+    }
+    ++size_;
+  }
+  void build();
+
+  void clear() {
+    units_.clear();
+    ranks_.clear();
+  }
+
+ private:
+  enum { UNIT_SIZE = sizeof(id_type) * 8 };
+
+  AutoPool<id_type> units_;
+  AutoArray<id_type> ranks_;
+  std::size_t num_ones_;
+  std::size_t size_;
+
+  // Disallows copy and assignment.
+  BitVector(const BitVector &);
+  BitVector &operator=(const BitVector &);
+
+  static id_type pop_count(id_type unit) {
+    unit = ((unit & 0xAAAAAAAA) >> 1) + (unit & 0x55555555);
+    unit = ((unit & 0xCCCCCCCC) >> 2) + (unit & 0x33333333);
+    unit = ((unit >> 4) + unit) & 0x0F0F0F0F;
+    unit += unit >> 8;
+    unit += unit >> 16;
+    return unit & 0xFF;
+  }
+};
+
+inline void BitVector::build() {
+  try {
+    ranks_.reset(new id_type[units_.size()]);
+  } catch (const std::bad_alloc &) {
+    DARTS_THROW("failed to build rank index: std::bad_alloc");
+  }
+
+  num_ones_ = 0;
+  for (std::size_t i = 0; i < units_.size(); ++i) {
+    ranks_[i] = num_ones_;
+    num_ones_ += pop_count(units_[i]);
+  }
+}
+
+//
+// Keyset.
+//
+
+template <typename T>
+class Keyset {
+ public:
+  Keyset(std::size_t num_keys, const char_type * const *keys,
+      const std::size_t *lengths, const T *values) :
+      num_keys_(num_keys), keys_(keys), lengths_(lengths), values_(values) {}
+
+  std::size_t num_keys() const {
+    return num_keys_;
+  }
+  const char_type *keys(std::size_t id) const {
+    return keys_[id];
+  }
+  uchar_type keys(std::size_t key_id, std::size_t char_id) const {
+    if (has_lengths() && char_id >= lengths_[key_id])
+      return '\0';
+    return keys_[key_id][char_id];
+  }
+
+  bool has_lengths() const {
+    return lengths_ != NULL;
+  }
+  std::size_t lengths(std::size_t id) const {
+    if (has_lengths()) {
+      return lengths_[id];
+    }
+    std::size_t length = 0;
+    while (keys_[id][length] != '\0') {
+      ++length;
+    }
+    return length;
+  }
+
+  bool has_values() const {
+    return values_ != NULL;
+  }
+  const value_type values(std::size_t id) const {
+    if (has_values()) {
+      return static_cast<value_type>(values_[id]);
+    }
+    return static_cast<value_type>(id);
+  }
+
+ private:
+  std::size_t num_keys_;
+  const char_type * const * keys_;
+  const std::size_t *lengths_;
+  const T *values_;
+
+  // Disallows copy and assignment.
+  Keyset(const Keyset &);
+  Keyset &operator=(const Keyset &);
+};
+
+//
+// Node of Directed Acyclic Word Graph (DAWG).
+//
+
+class DawgNode {
+ public:
+  DawgNode() : child_(0), sibling_(0), label_('\0'),
+      is_state_(false), has_sibling_(false) {}
+
+  void set_child(id_type child) {
+    child_ = child;
+  }
+  void set_sibling(id_type sibling) {
+    sibling_ = sibling;
+  }
+  void set_value(value_type value) {
+    child_ = value;
+  }
+  void set_label(uchar_type label) {
+    label_ = label;
+  }
+  void set_is_state(bool is_state) {
+    is_state_ = is_state;
+  }
+  void set_has_sibling(bool has_sibling) {
+    has_sibling_ = has_sibling;
+  }
+
+  id_type child() const {
+    return child_;
+  }
+  id_type sibling() const {
+    return sibling_;
+  }
+  value_type value() const {
+    return static_cast<value_type>(child_);
+  }
+  uchar_type label() const {
+    return label_;
+  }
+  bool is_state() const {
+    return is_state_;
+  }
+  bool has_sibling() const {
+    return has_sibling_;
+  }
+
+  id_type unit() const {
+    if (label_ == '\0') {
+      return (child_ << 1) | (has_sibling_ ? 1 : 0);
+    }
+    return (child_ << 2) | (is_state_ ? 2 : 0) | (has_sibling_ ? 1 : 0);
+  }
+
+ private:
+  id_type child_;
+  id_type sibling_;
+  uchar_type label_;
+  bool is_state_;
+  bool has_sibling_;
+
+  // Copyable.
+};
+
+//
+// Fixed unit of Directed Acyclic Word Graph (DAWG).
+//
+
+class DawgUnit {
+ public:
+  explicit DawgUnit(id_type unit = 0) : unit_(unit) {}
+  DawgUnit(const DawgUnit &unit) : unit_(unit.unit_) {}
+
+  DawgUnit &operator=(id_type unit) {
+    unit_ = unit;
+    return *this;
+  }
+
+  id_type unit() const {
+    return unit_;
+  }
+
+  id_type child() const {
+    return unit_ >> 2;
+  }
+  bool has_sibling() const {
+    return (unit_ & 1) == 1;
+  }
+  value_type value() const {
+    return static_cast<value_type>(unit_ >> 1);
+  }
+  bool is_state() const {
+    return (unit_ & 2) == 2;
+  }
+
+ private:
+  id_type unit_;
+
+  // Copyable.
+};
+
+//
+// Directed Acyclic Word Graph (DAWG) builder.
+//
+
+class DawgBuilder {
+ public:
+  DawgBuilder() : nodes_(), units_(), labels_(), is_intersections_(),
+      table_(), node_stack_(), recycle_bin_(), num_states_(0) {}
+  ~DawgBuilder() {
+    clear();
+  }
+
+  id_type root() const {
+    return 0;
+  }
+
+  id_type child(id_type id) const {
+    return units_[id].child();
+  }
+  id_type sibling(id_type id) const {
+    return units_[id].has_sibling() ? (id + 1) : 0;
+  }
+  int value(id_type id) const {
+    return units_[id].value();
+  }
+
+  bool is_leaf(id_type id) const {
+    return label(id) == '\0';
+  }
+  uchar_type label(id_type id) const {
+    return labels_[id];
+  }
+
+  bool is_intersection(id_type id) const {
+    return is_intersections_[id];
+  }
+  id_type intersection_id(id_type id) const {
+    return is_intersections_.rank(id) - 1;
+  }
+
+  std::size_t num_intersections() const {
+    return is_intersections_.num_ones();
+  }
+
+  std::size_t size() const {
+    return units_.size();
+  }
+
+  void init();
+  void finish();
+
+  void insert(const char *key, std::size_t length, value_type value);
+
+  void clear();
+
+ private:
+  enum { INITIAL_TABLE_SIZE = 1 << 10 };
+
+  AutoPool<DawgNode> nodes_;
+  AutoPool<DawgUnit> units_;
+  AutoPool<uchar_type> labels_;
+  BitVector is_intersections_;
+  AutoPool<id_type> table_;
+  AutoStack<id_type> node_stack_;
+  AutoStack<id_type> recycle_bin_;
+  std::size_t num_states_;
+
+  // Disallows copy and assignment.
+  DawgBuilder(const DawgBuilder &);
+  DawgBuilder &operator=(const DawgBuilder &);
+
+  void flush(id_type id);
+
+  void expand_table();
+
+  id_type find_unit(id_type id, id_type *hash_id) const;
+  id_type find_node(id_type node_id, id_type *hash_id) const;
+
+  bool are_equal(id_type node_id, id_type unit_id) const;
+
+  id_type hash_unit(id_type id) const;
+  id_type hash_node(id_type id) const;
+
+  id_type append_node();
+  id_type append_unit();
+
+  void free_node(id_type id) {
+    recycle_bin_.push(id);
+  }
+
+  static id_type hash(id_type key) {
+    key = ~key + (key << 15);  // key = (key << 15) - key - 1;
+    key = key ^ (key >> 12);
+    key = key + (key << 2);
+    key = key ^ (key >> 4);
+    key = key * 2057;  // key = (key + (key << 3)) + (key << 11);
+    key = key ^ (key >> 16);
+    return key;
+  }
+};
+
+inline void DawgBuilder::init() {
+  table_.resize(INITIAL_TABLE_SIZE, 0);
+
+  append_node();
+  append_unit();
+
+  num_states_ = 1;
+
+  nodes_[0].set_label(0xFF);
+  node_stack_.push(0);
+}
+
+inline void DawgBuilder::finish() {
+  flush(0);
+
+  units_[0] = nodes_[0].unit();
+  labels_[0] = nodes_[0].label();
+
+  nodes_.clear();
+  table_.clear();
+  node_stack_.clear();
+  recycle_bin_.clear();
+
+  is_intersections_.build();
+}
+
+inline void DawgBuilder::insert(const char *key, std::size_t length,
+    value_type value) {
+  if (value < 0) {
+    DARTS_THROW("failed to insert key: negative value");
+  } else if (length == 0) {
+    DARTS_THROW("failed to insert key: zero-length key");
+  }
+
+  id_type id = 0;
+  std::size_t key_pos = 0;
+
+  for ( ; key_pos <= length; ++key_pos) {
+    id_type child_id = nodes_[id].child();
+    if (child_id == 0) {
+      break;
+    }
+
+    uchar_type key_label = static_cast<uchar_type>(key[key_pos]);
+    if (key_pos < length && key_label == '\0') {
+      DARTS_THROW("failed to insert key: invalid null character");
+    }
+
+    uchar_type unit_label = nodes_[child_id].label();
+    if (key_label < unit_label) {
+      DARTS_THROW("failed to insert key: wrong key order");
+    } else if (key_label > unit_label) {
+      nodes_[child_id].set_has_sibling(true);
+      flush(child_id);
+      break;
+    }
+    id = child_id;
+  }
+
+  if (key_pos > length) {
+    return;
+  }
+
+  for ( ; key_pos <= length; ++key_pos) {
+    uchar_type key_label = static_cast<uchar_type>(
+        (key_pos < length) ? key[key_pos] : '\0');
+    id_type child_id = append_node();
+
+    if (nodes_[id].child() == 0) {
+      nodes_[child_id].set_is_state(true);
+    }
+    nodes_[child_id].set_sibling(nodes_[id].child());
+    nodes_[child_id].set_label(key_label);
+    nodes_[id].set_child(child_id);
+    node_stack_.push(child_id);
+
+    id = child_id;
+  }
+  nodes_[id].set_value(value);
+}
+
+inline void DawgBuilder::clear() {
+  nodes_.clear();
+  units_.clear();
+  labels_.clear();
+  is_intersections_.clear();
+  table_.clear();
+  node_stack_.clear();
+  recycle_bin_.clear();
+  num_states_ = 0;
+}
+
+inline void DawgBuilder::flush(id_type id) {
+  while (node_stack_.top() != id) {
+    id_type node_id = node_stack_.top();
+    node_stack_.pop();
+
+    if (num_states_ >= table_.size() - (table_.size() >> 2)) {
+      expand_table();
+    }
+
+    id_type num_siblings = 0;
+    for (id_type i = node_id; i != 0; i = nodes_[i].sibling()) {
+      ++num_siblings;
+    }
+
+    id_type hash_id;
+    id_type match_id = find_node(node_id, &hash_id);
+    if (match_id != 0) {
+      is_intersections_.set(match_id, true);
+    } else {
+      id_type unit_id = 0;
+      for (id_type i = 0; i < num_siblings; ++i) {
+        unit_id = append_unit();
+      }
+      for (id_type i = node_id; i != 0; i = nodes_[i].sibling()) {
+        units_[unit_id] = nodes_[i].unit();
+        labels_[unit_id] = nodes_[i].label();
+        --unit_id;
+      }
+      match_id = unit_id + 1;
+      table_[hash_id] = match_id;
+      ++num_states_;
+    }
+
+    for (id_type i = node_id, next; i != 0; i = next) {
+      next = nodes_[i].sibling();
+      free_node(i);
+    }
+
+    nodes_[node_stack_.top()].set_child(match_id);
+  }
+  node_stack_.pop();
+}
+
+inline void DawgBuilder::expand_table() {
+  std::size_t table_size = table_.size() << 1;
+  table_.clear();
+  table_.resize(table_size, 0);
+
+  for (std::size_t i = 1; i < units_.size(); ++i) {
+    id_type id = static_cast<id_type>(i);
+    if (labels_[id] == '\0' || units_[id].is_state()) {
+      id_type hash_id;
+      find_unit(id, &hash_id);
+      table_[hash_id] = id;
+    }
+  }
+}
+
+inline id_type DawgBuilder::find_unit(id_type id, id_type *hash_id) const {
+  *hash_id = hash_unit(id) % table_.size();
+  for ( ; ; *hash_id = (*hash_id + 1) % table_.size()) {
+    id_type unit_id = table_[*hash_id];
+    if (unit_id == 0) {
+      break;
+    }
+
+    // There must not be the same unit.
+  }
+  return 0;
+}
+
+inline id_type DawgBuilder::find_node(id_type node_id,
+    id_type *hash_id) const {
+  *hash_id = hash_node(node_id) % table_.size();
+  for ( ; ; *hash_id = (*hash_id + 1) % table_.size()) {
+    id_type unit_id = table_[*hash_id];
+    if (unit_id == 0) {
+      break;
+    }
+
+    if (are_equal(node_id, unit_id)) {
+      return unit_id;
+    }
+  }
+  return 0;
+}
+
+inline bool DawgBuilder::are_equal(id_type node_id, id_type unit_id) const {
+  for (id_type i = nodes_[node_id].sibling(); i != 0;
+      i = nodes_[i].sibling()) {
+    if (units_[unit_id].has_sibling() == false) {
+      return false;
+    }
+    ++unit_id;
+  }
+  if (units_[unit_id].has_sibling() == true) {
+    return false;
+  }
+
+  for (id_type i = node_id; i != 0; i = nodes_[i].sibling(), --unit_id) {
+    if (nodes_[i].unit() != units_[unit_id].unit() ||
+        nodes_[i].label() != labels_[unit_id]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+inline id_type DawgBuilder::hash_unit(id_type id) const {
+  id_type hash_value = 0;
+  for ( ; id != 0; ++id) {
+    id_type unit = units_[id].unit();
+    uchar_type label = labels_[id];
+    hash_value ^= hash((label << 24) ^ unit);
+
+    if (units_[id].has_sibling() == false) {
+      break;
+    }
+  }
+  return hash_value;
+}
+
+inline id_type DawgBuilder::hash_node(id_type id) const {
+  id_type hash_value = 0;
+  for ( ; id != 0; id = nodes_[id].sibling()) {
+    id_type unit = nodes_[id].unit();
+    uchar_type label = nodes_[id].label();
+    hash_value ^= hash((label << 24) ^ unit);
+  }
+  return hash_value;
+}
+
+inline id_type DawgBuilder::append_unit() {
+  is_intersections_.append();
+  units_.append();
+  labels_.append();
+
+  return static_cast<id_type>(is_intersections_.size() - 1);
+}
+
+inline id_type DawgBuilder::append_node() {
+  id_type id;
+  if (recycle_bin_.empty()) {
+    id = static_cast<id_type>(nodes_.size());
+    nodes_.append();
+  } else {
+    id = recycle_bin_.top();
+    nodes_[id] = DawgNode();
+    recycle_bin_.pop();
+  }
+  return id;
+}
+
+//
+// Unit of double-array builder.
+//
+
+class DoubleArrayBuilderUnit {
+ public:
+  DoubleArrayBuilderUnit() : unit_(0) {}
+
+  void set_has_leaf(bool has_leaf) {
+    if (has_leaf) {
+      unit_ |= 1U << 8;
+    } else {
+      unit_ &= ~(1U << 8);
+    }
+  }
+  void set_value(value_type value) {
+    unit_ = value | (1U << 31);
+  }
+  void set_label(uchar_type label) {
+    unit_ = (unit_ & ~0xFFU) | label;
+  }
+  void set_offset(id_type offset) {
+    if (offset >= 1U << 29) {
+      DARTS_THROW("failed to modify unit: too large offset");
+    }
+    unit_ &= (1U << 31) | (1U << 8) | 0xFF;
+    if (offset < 1U << 21) {
+      unit_ |= (offset << 10);
+    } else {
+      unit_ |= (offset << 2) | (1U << 9);
+    }
+  }
+
+ private:
+  id_type unit_;
+
+  // Copyable.
+};
+
+//
+// Extra unit of double-array builder.
+//
+
+class DoubleArrayBuilderExtraUnit {
+ public:
+  DoubleArrayBuilderExtraUnit() : prev_(0), next_(0),
+      is_fixed_(false), is_used_(false) {}
+
+  void set_prev(id_type prev) {
+    prev_ = prev;
+  }
+  void set_next(id_type next) {
+    next_ = next;
+  }
+  void set_is_fixed(bool is_fixed) {
+    is_fixed_ = is_fixed;
+  }
+  void set_is_used(bool is_used) {
+    is_used_ = is_used;
+  }
+
+  id_type prev() const {
+    return prev_;
+  }
+  id_type next() const {
+    return next_;
+  }
+  bool is_fixed() const {
+    return is_fixed_;
+  }
+  bool is_used() const {
+    return is_used_;
+  }
+
+ private:
+  id_type prev_;
+  id_type next_;
+  bool is_fixed_;
+  bool is_used_;
+
+  // Copyable.
+};
+
+//
+// DAWG -> double-array converter.
+//
+
+class DoubleArrayBuilder {
+ public:
+  explicit DoubleArrayBuilder(progress_func_type progress_func)
+      : progress_func_(progress_func), units_(), extras_(), labels_(),
+        table_(), extras_head_(0) {}
+  ~DoubleArrayBuilder() {
+    clear();
+  }
+
+  template <typename T>
+  void build(const Keyset<T> &keyset);
+  void copy(std::size_t *size_ptr, DoubleArrayUnit **buf_ptr) const;
+
+  void clear();
+
+ private:
+  enum { BLOCK_SIZE = 256 };
+  enum { NUM_EXTRA_BLOCKS = 16 };
+  enum { NUM_EXTRAS = BLOCK_SIZE * NUM_EXTRA_BLOCKS };
+
+  enum { UPPER_MASK = 0xFF << 21 };
+  enum { LOWER_MASK = 0xFF };
+
+  typedef DoubleArrayBuilderUnit unit_type;
+  typedef DoubleArrayBuilderExtraUnit extra_type;
+
+  progress_func_type progress_func_;
+  AutoPool<unit_type> units_;
+  AutoArray<extra_type> extras_;
+  AutoPool<uchar_type> labels_;
+  AutoArray<id_type> table_;
+  id_type extras_head_;
+
+  // Disallows copy and assignment.
+  DoubleArrayBuilder(const DoubleArrayBuilder &);
+  DoubleArrayBuilder &operator=(const DoubleArrayBuilder &);
+
+  std::size_t num_blocks() const {
+    return units_.size() / BLOCK_SIZE;
+  }
+
+  const extra_type &extras(id_type id) const {
+    return extras_[id % NUM_EXTRAS];
+  }
+  extra_type &extras(id_type id) {
+    return extras_[id % NUM_EXTRAS];
+  }
+
+  template <typename T>
+  void build_dawg(const Keyset<T> &keyset, DawgBuilder *dawg_builder);
+  void build_from_dawg(const DawgBuilder &dawg);
+  void build_from_dawg(const DawgBuilder &dawg,
+      id_type dawg_id, id_type dic_id);
+  id_type arrange_from_dawg(const DawgBuilder &dawg,
+      id_type dawg_id, id_type dic_id);
+
+  template <typename T>
+  void build_from_keyset(const Keyset<T> &keyset);
+  template <typename T>
+  void build_from_keyset(const Keyset<T> &keyset, std::size_t begin,
+      std::size_t end, std::size_t depth, id_type dic_id);
+  template <typename T>
+  id_type arrange_from_keyset(const Keyset<T> &keyset, std::size_t begin,
+      std::size_t end, std::size_t depth, id_type dic_id);
+
+  id_type find_valid_offset(id_type id) const;
+  bool is_valid_offset(id_type id, id_type offset) const;
+
+  void reserve_id(id_type id);
+  void expand_units();
+
+  void fix_all_blocks();
+  void fix_block(id_type block_id);
+};
+
+template <typename T>
+void DoubleArrayBuilder::build(const Keyset<T> &keyset) {
+  if (keyset.has_values()) {
+    Details::DawgBuilder dawg_builder;
+    build_dawg(keyset, &dawg_builder);
+    build_from_dawg(dawg_builder);
+    dawg_builder.clear();
+  } else {
+    build_from_keyset(keyset);
+  }
+}
+
+inline void DoubleArrayBuilder::copy(std::size_t *size_ptr,
+    DoubleArrayUnit **buf_ptr) const {
+  if (size_ptr != NULL) {
+    *size_ptr = units_.size();
+  }
+  if (buf_ptr != NULL) {
+    *buf_ptr = new DoubleArrayUnit[units_.size()];
+    unit_type *units = reinterpret_cast<unit_type *>(*buf_ptr);
+    for (std::size_t i = 0; i < units_.size(); ++i) {
+      units[i] = units_[i];
+    }
+  }
+}
+
+inline void DoubleArrayBuilder::clear() {
+  units_.clear();
+  extras_.clear();
+  labels_.clear();
+  table_.clear();
+  extras_head_ = 0;
+}
+
+template <typename T>
+void DoubleArrayBuilder::build_dawg(const Keyset<T> &keyset,
+    DawgBuilder *dawg_builder) {
+  dawg_builder->init();
+  for (std::size_t i = 0; i < keyset.num_keys(); ++i) {
+    dawg_builder->insert(keyset.keys(i), keyset.lengths(i), keyset.values(i));
+    if (progress_func_ != NULL) {
+      progress_func_(i + 1, keyset.num_keys() + 1);
+    }
+  }
+  dawg_builder->finish();
+}
+
+inline void DoubleArrayBuilder::build_from_dawg(const DawgBuilder &dawg) {
+  std::size_t num_units = 1;
+  while (num_units < dawg.size()) {
+    num_units <<= 1;
+  }
+  units_.reserve(num_units);
+
+  table_.reset(new id_type[dawg.num_intersections()]);
+  for (std::size_t i = 0; i < dawg.num_intersections(); ++i) {
+    table_[i] = 0;
+  }
+
+  extras_.reset(new extra_type[NUM_EXTRAS]);
+
+  reserve_id(0);
+  extras(0).set_is_used(true);
+  units_[0].set_offset(1);
+  units_[0].set_label('\0');
+
+  if (dawg.child(dawg.root()) != 0) {
+    build_from_dawg(dawg, dawg.root(), 0);
+  }
+
+  fix_all_blocks();
+
+  extras_.clear();
+  labels_.clear();
+  table_.clear();
+}
+
+inline void DoubleArrayBuilder::build_from_dawg(const DawgBuilder &dawg,
+    id_type dawg_id, id_type dic_id) {
+  id_type dawg_child_id = dawg.child(dawg_id);
+  if (dawg.is_intersection(dawg_child_id)) {
+    id_type intersection_id = dawg.intersection_id(dawg_child_id);
+    id_type offset = table_[intersection_id];
+    if (offset != 0) {
+      offset ^= dic_id;
+      if (!(offset & UPPER_MASK) || !(offset & LOWER_MASK)) {
+        if (dawg.is_leaf(dawg_child_id)) {
+          units_[dic_id].set_has_leaf(true);
+        }
+        units_[dic_id].set_offset(offset);
+        return;
+      }
+    }
+  }
+
+  id_type offset = arrange_from_dawg(dawg, dawg_id, dic_id);
+  if (dawg.is_intersection(dawg_child_id)) {
+    table_[dawg.intersection_id(dawg_child_id)] = offset;
+  }
+
+  do {
+    uchar_type child_label = dawg.label(dawg_child_id);
+    id_type dic_child_id = offset ^ child_label;
+    if (child_label != '\0') {
+      build_from_dawg(dawg, dawg_child_id, dic_child_id);
+    }
+    dawg_child_id = dawg.sibling(dawg_child_id);
+  } while (dawg_child_id != 0);
+}
+
+inline id_type DoubleArrayBuilder::arrange_from_dawg(const DawgBuilder &dawg,
+    id_type dawg_id, id_type dic_id) {
+  labels_.resize(0);
+
+  id_type dawg_child_id = dawg.child(dawg_id);
+  while (dawg_child_id != 0) {
+    labels_.append(dawg.label(dawg_child_id));
+    dawg_child_id = dawg.sibling(dawg_child_id);
+  }
+
+  id_type offset = find_valid_offset(dic_id);
+  units_[dic_id].set_offset(dic_id ^ offset);
+
+  dawg_child_id = dawg.child(dawg_id);
+  for (std::size_t i = 0; i < labels_.size(); ++i) {
+    id_type dic_child_id = offset ^ labels_[i];
+    reserve_id(dic_child_id);
+
+    if (dawg.is_leaf(dawg_child_id)) {
+      units_[dic_id].set_has_leaf(true);
+      units_[dic_child_id].set_value(dawg.value(dawg_child_id));
+    } else {
+      units_[dic_child_id].set_label(labels_[i]);
+    }
+
+    dawg_child_id = dawg.sibling(dawg_child_id);
+  }
+  extras(offset).set_is_used(true);
+
+  return offset;
+}
+
+template <typename T>
+void DoubleArrayBuilder::build_from_keyset(const Keyset<T> &keyset) {
+  std::size_t num_units = 1;
+  while (num_units < keyset.num_keys()) {
+    num_units <<= 1;
+  }
+  units_.reserve(num_units);
+
+  extras_.reset(new extra_type[NUM_EXTRAS]);
+
+  reserve_id(0);
+  extras(0).set_is_used(true);
+  units_[0].set_offset(1);
+  units_[0].set_label('\0');
+
+  if (keyset.num_keys() > 0) {
+    build_from_keyset(keyset, 0, keyset.num_keys(), 0, 0);
+  }
+
+  fix_all_blocks();
+
+  extras_.clear();
+  labels_.clear();
+}
+
+template <typename T>
+void DoubleArrayBuilder::build_from_keyset(const Keyset<T> &keyset,
+    std::size_t begin, std::size_t end, std::size_t depth, id_type dic_id) {
+  id_type offset = arrange_from_keyset(keyset, begin, end, depth, dic_id);
+
+  while (begin < end) {
+    if (keyset.keys(begin, depth) != '\0') {
+      break;
+    }
+    ++begin;
+  }
+  if (begin == end) {
+    return;
+  }
+
+  std::size_t last_begin = begin;
+  uchar_type last_label = keyset.keys(begin, depth);
+  while (++begin < end) {
+    uchar_type label = keyset.keys(begin, depth);
+    if (label != last_label) {
+      build_from_keyset(keyset, last_begin, begin,
+          depth + 1, offset ^ last_label);
+      last_begin = begin;
+      last_label = keyset.keys(begin, depth);
+    }
+  }
+  build_from_keyset(keyset, last_begin, end, depth + 1, offset ^ last_label);
+}
+
+template <typename T>
+id_type DoubleArrayBuilder::arrange_from_keyset(const Keyset<T> &keyset,
+    std::size_t begin, std::size_t end, std::size_t depth, id_type dic_id) {
+  labels_.resize(0);
+
+  value_type value = -1;
+  for (std::size_t i = begin; i < end; ++i) {
+    uchar_type label = keyset.keys(i, depth);
+    if (label == '\0') {
+      if (keyset.has_lengths() && depth < keyset.lengths(i)) {
+        DARTS_THROW("failed to build double-array: "
+            "invalid null character");
+      } else if (keyset.values(i) < 0) {
+        DARTS_THROW("failed to build double-array: negative value");
+      }
+
+      if (value == -1) {
+        value = keyset.values(i);
+      }
+      if (progress_func_ != NULL) {
+        progress_func_(i + 1, keyset.num_keys() + 1);
+      }
+    }
+
+    if (labels_.empty()) {
+      labels_.append(label);
+    } else if (label != labels_[labels_.size() - 1]) {
+      if (label < labels_[labels_.size() - 1]) {
+        DARTS_THROW("failed to build double-array: wrong key order");
+      }
+      labels_.append(label);
+    }
+  }
+
+  id_type offset = find_valid_offset(dic_id);
+  units_[dic_id].set_offset(dic_id ^ offset);
+
+  for (std::size_t i = 0; i < labels_.size(); ++i) {
+    id_type dic_child_id = offset ^ labels_[i];
+    reserve_id(dic_child_id);
+    if (labels_[i] == '\0') {
+      units_[dic_id].set_has_leaf(true);
+      units_[dic_child_id].set_value(value);
+    } else {
+      units_[dic_child_id].set_label(labels_[i]);
+    }
+  }
+  extras(offset).set_is_used(true);
+
+  return offset;
+}
+
+inline id_type DoubleArrayBuilder::find_valid_offset(id_type id) const {
+  if (extras_head_ >= units_.size()) {
+    return units_.size() | (id & LOWER_MASK);
+  }
+
+  id_type unfixed_id = extras_head_;
+  do {
+    id_type offset = unfixed_id ^ labels_[0];
+    if (is_valid_offset(id, offset)) {
+      return offset;
+    }
+    unfixed_id = extras(unfixed_id).next();
+  } while (unfixed_id != extras_head_);
+
+  return units_.size() | (id & LOWER_MASK);
+}
+
+inline bool DoubleArrayBuilder::is_valid_offset(id_type id,
+    id_type offset) const {
+  if (extras(offset).is_used()) {
+    return false;
+  }
+
+  id_type rel_offset = id ^ offset;
+  if ((rel_offset & LOWER_MASK) && (rel_offset & UPPER_MASK)) {
+    return false;
+  }
+
+  for (std::size_t i = 1; i < labels_.size(); ++i) {
+    if (extras(offset ^ labels_[i]).is_fixed()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+inline void DoubleArrayBuilder::reserve_id(id_type id) {
+  if (id >= units_.size()) {
+    expand_units();
+  }
+
+  if (id == extras_head_) {
+    extras_head_ = extras(id).next();
+    if (extras_head_ == id) {
+      extras_head_ = units_.size();
+    }
+  }
+  extras(extras(id).prev()).set_next(extras(id).next());
+  extras(extras(id).next()).set_prev(extras(id).prev());
+  extras(id).set_is_fixed(true);
+}
+
+inline void DoubleArrayBuilder::expand_units() {
+  id_type src_num_units = units_.size();
+  id_type src_num_blocks = num_blocks();
+
+  id_type dest_num_units = src_num_units + BLOCK_SIZE;
+  id_type dest_num_blocks = src_num_blocks + 1;
+
+  if (dest_num_blocks > NUM_EXTRA_BLOCKS) {
+    fix_block(src_num_blocks - NUM_EXTRA_BLOCKS);
+  }
+
+  units_.resize(dest_num_units);
+
+  if (dest_num_blocks > NUM_EXTRA_BLOCKS) {
+    for (std::size_t id = src_num_units; id < dest_num_units; ++id) {
+      extras(id).set_is_used(false);
+      extras(id).set_is_fixed(false);
+    }
+  }
+
+  for (id_type i = src_num_units + 1; i < dest_num_units; ++i) {
+    extras(i - 1).set_next(i);
+    extras(i).set_prev(i - 1);
+  }
+
+  extras(src_num_units).set_prev(dest_num_units - 1);
+  extras(dest_num_units - 1).set_next(src_num_units);
+
+  extras(src_num_units).set_prev(extras(extras_head_).prev());
+  extras(dest_num_units - 1).set_next(extras_head_);
+
+  extras(extras(extras_head_).prev()).set_next(src_num_units);
+  extras(extras_head_).set_prev(dest_num_units - 1);
+}
+
+inline void DoubleArrayBuilder::fix_all_blocks() {
+  id_type begin = 0;
+  if (num_blocks() > NUM_EXTRA_BLOCKS) {
+    begin = num_blocks() - NUM_EXTRA_BLOCKS;
+  }
+  id_type end = num_blocks();
+
+  for (id_type block_id = begin; block_id != end; ++block_id) {
+    fix_block(block_id);
+  }
+}
+
+inline void DoubleArrayBuilder::fix_block(id_type block_id) {
+  id_type begin = block_id * BLOCK_SIZE;
+  id_type end = begin + BLOCK_SIZE;
+
+  id_type unused_offset = 0;
+  for (id_type offset = begin; offset != end; ++offset) {
+    if (!extras(offset).is_used()) {
+      unused_offset = offset;
+      break;
+    }
+  }
+
+  for (id_type id = begin; id != end; ++id) {
+    if (!extras(id).is_fixed()) {
+      reserve_id(id);
+      units_[id].set_label(static_cast<uchar_type>(id ^ unused_offset));
+    }
+  }
+}
+
+} // namespace Details
+
+//
+// Member function build() of DoubleArrayImpl.
+//
+
+template <typename A, typename B, typename T, typename C>
+int DoubleArrayImpl<A, B, T, C>::build(std::size_t num_keys,
+    const key_type * const *keys, const std::size_t *lengths,
+    const value_type *values, Details::progress_func_type progress_func) {
+  Details::Keyset<value_type> keyset(num_keys, keys, lengths, values);
+
+  Details::DoubleArrayBuilder builder(progress_func);
+  builder.build(keyset);
+
+  std::size_t size = 0;
+  unit_type *buf = NULL;
+  builder.copy(&size, &buf);
+
+  clear();
+
+  size_ = size;
+  array_ = buf;
+  buf_ = buf;
+
+  if (progress_func != NULL) {
+    progress_func(num_keys + 1, num_keys + 1);
+  }
+
+  return 0;
+}
+
+} // namespace Darts
+
+#undef DARTS_INT_TO_STR
+#undef DARTS_LINE_TO_STR
+#undef DARTS_LINE_STR
+#undef DARTS_THROW
+
+#endif // DARTS_H_
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/LICENSE b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..07394df7c3732d308872d5c64e337c8739e4c017
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/LICENSE
@@ -0,0 +1,24 @@
+This is the esaxx copyright.
+
+Copyright (c) 2010 Daisuke Okanohara All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
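The next two files vendor esaxx (enhanced suffix array construction) and the sais-lite induced-sorting suffix array builder it calls into. As a reader aid, here is a minimal sketch of calling the `esaxx()` entry point declared below (assumes plain byte input with alphabet size k = 256; hypothetical driver, not part of the diff):

```cpp
#include <iostream>
#include <string>
#include <vector>

#include "esa.hxx"  // the header added by this diff

int main() {
  std::string text = "abracadabra";  // all characters must lie in [0, k)
  int n = static_cast<int>(text.size());
  std::vector<int> SA(n), L(n), R(n), D(n);
  int node_num = 0;

  // Builds the suffix array via saisxx() and then the internal-node triples.
  if (esaxx(text.begin(), SA.begin(), L.begin(), R.begin(), D.begin(),
            n, 256, node_num) != 0) {
    return 1;
  }

  // Internal node i spans suffixes SA[L[i]..R[i]-1] at string depth D[i].
  for (int i = 0; i < node_num; ++i) {
    std::cout << L[i] << "\t" << R[i] << "\t" << D[i] << "\n";
  }
  return 0;
}
```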
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/esa.hxx b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/esa.hxx
new file mode 100644
index 0000000000000000000000000000000000000000..acb5c7a1bffe52ad8db6995d0f8cd05d90e7c508
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/esa.hxx
@@ -0,0 +1,125 @@
+/*
+ * esa.hxx
+ * Copyright (c) 2010 Daisuke Okanohara All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _ESA_HXX
+#define _ESA_HXX
+
+#include <vector>
+#include <utility>
+#include <cassert>
+#include "sais.hxx"
+
+namespace esaxx_private {
+template<typename string_type, typename sarray_type, typename index_type>
+index_type suffixtree(string_type T, sarray_type SA, sarray_type L, sarray_type R, sarray_type D, index_type n){
+  if (n == 0){
+    return 0;
+  }
+  sarray_type Psi = L;
+  Psi[SA[0]] = SA[n-1];
+  for (index_type i = 1; i < n; ++i){
+    Psi[SA[i]] = SA[i-1];
+  }
+
+  // Compare at most 2n log n characters. Practically fastest
+  // "Permuted Longest-Common-Prefix Array", Juha Karkkainen, CPM 09
+  sarray_type PLCP = R;
+  index_type h = 0;
+  for (index_type i = 0; i < n; ++i){
+    index_type j = Psi[i];
+    while (i+h < n && j+h < n &&
+           T[i+h] == T[j+h]){
+      ++h;
+    }
+    PLCP[i] = h;
+    if (h > 0) --h;
+  }
+
+  sarray_type H = L;
+  for (index_type i = 0; i < n; ++i){
+    H[i] = PLCP[SA[i]];
+  }
+  H[0] = -1;
+
+  std::vector<std::pair<index_type, index_type> > S;
+  S.push_back(std::make_pair((index_type)-1, (index_type)-1));
+  size_t nodeNum = 0;
+  for (index_type i = 0; ; ++i){
+    std::pair<index_type, index_type> cur (i, (i == n) ? -1 : H[i]);
+    std::pair<index_type, index_type> cand(S.back());
+    while (cand.second > cur.second){
+      if (i - cand.first > 1){
+        L[nodeNum] = cand.first;
+        R[nodeNum] = i;
+        D[nodeNum] = cand.second;
+        ++nodeNum;
+      }
+      cur.first = cand.first;
+      S.pop_back();
+      cand = S.back();
+    }
+    if (cand.second < cur.second){
+      S.push_back(cur);
+    }
+    if (i == n) break;
+    S.push_back(std::make_pair(i, n - SA[i] + 1));
+  }
+  return nodeNum;
+}
+}
+
+/**
+ * @brief Build an enhanced suffix array of a given string in linear time
+ * For an input text T, esaxx() builds an enhanced suffix array in linear time.
+ * The i-th internal node is represented as a triple (L[i], R[i], D[i]);
+ * L[i] and R[i] are the left/right boundaries of the suffix array as SA[L[i]...R[i]-1],
+ * and D[i] is the depth of the internal node.
+ * The number of internal nodes is at most N-1, and the actual number is
+ * returned via nodeNum.
+ * @param T[0...n-1] The input string. (random access iterator)
(random access iterator) + * @param SA[0...n-1] The output suffix array (random access iterator) + * @param L[0...n-1] The output left boundary of internal node (random access iterator) + * @param R[0...n-1] The output right boundary of internal node (random access iterator) + * @param D[0...n-1] The output depth of internal node (random access iterator) + * @param n The length of the input string + * @param k The alphabet size + * @pram nodeNum The output the number of internal node + * @return 0 if succeded, -1 or -2 otherwise + */ + +template +int esaxx(string_type T, sarray_type SA, sarray_type L, sarray_type R, sarray_type D, + index_type n, index_type k, index_type& nodeNum) { + if ((n < 0) || (k <= 0)) return -1; + int err = saisxx(T, SA, n, k); + if (err != 0){ + return err; + } + nodeNum = esaxx_private::suffixtree(T, SA, L, R, D, n); + return 0; +} + + +#endif // _ESA_HXX diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/sais.hxx b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/sais.hxx new file mode 100644 index 0000000000000000000000000000000000000000..20e69dfec46704dbf701e32a25852ad862a341de --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/sais.hxx @@ -0,0 +1,364 @@ +/* + * sais.hxx for sais-lite + * Copyright (c) 2008-2009 Yuta Mori All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SAIS_HXX +#define _SAIS_HXX 1 +#ifdef __cplusplus + +#ifdef __INTEL_COMPILER +#pragma warning(disable : 383 981 1418) +// for icc 64-bit +//#define __builtin_vsnprintf(a, b, c, d) __builtin_vsnprintf(a, b, c, (char *)d) +#endif + +#include +#ifdef _OPENMP +# include +#endif + +namespace saisxx_private { + +/* find the start or end of each bucket */ +template +void +getCounts(const string_type T, bucket_type C, index_type n, index_type k) { +#ifdef _OPENMP + bucket_type D; + index_type i, j, p, sum, first, last; + int thnum, maxthreads = omp_get_max_threads(); +#pragma omp parallel default(shared) private(D, i, thnum, first, last) + { + thnum = omp_get_thread_num(); + D = C + thnum * k; + first = n / maxthreads * thnum; + last = (thnum < (maxthreads - 1)) ? 
n / maxthreads * (thnum + 1) : n; + for(i = 0; i < k; ++i) { D[i] = 0; } + for(i = first; i < last; ++i) { ++D[T[i]]; } + } + if(1 < maxthreads) { +#pragma omp parallel for default(shared) private(i, j, p, sum) + for(i = 0; i < k; ++i) { + for(j = 1, p = i + k, sum = C[i]; j < maxthreads; ++j, p += k) { + sum += C[p]; + } + C[i] = sum; + } + } +#else + index_type i; + for(i = 0; i < k; ++i) { C[i] = 0; } + for(i = 0; i < n; ++i) { ++C[T[i]]; } +#endif +} +template +void +getBuckets(const bucket_type C, bucket_type B, index_type k, bool end) { + index_type i, sum = 0; + if(end) { for(i = 0; i < k; ++i) { sum += C[i]; B[i] = sum; } } + else { for(i = 0; i < k; ++i) { sum += C[i]; B[i] = sum - C[i]; } } +} + +/* compute SA and BWT */ +template +void +induceSA(string_type T, sarray_type SA, bucket_type C, bucket_type B, + index_type n, index_type k) { +typedef typename std::iterator_traits::value_type char_type; + sarray_type b; + index_type i, j; + char_type c0, c1; + /* compute SAl */ + if(C == B) { getCounts(T, C, n, k); } + getBuckets(C, B, k, false); /* find starts of buckets */ + b = SA + B[c1 = T[j = n - 1]]; + *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j; + for(i = 0; i < n; ++i) { + j = SA[i], SA[i] = ~j; + if(0 < j) { + if((c0 = T[--j]) != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; } + *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j; + } + } + /* compute SAs */ + if(C == B) { getCounts(T, C, n, k); } + getBuckets(C, B, k, true); /* find ends of buckets */ + for(i = n - 1, b = SA + B[c1 = 0]; 0 <= i; --i) { + if(0 < (j = SA[i])) { + if((c0 = T[--j]) != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; } + *--b = ((j == 0) || (T[j - 1] > c1)) ? ~j : j; + } else { + SA[i] = ~j; + } + } +} +template +int +computeBWT(string_type T, sarray_type SA, bucket_type C, bucket_type B, + index_type n, index_type k) { +typedef typename std::iterator_traits::value_type char_type; + sarray_type b; + index_type i, j, pidx = -1; + char_type c0, c1; + /* compute SAl */ + if(C == B) { getCounts(T, C, n, k); } + getBuckets(C, B, k, false); /* find starts of buckets */ + b = SA + B[c1 = T[j = n - 1]]; + *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j; + for(i = 0; i < n; ++i) { + if(0 < (j = SA[i])) { + SA[i] = ~(c0 = T[--j]); + if(c0 != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; } + *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j; + } else if(j != 0) { + SA[i] = ~j; + } + } + /* compute SAs */ + if(C == B) { getCounts(T, C, n, k); } + getBuckets(C, B, k, true); /* find ends of buckets */ + for(i = n - 1, b = SA + B[c1 = 0]; 0 <= i; --i) { + if(0 < (j = SA[i])) { + SA[i] = (c0 = T[--j]); + if(c0 != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; } + *--b = ((0 < j) && (T[j - 1] > c1)) ? 
~((index_type)T[j - 1]) : j; + } else if(j != 0) { + SA[i] = ~j; + } else { + pidx = i; + } + } + return pidx; +} + +/* find the suffix array SA of T[0..n-1] in {0..k}^n + use a working space (excluding s and SA) of at most 2n+O(1) for a constant alphabet */ +template +int +suffixsort(string_type T, sarray_type SA, + index_type fs, index_type n, index_type k, + bool isbwt) { +typedef typename std::iterator_traits::value_type char_type; + sarray_type RA; + index_type i, j, m, p, q, plen, qlen, name, pidx = 0; + bool diff; + int c; +#ifdef _OPENMP + int maxthreads = omp_get_max_threads(); +#else +# define maxthreads 1 +#endif + char_type c0, c1; + + /* stage 1: reduce the problem by at least 1/2 + sort all the S-substrings */ + if(fs < (maxthreads * k)) { + index_type *C, *B; + if((C = new index_type[maxthreads * k]) == 0) { return -2; } + B = (1 < maxthreads) ? C + k : C; + getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */ +#ifdef _OPENMP +#pragma omp parallel for default(shared) private(i) +#endif + for(i = 0; i < n; ++i) { SA[i] = 0; } + for(i = n - 2, c = 0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) { + if((c0 = T[i]) < (c1 + c)) { c = 1; } + else if(c != 0) { SA[--B[c1]] = i + 1, c = 0; } + } + induceSA(T, SA, C, B, n, k); + delete [] C; + } else { + sarray_type C, B; + C = SA + n; + B = ((1 < maxthreads) || (k <= (fs - k))) ? C + k : C; + getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */ +#ifdef _OPENMP +#pragma omp parallel for default(shared) private(i) +#endif + for(i = 0; i < n; ++i) { SA[i] = 0; } + for(i = n - 2, c = 0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) { + if((c0 = T[i]) < (c1 + c)) { c = 1; } + else if(c != 0) { SA[--B[c1]] = i + 1, c = 0; } + } + induceSA(T, SA, C, B, n, k); + } + + /* compact all the sorted substrings into the first m items of SA + 2*m must be not larger than n (proveable) */ +#ifdef _OPENMP +#pragma omp parallel for default(shared) private(i, j, p, c0, c1) + for(i = 0; i < n; ++i) { + p = SA[i]; + if((0 < p) && (T[p - 1] > (c0 = T[p]))) { + for(j = p + 1; (j < n) && (c0 == (c1 = T[j])); ++j) { } + if((j < n) && (c0 < c1)) { SA[i] = ~p; } + } + } + for(i = 0, m = 0; i < n; ++i) { if((p = SA[i]) < 0) { SA[m++] = ~p; } } +#else + for(i = 0, m = 0; i < n; ++i) { + p = SA[i]; + if((0 < p) && (T[p - 1] > (c0 = T[p]))) { + for(j = p + 1; (j < n) && (c0 == (c1 = T[j])); ++j) { } + if((j < n) && (c0 < c1)) { SA[m++] = p; } + } + } +#endif + j = m + (n >> 1); +#ifdef _OPENMP +#pragma omp parallel for default(shared) private(i) +#endif + for(i = m; i < j; ++i) { SA[i] = 0; } /* init the name array buffer */ + /* store the length of all substrings */ + for(i = n - 2, j = n, c = 0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) { + if((c0 = T[i]) < (c1 + c)) { c = 1; } + else if(c != 0) { SA[m + ((i + 1) >> 1)] = j - i - 1; j = i + 1; c = 0; } + } + /* find the lexicographic names of all substrings */ + for(i = 0, name = 0, q = n, qlen = 0; i < m; ++i) { + p = SA[i], plen = SA[m + (p >> 1)], diff = true; + if(plen == qlen) { + for(j = 0; (j < plen) && (T[p + j] == T[q + j]); ++j) { } + if(j == plen) { diff = false; } + } + if(diff != false) { ++name, q = p, qlen = plen; } + SA[m + (p >> 1)] = name; + } + + /* stage 2: solve the reduced problem + recurse if names are not yet unique */ + if(name < m) { + RA = SA + n + fs - m; + for(i = m + (n >> 1) - 1, j = m - 1; m <= i; --i) { + if(SA[i] != 0) { RA[j--] = SA[i] - 1; } + } + if(suffixsort(RA, SA, fs + n - m * 2, m, name, false) != 0) { return -2; } + for(i = n - 2, j = m - 1, c = 
0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) { + if((c0 = T[i]) < (c1 + c)) { c = 1; } + else if(c != 0) { RA[j--] = i + 1, c = 0; } /* get p1 */ + } +#ifdef _OPENMP +#pragma omp parallel for default(shared) private(i) +#endif + for(i = 0; i < m; ++i) { SA[i] = RA[SA[i]]; } /* get index in s */ + } + + /* stage 3: induce the result for the original problem */ + if(fs < (maxthreads * k)) { + index_type *B, *C; + if((C = new index_type[maxthreads * k]) == 0) { return -2; } + B = (1 < maxthreads) ? C + k : C; + /* put all left-most S characters into their buckets */ + getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */ +#ifdef _OPENMP +#pragma omp parallel for default(shared) private(i) +#endif + for(i = m; i < n; ++i) { SA[i] = 0; } /* init SA[m..n-1] */ + for(i = m - 1; 0 <= i; --i) { + j = SA[i], SA[i] = 0; + SA[--B[T[j]]] = j; + } + if(isbwt == false) { induceSA(T, SA, C, B, n, k); } + else { pidx = computeBWT(T, SA, C, B, n, k); } + delete [] C; + } else { + sarray_type C, B; + C = SA + n; + B = ((1 < maxthreads) || (k <= (fs - k))) ? C + k : C; + /* put all left-most S characters into their buckets */ + getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */ +#ifdef _OPENMP +#pragma omp parallel for default(shared) private(i) +#endif + for(i = m; i < n; ++i) { SA[i] = 0; } /* init SA[m..n-1] */ + for(i = m - 1; 0 <= i; --i) { + j = SA[i], SA[i] = 0; + SA[--B[T[j]]] = j; + } + if(isbwt == false) { induceSA(T, SA, C, B, n, k); } + else { pidx = computeBWT(T, SA, C, B, n, k); } + } + + return pidx; +#ifndef _OPENMP +# undef maxthreads +#endif +} + +} /* namespace saisxx_private */ + + +/** + * @brief Constructs the suffix array of a given string in linear time. + * @param T[0..n-1] The input string. (random access iterator) + * @param SA[0..n-1] The output array of suffixes. (random access iterator) + * @param n The length of the given string. + * @param k The alphabet size. + * @return 0 if no error occurred, -1 or -2 otherwise. + */ +template +int +saisxx(string_type T, sarray_type SA, index_type n, index_type k = 256) { + int err; + if((n < 0) || (k <= 0)) { return -1; } + if(n <= 1) { if(n == 1) { SA[0] = 0; } return 0; } + try { err = saisxx_private::suffixsort(T, SA, 0, n, k, false); } + catch(...) { err = -2; } + return err; +} + +/** + * @brief Constructs the burrows-wheeler transformed string of a given string in linear time. + * @param T[0..n-1] The input string. (random access iterator) + * @param U[0..n-1] The output string. (random access iterator) + * @param A[0..n-1] The temporary array. (random access iterator) + * @param n The length of the given string. + * @param k The alphabet size. + * @return The primary index if no error occurred, -1 or -2 otherwise. + */ +template +index_type +saisxx_bwt(string_type T, string_type U, sarray_type A, index_type n, index_type k = 256) { +typedef typename std::iterator_traits::value_type char_type; + index_type i, pidx; + if((n < 0) || (k <= 0)) { return -1; } + if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } + try { + pidx = saisxx_private::suffixsort(T, A, 0, n, k, true); + if(0 <= pidx) { + U[0] = T[n - 1]; + for(i = 0; i < pidx; ++i) { U[i + 1] = (char_type)A[i]; } + for(i += 1; i < n; ++i) { U[i] = (char_type)A[i]; } + pidx += 1; + } + } catch(...) 
{ pidx = -2; } + return pidx; +} + + +#endif /* __cplusplus */ +#endif /* _SAIS_HXX */ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/LICENSE b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..19b305b00060a774a9180fb916c14b49edb2008f --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/LICENSE @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arena.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arena.cc new file mode 100644 index 0000000000000000000000000000000000000000..c117c9e528146a582a0b3c233fcd815ec8c5f095 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arena.cc @@ -0,0 +1,415 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include +#include + + +#ifdef ADDRESS_SANITIZER +#include +#endif // ADDRESS_SANITIZER + +#include + +namespace google { +static const size_t kMinCleanupListElements = 8; +static const size_t kMaxCleanupListElements = 64; // 1kB on 64-bit. + +namespace protobuf { +namespace internal { + + +std::atomic ArenaImpl::lifecycle_id_generator_; +#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL) +ArenaImpl::ThreadCache& ArenaImpl::thread_cache() { + static internal::ThreadLocalStorage* thread_cache_ = + new internal::ThreadLocalStorage(); + return *thread_cache_->Get(); +} +#elif defined(PROTOBUF_USE_DLLS) +ArenaImpl::ThreadCache& ArenaImpl::thread_cache() { + static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL }; + return thread_cache_; +} +#else +GOOGLE_THREAD_LOCAL ArenaImpl::ThreadCache ArenaImpl::thread_cache_ = {-1, NULL}; +#endif + +void ArenaImpl::Init() { + lifecycle_id_ = + lifecycle_id_generator_.fetch_add(1, std::memory_order_relaxed); + hint_.store(nullptr, std::memory_order_relaxed); + threads_.store(nullptr, std::memory_order_relaxed); + + if (initial_block_) { + // Thread which calls Init() owns the first block. This allows the + // single-threaded case to allocate on the first block without having to + // perform atomic operations. + new (initial_block_) Block(options_.initial_block_size, NULL); + SerialArena* serial = + SerialArena::New(initial_block_, &thread_cache(), this); + serial->set_next(NULL); + threads_.store(serial, std::memory_order_relaxed); + space_allocated_.store(options_.initial_block_size, + std::memory_order_relaxed); + CacheSerialArena(serial); + } else { + space_allocated_.store(0, std::memory_order_relaxed); + } +} + +ArenaImpl::~ArenaImpl() { + // Have to do this in a first pass, because some of the destructors might + // refer to memory in other blocks. + CleanupList(); + FreeBlocks(); +} + +uint64 ArenaImpl::Reset() { + // Have to do this in a first pass, because some of the destructors might + // refer to memory in other blocks. + CleanupList(); + uint64 space_allocated = FreeBlocks(); + Init(); + + return space_allocated; +} + +ArenaImpl::Block* ArenaImpl::NewBlock(Block* last_block, size_t min_bytes) { + size_t size; + if (last_block) { + // Double the current block size, up to a limit. + size = std::min(2 * last_block->size(), options_.max_block_size); + } else { + size = options_.start_block_size; + } + // Verify that min_bytes + kBlockHeaderSize won't overflow. 
+ GOOGLE_CHECK_LE(min_bytes, std::numeric_limits::max() - kBlockHeaderSize); + size = std::max(size, kBlockHeaderSize + min_bytes); + + void* mem = options_.block_alloc(size); + Block* b = new (mem) Block(size, last_block); + space_allocated_.fetch_add(size, std::memory_order_relaxed); + return b; +} + +ArenaImpl::Block::Block(size_t size, Block* next) + : next_(next), pos_(kBlockHeaderSize), size_(size) {} + +GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE +void ArenaImpl::SerialArena::AddCleanupFallback(void* elem, + void (*cleanup)(void*)) { + size_t size = cleanup_ ? cleanup_->size * 2 : kMinCleanupListElements; + size = std::min(size, kMaxCleanupListElements); + size_t bytes = internal::AlignUpTo8(CleanupChunk::SizeOf(size)); + CleanupChunk* list = reinterpret_cast(AllocateAligned(bytes)); + list->next = cleanup_; + list->size = size; + + cleanup_ = list; + cleanup_ptr_ = &list->nodes[0]; + cleanup_limit_ = &list->nodes[size]; + + AddCleanup(elem, cleanup); +} + +GOOGLE_PROTOBUF_ATTRIBUTE_FUNC_ALIGN(32) +void* ArenaImpl::AllocateAligned(size_t n) { + SerialArena* arena; + if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) { + return arena->AllocateAligned(n); + } else { + return AllocateAlignedFallback(n); + } +} + +void* ArenaImpl::AllocateAlignedAndAddCleanup(size_t n, + void (*cleanup)(void*)) { + SerialArena* arena; + if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) { + return arena->AllocateAlignedAndAddCleanup(n, cleanup); + } else { + return AllocateAlignedAndAddCleanupFallback(n, cleanup); + } +} + +void ArenaImpl::AddCleanup(void* elem, void (*cleanup)(void*)) { + SerialArena* arena; + if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) { + arena->AddCleanup(elem, cleanup); + } else { + return AddCleanupFallback(elem, cleanup); + } +} + +GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE +void* ArenaImpl::AllocateAlignedFallback(size_t n) { + return GetSerialArena()->AllocateAligned(n); +} + +GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE +void* ArenaImpl::AllocateAlignedAndAddCleanupFallback(size_t n, + void (*cleanup)(void*)) { + return GetSerialArena()->AllocateAlignedAndAddCleanup(n, cleanup); +} + +GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE +void ArenaImpl::AddCleanupFallback(void* elem, void (*cleanup)(void*)) { + GetSerialArena()->AddCleanup(elem, cleanup); +} + +inline GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE +bool ArenaImpl::GetSerialArenaFast(ArenaImpl::SerialArena** arena) { + // If this thread already owns a block in this arena then try to use that. + // This fast path optimizes the case where multiple threads allocate from the + // same arena. + ThreadCache* tc = &thread_cache(); + if (GOOGLE_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id_)) { + *arena = tc->last_serial_arena; + return true; + } + + // Check whether we own the last accessed SerialArena on this arena. This + // fast path optimizes the case where a single thread uses multiple arenas. + SerialArena* serial = hint_.load(std::memory_order_acquire); + if (GOOGLE_PREDICT_TRUE(serial != NULL && serial->owner() == tc)) { + *arena = serial; + return true; + } + + return false; +} + +ArenaImpl::SerialArena* ArenaImpl::GetSerialArena() { + SerialArena* arena; + if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) { + return arena; + } else { + return GetSerialArenaFallback(&thread_cache()); + } +} + +GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE +void* ArenaImpl::SerialArena::AllocateAlignedFallback(size_t n) { + // Sync back to current's pos. 
+ head_->set_pos(head_->size() - (limit_ - ptr_)); + + head_ = arena_->NewBlock(head_, n); + ptr_ = head_->Pointer(head_->pos()); + limit_ = head_->Pointer(head_->size()); + +#ifdef ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_); +#endif // ADDRESS_SANITIZER + + return AllocateAligned(n); +} + +uint64 ArenaImpl::SpaceAllocated() const { + return space_allocated_.load(std::memory_order_relaxed); +} + +uint64 ArenaImpl::SpaceUsed() const { + SerialArena* serial = threads_.load(std::memory_order_acquire); + uint64 space_used = 0; + for ( ; serial; serial = serial->next()) { + space_used += serial->SpaceUsed(); + } + return space_used; +} + +uint64 ArenaImpl::SerialArena::SpaceUsed() const { + // Get current block's size from ptr_ (since we can't trust head_->pos(). + uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize); + // Get subsequent block size from b->pos(). + for (Block* b = head_->next(); b; b = b->next()) { + space_used += (b->pos() - kBlockHeaderSize); + } + // Remove the overhead of the SerialArena itself. + space_used -= kSerialArenaSize; + return space_used; +} + +uint64 ArenaImpl::FreeBlocks() { + uint64 space_allocated = 0; + // By omitting an Acquire barrier we ensure that any user code that doesn't + // properly synchronize Reset() or the destructor will throw a TSAN warning. + SerialArena* serial = threads_.load(std::memory_order_relaxed); + + while (serial) { + // This is inside a block we are freeing, so we need to read it now. + SerialArena* next = serial->next(); + space_allocated += ArenaImpl::SerialArena::Free(serial, initial_block_, + options_.block_dealloc); + // serial is dead now. + serial = next; + } + + return space_allocated; +} + +uint64 ArenaImpl::SerialArena::Free(ArenaImpl::SerialArena* serial, + Block* initial_block, + void (*block_dealloc)(void*, size_t)) { + uint64 space_allocated = 0; + + // We have to be careful in this function, since we will be freeing the Block + // that contains this SerialArena. Be careful about accessing |serial|. + + for (Block* b = serial->head_; b; ) { + // This is inside the block we are freeing, so we need to read it now. + Block* next_block = b->next(); + space_allocated += (b->size()); + +#ifdef ADDRESS_SANITIZER + // This memory was provided by the underlying allocator as unpoisoned, so + // return it in an unpoisoned state. + ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size()); +#endif // ADDRESS_SANITIZER + + if (b != initial_block) { + block_dealloc(b, b->size()); + } + + b = next_block; + } + + return space_allocated; +} + +void ArenaImpl::CleanupList() { + // By omitting an Acquire barrier we ensure that any user code that doesn't + // properly synchronize Reset() or the destructor will throw a TSAN warning. + SerialArena* serial = threads_.load(std::memory_order_relaxed); + + for ( ; serial; serial = serial->next()) { + serial->CleanupList(); + } +} + +void ArenaImpl::SerialArena::CleanupList() { + if (cleanup_ != NULL) { + CleanupListFallback(); + } +} + +void ArenaImpl::SerialArena::CleanupListFallback() { + // Cleanup newest chunk: ptrs give us length. + size_t n = cleanup_ptr_ - &cleanup_->nodes[0]; + CleanupNode* node = cleanup_ptr_; + for (size_t i = 0; i < n; i++) { + --node; + node->cleanup(node->elem); + } + + // Cleanup older chunks, which are known to be full. 
+ CleanupChunk* list = cleanup_->next; + while (list) { + size_t n = list->size; + CleanupNode* node = &list->nodes[list->size]; + for (size_t i = 0; i < n; i++) { + --node; + node->cleanup(node->elem); + } + list = list->next; + } +} + +ArenaImpl::SerialArena* ArenaImpl::SerialArena::New(Block* b, void* owner, + ArenaImpl* arena) { + GOOGLE_DCHECK_EQ(b->pos(), kBlockHeaderSize); // Should be a fresh block + GOOGLE_DCHECK_LE(kBlockHeaderSize + kSerialArenaSize, b->size()); + SerialArena* serial = + reinterpret_cast(b->Pointer(kBlockHeaderSize)); + b->set_pos(kBlockHeaderSize + kSerialArenaSize); + serial->arena_ = arena; + serial->owner_ = owner; + serial->head_ = b; + serial->ptr_ = b->Pointer(b->pos()); + serial->limit_ = b->Pointer(b->size()); + serial->cleanup_ = NULL; + serial->cleanup_ptr_ = NULL; + serial->cleanup_limit_ = NULL; + return serial; +} + +GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE +ArenaImpl::SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) { + // Look for this SerialArena in our linked list. + SerialArena* serial = threads_.load(std::memory_order_acquire); + for ( ; serial; serial = serial->next()) { + if (serial->owner() == me) { + break; + } + } + + if (!serial) { + // This thread doesn't have any SerialArena, which also means it doesn't + // have any blocks yet. So we'll allocate its first block now. + Block* b = NewBlock(NULL, kSerialArenaSize); + serial = SerialArena::New(b, me, this); + + SerialArena* head = threads_.load(std::memory_order_relaxed); + do { + serial->set_next(head); + } while (!threads_.compare_exchange_weak( + head, serial, std::memory_order_release, std::memory_order_relaxed)); + } + + CacheSerialArena(serial); + return serial; +} + +} // namespace internal + +void Arena::CallDestructorHooks() { + uint64 space_allocated = impl_.SpaceAllocated(); + // Call the reset hook + if (on_arena_reset_ != NULL) { + on_arena_reset_(this, hooks_cookie_, space_allocated); + } + + // Call the destruction hook + if (on_arena_destruction_ != NULL) { + on_arena_destruction_(this, hooks_cookie_, space_allocated); + } +} + +void Arena::OnArenaAllocation(const std::type_info* allocated_type, + size_t n) const { + if (on_arena_allocation_ != NULL) { + on_arena_allocation_(allocated_type, n, hooks_cookie_); + } +} + +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arenastring.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arenastring.cc new file mode 100644 index 0000000000000000000000000000000000000000..7f33a0c8658dc80575a4e72fb32b3f1e89b1b775 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arenastring.cc @@ -0,0 +1,43 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The ArenaString implementation is not included in the open-source release. Do +// not include this file in the distribution. + +#include + +namespace google { +namespace protobuf { +namespace internal { + + +} // namespace internal +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/bytestream.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/bytestream.cc new file mode 100644 index 0000000000000000000000000000000000000000..f4af6a50abef0a513e5ad41a3c72990d94b541ac --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/bytestream.cc @@ -0,0 +1,196 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include + +#include +#include + +namespace google { +namespace protobuf { +namespace strings { + +void ByteSource::CopyTo(ByteSink* sink, size_t n) { + while (n > 0) { + StringPiece fragment = Peek(); + if (fragment.empty()) { + GOOGLE_LOG(DFATAL) << "ByteSource::CopyTo() overran input."; + break; + } + std::size_t fragment_size = std::min(n, fragment.size()); + sink->Append(fragment.data(), fragment_size); + Skip(fragment_size); + n -= fragment_size; + } +} + +void ByteSink::Flush() {} + +void UncheckedArrayByteSink::Append(const char* data, size_t n) { + if (data != dest_) { + // Catch cases where the pointer returned by GetAppendBuffer() was modified. + GOOGLE_DCHECK(!(dest_ <= data && data < (dest_ + n))) + << "Append() data[] overlaps with dest_[]"; + memcpy(dest_, data, n); + } + dest_ += n; +} + +CheckedArrayByteSink::CheckedArrayByteSink(char* outbuf, size_t capacity) + : outbuf_(outbuf), capacity_(capacity), size_(0), overflowed_(false) { +} + +void CheckedArrayByteSink::Append(const char* bytes, size_t n) { + size_t available = capacity_ - size_; + if (n > available) { + n = available; + overflowed_ = true; + } + if (n > 0 && bytes != (outbuf_ + size_)) { + // Catch cases where the pointer returned by GetAppendBuffer() was modified. + GOOGLE_DCHECK(!(outbuf_ <= bytes && bytes < (outbuf_ + capacity_))) + << "Append() bytes[] overlaps with outbuf_[]"; + memcpy(outbuf_ + size_, bytes, n); + } + size_ += n; +} + +GrowingArrayByteSink::GrowingArrayByteSink(size_t estimated_size) + : capacity_(estimated_size), + buf_(new char[estimated_size]), + size_(0) { +} + +GrowingArrayByteSink::~GrowingArrayByteSink() { + delete[] buf_; // Just in case the user didn't call GetBuffer. +} + +void GrowingArrayByteSink::Append(const char* bytes, size_t n) { + size_t available = capacity_ - size_; + if (bytes != (buf_ + size_)) { + // Catch cases where the pointer returned by GetAppendBuffer() was modified. + // We need to test for this before calling Expand() which may reallocate. + GOOGLE_DCHECK(!(buf_ <= bytes && bytes < (buf_ + capacity_))) + << "Append() bytes[] overlaps with buf_[]"; + } + if (n > available) { + Expand(n - available); + } + if (n > 0 && bytes != (buf_ + size_)) { + memcpy(buf_ + size_, bytes, n); + } + size_ += n; +} + +char* GrowingArrayByteSink::GetBuffer(size_t* nbytes) { + ShrinkToFit(); + char* b = buf_; + *nbytes = size_; + buf_ = NULL; + size_ = capacity_ = 0; + return b; +} + +void GrowingArrayByteSink::Expand(size_t amount) { // Expand by at least 50%. + size_t new_capacity = std::max(capacity_ + amount, (3 * capacity_) / 2); + char* bigger = new char[new_capacity]; + memcpy(bigger, buf_, size_); + delete[] buf_; + buf_ = bigger; + capacity_ = new_capacity; +} + +void GrowingArrayByteSink::ShrinkToFit() { + // Shrink only if the buffer is large and size_ is less than 3/4 + // of capacity_. 
+ if (capacity_ > 256 && size_ < (3 * capacity_) / 4) { + char* just_enough = new char[size_]; + memcpy(just_enough, buf_, size_); + delete[] buf_; + buf_ = just_enough; + capacity_ = size_; + } +} + +void StringByteSink::Append(const char* data, size_t n) { + dest_->append(data, n); +} + +size_t ArrayByteSource::Available() const { + return input_.size(); +} + +StringPiece ArrayByteSource::Peek() { + return input_; +} + +void ArrayByteSource::Skip(size_t n) { + GOOGLE_DCHECK_LE(n, input_.size()); + input_.remove_prefix(n); +} + +LimitByteSource::LimitByteSource(ByteSource *source, size_t limit) + : source_(source), + limit_(limit) { +} + +size_t LimitByteSource::Available() const { + size_t available = source_->Available(); + if (available > limit_) { + available = limit_; + } + + return available; +} + +StringPiece LimitByteSource::Peek() { + StringPiece piece(source_->Peek()); + if (piece.size() > limit_) { + piece.set(piece.data(), limit_); + } + + return piece; +} + +void LimitByteSource::Skip(size_t n) { + GOOGLE_DCHECK_LE(n, limit_); + source_->Skip(n); + limit_ -= n; +} + +void LimitByteSource::CopyTo(ByteSink *sink, size_t n) { + GOOGLE_DCHECK_LE(n, limit_); + source_->CopyTo(sink, n); + limit_ -= n; +} + +} // namespace strings +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/coded_stream.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/coded_stream.cc new file mode 100644 index 0000000000000000000000000000000000000000..0851ff0cb6547db7b22fb405a348d4a5850832c8 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/coded_stream.cc @@ -0,0 +1,780 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
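// Illustrative aside (not part of the upstream files): the ByteSource and
// ByteSink classes implemented in bytestream.cc above compose directly; a
// minimal hypothetical round trip through the two concrete classes defined
// there looks like this:
//
//   #include <string>
//
//   std::string out;
//   google::protobuf::strings::ArrayByteSource source(StringPiece("hello"));
//   google::protobuf::strings::StringByteSink sink(&out);
//   source.CopyTo(&sink, source.Available());  // out == "hello"
//
// CopyTo() repeatedly Peek()s a fragment, Append()s it to the sink, and
// Skip()s past it, which is exactly the loop at the top of bytestream.cc.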
+ +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// This implementation is heavily optimized to make reads and writes +// of small values (especially varints) as fast as possible. In +// particular, we optimize for the common case that a read or a write +// will not cross the end of the buffer, since we can avoid a lot +// of branching in this case. + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace google { +namespace protobuf { +namespace io { + +namespace { + +static const int kMaxVarintBytes = 10; +static const int kMaxVarint32Bytes = 5; + + +inline bool NextNonEmpty(ZeroCopyInputStream* input, + const void** data, int* size) { + bool success; + do { + success = input->Next(data, size); + } while (success && *size == 0); + return success; +} + +} // namespace + +// CodedInputStream ================================================== + +CodedInputStream::~CodedInputStream() { + if (input_ != NULL) { + BackUpInputToCurrentPosition(); + } +} + +// Static. +int CodedInputStream::default_recursion_limit_ = 100; + + +void CodedOutputStream::EnableAliasing(bool enabled) { + aliasing_enabled_ = enabled && output_->AllowsAliasing(); +} + +void CodedInputStream::BackUpInputToCurrentPosition() { + int backup_bytes = BufferSize() + buffer_size_after_limit_ + overflow_bytes_; + if (backup_bytes > 0) { + input_->BackUp(backup_bytes); + + // total_bytes_read_ doesn't include overflow_bytes_. + total_bytes_read_ -= BufferSize() + buffer_size_after_limit_; + buffer_end_ = buffer_; + buffer_size_after_limit_ = 0; + overflow_bytes_ = 0; + } +} + +inline void CodedInputStream::RecomputeBufferLimits() { + buffer_end_ += buffer_size_after_limit_; + int closest_limit = std::min(current_limit_, total_bytes_limit_); + if (closest_limit < total_bytes_read_) { + // The limit position is in the current buffer. We must adjust + // the buffer size accordingly. + buffer_size_after_limit_ = total_bytes_read_ - closest_limit; + buffer_end_ -= buffer_size_after_limit_; + } else { + buffer_size_after_limit_ = 0; + } +} + +CodedInputStream::Limit CodedInputStream::PushLimit(int byte_limit) { + // Current position relative to the beginning of the stream. + int current_position = CurrentPosition(); + + Limit old_limit = current_limit_; + + // security: byte_limit is possibly evil, so check for negative values + // and overflow. Also check that the new requested limit is before the + // previous limit; otherwise we continue to enforce the previous limit. + if (GOOGLE_PREDICT_TRUE(byte_limit >= 0 && + byte_limit <= INT_MAX - current_position && + byte_limit < current_limit_ - current_position)) { + current_limit_ = current_position + byte_limit; + RecomputeBufferLimits(); + } + + return old_limit; +} + +void CodedInputStream::PopLimit(Limit limit) { + // The limit passed in is actually the *old* limit, which we returned from + // PushLimit(). + current_limit_ = limit; + RecomputeBufferLimits(); + + // We may no longer be at a legitimate message end. ReadTag() needs to be + // called again to find out. + legitimate_message_end_ = false; +} + +std::pair +CodedInputStream::IncrementRecursionDepthAndPushLimit(int byte_limit) { + return std::make_pair(PushLimit(byte_limit), --recursion_budget_); +} + +CodedInputStream::Limit CodedInputStream::ReadLengthAndPushLimit() { + uint32 length; + return PushLimit(ReadVarint32(&length) ? 
length : 0); +} + +bool CodedInputStream::DecrementRecursionDepthAndPopLimit(Limit limit) { + bool result = ConsumedEntireMessage(); + PopLimit(limit); + GOOGLE_DCHECK_LT(recursion_budget_, recursion_limit_); + ++recursion_budget_; + return result; +} + +bool CodedInputStream::CheckEntireMessageConsumedAndPopLimit(Limit limit) { + bool result = ConsumedEntireMessage(); + PopLimit(limit); + return result; +} + +int CodedInputStream::BytesUntilLimit() const { + if (current_limit_ == INT_MAX) return -1; + int current_position = CurrentPosition(); + + return current_limit_ - current_position; +} + +void CodedInputStream::SetTotalBytesLimit(int total_bytes_limit) { + // Make sure the limit isn't already past, since this could confuse other + // code. + int current_position = CurrentPosition(); + total_bytes_limit_ = std::max(current_position, total_bytes_limit); + RecomputeBufferLimits(); +} + +int CodedInputStream::BytesUntilTotalBytesLimit() const { + if (total_bytes_limit_ == INT_MAX) return -1; + return total_bytes_limit_ - CurrentPosition(); +} + +void CodedInputStream::PrintTotalBytesLimitError() { + GOOGLE_LOG(ERROR) << "A protocol message was rejected because it was too " + "big (more than " << total_bytes_limit_ + << " bytes). To increase the limit (or to disable these " + "warnings), see CodedInputStream::SetTotalBytesLimit() " + "in google/protobuf/io/coded_stream.h."; +} + +bool CodedInputStream::SkipFallback(int count, int original_buffer_size) { + if (buffer_size_after_limit_ > 0) { + // We hit a limit inside this buffer. Advance to the limit and fail. + Advance(original_buffer_size); + return false; + } + + count -= original_buffer_size; + buffer_ = NULL; + buffer_end_ = buffer_; + + // Make sure this skip doesn't try to skip past the current limit. + int closest_limit = std::min(current_limit_, total_bytes_limit_); + int bytes_until_limit = closest_limit - total_bytes_read_; + if (bytes_until_limit < count) { + // We hit the limit. Skip up to it then fail. + if (bytes_until_limit > 0) { + total_bytes_read_ = closest_limit; + input_->Skip(bytes_until_limit); + } + return false; + } + + if (!input_->Skip(count)) { + total_bytes_read_ = input_->ByteCount(); + return false; + } + total_bytes_read_ += count; + return true; +} + +bool CodedInputStream::GetDirectBufferPointer(const void** data, int* size) { + if (BufferSize() == 0 && !Refresh()) return false; + + *data = buffer_; + *size = BufferSize(); + return true; +} + +bool CodedInputStream::ReadRaw(void* buffer, int size) { + return InternalReadRawInline(buffer, size); +} + +bool CodedInputStream::ReadString(string* buffer, int size) { + if (size < 0) return false; // security: size is often user-supplied + return InternalReadStringInline(buffer, size); +} + +bool CodedInputStream::ReadStringFallback(string* buffer, int size) { + if (!buffer->empty()) { + buffer->clear(); + } + + int closest_limit = std::min(current_limit_, total_bytes_limit_); + if (closest_limit != INT_MAX) { + int bytes_to_limit = closest_limit - CurrentPosition(); + if (bytes_to_limit > 0 && size > 0 && size <= bytes_to_limit) { + buffer->reserve(size); + } + } + + int current_buffer_size; + while ((current_buffer_size = BufferSize()) < size) { + // Some STL implementations "helpfully" crash on buffer->append(NULL, 0). + if (current_buffer_size != 0) { + // Note: string1.append(string2) is O(string2.size()) (as opposed to + // O(string1.size() + string2.size()), which would be bad). 
+ buffer->append(reinterpret_cast(buffer_), + current_buffer_size); + } + size -= current_buffer_size; + Advance(current_buffer_size); + if (!Refresh()) return false; + } + + buffer->append(reinterpret_cast(buffer_), size); + Advance(size); + + return true; +} + + +bool CodedInputStream::ReadLittleEndian32Fallback(uint32* value) { + uint8 bytes[sizeof(*value)]; + + const uint8* ptr; + if (BufferSize() >= sizeof(*value)) { + // Fast path: Enough bytes in the buffer to read directly. + ptr = buffer_; + Advance(sizeof(*value)); + } else { + // Slow path: Had to read past the end of the buffer. + if (!ReadRaw(bytes, sizeof(*value))) return false; + ptr = bytes; + } + ReadLittleEndian32FromArray(ptr, value); + return true; +} + +bool CodedInputStream::ReadLittleEndian64Fallback(uint64* value) { + uint8 bytes[sizeof(*value)]; + + const uint8* ptr; + if (BufferSize() >= sizeof(*value)) { + // Fast path: Enough bytes in the buffer to read directly. + ptr = buffer_; + Advance(sizeof(*value)); + } else { + // Slow path: Had to read past the end of the buffer. + if (!ReadRaw(bytes, sizeof(*value))) return false; + ptr = bytes; + } + ReadLittleEndian64FromArray(ptr, value); + return true; +} + +namespace { + +// Read a varint from the given buffer, write it to *value, and return a pair. +// The first part of the pair is true iff the read was successful. The second +// part is buffer + (number of bytes read). This function is always inlined, +// so returning a pair is costless. +GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE +::std::pair ReadVarint32FromArray( + uint32 first_byte, const uint8* buffer, + uint32* value); +inline ::std::pair ReadVarint32FromArray( + uint32 first_byte, const uint8* buffer, uint32* value) { + // Fast path: We have enough bytes left in the buffer to guarantee that + // this read won't cross the end, so we can skip the checks. + GOOGLE_DCHECK_EQ(*buffer, first_byte); + GOOGLE_DCHECK_EQ(first_byte & 0x80, 0x80) << first_byte; + const uint8* ptr = buffer; + uint32 b; + uint32 result = first_byte - 0x80; + ++ptr; // We just processed the first byte. Move on to the second. + b = *(ptr++); result += b << 7; if (!(b & 0x80)) goto done; + result -= 0x80 << 7; + b = *(ptr++); result += b << 14; if (!(b & 0x80)) goto done; + result -= 0x80 << 14; + b = *(ptr++); result += b << 21; if (!(b & 0x80)) goto done; + result -= 0x80 << 21; + b = *(ptr++); result += b << 28; if (!(b & 0x80)) goto done; + // "result -= 0x80 << 28" is irrevelant. + + // If the input is larger than 32 bits, we still need to read it all + // and discard the high-order bits. + for (int i = 0; i < kMaxVarintBytes - kMaxVarint32Bytes; i++) { + b = *(ptr++); if (!(b & 0x80)) goto done; + } + + // We have overrun the maximum size of a varint (10 bytes). Assume + // the data is corrupt. + return std::make_pair(false, ptr); + + done: + *value = result; + return std::make_pair(true, ptr); +} + +GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE::std::pair +ReadVarint64FromArray(const uint8* buffer, uint64* value); +inline ::std::pair ReadVarint64FromArray( + const uint8* buffer, uint64* value) { + const uint8* ptr = buffer; + uint32 b; + + // Splitting into 32-bit pieces gives better performance on 32-bit + // processors. 
+ uint32 part0 = 0, part1 = 0, part2 = 0; + + b = *(ptr++); part0 = b ; if (!(b & 0x80)) goto done; + part0 -= 0x80; + b = *(ptr++); part0 += b << 7; if (!(b & 0x80)) goto done; + part0 -= 0x80 << 7; + b = *(ptr++); part0 += b << 14; if (!(b & 0x80)) goto done; + part0 -= 0x80 << 14; + b = *(ptr++); part0 += b << 21; if (!(b & 0x80)) goto done; + part0 -= 0x80 << 21; + b = *(ptr++); part1 = b ; if (!(b & 0x80)) goto done; + part1 -= 0x80; + b = *(ptr++); part1 += b << 7; if (!(b & 0x80)) goto done; + part1 -= 0x80 << 7; + b = *(ptr++); part1 += b << 14; if (!(b & 0x80)) goto done; + part1 -= 0x80 << 14; + b = *(ptr++); part1 += b << 21; if (!(b & 0x80)) goto done; + part1 -= 0x80 << 21; + b = *(ptr++); part2 = b ; if (!(b & 0x80)) goto done; + part2 -= 0x80; + b = *(ptr++); part2 += b << 7; if (!(b & 0x80)) goto done; + // "part2 -= 0x80 << 7" is irrelevant because (0x80 << 7) << 56 is 0. + + // We have overrun the maximum size of a varint (10 bytes). Assume + // the data is corrupt. + return std::make_pair(false, ptr); + + done: + *value = (static_cast(part0)) | + (static_cast(part1) << 28) | + (static_cast(part2) << 56); + return std::make_pair(true, ptr); +} + +} // namespace + +bool CodedInputStream::ReadVarint32Slow(uint32* value) { + // Directly invoke ReadVarint64Fallback, since we already tried to optimize + // for one-byte varints. + std::pair p = ReadVarint64Fallback(); + *value = static_cast(p.first); + return p.second; +} + +int64 CodedInputStream::ReadVarint32Fallback(uint32 first_byte_or_zero) { + if (BufferSize() >= kMaxVarintBytes || + // Optimization: We're also safe if the buffer is non-empty and it ends + // with a byte that would terminate a varint. + (buffer_end_ > buffer_ && !(buffer_end_[-1] & 0x80))) { + GOOGLE_DCHECK_NE(first_byte_or_zero, 0) + << "Caller should provide us with *buffer_ when buffer is non-empty"; + uint32 temp; + ::std::pair p = + ReadVarint32FromArray(first_byte_or_zero, buffer_, &temp); + if (!p.first) return -1; + buffer_ = p.second; + return temp; + } else { + // Really slow case: we will incur the cost of an extra function call here, + // but moving this out of line reduces the size of this function, which + // improves the common case. In micro benchmarks, this is worth about 10-15% + uint32 temp; + return ReadVarint32Slow(&temp) ? static_cast(temp) : -1; + } +} + +int CodedInputStream::ReadVarintSizeAsIntSlow() { + // Directly invoke ReadVarint64Fallback, since we already tried to optimize + // for one-byte varints. + std::pair p = ReadVarint64Fallback(); + if (!p.second || p.first > static_cast(INT_MAX)) return -1; + return p.first; +} + +int CodedInputStream::ReadVarintSizeAsIntFallback() { + if (BufferSize() >= kMaxVarintBytes || + // Optimization: We're also safe if the buffer is non-empty and it ends + // with a byte that would terminate a varint. + (buffer_end_ > buffer_ && !(buffer_end_[-1] & 0x80))) { + uint64 temp; + ::std::pair p = ReadVarint64FromArray(buffer_, &temp); + if (!p.first || temp > static_cast(INT_MAX)) return -1; + buffer_ = p.second; + return temp; + } else { + // Really slow case: we will incur the cost of an extra function call here, + // but moving this out of line reduces the size of this function, which + // improves the common case. In micro benchmarks, this is worth about 10-15% + return ReadVarintSizeAsIntSlow(); + } +} + +uint32 CodedInputStream::ReadTagSlow() { + if (buffer_ == buffer_end_) { + // Call refresh. + if (!Refresh()) { + // Refresh failed. 
Make sure that it failed due to EOF, not because + // we hit total_bytes_limit_, which, unlike normal limits, is not a + // valid place to end a message. + int current_position = total_bytes_read_ - buffer_size_after_limit_; + if (current_position >= total_bytes_limit_) { + // Hit total_bytes_limit_. But if we also hit the normal limit, + // we're still OK. + legitimate_message_end_ = current_limit_ == total_bytes_limit_; + } else { + legitimate_message_end_ = true; + } + return 0; + } + } + + // For the slow path, just do a 64-bit read. Try to optimize for one-byte tags + // again, since we have now refreshed the buffer. + uint64 result = 0; + if (!ReadVarint64(&result)) return 0; + return static_cast(result); +} + +uint32 CodedInputStream::ReadTagFallback(uint32 first_byte_or_zero) { + const int buf_size = BufferSize(); + if (buf_size >= kMaxVarintBytes || + // Optimization: We're also safe if the buffer is non-empty and it ends + // with a byte that would terminate a varint. + (buf_size > 0 && !(buffer_end_[-1] & 0x80))) { + GOOGLE_DCHECK_EQ(first_byte_or_zero, buffer_[0]); + if (first_byte_or_zero == 0) { + ++buffer_; + return 0; + } + uint32 tag; + ::std::pair p = + ReadVarint32FromArray(first_byte_or_zero, buffer_, &tag); + if (!p.first) { + return 0; + } + buffer_ = p.second; + return tag; + } else { + // We are commonly at a limit when attempting to read tags. Try to quickly + // detect this case without making another function call. + if ((buf_size == 0) && + ((buffer_size_after_limit_ > 0) || + (total_bytes_read_ == current_limit_)) && + // Make sure that the limit we hit is not total_bytes_limit_, since + // in that case we still need to call Refresh() so that it prints an + // error. + total_bytes_read_ - buffer_size_after_limit_ < total_bytes_limit_) { + // We hit a byte limit. + legitimate_message_end_ = true; + return 0; + } + return ReadTagSlow(); + } +} + +bool CodedInputStream::ReadVarint64Slow(uint64* value) { + // Slow path: This read might cross the end of the buffer, so we + // need to check and refresh the buffer if and when it does. + + uint64 result = 0; + int count = 0; + uint32 b; + + do { + if (count == kMaxVarintBytes) { + *value = 0; + return false; + } + while (buffer_ == buffer_end_) { + if (!Refresh()) { + *value = 0; + return false; + } + } + b = *buffer_; + result |= static_cast(b & 0x7F) << (7 * count); + Advance(1); + ++count; + } while (b & 0x80); + + *value = result; + return true; +} + +std::pair CodedInputStream::ReadVarint64Fallback() { + if (BufferSize() >= kMaxVarintBytes || + // Optimization: We're also safe if the buffer is non-empty and it ends + // with a byte that would terminate a varint. + (buffer_end_ > buffer_ && !(buffer_end_[-1] & 0x80))) { + uint64 temp; + ::std::pair p = ReadVarint64FromArray(buffer_, &temp); + if (!p.first) { + return std::make_pair(0, false); + } + buffer_ = p.second; + return std::make_pair(temp, true); + } else { + uint64 temp; + bool success = ReadVarint64Slow(&temp); + return std::make_pair(temp, success); + } +} + +bool CodedInputStream::Refresh() { + GOOGLE_DCHECK_EQ(0, BufferSize()); + + if (buffer_size_after_limit_ > 0 || overflow_bytes_ > 0 || + total_bytes_read_ == current_limit_) { + // We've hit a limit. Stop. + int current_position = total_bytes_read_ - buffer_size_after_limit_; + + if (current_position >= total_bytes_limit_ && + total_bytes_limit_ != current_limit_) { + // Hit total_bytes_limit_. 
+ PrintTotalBytesLimitError(); + } + + return false; + } + + const void* void_buffer; + int buffer_size; + if (NextNonEmpty(input_, &void_buffer, &buffer_size)) { + buffer_ = reinterpret_cast(void_buffer); + buffer_end_ = buffer_ + buffer_size; + GOOGLE_CHECK_GE(buffer_size, 0); + + if (total_bytes_read_ <= INT_MAX - buffer_size) { + total_bytes_read_ += buffer_size; + } else { + // Overflow. Reset buffer_end_ to not include the bytes beyond INT_MAX. + // We can't get that far anyway, because total_bytes_limit_ is guaranteed + // to be less than it. We need to keep track of the number of bytes + // we discarded, though, so that we can call input_->BackUp() to back + // up over them on destruction. + + // The following line is equivalent to: + // overflow_bytes_ = total_bytes_read_ + buffer_size - INT_MAX; + // except that it avoids overflows. Signed integer overflow has + // undefined results according to the C standard. + overflow_bytes_ = total_bytes_read_ - (INT_MAX - buffer_size); + buffer_end_ -= overflow_bytes_; + total_bytes_read_ = INT_MAX; + } + + RecomputeBufferLimits(); + return true; + } else { + buffer_ = NULL; + buffer_end_ = NULL; + return false; + } +} + +// CodedOutputStream ================================================= + +std::atomic CodedOutputStream::default_serialization_deterministic_{ + false}; + +CodedOutputStream::CodedOutputStream(ZeroCopyOutputStream* output) + : CodedOutputStream(output, true) {} + +CodedOutputStream::CodedOutputStream(ZeroCopyOutputStream* output, + bool do_eager_refresh) + : output_(output), + buffer_(NULL), + buffer_size_(0), + total_bytes_(0), + had_error_(false), + aliasing_enabled_(false), + is_serialization_deterministic_(IsDefaultSerializationDeterministic()) { + if (do_eager_refresh) { + // Eagerly Refresh() so buffer space is immediately available. + Refresh(); + // The Refresh() may have failed. If the client doesn't write any data, + // though, don't consider this an error. If the client does write data, then + // another Refresh() will be attempted and it will set the error once again. 
+
+// CodedOutputStream =================================================
+
+std::atomic<bool> CodedOutputStream::default_serialization_deterministic_{
+    false};
+
+CodedOutputStream::CodedOutputStream(ZeroCopyOutputStream* output)
+    : CodedOutputStream(output, true) {}
+
+CodedOutputStream::CodedOutputStream(ZeroCopyOutputStream* output,
+                                     bool do_eager_refresh)
+    : output_(output),
+      buffer_(NULL),
+      buffer_size_(0),
+      total_bytes_(0),
+      had_error_(false),
+      aliasing_enabled_(false),
+      is_serialization_deterministic_(IsDefaultSerializationDeterministic()) {
+  if (do_eager_refresh) {
+    // Eagerly Refresh() so buffer space is immediately available.
+    Refresh();
+    // The Refresh() may have failed. If the client doesn't write any data,
+    // though, don't consider this an error. If the client does write data,
+    // then another Refresh() will be attempted and it will set the error
+    // once again.
+    had_error_ = false;
+  }
+}
+
+CodedOutputStream::~CodedOutputStream() {
+  Trim();
+}
+
+void CodedOutputStream::Trim() {
+  if (buffer_size_ > 0) {
+    output_->BackUp(buffer_size_);
+    total_bytes_ -= buffer_size_;
+    buffer_size_ = 0;
+    buffer_ = NULL;
+  }
+}
+
+bool CodedOutputStream::Skip(int count) {
+  if (count < 0) return false;
+
+  while (count > buffer_size_) {
+    count -= buffer_size_;
+    if (!Refresh()) return false;
+  }
+
+  Advance(count);
+  return true;
+}
+
+bool CodedOutputStream::GetDirectBufferPointer(void** data, int* size) {
+  if (buffer_size_ == 0 && !Refresh()) return false;
+
+  *data = buffer_;
+  *size = buffer_size_;
+  return true;
+}
+
+void CodedOutputStream::WriteRaw(const void* data, int size) {
+  while (buffer_size_ < size) {
+    memcpy(buffer_, data, buffer_size_);
+    size -= buffer_size_;
+    data = reinterpret_cast<const uint8*>(data) + buffer_size_;
+    if (!Refresh()) return;
+  }
+
+  memcpy(buffer_, data, size);
+  Advance(size);
+}
+
+uint8* CodedOutputStream::WriteRawToArray(
+    const void* data, int size, uint8* target) {
+  memcpy(target, data, size);
+  return target + size;
+}
+
+
+void CodedOutputStream::WriteAliasedRaw(const void* data, int size) {
+  if (size < buffer_size_) {
+    WriteRaw(data, size);
+  } else {
+    Trim();
+
+    total_bytes_ += size;
+    had_error_ |= !output_->WriteAliasedRaw(data, size);
+  }
+}
+
+void CodedOutputStream::WriteLittleEndian32(uint32 value) {
+  uint8 bytes[sizeof(value)];
+
+  bool use_fast = buffer_size_ >= sizeof(value);
+  uint8* ptr = use_fast ? buffer_ : bytes;
+
+  WriteLittleEndian32ToArray(value, ptr);
+
+  if (use_fast) {
+    Advance(sizeof(value));
+  } else {
+    WriteRaw(bytes, sizeof(value));
+  }
+}
+
+void CodedOutputStream::WriteLittleEndian64(uint64 value) {
+  uint8 bytes[sizeof(value)];
+
+  bool use_fast = buffer_size_ >= sizeof(value);
+  uint8* ptr = use_fast ? buffer_ : bytes;
+
+  WriteLittleEndian64ToArray(value, ptr);
+
+  if (use_fast) {
+    Advance(sizeof(value));
+  } else {
+    WriteRaw(bytes, sizeof(value));
+  }
+}
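+
+// ---------------------------------------------------------------------------
+// Editor's illustrative sketch (not part of this file): what the
+// WriteLittleEndian32ToArray() fast path above ultimately produces. Fixed
+// 32-bit fields are serialized least-significant byte first on every host,
+// so big- and little-endian machines interoperate. Helper name hypothetical;
+// compiled out.
+#if 0
+inline uint8_t* WriteLE32(uint32_t value, uint8_t* target) {
+  target[0] = static_cast<uint8_t>(value);        // least-significant byte
+  target[1] = static_cast<uint8_t>(value >> 8);
+  target[2] = static_cast<uint8_t>(value >> 16);
+  target[3] = static_cast<uint8_t>(value >> 24);  // most-significant byte
+  return target + 4;
+}
+#endif
+// ---------------------------------------------------------------------------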
+
+void CodedOutputStream::WriteVarint32SlowPath(uint32 value) {
+  uint8 bytes[kMaxVarint32Bytes];
+  uint8* target = &bytes[0];
+  uint8* end = WriteVarint32ToArray(value, target);
+  int size = end - target;
+  WriteRaw(bytes, size);
+}
+
+void CodedOutputStream::WriteVarint64SlowPath(uint64 value) {
+  uint8 bytes[kMaxVarintBytes];
+  uint8* target = &bytes[0];
+  uint8* end = WriteVarint64ToArray(value, target);
+  int size = end - target;
+  WriteRaw(bytes, size);
+}
+
+bool CodedOutputStream::Refresh() {
+  void* void_buffer;
+  if (output_->Next(&void_buffer, &buffer_size_)) {
+    buffer_ = reinterpret_cast<uint8*>(void_buffer);
+    total_bytes_ += buffer_size_;
+    return true;
+  } else {
+    buffer_ = NULL;
+    buffer_size_ = 0;
+    had_error_ = true;
+    return false;
+  }
+}
+
+uint8* CodedOutputStream::WriteStringWithSizeToArray(const string& str,
+                                                     uint8* target) {
+  GOOGLE_DCHECK_LE(str.size(), kuint32max);
+  target = WriteVarint32ToArray(str.size(), target);
+  return WriteStringToArray(str, target);
+}
+
+}  // namespace io
+}  // namespace protobuf
+}  // namespace google
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/common.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/common.cc
new file mode 100644
index 0000000000000000000000000000000000000000..6544c6ed88bfd3653d225d306e4c442e6fcb15e2
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/common.cc
@@ -0,0 +1,389 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+
+#include   // TODO(gerbens) ideally remove this.
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN // We only need minimal includes +#include +#define snprintf _snprintf // see comment in strutil.cc +#elif defined(HAVE_PTHREAD) +#include +#else +#error "No suitable threading library available." +#endif +#if defined(__ANDROID__) +#include +#endif + +namespace google { +namespace protobuf { + +namespace internal { + +void VerifyVersion(int headerVersion, + int minLibraryVersion, + const char* filename) { + if (GOOGLE_PROTOBUF_VERSION < minLibraryVersion) { + // Library is too old for headers. + GOOGLE_LOG(FATAL) + << "This program requires version " << VersionString(minLibraryVersion) + << " of the Protocol Buffer runtime library, but the installed version " + "is " << VersionString(GOOGLE_PROTOBUF_VERSION) << ". Please update " + "your library. If you compiled the program yourself, make sure that " + "your headers are from the same version of Protocol Buffers as your " + "link-time library. (Version verification failed in \"" + << filename << "\".)"; + } + if (headerVersion < kMinHeaderVersionForLibrary) { + // Headers are too old for library. + GOOGLE_LOG(FATAL) + << "This program was compiled against version " + << VersionString(headerVersion) << " of the Protocol Buffer runtime " + "library, which is not compatible with the installed version (" + << VersionString(GOOGLE_PROTOBUF_VERSION) << "). Contact the program " + "author for an update. If you compiled the program yourself, make " + "sure that your headers are from the same version of Protocol Buffers " + "as your link-time library. (Version verification failed in \"" + << filename << "\".)"; + } +} + +string VersionString(int version) { + int major = version / 1000000; + int minor = (version / 1000) % 1000; + int micro = version % 1000; + + // 128 bytes should always be enough, but we use snprintf() anyway to be + // safe. + char buffer[128]; + snprintf(buffer, sizeof(buffer), "%d.%d.%d", major, minor, micro); + + // Guard against broken MSVC snprintf(). + buffer[sizeof(buffer)-1] = '\0'; + + return buffer; +} + +} // namespace internal + +// =================================================================== +// emulates google3/base/logging.cc + +// If the minimum logging level is not set, we default to logging messages for +// all levels. +#ifndef GOOGLE_PROTOBUF_MIN_LOG_LEVEL +#define GOOGLE_PROTOBUF_MIN_LOG_LEVEL LOGLEVEL_INFO +#endif + +namespace internal { + +#if defined(__ANDROID__) +inline void DefaultLogHandler(LogLevel level, const char* filename, int line, + const string& message) { + if (level < GOOGLE_PROTOBUF_MIN_LOG_LEVEL) { + return; + } + static const char* level_names[] = {"INFO", "WARNING", "ERROR", "FATAL"}; + + static const int android_log_levels[] = { + ANDROID_LOG_INFO, // LOG(INFO), + ANDROID_LOG_WARN, // LOG(WARNING) + ANDROID_LOG_ERROR, // LOG(ERROR) + ANDROID_LOG_FATAL, // LOG(FATAL) + }; + + // Bound the logging level. + const int android_log_level = android_log_levels[level]; + ::std::ostringstream ostr; + ostr << "[libprotobuf " << level_names[level] << " " << filename << ":" + << line << "] " << message.c_str(); + + // Output the log string the Android log at the appropriate level. + __android_log_write(android_log_level, "libprotobuf-native", + ostr.str().c_str()); + // Also output to std::cerr. + fprintf(stderr, "%s", ostr.str().c_str()); + fflush(stderr); + + // Indicate termination if needed. 
+ if (android_log_level == ANDROID_LOG_FATAL) { + __android_log_write(ANDROID_LOG_FATAL, "libprotobuf-native", + "terminating.\n"); + } +} + +#else +void DefaultLogHandler(LogLevel level, const char* filename, int line, + const string& message) { + if (level < GOOGLE_PROTOBUF_MIN_LOG_LEVEL) { + return; + } + static const char* level_names[] = { "INFO", "WARNING", "ERROR", "FATAL" }; + + // We use fprintf() instead of cerr because we want this to work at static + // initialization time. + fprintf(stderr, "[libprotobuf %s %s:%d] %s\n", + level_names[level], filename, line, message.c_str()); + fflush(stderr); // Needed on MSVC. +} +#endif + +void NullLogHandler(LogLevel /* level */, const char* /* filename */, + int /* line */, const string& /* message */) { + // Nothing. +} + +static LogHandler* log_handler_ = &DefaultLogHandler; +static int log_silencer_count_ = 0; + +static Mutex* log_silencer_count_mutex_ = NULL; +GOOGLE_PROTOBUF_DECLARE_ONCE(log_silencer_count_init_); + +void DeleteLogSilencerCount() { + delete log_silencer_count_mutex_; + log_silencer_count_mutex_ = NULL; +} +void InitLogSilencerCount() { + log_silencer_count_mutex_ = new Mutex; + OnShutdown(&DeleteLogSilencerCount); +} +void InitLogSilencerCountOnce() { + GoogleOnceInit(&log_silencer_count_init_, &InitLogSilencerCount); +} + +LogMessage& LogMessage::operator<<(const string& value) { + message_ += value; + return *this; +} + +LogMessage& LogMessage::operator<<(const char* value) { + message_ += value; + return *this; +} + +LogMessage& LogMessage::operator<<(const StringPiece& value) { + message_ += value.ToString(); + return *this; +} + +LogMessage& LogMessage::operator<<( + const ::google::protobuf::util::Status& status) { + message_ += status.ToString(); + return *this; +} + +LogMessage& LogMessage::operator<<(const uint128& value) { + std::ostringstream str; + str << value; + message_ += str.str(); + return *this; +} + +// Since this is just for logging, we don't care if the current locale changes +// the results -- in fact, we probably prefer that. So we use snprintf() +// instead of Simple*toa(). +#undef DECLARE_STREAM_OPERATOR +#define DECLARE_STREAM_OPERATOR(TYPE, FORMAT) \ + LogMessage& LogMessage::operator<<(TYPE value) { \ + /* 128 bytes should be big enough for any of the primitive */ \ + /* values which we print with this, but well use snprintf() */ \ + /* anyway to be extra safe. */ \ + char buffer[128]; \ + snprintf(buffer, sizeof(buffer), FORMAT, value); \ + /* Guard against broken MSVC snprintf(). 
*/ \
+    buffer[sizeof(buffer)-1] = '\0'; \
+    message_ += buffer; \
+    return *this; \
+  }
+
+DECLARE_STREAM_OPERATOR(char         , "%c" )
+DECLARE_STREAM_OPERATOR(int          , "%d" )
+DECLARE_STREAM_OPERATOR(unsigned int , "%u" )
+DECLARE_STREAM_OPERATOR(long         , "%ld")
+DECLARE_STREAM_OPERATOR(unsigned long, "%lu")
+DECLARE_STREAM_OPERATOR(double       , "%g" )
+DECLARE_STREAM_OPERATOR(void*        , "%p" )
+DECLARE_STREAM_OPERATOR(long long         , "%" GOOGLE_LL_FORMAT "d")
+DECLARE_STREAM_OPERATOR(unsigned long long, "%" GOOGLE_LL_FORMAT "u")
+#undef DECLARE_STREAM_OPERATOR
+
+LogMessage::LogMessage(LogLevel level, const char* filename, int line)
+  : level_(level), filename_(filename), line_(line) {}
+LogMessage::~LogMessage() {}
+
+void LogMessage::Finish() {
+  bool suppress = false;
+
+  if (level_ != LOGLEVEL_FATAL) {
+    InitLogSilencerCountOnce();
+    MutexLock lock(log_silencer_count_mutex_);
+    suppress = log_silencer_count_ > 0;
+  }
+
+  if (!suppress) {
+    log_handler_(level_, filename_, line_, message_);
+  }
+
+  if (level_ == LOGLEVEL_FATAL) {
+#if PROTOBUF_USE_EXCEPTIONS
+    throw FatalException(filename_, line_, message_);
+#else
+    abort();
+#endif
+  }
+}
+
+void LogFinisher::operator=(LogMessage& other) {
+  other.Finish();
+}
+
+}  // namespace internal
+
+LogHandler* SetLogHandler(LogHandler* new_func) {
+  LogHandler* old = internal::log_handler_;
+  if (old == &internal::NullLogHandler) {
+    old = NULL;
+  }
+  if (new_func == NULL) {
+    internal::log_handler_ = &internal::NullLogHandler;
+  } else {
+    internal::log_handler_ = new_func;
+  }
+  return old;
+}
+
+LogSilencer::LogSilencer() {
+  internal::InitLogSilencerCountOnce();
+  MutexLock lock(internal::log_silencer_count_mutex_);
+  ++internal::log_silencer_count_;
+};
+
+LogSilencer::~LogSilencer() {
+  internal::InitLogSilencerCountOnce();
+  MutexLock lock(internal::log_silencer_count_mutex_);
+  --internal::log_silencer_count_;
+};
+
+// ===================================================================
+// emulates google3/base/callback.cc
+
+Closure::~Closure() {}
+
+namespace internal { FunctionClosure0::~FunctionClosure0() {} }
+
+void DoNothing() {}
+
+// ===================================================================
+// emulates google3/util/endian/endian.h
+//
+// TODO(xiaofeng): PROTOBUF_LITTLE_ENDIAN is unfortunately defined in
+// google/protobuf/io/coded_stream.h and therefore can not be used here.
+// Maybe move that macro definition here in the future.
+uint32 ghtonl(uint32 x) {
+  union {
+    uint32 result;
+    uint8 result_array[4];
+  };
+  result_array[0] = static_cast<uint8>(x >> 24);
+  result_array[1] = static_cast<uint8>((x >> 16) & 0xFF);
+  result_array[2] = static_cast<uint8>((x >> 8) & 0xFF);
+  result_array[3] = static_cast<uint8>(x & 0xFF);
+  return result;
+}
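+
+// ---------------------------------------------------------------------------
+// Editor's illustrative sketch (not part of this file): ghtonl() above
+// reorders a host-order word into big-endian (network) byte order by
+// assigning bytes explicitly, so the in-memory layout is host-independent.
+// The check below is hypothetical and compiled out.
+#if 0
+#include <cassert>
+#include <cstring>
+
+void GhtonlByteOrderCheck() {
+  const uint32 be = ghtonl(0x01020304u);
+  unsigned char bytes[4];
+  memcpy(bytes, &be, sizeof(bytes));
+  // Most-significant byte first in memory, regardless of host endianness.
+  assert(bytes[0] == 0x01 && bytes[1] == 0x02 &&
+         bytes[2] == 0x03 && bytes[3] == 0x04);
+}
+#endif
+// ---------------------------------------------------------------------------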
+
+// ===================================================================
+// Shutdown support.
+
+namespace internal {
+
+typedef void OnShutdownFunc();
+struct ShutdownData {
+  ~ShutdownData() {
+    std::reverse(functions.begin(), functions.end());
+    for (auto pair : functions) pair.first(pair.second);
+  }
+
+  static ShutdownData* get() {
+    static auto* data = new ShutdownData;
+    return data;
+  }
+
+  std::vector<std::pair<void (*)(const void*), const void*> > functions;
+  Mutex mutex;
+};
+
+static void RunZeroArgFunc(const void* arg) {
+  reinterpret_cast<void (*)()>(const_cast<void*>(arg))();
+}
+
+void OnShutdown(void (*func)()) {
+  OnShutdownRun(RunZeroArgFunc, reinterpret_cast<const void*>(func));
+}
+
+void OnShutdownRun(void (*f)(const void*), const void* arg) {
+  auto shutdown_data = ShutdownData::get();
+  MutexLock lock(&shutdown_data->mutex);
+  shutdown_data->functions.push_back(std::make_pair(f, arg));
+}
+
+}  // namespace internal
+
+void ShutdownProtobufLibrary() {
+  // This function should be called only once, but accepts multiple calls.
+  static bool is_shutdown = false;
+  if (!is_shutdown) {
+    delete internal::ShutdownData::get();
+    is_shutdown = true;
+  }
+}
+
+#if PROTOBUF_USE_EXCEPTIONS
+FatalException::~FatalException() throw() {}
+
+const char* FatalException::what() const throw() {
+  return message_.c_str();
+}
+#endif
+
+}  // namespace protobuf
+}  // namespace google
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/extension_set.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/extension_set.cc
new file mode 100644
index 0000000000000000000000000000000000000000..cb205c4f47d613c1a6c5d74c38c252a56cbfd8c0
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/extension_set.cc
@@ -0,0 +1,1916 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
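+
+// ---------------------------------------------------------------------------
+// Editor's illustrative sketch (not part of this file): how an application
+// would use the logging and shutdown hooks implemented in common.cc above.
+// SetLogHandler(), LogSilencer, and ShutdownProtobufLibrary() are real
+// public APIs; the handler below is hypothetical. Compiled out.
+#if 0
+#include <cstdio>
+#include <google/protobuf/stubs/common.h>
+
+static void MyLogHandler(google::protobuf::LogLevel level,
+                         const char* filename, int line,
+                         const std::string& message) {
+  std::fprintf(stderr, "[proto %d] %s:%d: %s\n", level, filename, line,
+               message.c_str());
+}
+
+int main() {
+  // Route library logs to our handler; the previous handler is returned.
+  google::protobuf::LogHandler* old =
+      google::protobuf::SetLogHandler(&MyLogHandler);
+  {
+    google::protobuf::LogSilencer silencer;  // mutes non-FATAL logs in scope
+    // ... parsing that may emit warnings ...
+  }
+  google::protobuf::SetLogHandler(old);         // restore previous handler
+  google::protobuf::ShutdownProtobufLibrary();  // runs OnShutdown() callbacks
+  return 0;
+}
+#endif
+// ---------------------------------------------------------------------------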
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+namespace {
+
+inline WireFormatLite::FieldType real_type(FieldType type) {
+  GOOGLE_DCHECK(type > 0 && type <= WireFormatLite::MAX_FIELD_TYPE);
+  return static_cast<WireFormatLite::FieldType>(type);
+}
+
+inline WireFormatLite::CppType cpp_type(FieldType type) {
+  return WireFormatLite::FieldTypeToCppType(real_type(type));
+}
+
+inline bool is_packable(WireFormatLite::WireType type) {
+  switch (type) {
+    case WireFormatLite::WIRETYPE_VARINT:
+    case WireFormatLite::WIRETYPE_FIXED64:
+    case WireFormatLite::WIRETYPE_FIXED32:
+      return true;
+    case WireFormatLite::WIRETYPE_LENGTH_DELIMITED:
+    case WireFormatLite::WIRETYPE_START_GROUP:
+    case WireFormatLite::WIRETYPE_END_GROUP:
+      return false;
+
+    // Do not add a default statement. Let the compiler complain when someone
+    // adds a new wire type.
+  }
+  GOOGLE_LOG(FATAL) << "can't reach here.";
+  return false;
+}
+
+// Registry stuff.
+typedef hash_map<std::pair<const MessageLite*, int>,
+                 ExtensionInfo> ExtensionRegistry;
+
+static const ExtensionRegistry* global_registry = nullptr;
+
+// This function is only called at startup, so there is no need for thread-
+// safety.
+void Register(const MessageLite* containing_type,
+              int number, ExtensionInfo info) {
+  static auto local_static_registry = OnShutdownDelete(new ExtensionRegistry);
+  global_registry = local_static_registry;
+  if (!InsertIfNotPresent(local_static_registry,
+                          std::make_pair(containing_type, number), info)) {
+    GOOGLE_LOG(FATAL) << "Multiple extension registrations for type \""
+                      << containing_type->GetTypeName()
+                      << "\", field number " << number << ".";
+  }
+}
+
+const ExtensionInfo* FindRegisteredExtension(
+    const MessageLite* containing_type, int number) {
+  return global_registry == nullptr
+             ? nullptr
+             : FindOrNull(*global_registry,
+                          std::make_pair(containing_type, number));
+}
+
+}  // namespace
+
+ExtensionFinder::~ExtensionFinder() {}
+
+bool GeneratedExtensionFinder::Find(int number, ExtensionInfo* output) {
+  const ExtensionInfo* extension =
+      FindRegisteredExtension(containing_type_, number);
+  if (extension == NULL) {
+    return false;
+  } else {
+    *output = *extension;
+    return true;
+  }
+}
+
+void ExtensionSet::RegisterExtension(const MessageLite* containing_type,
+                                     int number, FieldType type,
+                                     bool is_repeated, bool is_packed) {
+  GOOGLE_CHECK_NE(type, WireFormatLite::TYPE_ENUM);
+  GOOGLE_CHECK_NE(type, WireFormatLite::TYPE_MESSAGE);
+  GOOGLE_CHECK_NE(type, WireFormatLite::TYPE_GROUP);
+  ExtensionInfo info(type, is_repeated, is_packed);
+  Register(containing_type, number, info);
+}
+
+static bool CallNoArgValidityFunc(const void* arg, int number) {
+  // Note: Must use C-style cast here rather than reinterpret_cast because
+  // the C++ standard at one point did not allow casts between function and
+  // data pointers and some compilers enforce this for C++-style casts.  No
+  // compiler enforces it for C-style casts since lots of C-style code has
+  // relied on these kinds of casts for a long time, despite being
+  // technically undefined.  See:
+  //   http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#195
+  // Also note:  Some compilers do not allow function pointers to be "const".
+  // Which makes sense, I suppose, because it's meaningless.
+ return ((EnumValidityFunc*)arg)(number); +} + +void ExtensionSet::RegisterEnumExtension(const MessageLite* containing_type, + int number, FieldType type, + bool is_repeated, bool is_packed, + EnumValidityFunc* is_valid) { + GOOGLE_CHECK_EQ(type, WireFormatLite::TYPE_ENUM); + ExtensionInfo info(type, is_repeated, is_packed); + info.enum_validity_check.func = CallNoArgValidityFunc; + // See comment in CallNoArgValidityFunc() about why we use a c-style cast. + info.enum_validity_check.arg = (void*)is_valid; + Register(containing_type, number, info); +} + +void ExtensionSet::RegisterMessageExtension(const MessageLite* containing_type, + int number, FieldType type, + bool is_repeated, bool is_packed, + const MessageLite* prototype) { + GOOGLE_CHECK(type == WireFormatLite::TYPE_MESSAGE || + type == WireFormatLite::TYPE_GROUP); + ExtensionInfo info(type, is_repeated, is_packed); + info.message_prototype = prototype; + Register(containing_type, number, info); +} + + +// =================================================================== +// Constructors and basic methods. + +ExtensionSet::ExtensionSet(::google::protobuf::Arena* arena) + : arena_(arena), + flat_capacity_(0), + flat_size_(0), + map_{flat_capacity_ == 0 ? NULL + : ::google::protobuf::Arena::CreateArray( + arena_, flat_capacity_)} {} + +ExtensionSet::ExtensionSet() + : arena_(NULL), + flat_capacity_(0), + flat_size_(0), + map_{flat_capacity_ == 0 ? NULL + : ::google::protobuf::Arena::CreateArray( + arena_, flat_capacity_)} {} + +ExtensionSet::~ExtensionSet() { + // Deletes all allocated extensions. + if (arena_ == NULL) { + ForEach([](int /* number */, Extension& ext) { ext.Free(); }); + if (GOOGLE_PREDICT_FALSE(is_large())) { + delete map_.large; + } else { + delete[] map_.flat; + } + } +} + +// Defined in extension_set_heavy.cc. +// void ExtensionSet::AppendToList(const Descriptor* containing_type, +// const DescriptorPool* pool, +// vector* output) const + +bool ExtensionSet::Has(int number) const { + const Extension* ext = FindOrNull(number); + if (ext == NULL) return false; + GOOGLE_DCHECK(!ext->is_repeated); + return !ext->is_cleared; +} + +int ExtensionSet::NumExtensions() const { + int result = 0; + ForEach([&result](int /* number */, const Extension& ext) { + if (!ext.is_cleared) { + ++result; + } + }); + return result; +} + +int ExtensionSet::ExtensionSize(int number) const { + const Extension* ext = FindOrNull(number); + return ext == NULL ? 0 : ext->GetSize(); +} + +FieldType ExtensionSet::ExtensionType(int number) const { + const Extension* ext = FindOrNull(number); + if (ext == NULL) { + GOOGLE_LOG(DFATAL) << "Don't lookup extension types if they aren't present (1). "; + return 0; + } + if (ext->is_cleared) { + GOOGLE_LOG(DFATAL) << "Don't lookup extension types if they aren't present (2). "; + } + return ext->type; +} + +void ExtensionSet::ClearExtension(int number) { + Extension* ext = FindOrNull(number); + if (ext == NULL) return; + ext->Clear(); +} + +// =================================================================== +// Field accessors + +namespace { + +enum Cardinality { + REPEATED, + OPTIONAL +}; + +} // namespace + +#define GOOGLE_DCHECK_TYPE(EXTENSION, LABEL, CPPTYPE) \ + GOOGLE_DCHECK_EQ((EXTENSION).is_repeated ? 
REPEATED : OPTIONAL, LABEL); \ + GOOGLE_DCHECK_EQ(cpp_type((EXTENSION).type), WireFormatLite::CPPTYPE_##CPPTYPE) + +// ------------------------------------------------------------------- +// Primitives + +#define PRIMITIVE_ACCESSORS(UPPERCASE, LOWERCASE, CAMELCASE) \ + \ +LOWERCASE ExtensionSet::Get##CAMELCASE(int number, \ + LOWERCASE default_value) const { \ + const Extension* extension = FindOrNull(number); \ + if (extension == NULL || extension->is_cleared) { \ + return default_value; \ + } else { \ + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, UPPERCASE); \ + return extension->LOWERCASE##_value; \ + } \ +} \ + \ +void ExtensionSet::Set##CAMELCASE(int number, FieldType type, \ + LOWERCASE value, \ + const FieldDescriptor* descriptor) { \ + Extension* extension; \ + if (MaybeNewExtension(number, descriptor, &extension)) { \ + extension->type = type; \ + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_##UPPERCASE); \ + extension->is_repeated = false; \ + } else { \ + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, UPPERCASE); \ + } \ + extension->is_cleared = false; \ + extension->LOWERCASE##_value = value; \ +} \ + \ +LOWERCASE ExtensionSet::GetRepeated##CAMELCASE(int number, int index) const { \ + const Extension* extension = FindOrNull(number); \ + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; \ + GOOGLE_DCHECK_TYPE(*extension, REPEATED, UPPERCASE); \ + return extension->repeated_##LOWERCASE##_value->Get(index); \ +} \ + \ +void ExtensionSet::SetRepeated##CAMELCASE( \ + int number, int index, LOWERCASE value) { \ + Extension* extension = FindOrNull(number); \ + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; \ + GOOGLE_DCHECK_TYPE(*extension, REPEATED, UPPERCASE); \ + extension->repeated_##LOWERCASE##_value->Set(index, value); \ +} \ + \ +void ExtensionSet::Add##CAMELCASE(int number, FieldType type, \ + bool packed, LOWERCASE value, \ + const FieldDescriptor* descriptor) { \ + Extension* extension; \ + if (MaybeNewExtension(number, descriptor, &extension)) { \ + extension->type = type; \ + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_##UPPERCASE); \ + extension->is_repeated = true; \ + extension->is_packed = packed; \ + extension->repeated_##LOWERCASE##_value = \ + Arena::CreateMessage >(arena_); \ + } else { \ + GOOGLE_DCHECK_TYPE(*extension, REPEATED, UPPERCASE); \ + GOOGLE_DCHECK_EQ(extension->is_packed, packed); \ + } \ + extension->repeated_##LOWERCASE##_value->Add(value); \ +} + +PRIMITIVE_ACCESSORS( INT32, int32, Int32) +PRIMITIVE_ACCESSORS( INT64, int64, Int64) +PRIMITIVE_ACCESSORS(UINT32, uint32, UInt32) +PRIMITIVE_ACCESSORS(UINT64, uint64, UInt64) +PRIMITIVE_ACCESSORS( FLOAT, float, Float) +PRIMITIVE_ACCESSORS(DOUBLE, double, Double) +PRIMITIVE_ACCESSORS( BOOL, bool, Bool) + +#undef PRIMITIVE_ACCESSORS + +const void* ExtensionSet::GetRawRepeatedField(int number, + const void* default_value) const { + const Extension* extension = FindOrNull(number); + if (extension == NULL) { + return default_value; + } + // We assume that all the RepeatedField<>* pointers have the same + // size and alignment within the anonymous union in Extension. + return extension->repeated_int32_value; +} + +void* ExtensionSet::MutableRawRepeatedField(int number, FieldType field_type, + bool packed, + const FieldDescriptor* desc) { + Extension* extension; + + // We instantiate an empty Repeated{,Ptr}Field if one doesn't exist for this + // extension. 
+  if (MaybeNewExtension(number, desc, &extension)) {
+    extension->is_repeated = true;
+    extension->type = field_type;
+    extension->is_packed = packed;
+
+    switch (WireFormatLite::FieldTypeToCppType(
+        static_cast<WireFormatLite::FieldType>(field_type))) {
+      case WireFormatLite::CPPTYPE_INT32:
+        extension->repeated_int32_value =
+            Arena::CreateMessage<RepeatedField<int32> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_INT64:
+        extension->repeated_int64_value =
+            Arena::CreateMessage<RepeatedField<int64> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_UINT32:
+        extension->repeated_uint32_value =
+            Arena::CreateMessage<RepeatedField<uint32> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_UINT64:
+        extension->repeated_uint64_value =
+            Arena::CreateMessage<RepeatedField<uint64> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_DOUBLE:
+        extension->repeated_double_value =
+            Arena::CreateMessage<RepeatedField<double> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_FLOAT:
+        extension->repeated_float_value =
+            Arena::CreateMessage<RepeatedField<float> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_BOOL:
+        extension->repeated_bool_value =
+            Arena::CreateMessage<RepeatedField<bool> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_ENUM:
+        extension->repeated_enum_value =
+            Arena::CreateMessage<RepeatedField<int> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_STRING:
+        extension->repeated_string_value =
+            Arena::CreateMessage<RepeatedPtrField<string> >(arena_);
+        break;
+      case WireFormatLite::CPPTYPE_MESSAGE:
+        extension->repeated_message_value =
+            Arena::CreateMessage<RepeatedPtrField<MessageLite> >(arena_);
+        break;
+    }
+  }
+
+  // We assume that all the RepeatedField<>* pointers have the same
+  // size and alignment within the anonymous union in Extension.
+  return extension->repeated_int32_value;
+}
+
+// Compatible version using old call signature. Does not create extensions
+// when they don't already exist; instead, just GOOGLE_CHECK-fails.
+void* ExtensionSet::MutableRawRepeatedField(int number) {
+  Extension* extension = FindOrNull(number);
+  GOOGLE_CHECK(extension != NULL) << "Extension not found.";
+  // We assume that all the RepeatedField<>* pointers have the same
+  // size and alignment within the anonymous union in Extension.
+  return extension->repeated_int32_value;
+}
+
+
+// -------------------------------------------------------------------
+// Enums
+
+int ExtensionSet::GetEnum(int number, int default_value) const {
+  const Extension* extension = FindOrNull(number);
+  if (extension == NULL || extension->is_cleared) {
+    // Not present.  Return the default value.
+ return default_value; + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, ENUM); + return extension->enum_value; + } +} + +void ExtensionSet::SetEnum(int number, FieldType type, int value, + const FieldDescriptor* descriptor) { + Extension* extension; + if (MaybeNewExtension(number, descriptor, &extension)) { + extension->type = type; + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_ENUM); + extension->is_repeated = false; + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, ENUM); + } + extension->is_cleared = false; + extension->enum_value = value; +} + +int ExtensionSet::GetRepeatedEnum(int number, int index) const { + const Extension* extension = FindOrNull(number); + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; + GOOGLE_DCHECK_TYPE(*extension, REPEATED, ENUM); + return extension->repeated_enum_value->Get(index); +} + +void ExtensionSet::SetRepeatedEnum(int number, int index, int value) { + Extension* extension = FindOrNull(number); + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; + GOOGLE_DCHECK_TYPE(*extension, REPEATED, ENUM); + extension->repeated_enum_value->Set(index, value); +} + +void ExtensionSet::AddEnum(int number, FieldType type, + bool packed, int value, + const FieldDescriptor* descriptor) { + Extension* extension; + if (MaybeNewExtension(number, descriptor, &extension)) { + extension->type = type; + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_ENUM); + extension->is_repeated = true; + extension->is_packed = packed; + extension->repeated_enum_value = + Arena::CreateMessage >(arena_); + } else { + GOOGLE_DCHECK_TYPE(*extension, REPEATED, ENUM); + GOOGLE_DCHECK_EQ(extension->is_packed, packed); + } + extension->repeated_enum_value->Add(value); +} + +// ------------------------------------------------------------------- +// Strings + +const string& ExtensionSet::GetString(int number, + const string& default_value) const { + const Extension* extension = FindOrNull(number); + if (extension == NULL || extension->is_cleared) { + // Not present. Return the default value. 
+ return default_value; + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, STRING); + return *extension->string_value; + } +} + +string* ExtensionSet::MutableString(int number, FieldType type, + const FieldDescriptor* descriptor) { + Extension* extension; + if (MaybeNewExtension(number, descriptor, &extension)) { + extension->type = type; + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_STRING); + extension->is_repeated = false; + extension->string_value = Arena::Create(arena_); + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, STRING); + } + extension->is_cleared = false; + return extension->string_value; +} + +const string& ExtensionSet::GetRepeatedString(int number, int index) const { + const Extension* extension = FindOrNull(number); + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; + GOOGLE_DCHECK_TYPE(*extension, REPEATED, STRING); + return extension->repeated_string_value->Get(index); +} + +string* ExtensionSet::MutableRepeatedString(int number, int index) { + Extension* extension = FindOrNull(number); + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; + GOOGLE_DCHECK_TYPE(*extension, REPEATED, STRING); + return extension->repeated_string_value->Mutable(index); +} + +string* ExtensionSet::AddString(int number, FieldType type, + const FieldDescriptor* descriptor) { + Extension* extension; + if (MaybeNewExtension(number, descriptor, &extension)) { + extension->type = type; + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_STRING); + extension->is_repeated = true; + extension->is_packed = false; + extension->repeated_string_value = + Arena::CreateMessage >(arena_); + } else { + GOOGLE_DCHECK_TYPE(*extension, REPEATED, STRING); + } + return extension->repeated_string_value->Add(); +} + +// ------------------------------------------------------------------- +// Messages + +const MessageLite& ExtensionSet::GetMessage( + int number, const MessageLite& default_value) const { + const Extension* extension = FindOrNull(number); + if (extension == NULL) { + // Not present. Return the default value. + return default_value; + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE); + if (extension->is_lazy) { + return extension->lazymessage_value->GetMessage(default_value); + } else { + return *extension->message_value; + } + } +} + +// Defined in extension_set_heavy.cc. +// const MessageLite& ExtensionSet::GetMessage(int number, +// const Descriptor* message_type, +// MessageFactory* factory) const + +MessageLite* ExtensionSet::MutableMessage(int number, FieldType type, + const MessageLite& prototype, + const FieldDescriptor* descriptor) { + Extension* extension; + if (MaybeNewExtension(number, descriptor, &extension)) { + extension->type = type; + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE); + extension->is_repeated = false; + extension->is_lazy = false; + extension->message_value = prototype.New(arena_); + extension->is_cleared = false; + return extension->message_value; + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE); + extension->is_cleared = false; + if (extension->is_lazy) { + return extension->lazymessage_value->MutableMessage(prototype); + } else { + return extension->message_value; + } + } +} + +// Defined in extension_set_heavy.cc. 
+// MessageLite* ExtensionSet::MutableMessage(int number, FieldType type, +// const Descriptor* message_type, +// MessageFactory* factory) + +void ExtensionSet::SetAllocatedMessage(int number, FieldType type, + const FieldDescriptor* descriptor, + MessageLite* message) { + if (message == NULL) { + ClearExtension(number); + return; + } + ::google::protobuf::Arena* message_arena = message->GetArena(); + Extension* extension; + if (MaybeNewExtension(number, descriptor, &extension)) { + extension->type = type; + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE); + extension->is_repeated = false; + extension->is_lazy = false; + if (message_arena == arena_) { + extension->message_value = message; + } else if (message_arena == NULL) { + extension->message_value = message; + arena_->Own(message); // not NULL because not equal to message_arena + } else { + extension->message_value = message->New(arena_); + extension->message_value->CheckTypeAndMergeFrom(*message); + } + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE); + if (extension->is_lazy) { + extension->lazymessage_value->SetAllocatedMessage(message); + } else { + if (arena_ == NULL) { + delete extension->message_value; + } + if (message_arena == arena_) { + extension->message_value = message; + } else if (message_arena == NULL) { + extension->message_value = message; + arena_->Own(message); // not NULL because not equal to message_arena + } else { + extension->message_value = message->New(arena_); + extension->message_value->CheckTypeAndMergeFrom(*message); + } + } + } + extension->is_cleared = false; +} + +void ExtensionSet::UnsafeArenaSetAllocatedMessage( + int number, FieldType type, const FieldDescriptor* descriptor, + MessageLite* message) { + if (message == NULL) { + ClearExtension(number); + return; + } + Extension* extension; + if (MaybeNewExtension(number, descriptor, &extension)) { + extension->type = type; + GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE); + extension->is_repeated = false; + extension->is_lazy = false; + extension->message_value = message; + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE); + if (extension->is_lazy) { + extension->lazymessage_value->UnsafeArenaSetAllocatedMessage(message); + } else { + if (arena_ == NULL) { + delete extension->message_value; + } + extension->message_value = message; + } + } + extension->is_cleared = false; +} + +MessageLite* ExtensionSet::ReleaseMessage(int number, + const MessageLite& prototype) { + Extension* extension = FindOrNull(number); + if (extension == NULL) { + // Not present. Return NULL. + return NULL; + } else { + GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE); + MessageLite* ret = NULL; + if (extension->is_lazy) { + ret = extension->lazymessage_value->ReleaseMessage(prototype); + if (arena_ == NULL) { + delete extension->lazymessage_value; + } + } else { + if (arena_ == NULL) { + ret = extension->message_value; + } else { + // ReleaseMessage() always returns a heap-allocated message, and we are + // on an arena, so we need to make a copy of this message to return. + ret = extension->message_value->New(); + ret->CheckTypeAndMergeFrom(*extension->message_value); + } + } + Erase(number); + return ret; + } +} + +MessageLite* ExtensionSet::UnsafeArenaReleaseMessage( + int number, const MessageLite& prototype) { + Extension* extension = FindOrNull(number); + if (extension == NULL) { + // Not present. Return NULL. 
+    return NULL;
+  } else {
+    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE);
+    MessageLite* ret = NULL;
+    if (extension->is_lazy) {
+      ret = extension->lazymessage_value->UnsafeArenaReleaseMessage(prototype);
+      if (arena_ == NULL) {
+        delete extension->lazymessage_value;
+      }
+    } else {
+      ret = extension->message_value;
+    }
+    Erase(number);
+    return ret;
+  }
+}
+
+// Defined in extension_set_heavy.cc.
+// MessageLite* ExtensionSet::ReleaseMessage(const FieldDescriptor* descriptor,
+//                                           MessageFactory* factory);
+
+const MessageLite& ExtensionSet::GetRepeatedMessage(
+    int number, int index) const {
+  const Extension* extension = FindOrNull(number);
+  GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
+  GOOGLE_DCHECK_TYPE(*extension, REPEATED, MESSAGE);
+  return extension->repeated_message_value->Get(index);
+}
+
+MessageLite* ExtensionSet::MutableRepeatedMessage(int number, int index) {
+  Extension* extension = FindOrNull(number);
+  GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
+  GOOGLE_DCHECK_TYPE(*extension, REPEATED, MESSAGE);
+  return extension->repeated_message_value->Mutable(index);
+}
+
+MessageLite* ExtensionSet::AddMessage(int number, FieldType type,
+                                      const MessageLite& prototype,
+                                      const FieldDescriptor* descriptor) {
+  Extension* extension;
+  if (MaybeNewExtension(number, descriptor, &extension)) {
+    extension->type = type;
+    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE);
+    extension->is_repeated = true;
+    extension->repeated_message_value =
+        Arena::CreateMessage<RepeatedPtrField<MessageLite> >(arena_);
+  } else {
+    GOOGLE_DCHECK_TYPE(*extension, REPEATED, MESSAGE);
+  }
+
+  // RepeatedPtrField<MessageLite> does not know how to Add() since it cannot
+  // allocate an abstract object, so we have to be tricky.
+  MessageLite* result =
+      reinterpret_cast<::google::protobuf::internal::RepeatedPtrFieldBase*>(
+          extension->repeated_message_value)
+          ->AddFromCleared<GenericTypeHandler<MessageLite> >();
+  if (result == NULL) {
+    result = prototype.New(arena_);
+    extension->repeated_message_value->AddAllocated(result);
+  }
+  return result;
+}
+
+// Defined in extension_set_heavy.cc.
+// MessageLite* ExtensionSet::AddMessage(int number, FieldType type, +// const Descriptor* message_type, +// MessageFactory* factory) + +#undef GOOGLE_DCHECK_TYPE + +void ExtensionSet::RemoveLast(int number) { + Extension* extension = FindOrNull(number); + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; + GOOGLE_DCHECK(extension->is_repeated); + + switch(cpp_type(extension->type)) { + case WireFormatLite::CPPTYPE_INT32: + extension->repeated_int32_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_INT64: + extension->repeated_int64_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_UINT32: + extension->repeated_uint32_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_UINT64: + extension->repeated_uint64_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_FLOAT: + extension->repeated_float_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_DOUBLE: + extension->repeated_double_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_BOOL: + extension->repeated_bool_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_ENUM: + extension->repeated_enum_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_STRING: + extension->repeated_string_value->RemoveLast(); + break; + case WireFormatLite::CPPTYPE_MESSAGE: + extension->repeated_message_value->RemoveLast(); + break; + } +} + +MessageLite* ExtensionSet::ReleaseLast(int number) { + Extension* extension = FindOrNull(number); + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; + GOOGLE_DCHECK(extension->is_repeated); + GOOGLE_DCHECK(cpp_type(extension->type) == WireFormatLite::CPPTYPE_MESSAGE); + return extension->repeated_message_value->ReleaseLast(); +} + +void ExtensionSet::SwapElements(int number, int index1, int index2) { + Extension* extension = FindOrNull(number); + GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; + GOOGLE_DCHECK(extension->is_repeated); + + switch(cpp_type(extension->type)) { + case WireFormatLite::CPPTYPE_INT32: + extension->repeated_int32_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_INT64: + extension->repeated_int64_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_UINT32: + extension->repeated_uint32_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_UINT64: + extension->repeated_uint64_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_FLOAT: + extension->repeated_float_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_DOUBLE: + extension->repeated_double_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_BOOL: + extension->repeated_bool_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_ENUM: + extension->repeated_enum_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_STRING: + extension->repeated_string_value->SwapElements(index1, index2); + break; + case WireFormatLite::CPPTYPE_MESSAGE: + extension->repeated_message_value->SwapElements(index1, index2); + break; + } +} + +// =================================================================== + +void ExtensionSet::Clear() { + ForEach([](int /* number */, Extension& ext) { ext.Clear(); }); +} + +namespace { +// Computes the size of a std::set_union without constructing the union. 
+template +size_t SizeOfUnion(ItX it_xs, ItX end_xs, ItY it_ys, ItY end_ys) { + size_t result = 0; + while (it_xs != end_xs && it_ys != end_ys) { + ++result; + if (it_xs->first < it_ys->first) { + ++it_xs; + } else if (it_xs->first == it_ys->first) { + ++it_xs; + ++it_ys; + } else { + ++it_ys; + } + } + result += std::distance(it_xs, end_xs); + result += std::distance(it_ys, end_ys); + return result; +} +} // namespace + +void ExtensionSet::MergeFrom(const ExtensionSet& other) { + if (GOOGLE_PREDICT_TRUE(!is_large())) { + if (GOOGLE_PREDICT_TRUE(!other.is_large())) { + GrowCapacity(SizeOfUnion(flat_begin(), flat_end(), other.flat_begin(), + other.flat_end())); + } else { + GrowCapacity(SizeOfUnion(flat_begin(), flat_end(), + other.map_.large->begin(), + other.map_.large->end())); + } + } + other.ForEach([this](int number, const Extension& ext) { + this->InternalExtensionMergeFrom(number, ext); + }); +} + +void ExtensionSet::InternalExtensionMergeFrom( + int number, const Extension& other_extension) { + if (other_extension.is_repeated) { + Extension* extension; + bool is_new = MaybeNewExtension(number, other_extension.descriptor, + &extension); + if (is_new) { + // Extension did not already exist in set. + extension->type = other_extension.type; + extension->is_packed = other_extension.is_packed; + extension->is_repeated = true; + } else { + GOOGLE_DCHECK_EQ(extension->type, other_extension.type); + GOOGLE_DCHECK_EQ(extension->is_packed, other_extension.is_packed); + GOOGLE_DCHECK(extension->is_repeated); + } + + switch (cpp_type(other_extension.type)) { +#define HANDLE_TYPE(UPPERCASE, LOWERCASE, REPEATED_TYPE) \ + case WireFormatLite::CPPTYPE_##UPPERCASE: \ + if (is_new) { \ + extension->repeated_##LOWERCASE##_value = \ + Arena::CreateMessage(arena_); \ + } \ + extension->repeated_##LOWERCASE##_value->MergeFrom( \ + *other_extension.repeated_##LOWERCASE##_value); \ + break; + + HANDLE_TYPE( INT32, int32, RepeatedField < int32>); + HANDLE_TYPE( INT64, int64, RepeatedField < int64>); + HANDLE_TYPE( UINT32, uint32, RepeatedField < uint32>); + HANDLE_TYPE( UINT64, uint64, RepeatedField < uint64>); + HANDLE_TYPE( FLOAT, float, RepeatedField < float>); + HANDLE_TYPE( DOUBLE, double, RepeatedField < double>); + HANDLE_TYPE( BOOL, bool, RepeatedField < bool>); + HANDLE_TYPE( ENUM, enum, RepeatedField < int>); + HANDLE_TYPE( STRING, string, RepeatedPtrField< string>); +#undef HANDLE_TYPE + + case WireFormatLite::CPPTYPE_MESSAGE: + if (is_new) { + extension->repeated_message_value = + Arena::CreateMessage >(arena_); + } + // We can't call RepeatedPtrField::MergeFrom() because + // it would attempt to allocate new objects. 
+ RepeatedPtrField* other_repeated_message = + other_extension.repeated_message_value; + for (int i = 0; i < other_repeated_message->size(); i++) { + const MessageLite& other_message = other_repeated_message->Get(i); + MessageLite* target = + reinterpret_cast<::google::protobuf::internal::RepeatedPtrFieldBase*>( + extension->repeated_message_value) + ->AddFromCleared >(); + if (target == NULL) { + target = other_message.New(arena_); + extension->repeated_message_value->AddAllocated(target); + } + target->CheckTypeAndMergeFrom(other_message); + } + break; + } + } else { + if (!other_extension.is_cleared) { + switch (cpp_type(other_extension.type)) { +#define HANDLE_TYPE(UPPERCASE, LOWERCASE, CAMELCASE) \ + case WireFormatLite::CPPTYPE_##UPPERCASE: \ + Set##CAMELCASE(number, other_extension.type, \ + other_extension.LOWERCASE##_value, \ + other_extension.descriptor); \ + break; + + HANDLE_TYPE( INT32, int32, Int32); + HANDLE_TYPE( INT64, int64, Int64); + HANDLE_TYPE(UINT32, uint32, UInt32); + HANDLE_TYPE(UINT64, uint64, UInt64); + HANDLE_TYPE( FLOAT, float, Float); + HANDLE_TYPE(DOUBLE, double, Double); + HANDLE_TYPE( BOOL, bool, Bool); + HANDLE_TYPE( ENUM, enum, Enum); +#undef HANDLE_TYPE + case WireFormatLite::CPPTYPE_STRING: + SetString(number, other_extension.type, + *other_extension.string_value, + other_extension.descriptor); + break; + case WireFormatLite::CPPTYPE_MESSAGE: { + Extension* extension; + bool is_new = MaybeNewExtension(number, + other_extension.descriptor, + &extension); + if (is_new) { + extension->type = other_extension.type; + extension->is_packed = other_extension.is_packed; + extension->is_repeated = false; + if (other_extension.is_lazy) { + extension->is_lazy = true; + extension->lazymessage_value = + other_extension.lazymessage_value->New(arena_); + extension->lazymessage_value->MergeFrom( + *other_extension.lazymessage_value); + } else { + extension->is_lazy = false; + extension->message_value = + other_extension.message_value->New(arena_); + extension->message_value->CheckTypeAndMergeFrom( + *other_extension.message_value); + } + } else { + GOOGLE_DCHECK_EQ(extension->type, other_extension.type); + GOOGLE_DCHECK_EQ(extension->is_packed,other_extension.is_packed); + GOOGLE_DCHECK(!extension->is_repeated); + if (other_extension.is_lazy) { + if (extension->is_lazy) { + extension->lazymessage_value->MergeFrom( + *other_extension.lazymessage_value); + } else { + extension->message_value->CheckTypeAndMergeFrom( + other_extension.lazymessage_value->GetMessage( + *extension->message_value)); + } + } else { + if (extension->is_lazy) { + extension->lazymessage_value->MutableMessage( + *other_extension.message_value)->CheckTypeAndMergeFrom( + *other_extension.message_value); + } else { + extension->message_value->CheckTypeAndMergeFrom( + *other_extension.message_value); + } + } + } + extension->is_cleared = false; + break; + } + } + } + } +} + +void ExtensionSet::Swap(ExtensionSet* x) { + if (GetArenaNoVirtual() == x->GetArenaNoVirtual()) { + using std::swap; + swap(flat_capacity_, x->flat_capacity_); + swap(flat_size_, x->flat_size_); + swap(map_, x->map_); + } else { + // TODO(cfallin, rohananil): We maybe able to optimize a case where we are + // swapping from heap to arena-allocated extension set, by just Own()'ing + // the extensions. 
+ ExtensionSet extension_set; + extension_set.MergeFrom(*x); + x->Clear(); + x->MergeFrom(*this); + Clear(); + MergeFrom(extension_set); + } +} + +void ExtensionSet::SwapExtension(ExtensionSet* other, + int number) { + if (this == other) return; + Extension* this_ext = FindOrNull(number); + Extension* other_ext = other->FindOrNull(number); + + if (this_ext == NULL && other_ext == NULL) { + return; + } + + if (this_ext != NULL && other_ext != NULL) { + if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) { + using std::swap; + swap(*this_ext, *other_ext); + } else { + // TODO(cfallin, rohananil): We could further optimize these cases, + // especially avoid creation of ExtensionSet, and move MergeFrom logic + // into Extensions itself (which takes arena as an argument). + // We do it this way to reuse the copy-across-arenas logic already + // implemented in ExtensionSet's MergeFrom. + ExtensionSet temp; + temp.InternalExtensionMergeFrom(number, *other_ext); + Extension* temp_ext = temp.FindOrNull(number); + other_ext->Clear(); + other->InternalExtensionMergeFrom(number, *this_ext); + this_ext->Clear(); + InternalExtensionMergeFrom(number, *temp_ext); + } + return; + } + + if (this_ext == NULL) { + if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) { + *Insert(number).first = *other_ext; + } else { + InternalExtensionMergeFrom(number, *other_ext); + } + other->Erase(number); + return; + } + + if (other_ext == NULL) { + if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) { + *other->Insert(number).first = *this_ext; + } else { + other->InternalExtensionMergeFrom(number, *this_ext); + } + Erase(number); + return; + } +} + +bool ExtensionSet::IsInitialized() const { + // Extensions are never required. However, we need to check that all + // embedded messages are initialized. + if (GOOGLE_PREDICT_FALSE(is_large())) { + for (const auto& kv : *map_.large) { + if (!kv.second.IsInitialized()) return false; + } + return true; + } + for (const KeyValue* it = flat_begin(); it != flat_end(); ++it) { + if (!it->second.IsInitialized()) return false; + } + return true; +} + +bool ExtensionSet::FindExtensionInfoFromTag( + uint32 tag, ExtensionFinder* extension_finder, int* field_number, + ExtensionInfo* extension, bool* was_packed_on_wire) { + *field_number = WireFormatLite::GetTagFieldNumber(tag); + WireFormatLite::WireType wire_type = WireFormatLite::GetTagWireType(tag); + return FindExtensionInfoFromFieldNumber(wire_type, *field_number, + extension_finder, extension, + was_packed_on_wire); +} + +bool ExtensionSet::FindExtensionInfoFromFieldNumber( + int wire_type, int field_number, ExtensionFinder* extension_finder, + ExtensionInfo* extension, bool* was_packed_on_wire) { + if (!extension_finder->Find(field_number, extension)) { + return false; + } + + WireFormatLite::WireType expected_wire_type = + WireFormatLite::WireTypeForFieldType(real_type(extension->type)); + + // Check if this is a packed field. + *was_packed_on_wire = false; + if (extension->is_repeated && + wire_type == WireFormatLite::WIRETYPE_LENGTH_DELIMITED && + is_packable(expected_wire_type)) { + *was_packed_on_wire = true; + return true; + } + // Otherwise the wire type must match. 
+ return expected_wire_type == wire_type; +} + +bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input, + ExtensionFinder* extension_finder, + FieldSkipper* field_skipper) { + int number; + bool was_packed_on_wire; + ExtensionInfo extension; + if (!FindExtensionInfoFromTag( + tag, extension_finder, &number, &extension, &was_packed_on_wire)) { + return field_skipper->SkipField(input, tag); + } else { + return ParseFieldWithExtensionInfo( + number, was_packed_on_wire, extension, input, field_skipper); + } +} + +bool ExtensionSet::ParseFieldWithExtensionInfo( + int number, bool was_packed_on_wire, const ExtensionInfo& extension, + io::CodedInputStream* input, + FieldSkipper* field_skipper) { + // Explicitly not read extension.is_packed, instead check whether the field + // was encoded in packed form on the wire. + if (was_packed_on_wire) { + uint32 size; + if (!input->ReadVarint32(&size)) return false; + io::CodedInputStream::Limit limit = input->PushLimit(size); + + switch (extension.type) { +#define HANDLE_TYPE(UPPERCASE, CPP_CAMELCASE, CPP_LOWERCASE) \ + case WireFormatLite::TYPE_##UPPERCASE: \ + while (input->BytesUntilLimit() > 0) { \ + CPP_LOWERCASE value; \ + if (!WireFormatLite::ReadPrimitive< \ + CPP_LOWERCASE, WireFormatLite::TYPE_##UPPERCASE>( \ + input, &value)) return false; \ + Add##CPP_CAMELCASE(number, WireFormatLite::TYPE_##UPPERCASE, \ + extension.is_packed, value, \ + extension.descriptor); \ + } \ + break + + HANDLE_TYPE( INT32, Int32, int32); + HANDLE_TYPE( INT64, Int64, int64); + HANDLE_TYPE( UINT32, UInt32, uint32); + HANDLE_TYPE( UINT64, UInt64, uint64); + HANDLE_TYPE( SINT32, Int32, int32); + HANDLE_TYPE( SINT64, Int64, int64); + HANDLE_TYPE( FIXED32, UInt32, uint32); + HANDLE_TYPE( FIXED64, UInt64, uint64); + HANDLE_TYPE(SFIXED32, Int32, int32); + HANDLE_TYPE(SFIXED64, Int64, int64); + HANDLE_TYPE( FLOAT, Float, float); + HANDLE_TYPE( DOUBLE, Double, double); + HANDLE_TYPE( BOOL, Bool, bool); +#undef HANDLE_TYPE + + case WireFormatLite::TYPE_ENUM: + while (input->BytesUntilLimit() > 0) { + int value; + if (!WireFormatLite::ReadPrimitive( + input, &value)) return false; + if (extension.enum_validity_check.func( + extension.enum_validity_check.arg, value)) { + AddEnum(number, WireFormatLite::TYPE_ENUM, extension.is_packed, + value, extension.descriptor); + } else { + // Invalid value. Treat as unknown. 
+ field_skipper->SkipUnknownEnum(number, value); + } + } + break; + + case WireFormatLite::TYPE_STRING: + case WireFormatLite::TYPE_BYTES: + case WireFormatLite::TYPE_GROUP: + case WireFormatLite::TYPE_MESSAGE: + GOOGLE_LOG(FATAL) << "Non-primitive types can't be packed."; + break; + } + + input->PopLimit(limit); + } else { + switch (extension.type) { +#define HANDLE_TYPE(UPPERCASE, CPP_CAMELCASE, CPP_LOWERCASE) \ + case WireFormatLite::TYPE_##UPPERCASE: { \ + CPP_LOWERCASE value; \ + if (!WireFormatLite::ReadPrimitive< \ + CPP_LOWERCASE, WireFormatLite::TYPE_##UPPERCASE>( \ + input, &value)) return false; \ + if (extension.is_repeated) { \ + Add##CPP_CAMELCASE(number, WireFormatLite::TYPE_##UPPERCASE, \ + extension.is_packed, value, \ + extension.descriptor); \ + } else { \ + Set##CPP_CAMELCASE(number, WireFormatLite::TYPE_##UPPERCASE, value, \ + extension.descriptor); \ + } \ + } break + + HANDLE_TYPE( INT32, Int32, int32); + HANDLE_TYPE( INT64, Int64, int64); + HANDLE_TYPE( UINT32, UInt32, uint32); + HANDLE_TYPE( UINT64, UInt64, uint64); + HANDLE_TYPE( SINT32, Int32, int32); + HANDLE_TYPE( SINT64, Int64, int64); + HANDLE_TYPE( FIXED32, UInt32, uint32); + HANDLE_TYPE( FIXED64, UInt64, uint64); + HANDLE_TYPE(SFIXED32, Int32, int32); + HANDLE_TYPE(SFIXED64, Int64, int64); + HANDLE_TYPE( FLOAT, Float, float); + HANDLE_TYPE( DOUBLE, Double, double); + HANDLE_TYPE( BOOL, Bool, bool); +#undef HANDLE_TYPE + + case WireFormatLite::TYPE_ENUM: { + int value; + if (!WireFormatLite::ReadPrimitive( + input, &value)) return false; + + if (!extension.enum_validity_check.func( + extension.enum_validity_check.arg, value)) { + // Invalid value. Treat as unknown. + field_skipper->SkipUnknownEnum(number, value); + } else if (extension.is_repeated) { + AddEnum(number, WireFormatLite::TYPE_ENUM, extension.is_packed, value, + extension.descriptor); + } else { + SetEnum(number, WireFormatLite::TYPE_ENUM, value, + extension.descriptor); + } + break; + } + + case WireFormatLite::TYPE_STRING: { + string* value = extension.is_repeated ? + AddString(number, WireFormatLite::TYPE_STRING, extension.descriptor) : + MutableString(number, WireFormatLite::TYPE_STRING, + extension.descriptor); + if (!WireFormatLite::ReadString(input, value)) return false; + break; + } + + case WireFormatLite::TYPE_BYTES: { + string* value = extension.is_repeated ? + AddString(number, WireFormatLite::TYPE_BYTES, extension.descriptor) : + MutableString(number, WireFormatLite::TYPE_BYTES, + extension.descriptor); + if (!WireFormatLite::ReadBytes(input, value)) return false; + break; + } + + case WireFormatLite::TYPE_GROUP: { + MessageLite* value = extension.is_repeated ? + AddMessage(number, WireFormatLite::TYPE_GROUP, + *extension.message_prototype, extension.descriptor) : + MutableMessage(number, WireFormatLite::TYPE_GROUP, + *extension.message_prototype, extension.descriptor); + if (!WireFormatLite::ReadGroup(number, input, value)) return false; + break; + } + + case WireFormatLite::TYPE_MESSAGE: { + MessageLite* value = extension.is_repeated ? 
+ AddMessage(number, WireFormatLite::TYPE_MESSAGE, + *extension.message_prototype, extension.descriptor) : + MutableMessage(number, WireFormatLite::TYPE_MESSAGE, + *extension.message_prototype, extension.descriptor); + if (!WireFormatLite::ReadMessage(input, value)) return false; + break; + } + } + } + + return true; +} + +bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input, + const MessageLite* containing_type) { + FieldSkipper skipper; + GeneratedExtensionFinder finder(containing_type); + return ParseField(tag, input, &finder, &skipper); +} + +bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input, + const MessageLite* containing_type, + io::CodedOutputStream* unknown_fields) { + CodedOutputStreamFieldSkipper skipper(unknown_fields); + GeneratedExtensionFinder finder(containing_type); + return ParseField(tag, input, &finder, &skipper); +} + +// Defined in extension_set_heavy.cc. +// bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input, +// const MessageLite* containing_type, +// UnknownFieldSet* unknown_fields) + +// Defined in extension_set_heavy.cc. +// bool ExtensionSet::ParseMessageSet(io::CodedInputStream* input, +// const MessageLite* containing_type, +// UnknownFieldSet* unknown_fields); + +void ExtensionSet::SerializeWithCachedSizes( + int start_field_number, int end_field_number, + io::CodedOutputStream* output) const { + if (GOOGLE_PREDICT_FALSE(is_large())) { + const auto& end = map_.large->end(); + for (auto it = map_.large->lower_bound(start_field_number); + it != end && it->first < end_field_number; ++it) { + it->second.SerializeFieldWithCachedSizes(it->first, output); + } + return; + } + const KeyValue* end = flat_end(); + for (const KeyValue* it = std::lower_bound( + flat_begin(), end, start_field_number, KeyValue::FirstComparator()); + it != end && it->first < end_field_number; ++it) { + it->second.SerializeFieldWithCachedSizes(it->first, output); + } +} + +size_t ExtensionSet::ByteSize() const { + size_t total_size = 0; + ForEach([&total_size](int number, const Extension& ext) { + total_size += ext.ByteSize(number); + }); + return total_size; +} + +// Defined in extension_set_heavy.cc. +// int ExtensionSet::SpaceUsedExcludingSelf() const + +bool ExtensionSet::MaybeNewExtension(int number, + const FieldDescriptor* descriptor, + Extension** result) { + bool extension_is_new = false; + std::tie(*result, extension_is_new) = Insert(number); + (*result)->descriptor = descriptor; + return extension_is_new; +} + +// =================================================================== +// Methods of ExtensionSet::Extension + +void ExtensionSet::Extension::Clear() { + if (is_repeated) { + switch (cpp_type(type)) { +#define HANDLE_TYPE(UPPERCASE, LOWERCASE) \ + case WireFormatLite::CPPTYPE_##UPPERCASE: \ + repeated_##LOWERCASE##_value->Clear(); \ + break + + HANDLE_TYPE( INT32, int32); + HANDLE_TYPE( INT64, int64); + HANDLE_TYPE( UINT32, uint32); + HANDLE_TYPE( UINT64, uint64); + HANDLE_TYPE( FLOAT, float); + HANDLE_TYPE( DOUBLE, double); + HANDLE_TYPE( BOOL, bool); + HANDLE_TYPE( ENUM, enum); + HANDLE_TYPE( STRING, string); + HANDLE_TYPE(MESSAGE, message); +#undef HANDLE_TYPE + } + } else { + if (!is_cleared) { + switch (cpp_type(type)) { + case WireFormatLite::CPPTYPE_STRING: + string_value->clear(); + break; + case WireFormatLite::CPPTYPE_MESSAGE: + if (is_lazy) { + lazymessage_value->Clear(); + } else { + message_value->Clear(); + } + break; + default: + // No need to do anything. 
Get*() will return the default value + // as long as is_cleared is true and Set*() will overwrite the + // previous value. + break; + } + + is_cleared = true; + } + } +} + +void ExtensionSet::Extension::SerializeFieldWithCachedSizes( + int number, + io::CodedOutputStream* output) const { + if (is_repeated) { + if (is_packed) { + if (cached_size == 0) return; + + WireFormatLite::WriteTag(number, + WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output); + output->WriteVarint32(cached_size); + + switch (real_type(type)) { +#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE) \ + case WireFormatLite::TYPE_##UPPERCASE: \ + for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) { \ + WireFormatLite::Write##CAMELCASE##NoTag( \ + repeated_##LOWERCASE##_value->Get(i), output); \ + } \ + break + + HANDLE_TYPE( INT32, Int32, int32); + HANDLE_TYPE( INT64, Int64, int64); + HANDLE_TYPE( UINT32, UInt32, uint32); + HANDLE_TYPE( UINT64, UInt64, uint64); + HANDLE_TYPE( SINT32, SInt32, int32); + HANDLE_TYPE( SINT64, SInt64, int64); + HANDLE_TYPE( FIXED32, Fixed32, uint32); + HANDLE_TYPE( FIXED64, Fixed64, uint64); + HANDLE_TYPE(SFIXED32, SFixed32, int32); + HANDLE_TYPE(SFIXED64, SFixed64, int64); + HANDLE_TYPE( FLOAT, Float, float); + HANDLE_TYPE( DOUBLE, Double, double); + HANDLE_TYPE( BOOL, Bool, bool); + HANDLE_TYPE( ENUM, Enum, enum); +#undef HANDLE_TYPE + + case WireFormatLite::TYPE_STRING: + case WireFormatLite::TYPE_BYTES: + case WireFormatLite::TYPE_GROUP: + case WireFormatLite::TYPE_MESSAGE: + GOOGLE_LOG(FATAL) << "Non-primitive types can't be packed."; + break; + } + } else { + switch (real_type(type)) { +#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE) \ + case WireFormatLite::TYPE_##UPPERCASE: \ + for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) { \ + WireFormatLite::Write##CAMELCASE(number, \ + repeated_##LOWERCASE##_value->Get(i), output); \ + } \ + break + + HANDLE_TYPE( INT32, Int32, int32); + HANDLE_TYPE( INT64, Int64, int64); + HANDLE_TYPE( UINT32, UInt32, uint32); + HANDLE_TYPE( UINT64, UInt64, uint64); + HANDLE_TYPE( SINT32, SInt32, int32); + HANDLE_TYPE( SINT64, SInt64, int64); + HANDLE_TYPE( FIXED32, Fixed32, uint32); + HANDLE_TYPE( FIXED64, Fixed64, uint64); + HANDLE_TYPE(SFIXED32, SFixed32, int32); + HANDLE_TYPE(SFIXED64, SFixed64, int64); + HANDLE_TYPE( FLOAT, Float, float); + HANDLE_TYPE( DOUBLE, Double, double); + HANDLE_TYPE( BOOL, Bool, bool); + HANDLE_TYPE( STRING, String, string); + HANDLE_TYPE( BYTES, Bytes, string); + HANDLE_TYPE( ENUM, Enum, enum); + HANDLE_TYPE( GROUP, Group, message); + HANDLE_TYPE( MESSAGE, Message, message); +#undef HANDLE_TYPE + } + } + } else if (!is_cleared) { + switch (real_type(type)) { +#define HANDLE_TYPE(UPPERCASE, CAMELCASE, VALUE) \ + case WireFormatLite::TYPE_##UPPERCASE: \ + WireFormatLite::Write##CAMELCASE(number, VALUE, output); \ + break + + HANDLE_TYPE( INT32, Int32, int32_value); + HANDLE_TYPE( INT64, Int64, int64_value); + HANDLE_TYPE( UINT32, UInt32, uint32_value); + HANDLE_TYPE( UINT64, UInt64, uint64_value); + HANDLE_TYPE( SINT32, SInt32, int32_value); + HANDLE_TYPE( SINT64, SInt64, int64_value); + HANDLE_TYPE( FIXED32, Fixed32, uint32_value); + HANDLE_TYPE( FIXED64, Fixed64, uint64_value); + HANDLE_TYPE(SFIXED32, SFixed32, int32_value); + HANDLE_TYPE(SFIXED64, SFixed64, int64_value); + HANDLE_TYPE( FLOAT, Float, float_value); + HANDLE_TYPE( DOUBLE, Double, double_value); + HANDLE_TYPE( BOOL, Bool, bool_value); + HANDLE_TYPE( STRING, String, *string_value); + HANDLE_TYPE( BYTES, Bytes, *string_value); + 
HANDLE_TYPE( ENUM, Enum, enum_value); + HANDLE_TYPE( GROUP, Group, *message_value); +#undef HANDLE_TYPE + case WireFormatLite::TYPE_MESSAGE: + if (is_lazy) { + lazymessage_value->WriteMessage(number, output); + } else { + WireFormatLite::WriteMessage(number, *message_value, output); + } + break; + } + } +} + +size_t ExtensionSet::Extension::ByteSize(int number) const { + size_t result = 0; + + if (is_repeated) { + if (is_packed) { + switch (real_type(type)) { +#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE) \ + case WireFormatLite::TYPE_##UPPERCASE: \ + for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) { \ + result += WireFormatLite::CAMELCASE##Size( \ + repeated_##LOWERCASE##_value->Get(i)); \ + } \ + break + + HANDLE_TYPE( INT32, Int32, int32); + HANDLE_TYPE( INT64, Int64, int64); + HANDLE_TYPE( UINT32, UInt32, uint32); + HANDLE_TYPE( UINT64, UInt64, uint64); + HANDLE_TYPE( SINT32, SInt32, int32); + HANDLE_TYPE( SINT64, SInt64, int64); + HANDLE_TYPE( ENUM, Enum, enum); +#undef HANDLE_TYPE + + // Stuff with fixed size. +#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE) \ + case WireFormatLite::TYPE_##UPPERCASE: \ + result += WireFormatLite::k##CAMELCASE##Size * \ + FromIntSize(repeated_##LOWERCASE##_value->size()); \ + break + HANDLE_TYPE( FIXED32, Fixed32, uint32); + HANDLE_TYPE( FIXED64, Fixed64, uint64); + HANDLE_TYPE(SFIXED32, SFixed32, int32); + HANDLE_TYPE(SFIXED64, SFixed64, int64); + HANDLE_TYPE( FLOAT, Float, float); + HANDLE_TYPE( DOUBLE, Double, double); + HANDLE_TYPE( BOOL, Bool, bool); +#undef HANDLE_TYPE + + case WireFormatLite::TYPE_STRING: + case WireFormatLite::TYPE_BYTES: + case WireFormatLite::TYPE_GROUP: + case WireFormatLite::TYPE_MESSAGE: + GOOGLE_LOG(FATAL) << "Non-primitive types can't be packed."; + break; + } + + cached_size = ToCachedSize(result); + if (result > 0) { + result += io::CodedOutputStream::VarintSize32(result); + result += io::CodedOutputStream::VarintSize32( + WireFormatLite::MakeTag(number, + WireFormatLite::WIRETYPE_LENGTH_DELIMITED)); + } + } else { + size_t tag_size = WireFormatLite::TagSize(number, real_type(type)); + + switch (real_type(type)) { +#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE) \ + case WireFormatLite::TYPE_##UPPERCASE: \ + result += tag_size * \ + FromIntSize(repeated_##LOWERCASE##_value->size()); \ + for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) { \ + result += WireFormatLite::CAMELCASE##Size( \ + repeated_##LOWERCASE##_value->Get(i)); \ + } \ + break + + HANDLE_TYPE( INT32, Int32, int32); + HANDLE_TYPE( INT64, Int64, int64); + HANDLE_TYPE( UINT32, UInt32, uint32); + HANDLE_TYPE( UINT64, UInt64, uint64); + HANDLE_TYPE( SINT32, SInt32, int32); + HANDLE_TYPE( SINT64, SInt64, int64); + HANDLE_TYPE( STRING, String, string); + HANDLE_TYPE( BYTES, Bytes, string); + HANDLE_TYPE( ENUM, Enum, enum); + HANDLE_TYPE( GROUP, Group, message); + HANDLE_TYPE( MESSAGE, Message, message); +#undef HANDLE_TYPE + + // Stuff with fixed size. 
+#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                  \
+        case WireFormatLite::TYPE_##UPPERCASE:                        \
+          result += (tag_size + WireFormatLite::k##CAMELCASE##Size) * \
+                    FromIntSize(repeated_##LOWERCASE##_value->size());\
+          break
+        HANDLE_TYPE( FIXED32,  Fixed32, uint32);
+        HANDLE_TYPE( FIXED64,  Fixed64, uint64);
+        HANDLE_TYPE(SFIXED32, SFixed32,  int32);
+        HANDLE_TYPE(SFIXED64, SFixed64,  int64);
+        HANDLE_TYPE(   FLOAT,    Float,  float);
+        HANDLE_TYPE(  DOUBLE,   Double, double);
+        HANDLE_TYPE(    BOOL,     Bool,   bool);
+#undef HANDLE_TYPE
+      }
+    }
+  } else if (!is_cleared) {
+    result += WireFormatLite::TagSize(number, real_type(type));
+    switch (real_type(type)) {
+#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)           \
+      case WireFormatLite::TYPE_##UPPERCASE:                   \
+        result += WireFormatLite::CAMELCASE##Size(LOWERCASE);  \
+        break
+
+      HANDLE_TYPE(   INT32,  Int32,   int32_value);
+      HANDLE_TYPE(   INT64,  Int64,   int64_value);
+      HANDLE_TYPE(  UINT32, UInt32,  uint32_value);
+      HANDLE_TYPE(  UINT64, UInt64,  uint64_value);
+      HANDLE_TYPE(  SINT32, SInt32,   int32_value);
+      HANDLE_TYPE(  SINT64, SInt64,   int64_value);
+      HANDLE_TYPE(  STRING, String,  *string_value);
+      HANDLE_TYPE(   BYTES,  Bytes,  *string_value);
+      HANDLE_TYPE(    ENUM,   Enum,    enum_value);
+      HANDLE_TYPE(   GROUP,  Group, *message_value);
+#undef HANDLE_TYPE
+      case WireFormatLite::TYPE_MESSAGE: {
+        if (is_lazy) {
+          size_t size = lazymessage_value->ByteSize();
+          result += io::CodedOutputStream::VarintSize32(size) + size;
+        } else {
+          result += WireFormatLite::MessageSize(*message_value);
+        }
+        break;
+      }
+
+      // Stuff with fixed size.
+#define HANDLE_TYPE(UPPERCASE, CAMELCASE)             \
+      case WireFormatLite::TYPE_##UPPERCASE:          \
+        result += WireFormatLite::k##CAMELCASE##Size; \
+        break
+      HANDLE_TYPE( FIXED32,  Fixed32);
+      HANDLE_TYPE( FIXED64,  Fixed64);
+      HANDLE_TYPE(SFIXED32, SFixed32);
+      HANDLE_TYPE(SFIXED64, SFixed64);
+      HANDLE_TYPE(   FLOAT,    Float);
+      HANDLE_TYPE(  DOUBLE,   Double);
+      HANDLE_TYPE(    BOOL,     Bool);
+#undef HANDLE_TYPE
+    }
+  }
+
+  return result;
+}
+
+int ExtensionSet::Extension::GetSize() const {
+  GOOGLE_DCHECK(is_repeated);
+  switch (cpp_type(type)) {
+#define HANDLE_TYPE(UPPERCASE, LOWERCASE)     \
+    case WireFormatLite::CPPTYPE_##UPPERCASE: \
+      return repeated_##LOWERCASE##_value->size()
+
+    HANDLE_TYPE(  INT32,   int32);
+    HANDLE_TYPE(  INT64,   int64);
+    HANDLE_TYPE( UINT32,  uint32);
+    HANDLE_TYPE( UINT64,  uint64);
+    HANDLE_TYPE(  FLOAT,   float);
+    HANDLE_TYPE( DOUBLE,  double);
+    HANDLE_TYPE(   BOOL,    bool);
+    HANDLE_TYPE(   ENUM,    enum);
+    HANDLE_TYPE( STRING,  string);
+    HANDLE_TYPE(MESSAGE, message);
+#undef HANDLE_TYPE
+  }
+
+  GOOGLE_LOG(FATAL) << "Can't get here.";
+  return 0;
+}
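+// A self-contained sketch of the packed-size arithmetic ByteSize() uses above:
+// payload bytes, plus a varint length prefix, plus one LENGTH_DELIMITED tag.
+// The Example* helper names are illustrative, not part of this file.
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+static size_t ExampleVarintSize(uint32_t v) {  // what VarintSize32 computes
+  size_t n = 1;
+  while (v >= 0x80) { v >>= 7; ++n; }
+  return n;
+}
+
+static size_t ExamplePackedVarintFieldSize(int field_number,
+                                           const std::vector<uint32_t>& values) {
+  size_t payload = 0;
+  for (uint32_t v : values) payload += ExampleVarintSize(v);
+  if (payload == 0) return 0;  // empty packed fields are skipped entirely
+  const uint32_t tag = (static_cast<uint32_t>(field_number) << 3) | 2;
+  return ExampleVarintSize(tag) +
+         ExampleVarintSize(static_cast<uint32_t>(payload)) + payload;
+}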
+// This function deletes all allocated objects. This function should be only
+// called if the Extension was created without an arena.
+void ExtensionSet::Extension::Free() {
+  if (is_repeated) {
+    switch (cpp_type(type)) {
+#define HANDLE_TYPE(UPPERCASE, LOWERCASE)       \
+      case WireFormatLite::CPPTYPE_##UPPERCASE: \
+        delete repeated_##LOWERCASE##_value;    \
+        break
+
+      HANDLE_TYPE(  INT32,   int32);
+      HANDLE_TYPE(  INT64,   int64);
+      HANDLE_TYPE( UINT32,  uint32);
+      HANDLE_TYPE( UINT64,  uint64);
+      HANDLE_TYPE(  FLOAT,   float);
+      HANDLE_TYPE( DOUBLE,  double);
+      HANDLE_TYPE(   BOOL,    bool);
+      HANDLE_TYPE(   ENUM,    enum);
+      HANDLE_TYPE( STRING,  string);
+      HANDLE_TYPE(MESSAGE, message);
+#undef HANDLE_TYPE
+    }
+  } else {
+    switch (cpp_type(type)) {
+      case WireFormatLite::CPPTYPE_STRING:
+        delete string_value;
+        break;
+      case WireFormatLite::CPPTYPE_MESSAGE:
+        if (is_lazy) {
+          delete lazymessage_value;
+        } else {
+          delete message_value;
+        }
+        break;
+      default:
+        break;
+    }
+  }
+}
+
+// Defined in extension_set_heavy.cc.
+// int ExtensionSet::Extension::SpaceUsedExcludingSelf() const
+
+bool ExtensionSet::Extension::IsInitialized() const {
+  if (cpp_type(type) == WireFormatLite::CPPTYPE_MESSAGE) {
+    if (is_repeated) {
+      for (int i = 0; i < repeated_message_value->size(); i++) {
+        if (!repeated_message_value->Get(i).IsInitialized()) {
+          return false;
+        }
+      }
+    } else {
+      if (!is_cleared) {
+        if (is_lazy) {
+          if (!lazymessage_value->IsInitialized()) return false;
+        } else {
+          if (!message_value->IsInitialized()) return false;
+        }
+      }
+    }
+  }
+  return true;
+}
+
+// Dummy key method to avoid weak vtable.
+void ExtensionSet::LazyMessageExtension::UnusedKeyMethod() {}
+
+const ExtensionSet::Extension* ExtensionSet::FindOrNull(int key) const {
+  if (GOOGLE_PREDICT_FALSE(is_large())) {
+    return FindOrNullInLargeMap(key);
+  }
+  const KeyValue* end = flat_end();
+  const KeyValue* it =
+      std::lower_bound(flat_begin(), end, key, KeyValue::FirstComparator());
+  if (it != end && it->first == key) {
+    return &it->second;
+  }
+  return NULL;
+}
+
+const ExtensionSet::Extension* ExtensionSet::FindOrNullInLargeMap(
+    int key) const {
+  assert(is_large());
+  LargeMap::const_iterator it = map_.large->find(key);
+  if (it != map_.large->end()) {
+    return &it->second;
+  }
+  return NULL;
+}
+
+ExtensionSet::Extension* ExtensionSet::FindOrNull(int key) {
+  if (GOOGLE_PREDICT_FALSE(is_large())) {
+    return FindOrNullInLargeMap(key);
+  }
+  KeyValue* end = flat_end();
+  KeyValue* it =
+      std::lower_bound(flat_begin(), end, key, KeyValue::FirstComparator());
+  if (it != end && it->first == key) {
+    return &it->second;
+  }
+  return NULL;
+}
+
+ExtensionSet::Extension* ExtensionSet::FindOrNullInLargeMap(int key) {
+  assert(is_large());
+  LargeMap::iterator it = map_.large->find(key);
+  if (it != map_.large->end()) {
+    return &it->second;
+  }
+  return NULL;
+}
+
+std::pair<ExtensionSet::Extension*, bool> ExtensionSet::Insert(int key) {
+  if (GOOGLE_PREDICT_FALSE(is_large())) {
+    auto maybe = map_.large->insert({key, Extension()});
+    return {&maybe.first->second, maybe.second};
+  }
+  KeyValue* end = flat_end();
+  KeyValue* it =
+      std::lower_bound(flat_begin(), end, key, KeyValue::FirstComparator());
+  if (it != end && it->first == key) {
+    return {&it->second, false};
+  }
+  if (flat_size_ < flat_capacity_) {
+    std::copy_backward(it, end, end + 1);
+    ++flat_size_;
+    it->first = key;
+    it->second = Extension();
+    return {&it->second, true};
+  }
+  GrowCapacity(flat_size_ + 1);
+  return Insert(key);
+}
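+// The flat case above is a sorted array probed with std::lower_bound. The same
+// pattern in miniature (FlatMapSketch is illustrative, not the library's type):
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+struct FlatMapSketch {
+  std::vector<std::pair<int, int> > kv;  // kept sorted by key
+
+  std::pair<int*, bool> Insert(int key) {
+    auto it = std::lower_bound(
+        kv.begin(), kv.end(), key,
+        [](const std::pair<int, int>& p, int k) { return p.first < k; });
+    if (it != kv.end() && it->first == key) return {&it->second, false};
+    it = kv.insert(it, {key, 0});  // shifts the tail, like copy_backward above
+    return {&it->second, true};
+  }
+};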
+void ExtensionSet::GrowCapacity(size_t minimum_new_capacity) {
+  if (GOOGLE_PREDICT_FALSE(is_large())) {
+    return;  // LargeMap does not have a "reserve" method.
+  }
+  if (flat_capacity_ >= minimum_new_capacity) {
+    return;
+  }
+
+  do {
+    flat_capacity_ = flat_capacity_ == 0 ? 1 : flat_capacity_ * 4;
+  } while (flat_capacity_ < minimum_new_capacity);
+
+  const KeyValue* begin = flat_begin();
+  const KeyValue* end = flat_end();
+  if (flat_capacity_ > kMaximumFlatCapacity) {
+    // Switch to LargeMap
+    map_.large = ::google::protobuf::Arena::Create<LargeMap>(arena_);
+    LargeMap::iterator hint = map_.large->begin();
+    for (const KeyValue* it = begin; it != end; ++it) {
+      hint = map_.large->insert(hint, {it->first, it->second});
+    }
+    flat_size_ = 0;
+  } else {
+    map_.flat = ::google::protobuf::Arena::CreateArray<KeyValue>(
+        arena_, flat_capacity_);
+    std::copy(begin, end, map_.flat);
+  }
+  if (arena_ == NULL) delete[] begin;
+}
+
+// static
+constexpr uint16 ExtensionSet::kMaximumFlatCapacity;
+
+void ExtensionSet::Erase(int key) {
+  if (GOOGLE_PREDICT_FALSE(is_large())) {
+    map_.large->erase(key);
+    return;
+  }
+  KeyValue* end = flat_end();
+  KeyValue* it =
+      std::lower_bound(flat_begin(), end, key, KeyValue::FirstComparator());
+  if (it != end && it->first == key) {
+    std::copy(it + 1, end, it);
+    --flat_size_;
+  }
+}
+
+// ==================================================================
+// Default repeated field instances for iterator-compatible accessors
+
+const RepeatedPrimitiveDefaults* RepeatedPrimitiveDefaults::default_instance() {
+  static auto instance = OnShutdownDelete(new RepeatedPrimitiveDefaults);
+  return instance;
+}
+
+const RepeatedStringTypeTraits::RepeatedFieldType*
+RepeatedStringTypeTraits::GetDefaultRepeatedField() {
+  static auto instance = OnShutdownDelete(new RepeatedFieldType);
+  return instance;
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_util.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_util.cc
new file mode 100644
index 0000000000000000000000000000000000000000..e02413611dc49c502f558455d5ac3c8e82f2d4a5
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_util.cc
@@ -0,0 +1,814 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+
+#include <google/protobuf/generated_message_util.h>
+
+#include <limits>
+// We're only using this as a standard way for getting the thread id.
+// We're not using any thread functionality.
+#include <thread>  // NOLINT
+#include <vector>
+
+#include <google/protobuf/io/coded_stream_inl.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
+#include <google/protobuf/arenastring.h>
+#include <google/protobuf/extension_set.h>
+#include <google/protobuf/generated_message_table_driven.h>
+#include <google/protobuf/message_lite.h>
+#include <google/protobuf/metadata_lite.h>
+#include <google/protobuf/repeated_field.h>
+#include <google/protobuf/wire_format_lite.h>
+#include <google/protobuf/wire_format_lite_inl.h>
+
+namespace google {
+
+namespace protobuf {
+namespace internal {
+
+void DestroyMessage(const void* message) {
+  static_cast<const MessageLite*>(message)->~MessageLite();
+}
+void DestroyString(const void* s) {
+  static_cast<const string*>(s)->~string();
+}
+
+ExplicitlyConstructed<::std::string> fixed_address_empty_string;
+
+double Infinity() {
+  return std::numeric_limits<double>::infinity();
+}
+double NaN() {
+  return std::numeric_limits<double>::quiet_NaN();
+}
+
+static bool InitProtobufDefaultsImpl() {
+  fixed_address_empty_string.DefaultConstruct();
+  OnShutdownDestroyString(fixed_address_empty_string.get_mutable());
+  return true;
+}
+
+void InitProtobufDefaults() {
+  static bool is_inited = InitProtobufDefaultsImpl();
+  (void)is_inited;
+}
+
+size_t StringSpaceUsedExcludingSelfLong(const string& str) {
+  const void* start = &str;
+  const void* end = &str + 1;
+  if (start <= str.data() && str.data() < end) {
+    // The string's data is stored inside the string object itself.
+    return 0;
+  } else {
+    return str.capacity();
+  }
+}
+
+template <typename T>
+const T& Get(const void* ptr) {
+  return *static_cast<const T*>(ptr);
+}
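+// The address-range test in StringSpaceUsedExcludingSelfLong above detects the
+// small-string optimization: an inlined buffer lies inside the string object's
+// own footprint. A standalone probe of the same idea (illustrative only):
+#include <string>
+
+static bool ExampleIsInlined(const std::string& s) {
+  const void* start = &s;
+  const void* end = &s + 1;
+  const void* data = s.data();
+  return start <= data && data < end;  // true => SSO, no extra heap charge
+}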
+// PrimitiveTypeHelper is a wrapper around the interface of WireFormatLite.
+// WireFormatLite has a very inconvenient interface with respect to template
+// meta-programming. This class wraps the different named functions into
+// a single Serialize / SerializeToArray interface.
+template <int type>
+struct PrimitiveTypeHelper;
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_BOOL> {
+  typedef bool Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteBoolNoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteBoolNoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_INT32> {
+  typedef int32 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteInt32NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteInt32NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_SINT32> {
+  typedef int32 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteSInt32NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteSInt32NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_UINT32> {
+  typedef uint32 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteUInt32NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteUInt32NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_INT64> {
+  typedef int64 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteInt64NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteInt64NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_SINT64> {
+  typedef int64 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteSInt64NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteSInt64NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_UINT64> {
+  typedef uint64 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteUInt64NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteUInt64NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED32> {
+  typedef uint32 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteFixed32NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteFixed32NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED64> {
+  typedef uint64 Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    WireFormatLite::WriteFixed64NoTag(Get<Type>(ptr), output);
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    return WireFormatLite::WriteFixed64NoTagToArray(Get<Type>(ptr), buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_ENUM>
+    : PrimitiveTypeHelper<WireFormatLite::TYPE_INT32> {};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_SFIXED32>
+    : PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED32> {
+  typedef int32 Type;
+};
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_SFIXED64>
+    : PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED64> {
+  typedef int64 Type;
+};
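+// TYPE_SINT32/TYPE_SINT64 above route to the SInt writers, which ZigZag-encode
+// before emitting a varint so small negative values stay short. The 32-bit
+// mapping in miniature (mirrors WireFormatLite::ZigZagEncode32; the Example
+// name is illustrative):
+#include <cstdint>
+
+static uint32_t ExampleZigZag32(int32_t n) {
+  return (static_cast<uint32_t>(n) << 1) ^ static_cast<uint32_t>(n >> 31);
+}
+// ExampleZigZag32(0) == 0, (-1) == 1, (1) == 2, (-2) == 3.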
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_FLOAT>
+    : PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED32> {
+  typedef float Type;
+};
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_DOUBLE>
+    : PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED64> {
+  typedef double Type;
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_STRING> {
+  typedef string Type;
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    const Type& value = *static_cast<const Type*>(ptr);
+    output->WriteVarint32(value.size());
+    output->WriteRawMaybeAliased(value.data(), value.size());
+  }
+  static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
+    const Type& value = *static_cast<const Type*>(ptr);
+    return io::CodedOutputStream::WriteStringWithSizeToArray(value, buffer);
+  }
+};
+
+template <>
+struct PrimitiveTypeHelper<WireFormatLite::TYPE_BYTES>
+    : PrimitiveTypeHelper<WireFormatLite::TYPE_STRING> {};
+
+
+template <>
+struct PrimitiveTypeHelper<FieldMetadata::kInlinedType>
+    : PrimitiveTypeHelper<WireFormatLite::TYPE_STRING> {};
+
+// We want to serialize to both CodedOutputStream and directly into byte arrays
+// without duplicating the code. In fact we might want extra output channels in
+// the future.
+template <typename O, int type>
+struct OutputHelper;
+
+template <int type, typename O>
+void SerializeTo(const void* ptr, O* output) {
+  OutputHelper<O, type>::Serialize(ptr, output);
+}
+
+template <typename O>
+void WriteTagTo(uint32 tag, O* output) {
+  SerializeTo<WireFormatLite::TYPE_UINT32>(&tag, output);
+}
+
+template <typename O>
+void WriteLengthTo(uint32 length, O* output) {
+  SerializeTo<WireFormatLite::TYPE_UINT32>(&length, output);
+}
+
+// Specialization for coded output stream
+template <int type>
+struct OutputHelper<::google::protobuf::io::CodedOutputStream, type> {
+  static void Serialize(const void* ptr,
+                        ::google::protobuf::io::CodedOutputStream* output) {
+    PrimitiveTypeHelper<type>::Serialize(ptr, output);
+  }
+};
+
+// Specialization for writing into a plain array
+struct ArrayOutput {
+  uint8* ptr;
+  bool is_deterministic;
+};
+
+template <int type>
+struct OutputHelper<ArrayOutput, type> {
+  static void Serialize(const void* ptr, ArrayOutput* output) {
+    output->ptr = PrimitiveTypeHelper<type>::SerializeToArray(ptr, output->ptr);
+  }
+};
+
+void SerializeMessageNoTable(const MessageLite* msg,
+                             ::google::protobuf::io::CodedOutputStream* output) {
+  msg->SerializeWithCachedSizes(output);
+}
+
+void SerializeMessageNoTable(const MessageLite* msg, ArrayOutput* output) {
+  output->ptr = msg->InternalSerializeWithCachedSizesToArray(
+      output->is_deterministic, output->ptr);
+}
+
+// Helper to branch to fast path if possible
+void SerializeMessageDispatch(const ::google::protobuf::MessageLite& msg,
+                              const FieldMetadata* field_table, int num_fields,
+                              int32 cached_size,
+                              ::google::protobuf::io::CodedOutputStream* output) {
+  const uint8* base = reinterpret_cast<const uint8*>(&msg);
+  // Try the fast path
+  uint8* ptr = output->GetDirectBufferForNBytesAndAdvance(cached_size);
+  if (ptr) {
+    // We use virtual dispatch to enable dedicated generated code for the
+    // fast path.
+    msg.InternalSerializeWithCachedSizesToArray(
+        output->IsSerializationDeterministic(), ptr);
+    return;
+  }
+  SerializeInternal(base, field_table, num_fields, output);
+}
+
+// Helper to branch to fast path if possible
+void SerializeMessageDispatch(const ::google::protobuf::MessageLite& msg,
+                              const FieldMetadata* field_table, int num_fields,
+                              int32 cached_size, ArrayOutput* output) {
+  const uint8* base = reinterpret_cast<const uint8*>(&msg);
+  output->ptr = SerializeInternalToArray(base, field_table, num_fields,
+                                         output->is_deterministic, output->ptr);
+}
+
+// Serializing messages is special as it's not a primitive type and needs an
+// explicit overload for each output type.
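+// The OutputHelper pattern above in miniature: one generic front end, one
+// specialization per sink, so a serializer is written once and reused for both
+// the byte-emitting pass and a counting (size) pass. The Example* types are
+// illustrative, not part of this file.
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+struct ExampleByteSink { std::vector<uint8_t> bytes; };
+struct ExampleCountingSink { size_t count = 0; };
+
+template <typename S> struct ExampleEmitHelper;
+
+template <> struct ExampleEmitHelper<ExampleByteSink> {
+  static void Emit(uint8_t b, ExampleByteSink* s) { s->bytes.push_back(b); }
+};
+template <> struct ExampleEmitHelper<ExampleCountingSink> {
+  static void Emit(uint8_t, ExampleCountingSink* s) { ++s->count; }
+};
+
+// Generic front end, written once, usable with either sink.
+template <typename S>
+void ExampleEmitAll(const std::vector<uint8_t>& data, S* sink) {
+  for (uint8_t b : data) ExampleEmitHelper<S>::Emit(b, sink);
+}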
+template +void SerializeMessageTo(const MessageLite* msg, const void* table_ptr, + O* output) { + const SerializationTable* table = + static_cast(table_ptr); + if (!table) { + // Proto1 + WriteLengthTo(msg->GetCachedSize(), output); + SerializeMessageNoTable(msg, output); + return; + } + const FieldMetadata* field_table = table->field_table; + const uint8* base = reinterpret_cast(msg); + int cached_size = *reinterpret_cast(base + field_table->offset); + WriteLengthTo(cached_size, output); + int num_fields = table->num_fields - 1; + SerializeMessageDispatch(*msg, field_table + 1, num_fields, cached_size, + output); +} + +// Almost the same as above only it doesn't output the length field. +template +void SerializeGroupTo(const MessageLite* msg, const void* table_ptr, + O* output) { + const SerializationTable* table = + static_cast(table_ptr); + if (!table) { + // Proto1 + SerializeMessageNoTable(msg, output); + return; + } + const FieldMetadata* field_table = table->field_table; + const uint8* base = reinterpret_cast(msg); + int cached_size = *reinterpret_cast(base + field_table->offset); + int num_fields = table->num_fields - 1; + SerializeMessageDispatch(*msg, field_table + 1, num_fields, cached_size, + output); +} + +template +struct SingularFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + WriteTagTo(md.tag, output); + SerializeTo(field, output); + } +}; + +template <> +struct SingularFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + WriteTagTo(md.tag, output); + SerializeTo(&Get(field).Get(), + output); + } +}; + +template <> +struct SingularFieldHelper + : SingularFieldHelper {}; + +template <> +struct SingularFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + WriteTagTo(md.tag, output); + SerializeGroupTo(Get(field), + static_cast(md.ptr), output); + WriteTagTo(md.tag + 1, output); + } +}; + +template <> +struct SingularFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + WriteTagTo(md.tag, output); + SerializeMessageTo(Get(field), + static_cast(md.ptr), output); + } +}; + +template <> +struct SingularFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + WriteTagTo(md.tag, output); + SerializeTo(&Get<::std::string>(field), output); + } +}; + +template +struct RepeatedFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + typedef typename PrimitiveTypeHelper::Type T; + const RepeatedField& array = Get >(field); + for (int i = 0; i < array.size(); i++) { + WriteTagTo(md.tag, output); + SerializeTo(&array[i], output); + } + } +}; + +// We need to use a helper class to get access to the private members +class AccessorHelper { + public: + static int Size(const RepeatedPtrFieldBase& x) { return x.size(); } + static void const* Get(const RepeatedPtrFieldBase& x, int idx) { + return x.raw_data()[idx]; + } +}; + +template <> +struct RepeatedFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + const internal::RepeatedPtrFieldBase& array = + Get(field); + for (int i = 0; i < AccessorHelper::Size(array); i++) { + WriteTagTo(md.tag, output); + SerializeTo(AccessorHelper::Get(array, i), + output); + } + } +}; + +template <> +struct RepeatedFieldHelper + : RepeatedFieldHelper {}; + +template <> +struct 
RepeatedFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + const internal::RepeatedPtrFieldBase& array = + Get(field); + for (int i = 0; i < AccessorHelper::Size(array); i++) { + WriteTagTo(md.tag, output); + SerializeGroupTo( + static_cast(AccessorHelper::Get(array, i)), + static_cast(md.ptr), output); + WriteTagTo(md.tag + 1, output); + } + } +}; + +template <> +struct RepeatedFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + const internal::RepeatedPtrFieldBase& array = + Get(field); + for (int i = 0; i < AccessorHelper::Size(array); i++) { + WriteTagTo(md.tag, output); + SerializeMessageTo( + static_cast(AccessorHelper::Get(array, i)), md.ptr, + output); + } + } +}; + + +template <> +struct RepeatedFieldHelper + : RepeatedFieldHelper {}; + +template +struct PackedFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + typedef typename PrimitiveTypeHelper::Type T; + const RepeatedField& array = Get >(field); + if (array.empty()) return; + WriteTagTo(md.tag, output); + int cached_size = + Get(static_cast(field) + sizeof(RepeatedField)); + WriteLengthTo(cached_size, output); + for (int i = 0; i < array.size(); i++) { + SerializeTo(&array[i], output); + } + } +}; + +template <> +struct PackedFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + GOOGLE_LOG(FATAL) << "Not implemented field number " << md.tag << " with type " + << md.type; + } +}; + +template <> +struct PackedFieldHelper + : PackedFieldHelper {}; +template <> +struct PackedFieldHelper + : PackedFieldHelper {}; +template <> +struct PackedFieldHelper + : PackedFieldHelper {}; +template <> +struct PackedFieldHelper + : PackedFieldHelper {}; + +template +struct OneOfFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + SingularFieldHelper::Serialize(field, md, output); + } +}; + + +template <> +struct OneOfFieldHelper { + template + static void Serialize(const void* field, const FieldMetadata& md, O* output) { + SingularFieldHelper::Serialize( + Get(field), md, output); + } +}; + +void SerializeNotImplemented(int field) { + GOOGLE_LOG(FATAL) << "Not implemented field number " << field; +} + +// When switching to c++11 we should make these constexpr functions +#define SERIALIZE_TABLE_OP(type, type_class) \ + ((type - 1) + static_cast(type_class) * FieldMetadata::kNumTypes) + +int FieldMetadata::CalculateType(int type, + FieldMetadata::FieldTypeClass type_class) { + return SERIALIZE_TABLE_OP(type, type_class); +} + +template +bool IsNull(const void* ptr) { + return *static_cast::Type*>(ptr) == + 0; +} + +template <> +bool IsNull(const void* ptr) { + return static_cast(ptr)->Get().size() == 0; +} + +template <> +bool IsNull(const void* ptr) { + return static_cast(ptr)->Get().size() == 0; +} + +template <> +bool IsNull(const void* ptr) { + return Get(ptr) == NULL; +} + +template <> +bool IsNull(const void* ptr) { + return Get(ptr) == NULL; +} + + +template <> +bool IsNull(const void* ptr) { + return static_cast(ptr)->empty(); +} + +#define SERIALIZERS_FOR_TYPE(type) \ + case SERIALIZE_TABLE_OP(type, FieldMetadata::kPresence): \ + if (!IsPresent(base, field_metadata.has_offset)) continue; \ + SingularFieldHelper::Serialize(ptr, field_metadata, output); \ + break; \ + case SERIALIZE_TABLE_OP(type, FieldMetadata::kNoPresence): \ + if (IsNull(ptr)) continue; \ + 
SingularFieldHelper::Serialize(ptr, field_metadata, output); \ + break; \ + case SERIALIZE_TABLE_OP(type, FieldMetadata::kRepeated): \ + RepeatedFieldHelper::Serialize(ptr, field_metadata, output); \ + break; \ + case SERIALIZE_TABLE_OP(type, FieldMetadata::kPacked): \ + PackedFieldHelper::Serialize(ptr, field_metadata, output); \ + break; \ + case SERIALIZE_TABLE_OP(type, FieldMetadata::kOneOf): \ + if (!IsOneofPresent(base, field_metadata.has_offset, field_metadata.tag)) \ + continue; \ + OneOfFieldHelper::Serialize(ptr, field_metadata, output); \ + break + +void SerializeInternal(const uint8* base, + const FieldMetadata* field_metadata_table, + int32 num_fields, + ::google::protobuf::io::CodedOutputStream* output) { + for (int i = 0; i < num_fields; i++) { + const FieldMetadata& field_metadata = field_metadata_table[i]; + const uint8* ptr = base + field_metadata.offset; + switch (field_metadata.type) { + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_DOUBLE); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FLOAT); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT64); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT64); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED64); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BOOL); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_STRING); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_GROUP); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_MESSAGE); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BYTES); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_ENUM); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED64); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT64); + SERIALIZERS_FOR_TYPE(FieldMetadata::kInlinedType); + + // Special cases + case FieldMetadata::kSpecial: + reinterpret_cast( + const_cast(field_metadata.ptr))( + base, field_metadata.offset, field_metadata.tag, + field_metadata.has_offset, output); + break; + default: + // __builtin_unreachable() + SerializeNotImplemented(field_metadata.type); + } + } +} + +uint8* SerializeInternalToArray(const uint8* base, + const FieldMetadata* field_metadata_table, + int32 num_fields, bool is_deterministic, + uint8* buffer) { + ArrayOutput array_output = {buffer, is_deterministic}; + ArrayOutput* output = &array_output; + for (int i = 0; i < num_fields; i++) { + const FieldMetadata& field_metadata = field_metadata_table[i]; + const uint8* ptr = base + field_metadata.offset; + switch (field_metadata.type) { + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_DOUBLE); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FLOAT); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT64); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT64); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED64); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BOOL); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_STRING); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_GROUP); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_MESSAGE); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BYTES); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_ENUM); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED64); + 
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT32); + SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT64); + SERIALIZERS_FOR_TYPE(FieldMetadata::kInlinedType); + // Special cases + case FieldMetadata::kSpecial: { + io::ArrayOutputStream array_stream(array_output.ptr, INT_MAX); + io::CodedOutputStream output(&array_stream); + output.SetSerializationDeterministic(is_deterministic); + reinterpret_cast( + const_cast(field_metadata.ptr))( + base, field_metadata.offset, field_metadata.tag, + field_metadata.has_offset, &output); + array_output.ptr += output.ByteCount(); + } break; + default: + // __builtin_unreachable() + SerializeNotImplemented(field_metadata.type); + } + } + return array_output.ptr; +} +#undef SERIALIZERS_FOR_TYPE + +void ExtensionSerializer(const uint8* ptr, uint32 offset, uint32 tag, + uint32 has_offset, + ::google::protobuf::io::CodedOutputStream* output) { + reinterpret_cast(ptr + offset) + ->SerializeWithCachedSizes(tag, has_offset, output); +} + +void UnknownFieldSerializerLite(const uint8* ptr, uint32 offset, uint32 tag, + uint32 has_offset, + ::google::protobuf::io::CodedOutputStream* output) { + output->WriteString( + reinterpret_cast(ptr + offset) + ->unknown_fields()); +} + +MessageLite* DuplicateIfNonNullInternal(MessageLite* message) { + if (message) { + MessageLite* ret = message->New(); + ret->CheckTypeAndMergeFrom(*message); + return ret; + } else { + return NULL; + } +} + +// Returns a message owned by this Arena. This may require Own()ing or +// duplicating the message. +MessageLite* GetOwnedMessageInternal(Arena* message_arena, + MessageLite* submessage, + Arena* submessage_arena) { + GOOGLE_DCHECK(submessage->GetArena() == submessage_arena); + GOOGLE_DCHECK(message_arena != submessage_arena); + if (message_arena != NULL && submessage_arena == NULL) { + message_arena->Own(submessage); + return submessage; + } else { + MessageLite* ret = submessage->New(message_arena); + ret->CheckTypeAndMergeFrom(*submessage); + return ret; + } +} + +namespace { + +void InitSCC_DFS(SCCInfoBase* scc) { + if (scc->visit_status.load(std::memory_order_relaxed) != + SCCInfoBase::kUninitialized) return; + scc->visit_status.store(SCCInfoBase::kRunning, std::memory_order_relaxed); + // Each base is followed by an array of pointers to deps + auto deps = reinterpret_cast(scc + 1); + for (int i = 0; i < scc->num_deps; i++) { + if (deps[i]) InitSCC_DFS(deps[i]); + } + scc->init_func(); + // Mark done (note we use memory order release here), other threads could + // now see this as initialized and thus the initialization must have happened + // before. + scc->visit_status.store(SCCInfoBase::kInitialized, std::memory_order_release); +} + +} // namespace + +void InitSCCImpl(SCCInfoBase* scc) { + static WrappedMutex mu{GOOGLE_PROTOBUF_LINKER_INITIALIZED}; + // Either the default in case no initialization is running or the id of the + // thread that is currently initializing. + static std::atomic runner; + auto me = std::this_thread::get_id(); + // This will only happen because the constructor will call InitSCC while + // constructing the default instance. + if (runner.load(std::memory_order_relaxed) == me) { + // Because we're in the process of constructing the default instance. + // We can be assured that we're already exploring this SCC. 
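+    // The kInitialized store in InitSCC_DFS above is a release store: a thread
+    // that observes kInitialized with an acquire load is guaranteed to also
+    // observe every write made by init_func(). The idiom in miniature
+    // (illustrative, not this file's code):
+    //
+    //   std::atomic<int> status{0};
+    //   int payload = 0;
+    //   // initializer:
+    //   payload = 42;
+    //   status.store(1, std::memory_order_release);
+    //   // reader:
+    //   if (status.load(std::memory_order_acquire) == 1) {
+    //     assert(payload == 42);  // guaranteed by the release/acquire pair
+    //   }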
+ GOOGLE_CHECK_EQ(scc->visit_status.load(std::memory_order_relaxed), + SCCInfoBase::kRunning); + return; + } + InitProtobufDefaults(); + mu.Lock(); + runner.store(me, std::memory_order_relaxed); + InitSCC_DFS(scc); + runner.store(std::thread::id{}, std::memory_order_relaxed); + mu.Unlock(); +} + +} // namespace internal +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena.h new file mode 100644 index 0000000000000000000000000000000000000000..9928c8e663ba27382046a04dfb470a64dee8fdac --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena.h @@ -0,0 +1,703 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file defines an Arena allocator for better allocation performance. + +#ifndef GOOGLE_PROTOBUF_ARENA_H__ +#define GOOGLE_PROTOBUF_ARENA_H__ + +#include +#ifdef max +#undef max // Visual Studio defines this macro +#endif +#if defined(_MSC_VER) && !defined(_LIBCPP_STD_VER) && !_HAS_EXCEPTIONS +// Work around bugs in MSVC header when _HAS_EXCEPTIONS=0. 
+#include +#include +namespace std { +using type_info = ::type_info; +} +#else +#include +#endif + +#include +#include +#include + +namespace google { +namespace protobuf { + +struct ArenaOptions; // defined below + +} // namespace protobuf + +namespace quality_webanswers { + +void TempPrivateWorkAround(::google::protobuf::ArenaOptions* arena_options); + +} // namespace quality_webanswers + +namespace protobuf { + +class Arena; // defined below +class Message; // defined in message.h +class MessageLite; + +namespace arena_metrics { + +void EnableArenaMetrics(::google::protobuf::ArenaOptions* options); + +} // namespace arena_metrics + +namespace internal { + +struct ArenaStringPtr; // defined in arenastring.h +class LazyField; // defined in lazy_field.h + +template +class GenericTypeHandler; // defined in repeated_field.h + +// Templated cleanup methods. +template +void arena_destruct_object(void* object) { + reinterpret_cast(object)->~T(); +} +template +void arena_delete_object(void* object) { + delete reinterpret_cast(object); +} +inline void arena_free(void* object, size_t size) { +#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation) + ::operator delete(object, size); +#else + (void)size; + ::operator delete(object); +#endif +} + +} // namespace internal + +// ArenaOptions provides optional additional parameters to arena construction +// that control its block-allocation behavior. +struct ArenaOptions { + // This defines the size of the first block requested from the system malloc. + // Subsequent block sizes will increase in a geometric series up to a maximum. + size_t start_block_size; + + // This defines the maximum block size requested from system malloc (unless an + // individual arena allocation request occurs with a size larger than this + // maximum). Requested block sizes increase up to this value, then remain + // here. + size_t max_block_size; + + // An initial block of memory for the arena to use, or NULL for none. If + // provided, the block must live at least as long as the arena itself. The + // creator of the Arena retains ownership of the block after the Arena is + // destroyed. + char* initial_block; + + // The size of the initial block, if provided. + size_t initial_block_size; + + // A function pointer to an alloc method that returns memory blocks of size + // requested. By default, it contains a ptr to the malloc function. + // + // NOTE: block_alloc and dealloc functions are expected to behave like + // malloc and free, including Asan poisoning. + void* (*block_alloc)(size_t); + // A function pointer to a dealloc method that takes ownership of the blocks + // from the arena. By default, it contains a ptr to a wrapper function that + // calls free. + void (*block_dealloc)(void*, size_t); + + ArenaOptions() + : start_block_size(kDefaultStartBlockSize), + max_block_size(kDefaultMaxBlockSize), + initial_block(NULL), + initial_block_size(0), + block_alloc(&::operator new), + block_dealloc(&internal::arena_free), + on_arena_init(NULL), + on_arena_reset(NULL), + on_arena_destruction(NULL), + on_arena_allocation(NULL) {} + + private: + // Hooks for adding external functionality such as user-specific metrics + // collection, specific debugging abilities, etc. + // Init hook may return a pointer to a cookie to be stored in the arena. + // reset and destruction hooks will then be called with the same cookie + // pointer. 
This allows us to save an external object per arena instance and + // use it on the other hooks (Note: It is just as legal for init to return + // NULL and not use the cookie feature). + // on_arena_reset and on_arena_destruction also receive the space used in + // the arena just before the reset. + void* (*on_arena_init)(Arena* arena); + void (*on_arena_reset)(Arena* arena, void* cookie, uint64 space_used); + void (*on_arena_destruction)(Arena* arena, void* cookie, uint64 space_used); + + // type_info is promised to be static - its lifetime extends to + // match program's lifetime (It is given by typeid operator). + // Note: typeid(void) will be passed as allocated_type every time we + // intentionally want to avoid monitoring an allocation. (i.e. internal + // allocations for managing the arena) + void (*on_arena_allocation)(const std::type_info* allocated_type, + uint64 alloc_size, void* cookie); + + // Constants define default starting block size and max block size for + // arena allocator behavior -- see descriptions above. + static const size_t kDefaultStartBlockSize = 256; + static const size_t kDefaultMaxBlockSize = 8192; + + friend void ::google::protobuf::arena_metrics::EnableArenaMetrics(ArenaOptions*); + friend void quality_webanswers::TempPrivateWorkAround(ArenaOptions*); + friend class Arena; + friend class ArenaOptionsTestFriend; +}; + +// Support for non-RTTI environments. (The metrics hooks API uses type +// information.) +#ifndef GOOGLE_PROTOBUF_NO_RTTI +#define RTTI_TYPE_ID(type) (&typeid(type)) +#else +#define RTTI_TYPE_ID(type) (NULL) +#endif + +// Arena allocator. Arena allocation replaces ordinary (heap-based) allocation +// with new/delete, and improves performance by aggregating allocations into +// larger blocks and freeing allocations all at once. Protocol messages are +// allocated on an arena by using Arena::CreateMessage(Arena*), below, and +// are automatically freed when the arena is destroyed. +// +// This is a thread-safe implementation: multiple threads may allocate from the +// arena concurrently. Destruction is not thread-safe and the destructing +// thread must synchronize with users of the arena first. +// +// An arena provides two allocation interfaces: CreateMessage, which works +// for arena-enabled proto2 message types as well as other types that satisfy +// the appropriate protocol (described below), and Create, which works for +// any arbitrary type T. CreateMessage is better when the type T supports it, +// because this interface (i) passes the arena pointer to the created object so +// that its sub-objects and internal allocations can use the arena too, and (ii) +// elides the object's destructor call when possible. Create does not place +// any special requirements on the type T, and will invoke the object's +// destructor when the arena is destroyed. +// +// The arena message allocation protocol, required by CreateMessage, is as +// follows: +// +// - The type T must have (at least) two constructors: a constructor with no +// arguments, called when a T is allocated on the heap; and a constructor with +// a google::protobuf::Arena* argument, called when a T is allocated on an arena. If the +// second constructor is called with a NULL arena pointer, it must be +// equivalent to invoking the first (no-argument) constructor. +// +// - The type T must have a particular type trait: a nested type +// |InternalArenaConstructable_|. This is usually a typedef to |void|. 
If no +// such type trait exists, then the instantiation CreateMessage will fail +// to compile. +// +// - The type T *may* have the type trait |DestructorSkippable_|. If this type +// trait is present in the type, then its destructor will not be called if and +// only if it was passed a non-NULL arena pointer. If this type trait is not +// present on the type, then its destructor is always called when the +// containing arena is destroyed. +// +// - One- and two-user-argument forms of CreateMessage() also exist that +// forward these constructor arguments to T's constructor: for example, +// CreateMessage(Arena*, arg1, arg2) forwards to a constructor T(Arena*, +// arg1, arg2). +// +// This protocol is implemented by all arena-enabled proto2 message classes as +// well as RepeatedPtrField. +// +// Do NOT subclass Arena. This class will be marked as final when C++11 is +// enabled. +class LIBPROTOBUF_EXPORT Arena { + public: + // Arena constructor taking custom options. See ArenaOptions below for + // descriptions of the options available. + explicit Arena(const ArenaOptions& options) : impl_(options) { + Init(options); + } + + // Block overhead. Use this as a guide for how much to over-allocate the + // initial block if you want an allocation of size N to fit inside it. + // + // WARNING: if you allocate multiple objects, it is difficult to guarantee + // that a series of allocations will fit in the initial block, especially if + // Arena changes its alignment guarantees in the future! + static const size_t kBlockOverhead = internal::ArenaImpl::kBlockHeaderSize + + internal::ArenaImpl::kSerialArenaSize; + + // Default constructor with sensible default options, tuned for average + // use-cases. + Arena() : impl_(ArenaOptions()) { Init(ArenaOptions()); } + + ~Arena() { + if (hooks_cookie_) { + CallDestructorHooks(); + } + } + + void Init(const ArenaOptions& options) { + on_arena_allocation_ = options.on_arena_allocation; + on_arena_reset_ = options.on_arena_reset; + on_arena_destruction_ = options.on_arena_destruction; + // Call the initialization hook + if (options.on_arena_init != NULL) { + hooks_cookie_ = options.on_arena_init(this); + } else { + hooks_cookie_ = NULL; + } + } + + // API to create proto2 message objects on the arena. If the arena passed in + // is NULL, then a heap allocated object is returned. Type T must be a message + // defined in a .proto file with cc_enable_arenas set to true, otherwise a + // compilation error will occur. + // + // RepeatedField and RepeatedPtrField may also be instantiated directly on an + // arena with this method. + // + // This function also accepts any type T that satisfies the arena message + // allocation protocol, documented above. + template + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMessage( + Arena* arena, Args&&... args) { + static_assert( + InternalHelper::is_arena_constructable::value, + "CreateMessage can only construct types that are ArenaConstructable"); + // We must delegate to CreateMaybeMessage() and NOT CreateMessageInternal() + // because protobuf generated classes specialize CreateMaybeMessage() and we + // need to use that specialization for code size reasons. + return Arena::CreateMaybeMessage(arena, std::forward(args)...); + } + + // API to create any objects on the arena. Note that only the object will + // be created on the arena; the underlying ptrs (in case of a proto2 message) + // will be still heap allocated. Proto messages should usually be allocated + // with CreateMessage() instead. 
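+  // Illustrative usage, assuming a generated arena-enabled message type
+  // MyMessage (the name is hypothetical):
+  //
+  //   Arena arena;
+  //   MyMessage* m = Arena::CreateMessage<MyMessage>(&arena);
+  //   std::string* s = Arena::Create<std::string>(&arena, "scratch");
+  //
+  // Both objects are freed when `arena` is destroyed; the message's destructor
+  // can be elided, while the string's destructor must still run because
+  // std::string knows nothing about arenas.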
+ // + // Note that even if T satisfies the arena message construction protocol + // (InternalArenaConstructable_ trait and optional DestructorSkippable_ + // trait), as described above, this function does not follow the protocol; + // instead, it treats T as a black-box type, just as if it did not have these + // traits. Specifically, T's constructor arguments will always be only those + // passed to Create() -- no additional arena pointer is implicitly added. + // Furthermore, the destructor will always be called at arena destruction time + // (unless the destructor is trivial). Hence, from T's point of view, it is as + // if the object were allocated on the heap (except that the underlying memory + // is obtained from the arena). + template + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* Create(Arena* arena, + Args&&... args) { + return CreateNoMessage(arena, is_arena_constructable(), + std::forward(args)...); + } + + // Create an array of object type T on the arena *without* invoking the + // constructor of T. If `arena` is null, then the return value should be freed + // with `delete[] x;` (or `::operator delete[](x);`). + // To ensure safe uses, this function checks at compile time + // (when compiled as C++11) that T is trivially default-constructible and + // trivially destructible. + template + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateArray( + Arena* arena, size_t num_elements) { + static_assert(std::is_pod::value, + "CreateArray requires a trivially constructible type"); + static_assert(std::is_trivially_destructible::value, + "CreateArray requires a trivially destructible type"); + GOOGLE_CHECK_LE(num_elements, std::numeric_limits::max() / sizeof(T)) + << "Requested size is too large to fit into size_t."; + if (arena == NULL) { + return static_cast(::operator new[](num_elements * sizeof(T))); + } else { + return arena->CreateInternalRawArray(num_elements); + } + } + + // Returns the total space allocated by the arena, which is the sum of the + // sizes of the underlying blocks. This method is relatively fast; a counter + // is kept as blocks are allocated. + uint64 SpaceAllocated() const { return impl_.SpaceAllocated(); } + // Returns the total space used by the arena. Similar to SpaceAllocated but + // does not include free space and block overhead. The total space returned + // may not include space used by other threads executing concurrently with + // the call to this method. + uint64 SpaceUsed() const { return impl_.SpaceUsed(); } + // DEPRECATED. Please use SpaceAllocated() and SpaceUsed(). + // + // Combines SpaceAllocated and SpaceUsed. Returns a pair of + // . + PROTOBUF_RUNTIME_DEPRECATED("Please use SpaceAllocated() and SpaceUsed()") + std::pair SpaceAllocatedAndUsed() const { + return std::make_pair(SpaceAllocated(), SpaceUsed()); + } + + // Frees all storage allocated by this arena after calling destructors + // registered with OwnDestructor() and freeing objects registered with Own(). + // Any objects allocated on this arena are unusable after this call. It also + // returns the total space used by the arena which is the sums of the sizes + // of the allocated blocks. This method is not thread-safe. + GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE uint64 Reset() { + // Call the reset hook + if (on_arena_reset_ != NULL) { + on_arena_reset_(this, hooks_cookie_, impl_.SpaceAllocated()); + } + return impl_.Reset(); + } + + // Adds |object| to a list of heap-allocated objects to be freed with |delete| + // when the arena is destroyed or reset. 
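+  // Illustrative usage:
+  //
+  //   Arena arena;
+  //   std::string* s = new std::string("heap-allocated");
+  //   arena.Own(s);  // `delete s` runs when the arena is destroyed or reset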
+
+  // Adds |object| to a list of heap-allocated objects to be freed with |delete|
+  // when the arena is destroyed or reset.
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE void Own(T* object) {
+    OwnInternal(object, std::is_convertible<T*, ::google::protobuf::Message*>());
+  }
+
+  // Adds |object| to a list of objects whose destructors will be manually
+  // called when the arena is destroyed or reset. This differs from Own() in
+  // that it does not free the underlying memory with |delete|; hence, it is
+  // normally only used for objects that are placement-newed into
+  // arena-allocated memory.
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE void OwnDestructor(T* object) {
+    if (object != NULL) {
+      impl_.AddCleanup(object, &internal::arena_destruct_object<T>);
+    }
+  }
+
+  // Adds a custom member function on an object to the list of destructors that
+  // will be manually called when the arena is destroyed or reset. This differs
+  // from OwnDestructor() in that any member function may be specified, not only
+  // the class destructor.
+  GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE void OwnCustomDestructor(
+      void* object, void (*destruct)(void*)) {
+    impl_.AddCleanup(object, destruct);
+  }
+
+  // Retrieves the arena associated with |value| if |value| is an arena-capable
+  // message, or NULL otherwise. This differs from value->GetArena() in that the
+  // latter is a virtual call, while this method is a templated call that
+  // resolves at compile-time.
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Arena* GetArena(
+      const T* value) {
+    return GetArenaInternal(value, is_arena_constructable<T>());
+  }
+
+  template <typename T>
+  class InternalHelper {
+    template <typename U>
+    static char DestructorSkippable(const typename U::DestructorSkippable_*);
+    template <typename U>
+    static double DestructorSkippable(...);
+
+    typedef std::integral_constant<
+        bool, sizeof(DestructorSkippable<T>(static_cast<const T*>(0))) ==
+                      sizeof(char) ||
+                  std::is_trivially_destructible<T>::value>
+        is_destructor_skippable;
+
+    template <typename U>
+    static char ArenaConstructable(
+        const typename U::InternalArenaConstructable_*);
+    template <typename U>
+    static double ArenaConstructable(...);
+
+    typedef std::integral_constant<bool, sizeof(ArenaConstructable<T>(
+                                              static_cast<const T*>(0))) ==
+                                              sizeof(char)>
+        is_arena_constructable;
+
+    template <typename... Args>
+    static T* Construct(void* ptr, Args&&... args) {
+      return new (ptr) T(std::forward<Args>(args)...);
+    }
+
+    static Arena* GetArena(const T* p) { return p->GetArenaNoVirtual(); }
+
+    friend class Arena;
+  };
+
+  // Helper typetraits that indicate support for arenas in a type T at compile
+  // time. This is public only to allow construction of higher-level templated
+  // utilities.
+  //
+  // is_arena_constructable<T>::value is true if the message type T has arena
+  // support enabled, and false otherwise.
+  //
+  // is_destructor_skippable<T>::value is true if the message type T has told
+  // the arena that it is safe to skip the destructor, and false otherwise.
+  //
+  // This is inside Arena because only Arena has the friend relationships
+  // necessary to see the underlying generated code traits.
+  template <typename T>
+  struct is_arena_constructable : InternalHelper<T>::is_arena_constructable {};
+  template <typename T>
+  struct is_destructor_skippable : InternalHelper<T>::is_destructor_skippable {
+  };
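+
+  // Ownership helpers in action (minimal sketch; `Foo` stands for any type
+  // with a non-trivial destructor):
+  //
+  //   Foo* heap_foo = new Foo;
+  //   arena.Own(heap_foo);           // delete'd when the arena dies
+  //
+  //   void* mem = Arena::CreateArray<char>(&arena, sizeof(Foo));
+  //   Foo* placed = new (mem) Foo;   // placement-new into arena memory
+  //   arena.OwnDestructor(placed);   // ~Foo() runs; memory stays with arena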
+
+ private:
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMessageInternal(
+      Arena* arena, Args&&... args) {
+    static_assert(
+        InternalHelper<T>::is_arena_constructable::value,
+        "CreateMessage can only construct types that are ArenaConstructable");
+    if (arena == NULL) {
+      return new T(nullptr, std::forward<Args>(args)...);
+    } else {
+      return arena->DoCreateMessage<T>(std::forward<Args>(args)...);
+    }
+  }
+
+  // This specialization for no arguments is necessary, because its behavior is
+  // slightly different. When the arena pointer is nullptr, it calls T()
+  // instead of T(nullptr).
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMessageInternal(
+      Arena* arena) {
+    static_assert(
+        InternalHelper<T>::is_arena_constructable::value,
+        "CreateMessage can only construct types that are ArenaConstructable");
+    if (arena == NULL) {
+      return new T();
+    } else {
+      return arena->DoCreateMessage<T>();
+    }
+  }
+
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateInternal(
+      Arena* arena, Args&&... args) {
+    if (arena == NULL) {
+      return new T(std::forward<Args>(args)...);
+    } else {
+      return arena->DoCreate<T>(std::is_trivially_destructible<T>::value,
+                                std::forward<Args>(args)...);
+    }
+  }
+
+  void CallDestructorHooks();
+  void OnArenaAllocation(const std::type_info* allocated_type, size_t n) const;
+  inline void AllocHook(const std::type_info* allocated_type, size_t n) const {
+    if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL)) {
+      OnArenaAllocation(allocated_type, n);
+    }
+  }
+
+  // Allocates memory and, when the hooks are installed in ArenaOptions and the
+  // cookie is non-null, also invokes the on_arena_allocation callback with the
+  // allocated type's info.
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void* AllocateInternal(
+      bool skip_explicit_ownership) {
+    const size_t n = internal::AlignUpTo8(sizeof(T));
+    AllocHook(RTTI_TYPE_ID(T), n);
+    // Monitor allocation if needed.
+    if (skip_explicit_ownership) {
+      return impl_.AllocateAligned(n);
+    } else {
+      return impl_.AllocateAlignedAndAddCleanup(
+          n, &internal::arena_destruct_object<T>);
+    }
+  }
+
+  // CreateMessage<T> requires that T supports arenas, but this private method
+  // works whether or not T supports arenas. These are not exposed to user code
+  // as they can cause confusing API usages and can end up causing double frees
+  // in user code. They are used only internally from LazyField and Repeated
+  // fields, since those are designed to work in all mode combinations.
+  template <typename Msg, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Msg* DoCreateMaybeMessage(
+      Arena* arena, std::true_type, Args&&... args) {
+    return CreateMessageInternal<Msg>(arena, std::forward<Args>(args)...);
+  }
+
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* DoCreateMaybeMessage(
+      Arena* arena, std::false_type, Args&&... args) {
+    return CreateInternal<T>(arena, std::forward<Args>(args)...);
+  }
+
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMaybeMessage(
+      Arena* arena, Args&&... args) {
+    return DoCreateMaybeMessage<T>(arena, is_arena_constructable<T>(),
+                                   std::forward<Args>(args)...);
+  }
+
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateNoMessage(
+      Arena* arena, std::true_type, Args&&... args) {
+    // User is constructing with Create() despite the fact that T supports arena
+    // construction. In this case we have to delegate to CreateInternal(), and
+    // we can't use any CreateMaybeMessage() specialization that may be defined.
+    return CreateInternal<T>(arena, std::forward<Args>(args)...);
+  }
+
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateNoMessage(
+      Arena* arena, std::false_type, Args&&... args) {
+    // User is constructing with Create() and the type does not support arena
+    // construction. In this case we can delegate to CreateMaybeMessage() and
+    // use any specialization that may be available for that.
+    return CreateMaybeMessage<T>(arena, std::forward<Args>(args)...);
+  }
+
+  // Just allocate the required size for the given type assuming the
+  // type has a trivial constructor.
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* CreateInternalRawArray(
+      size_t num_elements) {
+    GOOGLE_CHECK_LE(num_elements, std::numeric_limits<size_t>::max() / sizeof(T))
+        << "Requested size is too large to fit into size_t.";
+    const size_t n = internal::AlignUpTo8(sizeof(T) * num_elements);
+    // Monitor allocation if needed.
+    AllocHook(RTTI_TYPE_ID(T), n);
+    return static_cast<T*>(impl_.AllocateAligned(n));
+  }
+
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* DoCreate(
+      bool skip_explicit_ownership, Args&&... args) {
+    return new (AllocateInternal<T>(skip_explicit_ownership))
+        T(std::forward<Args>(args)...);
+  }
+  template <typename T, typename... Args>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* DoCreateMessage(Args&&... args) {
+    return InternalHelper<T>::Construct(
+        AllocateInternal<T>(InternalHelper<T>::is_destructor_skippable::value),
+        this, std::forward<Args>(args)...);
+  }
+
+  // CreateInArenaStorage is used to implement map field. Without it,
+  // google::protobuf::Map would need to call a generated message's protected
+  // arena constructor, which would require declaring google::protobuf::Map as
+  // a friend of the generated message.
+  template <typename T>
+  static void CreateInArenaStorage(T* ptr, Arena* arena) {
+    CreateInArenaStorageInternal(ptr, arena,
+                                 typename is_arena_constructable<T>::type());
+    RegisterDestructorInternal(
+        ptr, arena,
+        typename InternalHelper<T>::is_destructor_skippable::type());
+  }
+
+  template <typename T>
+  static void CreateInArenaStorageInternal(T* ptr, Arena* arena,
+                                           std::true_type) {
+    InternalHelper<T>::Construct(ptr, arena);
+  }
+  template <typename T>
+  static void CreateInArenaStorageInternal(T* ptr, Arena* /* arena */,
+                                           std::false_type) {
+    new (ptr) T();
+  }
+
+  template <typename T>
+  static void RegisterDestructorInternal(T* /* ptr */, Arena* /* arena */,
+                                         std::true_type) {}
+  template <typename T>
+  static void RegisterDestructorInternal(T* ptr, Arena* arena,
+                                         std::false_type) {
+    arena->OwnDestructor(ptr);
+  }
+
+  // These implement Own(), which registers an object for deletion (destructor
+  // call and operator delete()). The second parameter has type 'true_type' if T
+  // is a subtype of ::google::protobuf::Message and 'false_type' otherwise. Collapsing
+  // all template instantiations to one for generic Message reduces code size,
+  // using the virtual destructor instead.
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void OwnInternal(T* object,
+                                                           std::true_type) {
+    if (object != NULL) {
+      impl_.AddCleanup(object,
+                       &internal::arena_delete_object< ::google::protobuf::Message >);
+    }
+  }
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void OwnInternal(T* object,
+                                                           std::false_type) {
+    if (object != NULL) {
+      impl_.AddCleanup(object, &internal::arena_delete_object<T>);
+    }
+  }
+
+  // Implementation for GetArena(). Only message objects with
+  // InternalArenaConstructable_ tags can be associated with an arena, and such
+  // objects must implement a GetArenaNoVirtual() method.
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Arena* GetArenaInternal(
+      const T* value, std::true_type) {
+    return InternalHelper<T>::GetArena(value);
+  }
+
+  template <typename T>
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Arena* GetArenaInternal(
+      const T* /* value */, std::false_type) {
+    return NULL;
+  }
+
+  // For friends of arena.
+  void* AllocateAligned(size_t n) {
+    AllocHook(NULL, n);
+    return impl_.AllocateAligned(internal::AlignUpTo8(n));
+  }
+
+  internal::ArenaImpl impl_;
+
+  void (*on_arena_allocation_)(const std::type_info* allocated_type,
+                               uint64 alloc_size, void* cookie);
+  void (*on_arena_reset_)(Arena* arena, void* cookie, uint64 space_used);
+  void (*on_arena_destruction_)(Arena* arena, void* cookie, uint64 space_used);
+
+  // The arena may save a cookie it receives from the external on_init hook
+  // and then use it when calling the on_reset and on_destruction hooks.
+  void* hooks_cookie_;
+
+  template <typename Type>
+  friend class internal::GenericTypeHandler;
+  friend struct internal::ArenaStringPtr;  // For AllocateAligned.
+  friend class internal::LazyField;        // For CreateMaybeMessage.
+  friend class MessageLite;
+  template <typename Key, typename T>
+  friend class Map;
+};
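+
+// Sizing sketch for a caller-provided initial block (illustrative; uses the
+// kBlockOverhead constant documented in the class above): to make a first
+// allocation of ~1024 bytes come from stack storage rather than the heap --
+//
+//   char storage[1024 + Arena::kBlockOverhead];
+//   ArenaOptions options;
+//   options.initial_block = storage;
+//   options.initial_block_size = sizeof(storage);
+//   Arena arena(options);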
+
+// Defined above for supporting environments without RTTI.
+#undef RTTI_TYPE_ID
+
+}  // namespace protobuf
+
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_ARENA_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena_impl.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..f648f16621ff2a09aa2237fa8d10ec668bb79924
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena_impl.h
@@ -0,0 +1,321 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file defines an Arena allocator for better allocation performance.
+
+#ifndef GOOGLE_PROTOBUF_ARENA_IMPL_H__
+#define GOOGLE_PROTOBUF_ARENA_IMPL_H__
+
+#include <atomic>
+#include <limits>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/logging.h>
+
+#include <google/protobuf/stubs/port.h>
+
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif  // ADDRESS_SANITIZER
+
+namespace google {
+
+namespace protobuf {
+namespace internal {
+
+inline size_t AlignUpTo8(size_t n) {
+  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
+  return (n + 7) & -8;
+}
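+
+// For example, AlignUpTo8(13) == (13 + 7) & ~7 == 16, and AlignUpTo8(16) == 16.
+// The mask works because -8 is ~7 in two's complement, so `& -8` clears the
+// low three bits after rounding up.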
+
+// This class provides the core Arena memory allocation library. Different
+// implementations only need to implement the public interface below.
+// Arena is not a template type, as that would only be useful if all protos
+// were in turn templates, which will not / cannot happen. However, by
+// separating the memory allocation part from the API cruft users expect, we
+// can use #ifdef to select the best implementation based on hardware / OS.
+class LIBPROTOBUF_EXPORT ArenaImpl {
+ public:
+  struct Options {
+    size_t start_block_size;
+    size_t max_block_size;
+    char* initial_block;
+    size_t initial_block_size;
+    void* (*block_alloc)(size_t);
+    void (*block_dealloc)(void*, size_t);
+
+    template <typename O>
+    explicit Options(const O& options)
+        : start_block_size(options.start_block_size),
+          max_block_size(options.max_block_size),
+          initial_block(options.initial_block),
+          initial_block_size(options.initial_block_size),
+          block_alloc(options.block_alloc),
+          block_dealloc(options.block_dealloc) {}
+  };
+
+  template <typename O>
+  explicit ArenaImpl(const O& options) : options_(options) {
+    if (options_.initial_block != NULL && options_.initial_block_size > 0) {
+      GOOGLE_CHECK_GE(options_.initial_block_size, sizeof(Block))
+          << ": Initial block size too small for header.";
+      initial_block_ = reinterpret_cast<Block*>(options_.initial_block);
+    } else {
+      initial_block_ = NULL;
+    }
+
+    Init();
+  }
+
+  // Destructor deletes all owned heap allocated objects, and destructs objects
+  // that have non-trivial destructors, except for proto2 message objects whose
+  // destructors can be skipped. Also, frees all blocks except the initial block
+  // if it was passed in.
+  ~ArenaImpl();
+
+  uint64 Reset();
+
+  uint64 SpaceAllocated() const;
+  uint64 SpaceUsed() const;
+
+  void* AllocateAligned(size_t n);
+
+  void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*));
+
+  // Add object pointer and cleanup function pointer to the list.
+  void AddCleanup(void* elem, void (*cleanup)(void*));
+
+ private:
+  void* AllocateAlignedFallback(size_t n);
+  void* AllocateAlignedAndAddCleanupFallback(size_t n, void (*cleanup)(void*));
+  void AddCleanupFallback(void* elem, void (*cleanup)(void*));
+
+  // Node contains the ptr of the object to be cleaned up and the associated
+  // cleanup function ptr.
+  struct CleanupNode {
+    void* elem;              // Pointer to the object to be cleaned up.
+    void (*cleanup)(void*);  // Function pointer to the destructor or deleter.
+  };
+
+  // Cleanup uses a chunked linked list, to reduce pointer chasing.
+  struct CleanupChunk {
+    static size_t SizeOf(size_t i) {
+      return sizeof(CleanupChunk) + (sizeof(CleanupNode) * (i - 1));
+    }
+    size_t size;           // Total elements in the list.
+    CleanupChunk* next;    // Next node in the list.
+    CleanupNode nodes[1];  // True length is |size|.
+  };
+
+  class Block;
+
+  // A thread-unsafe Arena that can only be used within its owning thread.
+  class LIBPROTOBUF_EXPORT SerialArena {
+   public:
+    // The allocate/free methods here are a little strange, since SerialArena is
+    // allocated inside a Block which it also manages. This is to avoid doing
+    // an extra allocation for the SerialArena itself.
+
+    // Creates a new SerialArena inside Block* and returns it.
+    static SerialArena* New(Block* b, void* owner, ArenaImpl* arena);
+
+    // Destroys this SerialArena, freeing all blocks with the given dealloc
+    // function, except any block equal to |initial_block|.
+    static uint64 Free(SerialArena* serial, Block* initial_block,
+                       void (*block_dealloc)(void*, size_t));
+
+    void CleanupList();
+    uint64 SpaceUsed() const;
+
+    void* AllocateAligned(size_t n) {
+      GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n);  // Must be already aligned.
+      GOOGLE_DCHECK_GE(limit_, ptr_);
+      if (GOOGLE_PREDICT_FALSE(static_cast<size_t>(limit_ - ptr_) < n)) {
+        return AllocateAlignedFallback(n);
+      }
+      void* ret = ptr_;
+      ptr_ += n;
+#ifdef ADDRESS_SANITIZER
+      ASAN_UNPOISON_MEMORY_REGION(ret, n);
+#endif  // ADDRESS_SANITIZER
+      return ret;
+    }
+
+    void AddCleanup(void* elem, void (*cleanup)(void*)) {
+      if (GOOGLE_PREDICT_FALSE(cleanup_ptr_ == cleanup_limit_)) {
+        AddCleanupFallback(elem, cleanup);
+        return;
+      }
+      cleanup_ptr_->elem = elem;
+      cleanup_ptr_->cleanup = cleanup;
+      cleanup_ptr_++;
+    }
+
+    void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*)) {
+      void* ret = AllocateAligned(n);
+      AddCleanup(ret, cleanup);
+      return ret;
+    }
+
+    void* owner() const { return owner_; }
+    SerialArena* next() const { return next_; }
+    void set_next(SerialArena* next) { next_ = next; }
+
+   private:
+    void* AllocateAlignedFallback(size_t n);
+    void AddCleanupFallback(void* elem, void (*cleanup)(void*));
+    void CleanupListFallback();
+
+    ArenaImpl* arena_;       // Containing arena.
+    void* owner_;            // &ThreadCache of this thread;
+    Block* head_;            // Head of linked list of blocks.
+    CleanupChunk* cleanup_;  // Head of cleanup list.
+    SerialArena* next_;      // Next SerialArena in this linked list.
+
+    // Next pointer to allocate from. Always 8-byte aligned. Points inside
+    // head_ (and head_->pos will always be non-canonical). We keep these
+    // here to reduce indirection.
+    char* ptr_;
+    char* limit_;
+
+    // Next CleanupList members to append to. These point inside cleanup_.
+    CleanupNode* cleanup_ptr_;
+    CleanupNode* cleanup_limit_;
+  };
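+
+  // Threading note (a summary of the design visible in this file): SerialArena
+  // itself is single-threaded and bump-allocates out of ptr_/limit_ in O(1);
+  // ArenaImpl keeps one SerialArena per thread that touches the arena (the
+  // threads_ list below), located via the per-thread ThreadCache, which is
+  // what makes the public allocation API safe to call concurrently.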
+
+  // Blocks are variable length malloc-ed objects. The following structure
+  // describes the common header for all blocks.
+  class LIBPROTOBUF_EXPORT Block {
+   public:
+    Block(size_t size, Block* next);
+
+    char* Pointer(size_t n) {
+      GOOGLE_DCHECK(n <= size_);
+      return reinterpret_cast<char*>(this) + n;
+    }
+
+    Block* next() const { return next_; }
+    size_t pos() const { return pos_; }
+    size_t size() const { return size_; }
+    void set_pos(size_t pos) { pos_ = pos; }
+
+   private:
+    Block* next_;  // Next block for this thread.
+    size_t pos_;
+    size_t size_;
+    // data follows
+  };
+
+  struct ThreadCache {
+#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
+    // If we are using the ThreadLocalStorage class to store the ThreadCache,
+    // then the ThreadCache's default constructor has to be responsible for
+    // initializing it.
+    ThreadCache() : last_lifecycle_id_seen(-1), last_serial_arena(NULL) {}
+#endif
+
+    // The ThreadCache is considered valid as long as this matches the
+    // lifecycle_id of the arena being used.
+    int64 last_lifecycle_id_seen;
+    SerialArena* last_serial_arena;
+  };
+  static std::atomic<int64> lifecycle_id_generator_;
+#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
+  // The Android NDK does not support the GOOGLE_THREAD_LOCAL keyword, so we use
+  // a custom thread local storage class we implemented.
+  // iOS also does not support the GOOGLE_THREAD_LOCAL keyword.
+  static ThreadCache& thread_cache();
+#elif defined(PROTOBUF_USE_DLLS)
+  // Thread local variables cannot be exposed through DLL interface but we can
+  // wrap them in static functions.
+  static ThreadCache& thread_cache();
+#else
+  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_;
+  static ThreadCache& thread_cache() { return thread_cache_; }
+#endif
+
+  void Init();
+
+  // Free all blocks and return the total space used, which is the sum of the
+  // sizes of all the allocated blocks.
+  uint64 FreeBlocks();
+  // Delete or Destruct all objects owned by the arena.
+  void CleanupList();
+
+  inline void CacheSerialArena(SerialArena* serial) {
+    thread_cache().last_serial_arena = serial;
+    thread_cache().last_lifecycle_id_seen = lifecycle_id_;
+    // TODO(haberman): evaluate whether we would gain efficiency by getting rid
+    // of hint_. It's the only write we do to ArenaImpl in the allocation path,
+    // which will dirty the cache line.
+
+    hint_.store(serial, std::memory_order_release);
+  }
+
+  std::atomic<SerialArena*>
+      threads_;                          // Pointer to a linked list of SerialArena.
+  std::atomic<SerialArena*> hint_;       // Fast thread-local block access
+  std::atomic<uint64> space_allocated_;  // Total size of all allocated blocks.
+
+  Block* initial_block_;  // If non-NULL, points to the block that came from
+                          // user data.
+
+  Block* NewBlock(Block* last_block, size_t min_bytes);
+
+  SerialArena* GetSerialArena();
+  bool GetSerialArenaFast(SerialArena** arena);
+  SerialArena* GetSerialArenaFallback(void* me);
+  int64 lifecycle_id_;  // Unique for each arena. Changes on Reset().
+
+  Options options_;
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArenaImpl);
+  // All protos have pointers back to the arena hence Arena must have
+  // pointer stability.
+  ArenaImpl(ArenaImpl&&) = delete;
+  ArenaImpl& operator=(ArenaImpl&&) = delete;
+
+ public:
+  // kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8
+  // to protect the invariant that pos is always at a multiple of 8.
+  static const size_t kBlockHeaderSize = (sizeof(Block) + 7) & -8;
+  static const size_t kSerialArenaSize = (sizeof(SerialArena) + 7) & -8;
+  static_assert(kBlockHeaderSize % 8 == 0,
+                "kBlockHeaderSize must be a multiple of 8.");
+  static_assert(kSerialArenaSize % 8 == 0,
+                "kSerialArenaSize must be a multiple of 8.");
+};
+
+}  // namespace internal
+}  // namespace protobuf
+
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_ARENA_IMPL_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arenastring.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arenastring.h
new file mode 100644
index 0000000000000000000000000000000000000000..168fc972b82d836f97cdbe998e0bf5c00ddded03
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arenastring.h
@@ -0,0 +1,403 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_ARENASTRING_H__
+#define GOOGLE_PROTOBUF_ARENASTRING_H__
+
+#include <string>
+
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/fastmem.h>
+#include <google/protobuf/arena.h>
+#include <google/protobuf/generated_message_util.h>
+
+// This is the implementation of arena string fields written for the
+// open-source release. The ArenaStringPtr struct below is an internal
+// implementation class and *should not be used* by user code. It is used to
+// collect string operations together into one place and abstract away the
+// underlying string-field pointer representation, so that (for example) an
+// alternate implementation that knew more about ::std::string's internals
+// could integrate more closely with the arena allocator.
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+template <typename T>
+class TaggedPtr {
+ public:
+  void Set(T* p) { ptr_ = reinterpret_cast<uintptr_t>(p); }
+  T* Get() const { return reinterpret_cast<T*>(ptr_); }
+
+  bool IsNull() { return ptr_ == 0; }
+
+ private:
+  uintptr_t ptr_;
+};
+
+struct LIBPROTOBUF_EXPORT ArenaStringPtr {
+  inline void Set(const ::std::string* default_value,
+                  const ::std::string& value, ::google::protobuf::Arena* arena) {
+    if (ptr_ == default_value) {
+      CreateInstance(arena, &value);
+    } else {
+      *ptr_ = value;
+    }
+  }
+
+  inline void SetLite(const ::std::string* default_value,
+                      const ::std::string& value,
+                      ::google::protobuf::Arena* arena) {
+    Set(default_value, value, arena);
+  }
+
+  // Basic accessors.
+  inline const ::std::string& Get() const { return *ptr_; }
+
+  inline ::std::string* Mutable(const ::std::string* default_value,
+                                ::google::protobuf::Arena* arena) {
+    if (ptr_ == default_value) {
+      CreateInstance(arena, default_value);
+    }
+    return ptr_;
+  }
+
+  // Release returns a ::std::string* instance that is heap-allocated and is not
+  // Own()'d by any arena. If the field was not set, it returns NULL. The caller
+  // retains ownership. Clears this field back to NULL state. Used to implement
+  // release_<field>() methods on generated classes.
+  inline ::std::string* Release(const ::std::string* default_value,
+                                ::google::protobuf::Arena* arena) {
+    if (ptr_ == default_value) {
+      return NULL;
+    }
+    return ReleaseNonDefault(default_value, arena);
+  }
+
+  // Similar to Release, but ptr_ cannot be the default_value.
+  inline ::std::string* ReleaseNonDefault(
+      const ::std::string* default_value, ::google::protobuf::Arena* arena) {
+    GOOGLE_DCHECK(!IsDefault(default_value));
+    ::std::string* released = NULL;
+    if (arena != NULL) {
+      // ptr_ is owned by the arena.
+      released = new ::std::string;
+      released->swap(*ptr_);
+    } else {
+      released = ptr_;
+    }
+    ptr_ = const_cast< ::std::string* >(default_value);
+    return released;
+  }
+
+  // UnsafeArenaRelease returns a ::std::string*, but it may be arena-owned (i.e.
+  // have its destructor already registered) if arena != NULL. If the field was
+  // not set, this returns NULL. This method clears this field back to NULL
+  // state. Used to implement unsafe_arena_release_<field>() methods on
+  // generated classes.
+  inline ::std::string* UnsafeArenaRelease(const ::std::string* default_value,
+                                           ::google::protobuf::Arena* /* arena */) {
+    if (ptr_ == default_value) {
+      return NULL;
+    }
+    ::std::string* released = ptr_;
+    ptr_ = const_cast< ::std::string* >(default_value);
+    return released;
+  }
+
+  // Takes a string that is heap-allocated, and takes ownership. The string's
+  // destructor is registered with the arena. Used to implement
+  // set_allocated_<field> in generated classes.
+  inline void SetAllocated(const ::std::string* default_value,
+                           ::std::string* value, ::google::protobuf::Arena* arena) {
+    if (arena == NULL && ptr_ != default_value) {
+      Destroy(default_value, arena);
+    }
+    if (value != NULL) {
+      ptr_ = value;
+      if (arena != NULL) {
+        arena->Own(value);
+      }
+    } else {
+      ptr_ = const_cast< ::std::string* >(default_value);
+    }
+  }
+
+  // Takes a string that has lifetime equal to the arena's lifetime. The arena
+  // must be non-null. It is safe only to pass this method a value returned by
+  // UnsafeArenaRelease() on another field of a message in the same arena. Used
+  // to implement unsafe_arena_set_allocated_<field> in generated classes.
+  inline void UnsafeArenaSetAllocated(const ::std::string* default_value,
+                                      ::std::string* value,
+                                      ::google::protobuf::Arena* /* arena */) {
+    if (value != NULL) {
+      ptr_ = value;
+    } else {
+      ptr_ = const_cast< ::std::string* >(default_value);
+    }
+  }
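+
+  // Ownership summary for the release methods above (illustrative):
+  //
+  //   arena == NULL:  Release() hands back the owned ::std::string itself.
+  //   arena != NULL:  Release() copies into a fresh heap string, because the
+  //                   original is destroyed together with the arena;
+  //                   UnsafeArenaRelease() skips that copy, but its result
+  //                   must not outlive the arena.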
+
+  // Swaps internal pointers. Arena-safety semantics: this is guarded by the
+  // logic in Swap()/UnsafeArenaSwap() at the message level, so this method is
+  // 'unsafe' if called directly.
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Swap(ArenaStringPtr* other) {
+    std::swap(ptr_, other->ptr_);
+  }
+  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Swap(
+      ArenaStringPtr* other, const ::std::string* default_value, Arena* arena) {
+#ifndef NDEBUG
+    // For debug builds, we swap the contents of the string, rather than the
+    // string instances themselves. This invalidates previously taken const
+    // references that are (per our documentation) invalidated by calling Swap()
+    // on the message.
+    //
+    // If both strings are the default_value, swapping is uninteresting.
+    // Otherwise, we use ArenaStringPtr::Mutable() to access the string, to
+    // ensure that we do not try to mutate default_value itself.
+    if (IsDefault(default_value) && other->IsDefault(default_value)) {
+      return;
+    }
+
+    ::std::string* this_ptr = Mutable(default_value, arena);
+    ::std::string* other_ptr = other->Mutable(default_value, arena);
+
+    this_ptr->swap(*other_ptr);
+#else
+    std::swap(ptr_, other->ptr_);
+#endif
+  }
+
+  // Frees storage (if not on an arena).
+  inline void Destroy(const ::std::string* default_value,
+                      ::google::protobuf::Arena* arena) {
+    if (arena == NULL && ptr_ != default_value) {
+      delete ptr_;
+    }
+  }
+
+  // Clears content, but keeps allocated string if arena != NULL, to avoid the
+  // overhead of heap operations. After this returns, the content (as seen by
+  // the user) will always be the empty string. Assumes that |default_value|
+  // is an empty string.
+  inline void ClearToEmpty(const ::std::string* default_value,
+                           ::google::protobuf::Arena* /* arena */) {
+    if (ptr_ == default_value) {
+      // Already set to default (which is empty) -- do nothing.
+    } else {
+      ptr_->clear();
+    }
+  }
+
+  // Clears content, assuming that the current value is not the empty string
+  // default.
+  inline void ClearNonDefaultToEmpty() {
+    ptr_->clear();
+  }
+  inline void ClearNonDefaultToEmptyNoArena() {
+    ptr_->clear();
+  }
+
+  // Clears content, but keeps allocated string if arena != NULL, to avoid the
+  // overhead of heap operations. After this returns, the content (as seen by
+  // the user) will always be equal to |default_value|.
+  inline void ClearToDefault(const ::std::string* default_value,
+                             ::google::protobuf::Arena* /* arena */) {
+    if (ptr_ == default_value) {
+      // Already set to default -- do nothing.
+    } else {
+      // Have another allocated string -- rather than throwing this away and
+      // resetting ptr_ to the canonical default string instance, we just reuse
+      // this instance.
+      *ptr_ = *default_value;
+    }
+  }
+
+  // Called from generated code / reflection runtime only. Resets value to point
+  // to a default string pointer, with the semantics that this ArenaStringPtr
+  // does not own the pointed-to memory. Disregards initial value of ptr_ (so
+  // this is the *ONLY* safe method to call after construction or when
+  // reinitializing after becoming the active field in a oneof union).
+  inline void UnsafeSetDefault(const ::std::string* default_value) {
+    // Casting away 'const' is safe here: accessors ensure that ptr_ is only
+    // returned as a const if it is equal to default_value.
+    ptr_ = const_cast< ::std::string* >(default_value);
+  }
+
+  // The 'NoArena' variants of methods below assume arena == NULL and are
+  // optimized to provide very little overhead relative to a raw string pointer
+  // (while still being in-memory compatible with other code that assumes
+  // ArenaStringPtr). Note the invariant that a class instance that has only
+  // ever been mutated by NoArena methods must *only* be in the String state
+  // (i.e., tag bits are not used), *NEVER* ArenaString. This allows all
+  // tagged-pointer manipulations to be avoided.
+  inline void SetNoArena(const ::std::string* default_value,
+                         const ::std::string& value) {
+    if (ptr_ == default_value) {
+      CreateInstanceNoArena(&value);
+    } else {
+      *ptr_ = value;
+    }
+  }
+
+#if LANG_CXX11
+  void SetNoArena(const ::std::string* default_value, ::std::string&& value) {
+    if (IsDefault(default_value)) {
+      ptr_ = new ::std::string(std::move(value));
+    } else {
+      *ptr_ = std::move(value);
+    }
+  }
+#endif
+
+  void AssignWithDefault(const ::std::string* default_value, ArenaStringPtr value);
+
+  inline const ::std::string& GetNoArena() const { return *ptr_; }
+
+  inline ::std::string* MutableNoArena(const ::std::string* default_value) {
+    if (ptr_ == default_value) {
+      CreateInstanceNoArena(default_value);
+    }
+    return ptr_;
+  }
+
+  inline ::std::string* ReleaseNoArena(const ::std::string* default_value) {
+    if (ptr_ == default_value) {
+      return NULL;
+    } else {
+      return ReleaseNonDefaultNoArena(default_value);
+    }
+  }
+
+  inline ::std::string* ReleaseNonDefaultNoArena(
+      const ::std::string* default_value) {
+    GOOGLE_DCHECK(!IsDefault(default_value));
+    ::std::string* released = ptr_;
+    ptr_ = const_cast< ::std::string* >(default_value);
+    return released;
+  }
+
+  inline void SetAllocatedNoArena(const ::std::string* default_value,
+                                  ::std::string* value) {
+    if (ptr_ != default_value) {
+      delete ptr_;
+    }
+    if (value != NULL) {
+      ptr_ = value;
+    } else {
+      ptr_ = const_cast< ::std::string* >(default_value);
+    }
+  }
+
+  inline void DestroyNoArena(const ::std::string* default_value) {
+    if (ptr_ != default_value) {
+      delete ptr_;
+    }
+  }
+
+  inline void ClearToEmptyNoArena(const ::std::string* default_value) {
+    if (ptr_ == default_value) {
+      // Nothing: already equal to default (which is the empty string).
+    } else {
+      ptr_->clear();
+    }
+  }
+
+  inline void ClearToDefaultNoArena(const ::std::string* default_value) {
+    if (ptr_ == default_value) {
+      // Nothing: already set to default.
+    } else {
+      // Reuse existing allocated instance.
+      *ptr_ = *default_value;
+    }
+  }
+
+  // Internal accessor used only at parse time to provide direct access to the
+  // raw pointer from the shared parse routine (in the non-arenas case). The
+  // parse routine does the string allocation in order to save code size in the
+  // generated parsing code.
+  inline ::std::string** UnsafeRawStringPointer() {
+    return &ptr_;
+  }
+
+  inline bool IsDefault(const ::std::string* default_value) const {
+    return ptr_ == default_value;
+  }
+
+  // Internal accessors!!!!
+  void UnsafeSetTaggedPointer(TaggedPtr< ::std::string> value) {
+    ptr_ = value.Get();
+  }
+  // Generated code only! An optimization, in certain cases the generated
+  // code is certain we can obtain a string with no default checks and
+  // tag tests.
+  ::std::string* UnsafeMutablePointer() { return ptr_; }
+
+ private:
+  ::std::string* ptr_;
+
+  GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
+  void CreateInstance(::google::protobuf::Arena* arena,
+                      const ::std::string* initial_value) {
+    GOOGLE_DCHECK(initial_value != NULL);
+    // uses "new ::std::string" when arena is nullptr
+    ptr_ = Arena::Create< ::std::string >(arena, *initial_value);
+  }
+  GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
+  void CreateInstanceNoArena(const ::std::string* initial_value) {
+    GOOGLE_DCHECK(initial_value != NULL);
+    ptr_ = new ::std::string(*initial_value);
+  }
+};
+
+}  // namespace internal
+}  // namespace protobuf
+
+namespace protobuf {
+namespace internal {
+
+inline void ArenaStringPtr::AssignWithDefault(const ::std::string* default_value,
+                                              ArenaStringPtr value) {
+  const ::std::string* me = *UnsafeRawStringPointer();
+  const ::std::string* other = *value.UnsafeRawStringPointer();
+  // If the pointers are the same then do nothing.
+  if (me != other) {
+    SetNoArena(default_value, value.GetNoArena());
+  }
+}
+
+}  // namespace internal
+}  // namespace protobuf
+
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_ARENASTRING_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/extension_set.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/extension_set.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1535baaecd189cc10fcf327bf1db08101a92c03
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/extension_set.h
@@ -0,0 +1,1462 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// This header is logically internal, but is made public because it is used
+// from protocol-compiler-generated code, which may reside in other components.
+
+#ifndef GOOGLE_PROTOBUF_EXTENSION_SET_H__
+#define GOOGLE_PROTOBUF_EXTENSION_SET_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+namespace google {
+
+namespace protobuf {
+  class Arena;
+  class Descriptor;       // descriptor.h
+  class FieldDescriptor;  // descriptor.h
+  class DescriptorPool;   // descriptor.h
+  class MessageLite;      // message_lite.h
+  class Message;          // message.h
+  class MessageFactory;   // message.h
+  class UnknownFieldSet;  // unknown_field_set.h
+  namespace io {
+    class CodedInputStream;   // coded_stream.h
+    class CodedOutputStream;  // coded_stream.h
+  }
+  namespace internal {
+    class FieldSkipper;  // wire_format_lite.h
+  }
+}
+
+namespace protobuf {
+namespace internal {
+
+// Used to store values of type WireFormatLite::FieldType without having to
+// #include wire_format_lite.h. Also, ensures that we use only one byte to
+// store these values, which is important to keep the layout of
+// ExtensionSet::Extension small.
+typedef uint8 FieldType;
+
+// A function which, given an integer value, returns true if the number
+// matches one of the defined values for the corresponding enum type. This
+// is used with RegisterEnumExtension, below.
+typedef bool EnumValidityFunc(int number);
+
+// Version of the above which takes an argument. This is needed to deal with
+// extensions that are not compiled in.
+typedef bool EnumValidityFuncWithArg(const void* arg, int number);
+
+// Information about a registered extension.
+struct ExtensionInfo {
+  inline ExtensionInfo() {}
+  inline ExtensionInfo(FieldType type_param, bool isrepeated, bool ispacked)
+      : type(type_param), is_repeated(isrepeated), is_packed(ispacked),
+        descriptor(NULL) {}
+
+  FieldType type;
+  bool is_repeated;
+  bool is_packed;
+
+  struct EnumValidityCheck {
+    EnumValidityFuncWithArg* func;
+    const void* arg;
+  };
+
+  union {
+    EnumValidityCheck enum_validity_check;
+    const MessageLite* message_prototype;
+  };
+
+  // The descriptor for this extension, if one exists and is known. May be
+  // NULL. Must not be NULL if the descriptor for the extension does not
+  // live in the same pool as the descriptor for the containing type.
+  const FieldDescriptor* descriptor;
+};
+
+// Abstract interface for an object which looks up extension definitions. Used
+// when parsing.
+class LIBPROTOBUF_EXPORT ExtensionFinder {
+ public:
+  virtual ~ExtensionFinder();
+
+  // Find the extension with the given containing type and number.
+  virtual bool Find(int number, ExtensionInfo* output) = 0;
+};
+
+// Implementation of ExtensionFinder which finds extensions defined in .proto
+// files which have been compiled into the binary.
+class LIBPROTOBUF_EXPORT GeneratedExtensionFinder : public ExtensionFinder {
+ public:
+  GeneratedExtensionFinder(const MessageLite* containing_type)
+      : containing_type_(containing_type) {}
+  virtual ~GeneratedExtensionFinder() {}
+
+  // Returns true and fills in *output if found, otherwise returns false.
+  virtual bool Find(int number, ExtensionInfo* output);
+
+ private:
+  const MessageLite* containing_type_;
+};
+
+// A FieldSkipper used for parsing MessageSet.
+class MessageSetFieldSkipper;
+
+// Note: extension_set_heavy.cc defines DescriptorPoolExtensionFinder for
+// finding extensions from a DescriptorPool.
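+
+// Orientation sketch (illustrative; the .proto names are hypothetical): given
+//
+//   extend Foo { optional int32 bar = 1234; }
+//
+// the generated accessors on Foo call through to the ExtensionSet below,
+// roughly as
+//
+//   msg.GetExtension(bar);     // -> extension set GetInt32(1234, default)
+//   msg.SetExtension(bar, 5);  // -> extension set SetInt32(1234, type, 5, ...)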
+
+// This is an internal helper class intended for use within the protocol buffer
+// library and generated classes. Clients should not use it directly. Instead,
+// use the generated accessors such as GetExtension() of the class being
+// extended.
+//
+// This class manages extensions for a protocol message object. The
+// message's HasExtension(), GetExtension(), MutableExtension(), and
+// ClearExtension() methods are just thin wrappers around the embedded
+// ExtensionSet. When parsing, if a tag number is encountered which is
+// inside one of the message type's extension ranges, the tag is passed
+// off to the ExtensionSet for parsing. Etc.
+class LIBPROTOBUF_EXPORT ExtensionSet {
+ public:
+  ExtensionSet();
+  explicit ExtensionSet(::google::protobuf::Arena* arena);
+  ~ExtensionSet();
+
+  // These are called at startup by protocol-compiler-generated code to
+  // register known extensions. The registrations are used by ParseField()
+  // to look up extensions for parsed field numbers. Note that dynamic parsing
+  // does not use ParseField(); only protocol-compiler-generated parsing
+  // methods do.
+  static void RegisterExtension(const MessageLite* containing_type,
+                                int number, FieldType type,
+                                bool is_repeated, bool is_packed);
+  static void RegisterEnumExtension(const MessageLite* containing_type,
+                                    int number, FieldType type,
+                                    bool is_repeated, bool is_packed,
+                                    EnumValidityFunc* is_valid);
+  static void RegisterMessageExtension(const MessageLite* containing_type,
+                                       int number, FieldType type,
+                                       bool is_repeated, bool is_packed,
+                                       const MessageLite* prototype);
+
+  // =================================================================
+
+  // Add all fields which are currently present to the given vector. This
+  // is useful to implement Reflection::ListFields().
+  void AppendToList(const Descriptor* containing_type,
+                    const DescriptorPool* pool,
+                    std::vector<const FieldDescriptor*>* output) const;
+
+  // =================================================================
+  // Accessors
+  //
+  // Generated message classes include type-safe templated wrappers around
+  // these methods. Generally you should use those rather than call these
+  // directly, unless you are doing low-level memory management.
+  //
+  // When calling any of these accessors, the extension number requested
+  // MUST exist in the DescriptorPool provided to the constructor. Otherwise,
+  // the method will fail an assert. Normally, though, you would not call
+  // these directly; you would either call the generated accessors of your
+  // message class (e.g. GetExtension()) or you would call the accessors
+  // of the reflection interface. In both cases, it is impossible to
+  // trigger this assert failure: the generated accessors only accept
+  // linked-in extension types as parameters, while the Reflection interface
+  // requires you to provide the FieldDescriptor describing the extension.
+  //
+  // When calling any of these accessors, a protocol-compiler-generated
+  // implementation of the extension corresponding to the number MUST
+  // be linked in, and the FieldDescriptor used to refer to it MUST be
+  // the one generated by that linked-in code. Otherwise, the method will
+  // die on an assert failure. The message objects returned by the message
+  // accessors are guaranteed to be of the correct linked-in type.
+  //
+  // These methods pretty much match Reflection except that:
+  // - They're not virtual.
+  // - They identify fields by number rather than FieldDescriptors.
+  // - They identify enum values using integers rather than descriptors.
+  // - Strings provide Mutable() in addition to Set() accessors.
+
+  bool Has(int number) const;
+  int ExtensionSize(int number) const;  // Size of a repeated extension.
+  int NumExtensions() const;            // The number of extensions.
+  FieldType ExtensionType(int number) const;
+  void ClearExtension(int number);
+
+  // singular fields -------------------------------------------------
+
+  int32  GetInt32 (int number, int32 default_value) const;
+  int64  GetInt64 (int number, int64 default_value) const;
+  uint32 GetUInt32(int number, uint32 default_value) const;
+  uint64 GetUInt64(int number, uint64 default_value) const;
+  float  GetFloat (int number, float default_value) const;
+  double GetDouble(int number, double default_value) const;
+  bool   GetBool  (int number, bool default_value) const;
+  int    GetEnum  (int number, int default_value) const;
+  const string & GetString (int number, const string& default_value) const;
+  const MessageLite& GetMessage(int number,
+                                const MessageLite& default_value) const;
+  const MessageLite& GetMessage(int number, const Descriptor* message_type,
+                                MessageFactory* factory) const;
+
+  // |descriptor| may be NULL so long as it is known that the descriptor for
+  // the extension lives in the same pool as the descriptor for the containing
+  // type.
+#define desc const FieldDescriptor* descriptor  // avoid line wrapping
+  void SetInt32 (int number, FieldType type, int32 value, desc);
+  void SetInt64 (int number, FieldType type, int64 value, desc);
+  void SetUInt32(int number, FieldType type, uint32 value, desc);
+  void SetUInt64(int number, FieldType type, uint64 value, desc);
+  void SetFloat (int number, FieldType type, float value, desc);
+  void SetDouble(int number, FieldType type, double value, desc);
+  void SetBool  (int number, FieldType type, bool value, desc);
+  void SetEnum  (int number, FieldType type, int value, desc);
+  void SetString(int number, FieldType type, const string& value, desc);
+  string * MutableString (int number, FieldType type, desc);
+  MessageLite* MutableMessage(int number, FieldType type,
+                              const MessageLite& prototype, desc);
+  MessageLite* MutableMessage(const FieldDescriptor* descriptor,
+                              MessageFactory* factory);
+  // Adds the given message to the ExtensionSet, taking ownership of the
+  // message object. Existing message with the same number will be deleted.
+  // If "message" is NULL, this is equivalent to "ClearExtension(number)".
+  void SetAllocatedMessage(int number, FieldType type,
+                           const FieldDescriptor* descriptor,
+                           MessageLite* message);
+  void UnsafeArenaSetAllocatedMessage(int number, FieldType type,
+                                      const FieldDescriptor* descriptor,
+                                      MessageLite* message);
+  MessageLite* ReleaseMessage(int number, const MessageLite& prototype);
+  MessageLite* UnsafeArenaReleaseMessage(
+      int number, const MessageLite& prototype);
+
+  MessageLite* ReleaseMessage(const FieldDescriptor* descriptor,
+                              MessageFactory* factory);
+  MessageLite* UnsafeArenaReleaseMessage(const FieldDescriptor* descriptor,
+                                         MessageFactory* factory);
+#undef desc
+  ::google::protobuf::Arena* GetArenaNoVirtual() const { return arena_; }
+
+  // repeated fields -------------------------------------------------
+
+  // Fetches a RepeatedField extension by number; returns |default_value|
+  // if no such extension exists. User should not touch this directly; it is
+  // used by the GetRepeatedExtension() method.
+  const void* GetRawRepeatedField(int number, const void* default_value) const;
+  // Fetches a mutable version of a RepeatedField extension by number,
+  // instantiating one if none exists. Similar to above, user should not use
+  // this directly; it underlies MutableRepeatedExtension().
+  void* MutableRawRepeatedField(int number, FieldType field_type,
+                                bool packed, const FieldDescriptor* desc);
+
+  // This is an overload of MutableRawRepeatedField to maintain compatibility
+  // with old code using a previous API. This version of
+  // MutableRawRepeatedField() will GOOGLE_CHECK-fail on a missing extension.
+  // (E.g.: borg/clients/internal/proto1/proto2_reflection.cc.)
+  void* MutableRawRepeatedField(int number);
+
+  int32  GetRepeatedInt32 (int number, int index) const;
+  int64  GetRepeatedInt64 (int number, int index) const;
+  uint32 GetRepeatedUInt32(int number, int index) const;
+  uint64 GetRepeatedUInt64(int number, int index) const;
+  float  GetRepeatedFloat (int number, int index) const;
+  double GetRepeatedDouble(int number, int index) const;
+  bool   GetRepeatedBool  (int number, int index) const;
+  int    GetRepeatedEnum  (int number, int index) const;
+  const string & GetRepeatedString (int number, int index) const;
+  const MessageLite& GetRepeatedMessage(int number, int index) const;
+
+  void SetRepeatedInt32 (int number, int index, int32 value);
+  void SetRepeatedInt64 (int number, int index, int64 value);
+  void SetRepeatedUInt32(int number, int index, uint32 value);
+  void SetRepeatedUInt64(int number, int index, uint64 value);
+  void SetRepeatedFloat (int number, int index, float value);
+  void SetRepeatedDouble(int number, int index, double value);
+  void SetRepeatedBool  (int number, int index, bool value);
+  void SetRepeatedEnum  (int number, int index, int value);
+  void SetRepeatedString(int number, int index, const string& value);
+  string * MutableRepeatedString (int number, int index);
+  MessageLite* MutableRepeatedMessage(int number, int index);
+
+#define desc const FieldDescriptor* descriptor  // avoid line wrapping
+  void AddInt32 (int number, FieldType type, bool packed, int32 value, desc);
+  void AddInt64 (int number, FieldType type, bool packed, int64 value, desc);
+  void AddUInt32(int number, FieldType type, bool packed, uint32 value, desc);
+  void AddUInt64(int number, FieldType type, bool packed, uint64 value, desc);
+  void AddFloat (int number, FieldType type, bool packed, float value, desc);
+  void AddDouble(int number, FieldType type, bool packed, double value, desc);
+  void AddBool  (int number, FieldType type, bool packed, bool value, desc);
+  void AddEnum  (int number, FieldType type, bool packed, int value, desc);
+  void AddString(int number, FieldType type, const string& value, desc);
+  string * AddString (int number, FieldType type, desc);
+  MessageLite* AddMessage(int number, FieldType type,
+                          const MessageLite& prototype, desc);
+  MessageLite* AddMessage(const FieldDescriptor* descriptor,
+                          MessageFactory* factory);
+  void AddAllocatedMessage(const FieldDescriptor* descriptor,
+                           MessageLite* new_entry);
+#undef desc
+
+  void RemoveLast(int number);
+  MessageLite* ReleaseLast(int number);
+  void SwapElements(int number, int index1, int index2);
+
+  // -----------------------------------------------------------------
+  // TODO(kenton): Hardcore memory management accessors
+
+  // =================================================================
+  // convenience methods for implementing methods of Message
+  //
+  // These could all be implemented in terms of the other methods of this
+  // class, but providing them here helps keep the generated code size down.
+
+  void Clear();
+  void MergeFrom(const ExtensionSet& other);
+  void Swap(ExtensionSet* other);
+  void SwapExtension(ExtensionSet* other, int number);
+  bool IsInitialized() const;
+
+  // Parses a single extension from the input. The input should start out
+  // positioned immediately after the tag.
+  bool ParseField(uint32 tag, io::CodedInputStream* input,
+                  ExtensionFinder* extension_finder,
+                  FieldSkipper* field_skipper);
+
+  // Specific versions for lite or full messages (constructs the appropriate
+  // FieldSkipper automatically). |containing_type| is the default
+  // instance for the containing message; it is used only to look up the
+  // extension by number. See RegisterExtension(), above. Unlike the other
+  // methods of ExtensionSet, this only works for generated message types --
+  // it looks up extensions registered using RegisterExtension().
+  bool ParseField(uint32 tag, io::CodedInputStream* input,
+                  const MessageLite* containing_type);
+  bool ParseField(uint32 tag, io::CodedInputStream* input,
+                  const Message* containing_type,
+                  UnknownFieldSet* unknown_fields);
+  bool ParseField(uint32 tag, io::CodedInputStream* input,
+                  const MessageLite* containing_type,
+                  io::CodedOutputStream* unknown_fields);
+
+  // Parse an entire message in MessageSet format. Such messages have no
+  // fields, only extensions.
+  bool ParseMessageSet(io::CodedInputStream* input,
+                       ExtensionFinder* extension_finder,
+                       MessageSetFieldSkipper* field_skipper);
+
+  // Specific versions for lite or full messages (constructs the appropriate
+  // FieldSkipper automatically).
+  bool ParseMessageSet(io::CodedInputStream* input,
+                       const MessageLite* containing_type);
+  bool ParseMessageSet(io::CodedInputStream* input,
+                       const Message* containing_type,
+                       UnknownFieldSet* unknown_fields);
+
+  // Write all extension fields with field numbers in the range
+  //   [start_field_number, end_field_number)
+  // to the output stream, using the cached sizes computed when ByteSize() was
+  // last called. Note that the range bounds are inclusive-exclusive.
+  void SerializeWithCachedSizes(int start_field_number,
+                                int end_field_number,
+                                io::CodedOutputStream* output) const;
+
+  // Same as SerializeWithCachedSizes, but without any bounds checking.
+  // The caller must ensure that target has sufficient capacity for the
+  // serialized extensions.
+  //
+  // Returns a pointer past the last written byte.
+  uint8* InternalSerializeWithCachedSizesToArray(int start_field_number,
+                                                 int end_field_number,
+                                                 bool deterministic,
+                                                 uint8* target) const;
+
+  // Like above but serializes in MessageSet format.
+  void SerializeMessageSetWithCachedSizes(io::CodedOutputStream* output) const;
+  uint8* InternalSerializeMessageSetWithCachedSizesToArray(bool deterministic,
+                                                           uint8* target) const;
+
+  // For backward-compatibility, versions of two of the above methods that
+  // serialize deterministically iff SetDefaultSerializationDeterministic()
+  // has been called.
+  uint8* SerializeWithCachedSizesToArray(int start_field_number,
+                                         int end_field_number,
+                                         uint8* target) const;
+  uint8* SerializeMessageSetWithCachedSizesToArray(uint8* target) const;
+
+  // Returns the total serialized size of all the extensions.
+  size_t ByteSize() const;
+
+  // Like ByteSize() but uses MessageSet format.
+  size_t MessageSetByteSize() const;
If the ExtensionSet is
+  // for a lite message (and thus possibly contains lite messages), the results
+  // are undefined (might work, might crash, might corrupt data, might not even
+  // be linked in).  It's up to the protocol compiler to avoid calling this on
+  // such ExtensionSets (easy enough since lite messages don't implement
+  // SpaceUsed()).
+  size_t SpaceUsedExcludingSelfLong() const;
+
+  // This method just calls SpaceUsedExcludingSelfLong() but it can not be
+  // inlined because the definition of SpaceUsedExcludingSelfLong() is not
+  // included in lite runtime and when an inline method refers to it MSVC
+  // will complain about unresolved symbols when building the lite runtime
+  // as .dll.
+  int SpaceUsedExcludingSelf() const;
+
+ private:
+
+  // Interface of a lazily parsed singular message extension.
+  class LIBPROTOBUF_EXPORT LazyMessageExtension {
+   public:
+    LazyMessageExtension() {}
+    virtual ~LazyMessageExtension() {}
+
+    virtual LazyMessageExtension* New(::google::protobuf::Arena* arena) const = 0;
+    virtual const MessageLite& GetMessage(
+        const MessageLite& prototype) const = 0;
+    virtual MessageLite* MutableMessage(const MessageLite& prototype) = 0;
+    virtual void SetAllocatedMessage(MessageLite *message) = 0;
+    virtual void UnsafeArenaSetAllocatedMessage(MessageLite *message) = 0;
+    virtual MessageLite* ReleaseMessage(const MessageLite& prototype) = 0;
+    virtual MessageLite* UnsafeArenaReleaseMessage(
+        const MessageLite& prototype) = 0;
+
+    virtual bool IsInitialized() const = 0;
+
+    PROTOBUF_RUNTIME_DEPRECATED("Please use ByteSizeLong() instead")
+    virtual int ByteSize() const {
+      return internal::ToIntSize(ByteSizeLong());
+    }
+    virtual size_t ByteSizeLong() const = 0;
+    virtual size_t SpaceUsedLong() const = 0;
+
+    virtual void MergeFrom(const LazyMessageExtension& other) = 0;
+    virtual void Clear() = 0;
+
+    virtual bool ReadMessage(const MessageLite& prototype,
+                             io::CodedInputStream* input) = 0;
+    virtual void WriteMessage(int number,
+                              io::CodedOutputStream* output) const = 0;
+    virtual uint8* WriteMessageToArray(int number, uint8* target) const = 0;
+    virtual uint8* InternalWriteMessageToArray(int number, bool,
+                                               uint8* target) const {
+      // TODO(gpike): make this pure virtual. This is a placeholder because we
+      // need to update third_party/upb, for example.
+      return WriteMessageToArray(number, target);
+    }
+
+   private:
+    virtual void UnusedKeyMethod();  // Dummy key method to avoid weak vtable.
+
+    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(LazyMessageExtension);
+  };
+  struct Extension {
+    // The order of these fields packs Extension into 24 bytes when using 8
+    // byte alignment. Consider this when adding or removing fields here.
+    union {
+      int32 int32_value;
+      int64 int64_value;
+      uint32 uint32_value;
+      uint64 uint64_value;
+      float float_value;
+      double double_value;
+      bool bool_value;
+      int enum_value;
+      string* string_value;
+      MessageLite* message_value;
+      LazyMessageExtension* lazymessage_value;
+
+      RepeatedField<int32>* repeated_int32_value;
+      RepeatedField<int64>* repeated_int64_value;
+      RepeatedField<uint32>* repeated_uint32_value;
+      RepeatedField<uint64>* repeated_uint64_value;
+      RepeatedField<float>* repeated_float_value;
+      RepeatedField<double>* repeated_double_value;
+      RepeatedField<bool>* repeated_bool_value;
+      RepeatedField<int>* repeated_enum_value;
+      RepeatedPtrField<string>* repeated_string_value;
+      RepeatedPtrField<MessageLite>* repeated_message_value;
+    };
+
+    FieldType type;
+    bool is_repeated;
+
+    // For singular types, indicates if the extension is "cleared".  This
+    // happens when an extension is set and then later cleared by the caller.
+    // We want to keep the Extension object around for reuse, so instead of
+    // removing it from the map, we just set is_cleared = true.  This has no
+    // meaning for repeated types; for those, the size of the RepeatedField
+    // simply becomes zero when cleared.
+    bool is_cleared : 4;
+
+    // For singular message types, indicates whether lazy parsing is enabled
+    // for this extension. This field is only valid when type == TYPE_MESSAGE
+    // and !is_repeated because we only support lazy parsing for singular
+    // message types currently. If is_lazy = true, the extension is stored in
+    // lazymessage_value. Otherwise, the extension will be message_value.
+    bool is_lazy : 4;
+
+    // For repeated types, this indicates if the [packed=true] option is set.
+    bool is_packed;
+
+    // For packed fields, the size of the packed data is recorded here when
+    // ByteSize() is called then used during serialization.
+    // TODO(kenton):  Use atomic<int> when C++ supports it.
+    mutable int cached_size;
+
+    // The descriptor for this extension, if one exists and is known.  May be
+    // NULL.  Must not be NULL if the descriptor for the extension does not
+    // live in the same pool as the descriptor for the containing type.
+    const FieldDescriptor* descriptor;
+
+    // Some helper methods for operations on a single Extension.
+    void SerializeFieldWithCachedSizes(
+        int number,
+        io::CodedOutputStream* output) const;
+    uint8* InternalSerializeFieldWithCachedSizesToArray(
+        int number,
+        bool deterministic,
+        uint8* target) const;
+    void SerializeMessageSetItemWithCachedSizes(
+        int number,
+        io::CodedOutputStream* output) const;
+    uint8* InternalSerializeMessageSetItemWithCachedSizesToArray(
+        int number,
+        bool deterministic,
+        uint8* target) const;
+    size_t ByteSize(int number) const;
+    size_t MessageSetItemByteSize(int number) const;
+    void Clear();
+    int GetSize() const;
+    void Free();
+    size_t SpaceUsedExcludingSelfLong() const;
+    bool IsInitialized() const;
+  };
+
+  // The Extension struct is small enough to be passed by value, so we use it
+  // directly as the value type in mappings rather than use pointers.  We use
+  // sorted maps rather than hash-maps because we expect most ExtensionSets
+  // will only contain a small number of extensions.  Also, we want
+  // AppendToList and deterministic serialization to order fields by field
+  // number.
+
+  struct KeyValue {
+    int first;
+    Extension second;
+
+    struct FirstComparator {
+      bool operator()(const KeyValue& lhs, const KeyValue& rhs) const {
+        return lhs.first < rhs.first;
+      }
+      bool operator()(const KeyValue& lhs, int key) const {
+        return lhs.first < key;
+      }
+      bool operator()(int key, const KeyValue& rhs) const {
+        return key < rhs.first;
+      }
+    };
+  };
+
+  typedef std::map<int, Extension> LargeMap;
+
+  // Wrapper API that switches between flat-map and LargeMap.
+
+  // Finds a key (if present) in the ExtensionSet.
+  const Extension* FindOrNull(int key) const;
+  Extension* FindOrNull(int key);
+
+  // Helper-functions that only inspect the LargeMap.
+  const Extension* FindOrNullInLargeMap(int key) const;
+  Extension* FindOrNullInLargeMap(int key);
+
+  // Inserts a new (key, Extension) into the ExtensionSet (and returns true),
+  // or finds the already-existing Extension for that key (returns false).
+  // The Extension* will point to the new-or-found Extension.
+  std::pair<Extension*, bool> Insert(int key);
+
+  // Grows the flat_capacity_.
+  // If flat_capacity_ > kMaximumFlatCapacity, converts to LargeMap.
+  void GrowCapacity(size_t minimum_new_capacity);
+  static constexpr uint16 kMaximumFlatCapacity = 256;
+  bool is_large() const { return flat_capacity_ > kMaximumFlatCapacity; }
+
+  // Removes a key from the ExtensionSet.
+  void Erase(int key);
+
+  size_t Size() const {
+    return GOOGLE_PREDICT_FALSE(is_large()) ? map_.large->size() : flat_size_;
+  }
+
+  // Similar to std::for_each.
+  // Each Iterator is decomposed into ->first and ->second fields, so
+  // that the KeyValueFunctor can be agnostic vis-a-vis KeyValue-vs-std::pair.
+  template <typename Iterator, typename KeyValueFunctor>
+  static KeyValueFunctor ForEach(Iterator begin, Iterator end,
+                                 KeyValueFunctor func) {
+    for (Iterator it = begin; it != end; ++it) func(it->first, it->second);
+    return std::move(func);
+  }
+
+  // Applies a functor to the <int, Extension&> pairs in sorted order.
+  template <typename KeyValueFunctor>
+  KeyValueFunctor ForEach(KeyValueFunctor func) {
+    if (GOOGLE_PREDICT_FALSE(is_large())) {
+      return ForEach(map_.large->begin(), map_.large->end(), std::move(func));
+    }
+    return ForEach(flat_begin(), flat_end(), std::move(func));
+  }
+
+  // Applies a functor to the <int, const Extension&> pairs in sorted order.
+  template <typename KeyValueFunctor>
+  KeyValueFunctor ForEach(KeyValueFunctor func) const {
+    if (GOOGLE_PREDICT_FALSE(is_large())) {
+      return ForEach(map_.large->begin(), map_.large->end(), std::move(func));
+    }
+    return ForEach(flat_begin(), flat_end(), std::move(func));
+  }
+
+  // Merges existing Extension from other_extension
+  void InternalExtensionMergeFrom(int number, const Extension& other_extension);
+
+  // Returns true and fills field_number and extension if extension is found.
+  // Note to support packed repeated field compatibility, it also fills whether
+  // the tag on wire is packed, which can be different from
+  // extension->is_packed (whether packed=true is specified).
+  bool FindExtensionInfoFromTag(uint32 tag, ExtensionFinder* extension_finder,
+                                int* field_number, ExtensionInfo* extension,
+                                bool* was_packed_on_wire);
+
+  // Returns true and fills extension if extension is found.
+  // Note to support packed repeated field compatibility, it also fills whether
+  // the tag on wire is packed, which can be different from
+  // extension->is_packed (whether packed=true is specified).
+  bool FindExtensionInfoFromFieldNumber(int wire_type, int field_number,
+                                        ExtensionFinder* extension_finder,
+                                        ExtensionInfo* extension,
+                                        bool* was_packed_on_wire);
+
+  // Parses a single extension from the input. The input should start out
+  // positioned immediately after the wire tag. This method is called in
+  // ParseField() after field number and was_packed_on_wire is extracted from
+  // the wire tag and ExtensionInfo is found by the field number.
+  bool ParseFieldWithExtensionInfo(int field_number,
+                                   bool was_packed_on_wire,
+                                   const ExtensionInfo& extension,
+                                   io::CodedInputStream* input,
+                                   FieldSkipper* field_skipper);
+
+  // Like ParseField(), but this method may parse singular message extensions
+  // lazily depending on the value of FLAGS_eagerly_parse_message_sets.
+  bool ParseFieldMaybeLazily(int wire_type, int field_number,
+                             io::CodedInputStream* input,
+                             ExtensionFinder* extension_finder,
+                             MessageSetFieldSkipper* field_skipper);
+
+  // Gets the extension with the given number, creating it if it does not
+  // already exist.  Returns true if the extension did not already exist.
+  bool MaybeNewExtension(int number, const FieldDescriptor* descriptor,
+                         Extension** result);
+
+  // Gets the repeated extension for the given descriptor, creating it if
+  // it does not exist.
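// [Sketch] A minimal, self-contained illustration of the two-tier storage
// strategy described above: entries live in a sorted flat array searched with
// std::lower_bound, and the container promotes itself to a std::map once it
// outgrows a fixed threshold. The names SmallSortedMap and kFlatLimit are
// illustrative only; they do not appear in this header.
#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>
#include <utility>
#include <vector>

template <typename V>
class SmallSortedMap {
 public:
  // Binary search while flat, tree lookup once promoted.
  V* FindOrNull(int key) {
    if (large_) {
      auto it = large_->find(key);
      return it == large_->end() ? nullptr : &it->second;
    }
    auto it = LowerBound(key);
    return (it != flat_.end() && it->first == key) ? &it->second : nullptr;
  }

  // Mirrors the Insert() contract above: {value, inserted}.
  std::pair<V*, bool> Insert(int key) {
    if (V* existing = FindOrNull(key)) return {existing, false};
    if (!large_ && flat_.size() >= kFlatLimit) {
      // Promote: the sorted array becomes a std::map and is retired.
      large_.reset(new std::map<int, V>(flat_.begin(), flat_.end()));
      flat_.clear();
    }
    if (large_) return {&(*large_)[key], true};
    return {&flat_.insert(LowerBound(key), {key, V()})->second, true};
  }

 private:
  typename std::vector<std::pair<int, V>>::iterator LowerBound(int key) {
    return std::lower_bound(
        flat_.begin(), flat_.end(), key,
        [](const std::pair<int, V>& kv, int k) { return kv.first < k; });
  }

  static constexpr std::size_t kFlatLimit = 256;  // plays the kMaximumFlatCapacity role
  std::vector<std::pair<int, V>> flat_;           // sorted by key while small
  std::unique_ptr<std::map<int, V>> large_;       // non-null once promoted
};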
+ Extension* MaybeNewRepeatedExtension(const FieldDescriptor* descriptor); + + // Parse a single MessageSet item -- called just after the item group start + // tag has been read. + bool ParseMessageSetItem(io::CodedInputStream* input, + ExtensionFinder* extension_finder, + MessageSetFieldSkipper* field_skipper); + + // Hack: RepeatedPtrFieldBase declares ExtensionSet as a friend. This + // friendship should automatically extend to ExtensionSet::Extension, but + // unfortunately some older compilers (e.g. GCC 3.4.4) do not implement this + // correctly. So, we must provide helpers for calling methods of that + // class. + + // Defined in extension_set_heavy.cc. + static inline size_t RepeatedMessage_SpaceUsedExcludingSelfLong( + RepeatedPtrFieldBase* field); + + KeyValue* flat_begin() { + assert(!is_large()); + return map_.flat; + } + const KeyValue* flat_begin() const { + assert(!is_large()); + return map_.flat; + } + KeyValue* flat_end() { + assert(!is_large()); + return map_.flat + flat_size_; + } + const KeyValue* flat_end() const { + assert(!is_large()); + return map_.flat + flat_size_; + } + + ::google::protobuf::Arena* arena_; + + // Manual memory-management: + // map_.flat is an allocated array of flat_capacity_ elements. + // [map_.flat, map_.flat + flat_size_) is the currently-in-use prefix. + uint16 flat_capacity_; + uint16 flat_size_; + union AllocatedData { + KeyValue* flat; + + // If flat_capacity_ > kMaximumFlatCapacity, switch to LargeMap, + // which guarantees O(n lg n) CPU but larger constant factors. + LargeMap* large; + } map_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ExtensionSet); +}; + +// These are just for convenience... +inline void ExtensionSet::SetString(int number, FieldType type, + const string& value, + const FieldDescriptor* descriptor) { + MutableString(number, type, descriptor)->assign(value); +} +inline void ExtensionSet::SetRepeatedString(int number, int index, + const string& value) { + MutableRepeatedString(number, index)->assign(value); +} +inline void ExtensionSet::AddString(int number, FieldType type, + const string& value, + const FieldDescriptor* descriptor) { + AddString(number, type, descriptor)->assign(value); +} + +// =================================================================== +// Glue for generated extension accessors + +// ------------------------------------------------------------------- +// Template magic + +// First we have a set of classes representing "type traits" for different +// field types. A type traits class knows how to implement basic accessors +// for extensions of a particular type given an ExtensionSet. The signature +// for a type traits class looks like this: +// +// class TypeTraits { +// public: +// typedef ? ConstType; +// typedef ? MutableType; +// // TypeTraits for singular fields and repeated fields will define the +// // symbol "Singular" or "Repeated" respectively. These two symbols will +// // be used in extension accessors to distinguish between singular +// // extensions and repeated extensions. If the TypeTraits for the passed +// // in extension doesn't have the expected symbol defined, it means the +// // user is passing a repeated extension to a singular accessor, or the +// // opposite. In that case the C++ compiler will generate an error +// // message "no matching member function" to inform the user. +// typedef ? Singular +// typedef ? 
Repeated
+//
+//     static inline ConstType Get(int number, const ExtensionSet& set);
+//     static inline void Set(int number, ConstType value, ExtensionSet* set);
+//     static inline MutableType Mutable(int number, ExtensionSet* set);
+//
+//     // Variants for repeated fields.
+//     static inline ConstType Get(int number, const ExtensionSet& set,
+//                                 int index);
+//     static inline void Set(int number, int index,
+//                            ConstType value, ExtensionSet* set);
+//     static inline MutableType Mutable(int number, int index,
+//                                       ExtensionSet* set);
+//     static inline void Add(int number, ConstType value, ExtensionSet* set);
+//     static inline MutableType Add(int number, ExtensionSet* set);
+//     // This is used by the ExtensionIdentifier constructor to register
+//     // the extension at dynamic initialization.
+//     template <typename ExtendeeT>
+//     static void Register(int number, FieldType type, bool is_packed);
+//   };
+//
+// Not all of these methods make sense for all field types.  For example, the
+// "Mutable" methods only make sense for strings and messages, and the
+// repeated methods only make sense for repeated types.  So, each type
+// traits class implements only the set of methods from this signature that it
+// actually supports.  This will cause a compiler error if the user tries to
+// access an extension using a method that doesn't make sense for its type.
+// For example, if "foo" is an extension of type "optional int32", then if you
+// try to write code like:
+//   my_message.MutableExtension(foo)
+// you will get a compile error because PrimitiveTypeTraits<int32> does not
+// have a "Mutable()" method.
+
+// -------------------------------------------------------------------
+// PrimitiveTypeTraits
+
+// Since the ExtensionSet has different methods for each primitive type,
+// we must explicitly define the methods of the type traits class for each
+// known type.
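// [Sketch] How the Singular/Repeated typedefs act as a compile-time guard,
// reduced to a standalone example. ExampleSet and the traits classes below
// are illustrative stand-ins, not the real protobuf types. GetSingular()
// names Traits::Singular in its signature, so passing a repeated-style
// traits class fails with "no matching member function", exactly the
// mechanism the comment above describes.
#include <iostream>

struct ExampleSet { int storage = 0; };

struct Int32Traits {
  typedef int ConstType;
  typedef Int32Traits Singular;  // marks this traits class as singular
  static ConstType Get(const ExampleSet& set) { return set.storage; }
};

struct RepeatedInt32Traits {
  typedef int ConstType;
  typedef RepeatedInt32Traits Repeated;  // marks this traits class as repeated
};

// Naming Traits::Singular here is what enforces the singular/repeated check.
template <typename Traits>
typename Traits::Singular::ConstType GetSingular(const ExampleSet& set) {
  return Traits::Get(set);
}

int main() {
  ExampleSet set;
  set.storage = 42;
  std::cout << GetSingular<Int32Traits>(set) << "\n";  // OK: prints 42
  // GetSingular<RepeatedInt32Traits>(set);  // compile error: no `Singular`
}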
+template +class PrimitiveTypeTraits { + public: + typedef Type ConstType; + typedef Type MutableType; + typedef PrimitiveTypeTraits Singular; + + static inline ConstType Get(int number, const ExtensionSet& set, + ConstType default_value); + static inline void Set(int number, FieldType field_type, + ConstType value, ExtensionSet* set); + template + static void Register(int number, FieldType type, bool is_packed) { + ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number, + type, false, is_packed); + } +}; + +template +class RepeatedPrimitiveTypeTraits { + public: + typedef Type ConstType; + typedef Type MutableType; + typedef RepeatedPrimitiveTypeTraits Repeated; + + typedef RepeatedField RepeatedFieldType; + + static inline Type Get(int number, const ExtensionSet& set, int index); + static inline void Set(int number, int index, Type value, ExtensionSet* set); + static inline void Add(int number, FieldType field_type, + bool is_packed, Type value, ExtensionSet* set); + + static inline const RepeatedField& + GetRepeated(int number, const ExtensionSet& set); + static inline RepeatedField* + MutableRepeated(int number, FieldType field_type, + bool is_packed, ExtensionSet* set); + + static const RepeatedFieldType* GetDefaultRepeatedField(); + template + static void Register(int number, FieldType type, bool is_packed) { + ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number, + type, true, is_packed); + } +}; + +LIBPROTOBUF_EXPORT extern ProtobufOnceType repeated_primitive_generic_type_traits_once_init_; + +class LIBPROTOBUF_EXPORT RepeatedPrimitiveDefaults { + private: + template friend class RepeatedPrimitiveTypeTraits; + static const RepeatedPrimitiveDefaults* default_instance(); + RepeatedField default_repeated_field_int32_; + RepeatedField default_repeated_field_int64_; + RepeatedField default_repeated_field_uint32_; + RepeatedField default_repeated_field_uint64_; + RepeatedField default_repeated_field_double_; + RepeatedField default_repeated_field_float_; + RepeatedField default_repeated_field_bool_; +}; + +#define PROTOBUF_DEFINE_PRIMITIVE_TYPE(TYPE, METHOD) \ +template<> inline TYPE PrimitiveTypeTraits::Get( \ + int number, const ExtensionSet& set, TYPE default_value) { \ + return set.Get##METHOD(number, default_value); \ +} \ +template<> inline void PrimitiveTypeTraits::Set( \ + int number, FieldType field_type, TYPE value, ExtensionSet* set) { \ + set->Set##METHOD(number, field_type, value, NULL); \ +} \ + \ +template<> inline TYPE RepeatedPrimitiveTypeTraits::Get( \ + int number, const ExtensionSet& set, int index) { \ + return set.GetRepeated##METHOD(number, index); \ +} \ +template<> inline void RepeatedPrimitiveTypeTraits::Set( \ + int number, int index, TYPE value, ExtensionSet* set) { \ + set->SetRepeated##METHOD(number, index, value); \ +} \ +template<> inline void RepeatedPrimitiveTypeTraits::Add( \ + int number, FieldType field_type, bool is_packed, \ + TYPE value, ExtensionSet* set) { \ + set->Add##METHOD(number, field_type, is_packed, value, NULL); \ +} \ +template<> inline const RepeatedField* \ + RepeatedPrimitiveTypeTraits::GetDefaultRepeatedField() { \ + return &RepeatedPrimitiveDefaults::default_instance() \ + ->default_repeated_field_##TYPE##_; \ +} \ +template<> inline const RepeatedField& \ + RepeatedPrimitiveTypeTraits::GetRepeated(int number, \ + const ExtensionSet& set) { \ + return *reinterpret_cast*>( \ + set.GetRawRepeatedField( \ + number, GetDefaultRepeatedField())); \ +} \ +template<> inline RepeatedField* \ + 
RepeatedPrimitiveTypeTraits::MutableRepeated(int number, \ + FieldType field_type, \ + bool is_packed, \ + ExtensionSet* set) { \ + return reinterpret_cast*>( \ + set->MutableRawRepeatedField(number, field_type, is_packed, NULL)); \ +} + +PROTOBUF_DEFINE_PRIMITIVE_TYPE( int32, Int32) +PROTOBUF_DEFINE_PRIMITIVE_TYPE( int64, Int64) +PROTOBUF_DEFINE_PRIMITIVE_TYPE(uint32, UInt32) +PROTOBUF_DEFINE_PRIMITIVE_TYPE(uint64, UInt64) +PROTOBUF_DEFINE_PRIMITIVE_TYPE( float, Float) +PROTOBUF_DEFINE_PRIMITIVE_TYPE(double, Double) +PROTOBUF_DEFINE_PRIMITIVE_TYPE( bool, Bool) + +#undef PROTOBUF_DEFINE_PRIMITIVE_TYPE + +// ------------------------------------------------------------------- +// StringTypeTraits + +// Strings support both Set() and Mutable(). +class LIBPROTOBUF_EXPORT StringTypeTraits { + public: + typedef const string& ConstType; + typedef string* MutableType; + typedef StringTypeTraits Singular; + + static inline const string& Get(int number, const ExtensionSet& set, + ConstType default_value) { + return set.GetString(number, default_value); + } + static inline void Set(int number, FieldType field_type, + const string& value, ExtensionSet* set) { + set->SetString(number, field_type, value, NULL); + } + static inline string* Mutable(int number, FieldType field_type, + ExtensionSet* set) { + return set->MutableString(number, field_type, NULL); + } + template + static void Register(int number, FieldType type, bool is_packed) { + ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number, + type, false, is_packed); + } +}; + +class LIBPROTOBUF_EXPORT RepeatedStringTypeTraits { + public: + typedef const string& ConstType; + typedef string* MutableType; + typedef RepeatedStringTypeTraits Repeated; + + typedef RepeatedPtrField RepeatedFieldType; + + static inline const string& Get(int number, const ExtensionSet& set, + int index) { + return set.GetRepeatedString(number, index); + } + static inline void Set(int number, int index, + const string& value, ExtensionSet* set) { + set->SetRepeatedString(number, index, value); + } + static inline string* Mutable(int number, int index, ExtensionSet* set) { + return set->MutableRepeatedString(number, index); + } + static inline void Add(int number, FieldType field_type, + bool /*is_packed*/, const string& value, + ExtensionSet* set) { + set->AddString(number, field_type, value, NULL); + } + static inline string* Add(int number, FieldType field_type, + ExtensionSet* set) { + return set->AddString(number, field_type, NULL); + } + static inline const RepeatedPtrField& + GetRepeated(int number, const ExtensionSet& set) { + return *reinterpret_cast*>( + set.GetRawRepeatedField(number, GetDefaultRepeatedField())); + } + + static inline RepeatedPtrField* + MutableRepeated(int number, FieldType field_type, + bool is_packed, ExtensionSet* set) { + return reinterpret_cast*>( + set->MutableRawRepeatedField(number, field_type, + is_packed, NULL)); + } + + static const RepeatedFieldType* GetDefaultRepeatedField(); + + template + static void Register(int number, FieldType type, bool is_packed) { + ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number, + type, true, is_packed); + } + + private: + static void InitializeDefaultRepeatedFields(); + static void DestroyDefaultRepeatedFields(); +}; + +// ------------------------------------------------------------------- +// EnumTypeTraits + +// ExtensionSet represents enums using integers internally, so we have to +// static_cast around. 
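// [Sketch] The PROTOBUF_DEFINE_PRIMITIVE_TYPE idiom above, reduced to a
// standalone example: one macro stamps out an explicit specialization per
// C++ type and is then #undef'ed, so only the listed types compile.
// PerTypeSlot and DEFINE_SLOT are illustrative names, not part of this header.
#include <cassert>

template <typename T>
struct PerTypeSlot;  // primary template left undefined on purpose

#define DEFINE_SLOT(TYPE)                       \
  template <>                                   \
  struct PerTypeSlot<TYPE> {                    \
    static TYPE value;                          \
    static void Set(TYPE v) { value = v; }      \
    static TYPE Get() { return value; }         \
  };                                            \
  TYPE PerTypeSlot<TYPE>::value = TYPE();

DEFINE_SLOT(int)
DEFINE_SLOT(double)
DEFINE_SLOT(bool)

#undef DEFINE_SLOT

int main() {
  PerTypeSlot<int>::Set(7);
  PerTypeSlot<double>::Set(2.5);
  assert(PerTypeSlot<int>::Get() == 7);
  assert(PerTypeSlot<double>::Get() == 2.5);
  // PerTypeSlot<long>::Set(1);  // compile error: no specialization stamped out
}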
+template +class EnumTypeTraits { + public: + typedef Type ConstType; + typedef Type MutableType; + typedef EnumTypeTraits Singular; + + static inline ConstType Get(int number, const ExtensionSet& set, + ConstType default_value) { + return static_cast(set.GetEnum(number, default_value)); + } + static inline void Set(int number, FieldType field_type, + ConstType value, ExtensionSet* set) { + GOOGLE_DCHECK(IsValid(value)); + set->SetEnum(number, field_type, value, NULL); + } + template + static void Register(int number, FieldType type, bool is_packed) { + ExtensionSet::RegisterEnumExtension(&ExtendeeT::default_instance(), number, + type, false, is_packed, IsValid); + } +}; + +template +class RepeatedEnumTypeTraits { + public: + typedef Type ConstType; + typedef Type MutableType; + typedef RepeatedEnumTypeTraits Repeated; + + typedef RepeatedField RepeatedFieldType; + + static inline ConstType Get(int number, const ExtensionSet& set, int index) { + return static_cast(set.GetRepeatedEnum(number, index)); + } + static inline void Set(int number, int index, + ConstType value, ExtensionSet* set) { + GOOGLE_DCHECK(IsValid(value)); + set->SetRepeatedEnum(number, index, value); + } + static inline void Add(int number, FieldType field_type, + bool is_packed, ConstType value, ExtensionSet* set) { + GOOGLE_DCHECK(IsValid(value)); + set->AddEnum(number, field_type, is_packed, value, NULL); + } + static inline const RepeatedField& GetRepeated(int number, + const ExtensionSet& + set) { + // Hack: the `Extension` struct stores a RepeatedField for enums. + // RepeatedField cannot implicitly convert to RepeatedField + // so we need to do some casting magic. See message.h for similar + // contortions for non-extension fields. + return *reinterpret_cast*>( + set.GetRawRepeatedField(number, GetDefaultRepeatedField())); + } + + static inline RepeatedField* MutableRepeated(int number, + FieldType field_type, + bool is_packed, + ExtensionSet* set) { + return reinterpret_cast*>( + set->MutableRawRepeatedField(number, field_type, is_packed, NULL)); + } + + static const RepeatedFieldType* GetDefaultRepeatedField() { + // Hack: as noted above, repeated enum fields are internally stored as a + // RepeatedField. We need to be able to instantiate global static + // objects to return as default (empty) repeated fields on non-existent + // extensions. We would not be able to know a-priori all of the enum types + // (values of |Type|) to instantiate all of these, so we just re-use int32's + // default repeated field object. + return reinterpret_cast*>( + RepeatedPrimitiveTypeTraits::GetDefaultRepeatedField()); + } + template + static void Register(int number, FieldType type, bool is_packed) { + ExtensionSet::RegisterEnumExtension(&ExtendeeT::default_instance(), number, + type, true, is_packed, IsValid); + } +}; + +// ------------------------------------------------------------------- +// MessageTypeTraits + +// ExtensionSet guarantees that when manipulating extensions with message +// types, the implementation used will be the compiled-in class representing +// that type. So, we can static_cast down to the exact type we expect. 
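// [Sketch] The enum convention from the section above, standalone: values are
// stored as plain ints, a validator function pointer (playing the role of the
// IsValid argument to RegisterEnumExtension) rejects unknown values at set
// time, and reads static_cast back to the enum type. IntBackedEnumField is an
// illustrative name, not part of this header.
#include <cassert>

enum Color { RED = 1, GREEN = 2, BLUE = 3 };
bool Color_IsValid(int v) { return v >= 1 && v <= 3; }

template <typename E, bool (*IsValid)(int)>
class IntBackedEnumField {
 public:
  bool Set(E value) {
    // Analogous to the GOOGLE_DCHECK(IsValid(value)) guards above.
    if (!IsValid(static_cast<int>(value))) return false;
    raw_ = static_cast<int>(value);
    return true;
  }
  E Get() const { return static_cast<E>(raw_); }  // cast back out on read

 private:
  int raw_ = RED;  // storage is always int, whatever E is
};

int main() {
  IntBackedEnumField<Color, Color_IsValid> field;
  assert(field.Set(GREEN));
  assert(field.Get() == GREEN);
  assert(!field.Set(static_cast<Color>(99)));  // rejected by the validator
}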
+template +class MessageTypeTraits { + public: + typedef const Type& ConstType; + typedef Type* MutableType; + typedef MessageTypeTraits Singular; + + static inline ConstType Get(int number, const ExtensionSet& set, + ConstType default_value) { + return static_cast( + set.GetMessage(number, default_value)); + } + static inline MutableType Mutable(int number, FieldType field_type, + ExtensionSet* set) { + return static_cast( + set->MutableMessage(number, field_type, Type::default_instance(), NULL)); + } + static inline void SetAllocated(int number, FieldType field_type, + MutableType message, ExtensionSet* set) { + set->SetAllocatedMessage(number, field_type, NULL, message); + } + static inline void UnsafeArenaSetAllocated(int number, FieldType field_type, + MutableType message, + ExtensionSet* set) { + set->UnsafeArenaSetAllocatedMessage(number, field_type, NULL, message); + } + static inline MutableType Release(int number, FieldType /* field_type */, + ExtensionSet* set) { + return static_cast(set->ReleaseMessage( + number, Type::default_instance())); + } + static inline MutableType UnsafeArenaRelease(int number, + FieldType /* field_type */, + ExtensionSet* set) { + return static_cast(set->UnsafeArenaReleaseMessage( + number, Type::default_instance())); + } + template + static void Register(int number, FieldType type, bool is_packed) { + ExtensionSet::RegisterMessageExtension(&ExtendeeT::default_instance(), + number, type, false, is_packed, + &Type::default_instance()); + } +}; + +// forward declaration +class RepeatedMessageGenericTypeTraits; + +template +class RepeatedMessageTypeTraits { + public: + typedef const Type& ConstType; + typedef Type* MutableType; + typedef RepeatedMessageTypeTraits Repeated; + + typedef RepeatedPtrField RepeatedFieldType; + + static inline ConstType Get(int number, const ExtensionSet& set, int index) { + return static_cast(set.GetRepeatedMessage(number, index)); + } + static inline MutableType Mutable(int number, int index, ExtensionSet* set) { + return static_cast(set->MutableRepeatedMessage(number, index)); + } + static inline MutableType Add(int number, FieldType field_type, + ExtensionSet* set) { + return static_cast( + set->AddMessage(number, field_type, Type::default_instance(), NULL)); + } + static inline const RepeatedPtrField& GetRepeated(int number, + const ExtensionSet& + set) { + // See notes above in RepeatedEnumTypeTraits::GetRepeated(): same + // casting hack applies here, because a RepeatedPtrField + // cannot naturally become a RepeatedPtrType even though Type is + // presumably a message. google::protobuf::Message goes through similar contortions + // with a reinterpret_cast<>. 
+    return *reinterpret_cast<const RepeatedPtrField<Type>*>(
+        set.GetRawRepeatedField(number, GetDefaultRepeatedField()));
+  }
+  static inline RepeatedPtrField<Type>* MutableRepeated(int number,
+                                                        FieldType field_type,
+                                                        bool is_packed,
+                                                        ExtensionSet* set) {
+    return reinterpret_cast<RepeatedPtrField<Type>*>(
+        set->MutableRawRepeatedField(number, field_type, is_packed, NULL));
+  }
+
+  static const RepeatedFieldType* GetDefaultRepeatedField();
+  template <typename ExtendeeT>
+  static void Register(int number, FieldType type, bool is_packed) {
+    ExtensionSet::RegisterMessageExtension(&ExtendeeT::default_instance(),
+                                           number, type, true, is_packed,
+                                           &Type::default_instance());
+  }
+};
+
+template <typename Type>
+inline const typename RepeatedMessageTypeTraits<Type>::RepeatedFieldType*
+RepeatedMessageTypeTraits<Type>::GetDefaultRepeatedField() {
+  static auto instance = OnShutdownDelete(new RepeatedFieldType);
+  return instance;
+}
+
+// -------------------------------------------------------------------
+// ExtensionIdentifier
+
+// This is the type of actual extension objects.  E.g. if you have:
+//   extends Foo with optional int32 bar = 1234;
+// then "bar" will be defined in C++ as:
+//   ExtensionIdentifier<Foo, PrimitiveTypeTraits<int32>, 1, false> bar(1234);
+//
+// Note that we could, in theory, supply the field number as a template
+// parameter, and thus make an instance of ExtensionIdentifier have no
+// actual contents.  However, if we did that, then using an extension
+// identifier would not necessarily cause the compiler to output any sort
+// of reference to any symbol defined in the extension's .pb.o file.  Some
+// linkers will actually drop object files that are not explicitly referenced,
+// but that would be bad because it would cause this extension to not be
+// registered at static initialization, and therefore using it would crash.
+
+template <typename ExtendeeType, typename TypeTraitsType,
+          FieldType field_type, bool is_packed>
+class ExtensionIdentifier {
+ public:
+  typedef TypeTraitsType TypeTraits;
+  typedef ExtendeeType Extendee;
+
+  ExtensionIdentifier(int number, typename TypeTraits::ConstType default_value)
+      : number_(number), default_value_(default_value) {
+    Register(number);
+  }
+  inline int number() const { return number_; }
+  typename TypeTraits::ConstType default_value() const {
+    return default_value_;
+  }
+
+  static void Register(int number) {
+    TypeTraits::template Register<ExtendeeType>(number, field_type, is_packed);
+  }
+
+ private:
+  const int number_;
+  typename TypeTraits::ConstType default_value_;
+};
+
+// -------------------------------------------------------------------
+// Generated accessors
+
+// This macro should be expanded in the context of a generated type which
+// has extensions.
+//
+// We use "_proto_TypeTraits" as a type name below because "TypeTraits"
+// causes problems if the class has a nested message or enum type with that
+// name and "_TypeTraits" is technically reserved for the C++ library since
+// it starts with an underscore followed by a capital letter.
+//
+// For similar reason, we use "_field_type" and "_is_packed" as parameter names
+// below, so that "field_type" and "is_packed" can be used as field names.
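// [Sketch] Why the identifier is a real object rather than a pure template
// parameter, as the comment above explains: constructing a global instance
// registers the extension at static-initialization time, and referencing that
// object from user code forces the linker to keep the .o that contains it.
// Registry and Identifier below are illustrative stand-ins for the real
// RegisterExtension machinery.
#include <iostream>
#include <map>

struct Registry {
  static std::map<int, const char*>& ByNumber() {
    static std::map<int, const char*> m;  // constructed on first use
    return m;
  }
};

class Identifier {
 public:
  Identifier(int number, const char* name) : number_(number) {
    Registry::ByNumber()[number] = name;  // runs before main()
  }
  int number() const { return number_; }

 private:
  const int number_;
};

// In generated code this object would live in the extension's .pb.cc file.
Identifier bar(1234, "bar");

int main() {
  // Referencing `bar` here is what keeps its object file from being dropped.
  std::cout << Registry::ByNumber()[bar.number()] << "\n";  // prints "bar"
}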
+#define GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(CLASSNAME) \ + /* Has, Size, Clear */ \ + template \ + inline bool HasExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const { \ + return _extensions_.Has(id.number()); \ + } \ + \ + template \ + inline void ClearExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \ + _extensions_.ClearExtension(id.number()); \ + } \ + \ + template \ + inline int ExtensionSize( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const { \ + return _extensions_.ExtensionSize(id.number()); \ + } \ + \ + /* Singular accessors */ \ + template \ + inline typename _proto_TypeTraits::Singular::ConstType GetExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const { \ + return _proto_TypeTraits::Get(id.number(), _extensions_, \ + id.default_value()); \ + } \ + \ + template \ + inline typename _proto_TypeTraits::Singular::MutableType MutableExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \ + return _proto_TypeTraits::Mutable(id.number(), _field_type, \ + &_extensions_); \ + } \ + \ + template \ + inline void SetExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \ + typename _proto_TypeTraits::Singular::ConstType value) { \ + _proto_TypeTraits::Set(id.number(), _field_type, value, &_extensions_); \ + } \ + \ + template \ + inline void SetAllocatedExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \ + typename _proto_TypeTraits::Singular::MutableType value) { \ + _proto_TypeTraits::SetAllocated(id.number(), _field_type, \ + value, &_extensions_); \ + } \ + template \ + inline void UnsafeArenaSetAllocatedExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \ + typename _proto_TypeTraits::Singular::MutableType value) { \ + _proto_TypeTraits::UnsafeArenaSetAllocated(id.number(), _field_type, \ + value, &_extensions_); \ + } \ + template \ + inline typename _proto_TypeTraits::Singular::MutableType ReleaseExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \ + return _proto_TypeTraits::Release(id.number(), _field_type, \ + &_extensions_); \ + } \ + template \ + inline typename _proto_TypeTraits::Singular::MutableType \ + UnsafeArenaReleaseExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \ + return _proto_TypeTraits::UnsafeArenaRelease(id.number(), _field_type, \ + &_extensions_); \ + } \ + \ + /* Repeated accessors */ \ + template \ + inline typename _proto_TypeTraits::Repeated::ConstType GetExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \ + int index) const { \ + return _proto_TypeTraits::Get(id.number(), _extensions_, index); \ + } \ + \ + template \ + inline typename _proto_TypeTraits::Repeated::MutableType MutableExtension( \ + const 
::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \ + int index) { \ + return _proto_TypeTraits::Mutable(id.number(), index, &_extensions_); \ + } \ + \ + template \ + inline void SetExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \ + int index, typename _proto_TypeTraits::Repeated::ConstType value) { \ + _proto_TypeTraits::Set(id.number(), index, value, &_extensions_); \ + } \ + \ + template \ + inline typename _proto_TypeTraits::Repeated::MutableType AddExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \ + return _proto_TypeTraits::Add(id.number(), _field_type, &_extensions_); \ + } \ + \ + template \ + inline void AddExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \ + typename _proto_TypeTraits::Repeated::ConstType value) { \ + _proto_TypeTraits::Add(id.number(), _field_type, _is_packed, \ + value, &_extensions_); \ + } \ + \ + template \ + inline const typename _proto_TypeTraits::Repeated::RepeatedFieldType& \ + GetRepeatedExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, \ + _is_packed>& id) const { \ + return _proto_TypeTraits::GetRepeated(id.number(), _extensions_); \ + } \ + \ + template \ + inline typename _proto_TypeTraits::Repeated::RepeatedFieldType* \ + MutableRepeatedExtension( \ + const ::google::protobuf::internal::ExtensionIdentifier< \ + CLASSNAME, _proto_TypeTraits, _field_type, \ + _is_packed>& id) { \ + return _proto_TypeTraits::MutableRepeated(id.number(), _field_type, \ + _is_packed, &_extensions_); \ + } + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_EXTENSION_SET_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_enum_util.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_enum_util.h new file mode 100644 index 0000000000000000000000000000000000000000..96b03cc9164f4a02456b9a4ddba90f4a917af35c --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_enum_util.h @@ -0,0 +1,46 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__ +#define GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__ + +#include + +namespace google { +namespace protobuf { + +// This type trait can be used to cause templates to only match proto2 enum +// types. +template struct is_proto_enum : ::std::false_type {}; + +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven.h new file mode 100644 index 0000000000000000000000000000000000000000..10ca3aaa36490e9ac4e46023655b2659bf49e405 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven.h @@ -0,0 +1,200 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
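// [Sketch] How the is_proto_enum trait defined in generated_enum_util.h above
// is meant to be used: generated code specializes the trait to std::true_type
// for each proto enum, and enable_if then restricts templates to those types.
// MyProtoEnum and PrintEnum are illustrative; only the primary trait template
// comes from the header above.
#include <iostream>
#include <type_traits>

namespace google {
namespace protobuf {
template <typename T> struct is_proto_enum : ::std::false_type {};
}  // namespace protobuf
}  // namespace google

enum MyProtoEnum { FOO = 0, BAR = 1 };

// What a generated .pb.h would emit for MyProtoEnum:
namespace google {
namespace protobuf {
template <> struct is_proto_enum<MyProtoEnum> : ::std::true_type {};
}  // namespace protobuf
}  // namespace google

// Only participates in overload resolution for proto enum types.
template <typename E>
typename std::enable_if<google::protobuf::is_proto_enum<E>::value, void>::type
PrintEnum(E value) {
  std::cout << static_cast<int>(value) << "\n";
}

int main() {
  PrintEnum(BAR);    // OK: prints 1
  // PrintEnum(42);  // compile error: int is not a proto enum
}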
+ +#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_H__ +#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_H__ + +#include +#include +#include +#include +#include +#include + +// We require C++11 and Clang to use constexpr for variables, as GCC 4.8 +// requires constexpr to be consistent between declarations of variables +// unnecessarily (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58541). +// VS 2017 Update 3 also supports this usage of constexpr. +#if defined(__clang__) || (defined(_MSC_VER) && _MSC_VER >= 1911) +#define PROTOBUF_CONSTEXPR_VAR constexpr +#else // !__clang__ +#define PROTOBUF_CONSTEXPR_VAR +#endif // !_clang + +namespace google { +namespace protobuf { +namespace internal { + +// Processing-type masks. +static constexpr const unsigned char kOneofMask = 0x40; +static constexpr const unsigned char kRepeatedMask = 0x20; +// Mask for the raw type: either a WireFormatLite::FieldType or one of the +// ProcessingTypes below, without the oneof or repeated flag. +static constexpr const unsigned char kTypeMask = 0x1f; + +// Wire type masks. +static constexpr const unsigned char kNotPackedMask = 0x10; +static constexpr const unsigned char kInvalidMask = 0x20; + +enum ProcessingTypes { + TYPE_STRING_INLINED = 23, + TYPE_BYTES_INLINED = 24, + TYPE_MAP = 25, +}; + +static_assert(TYPE_MAP < kRepeatedMask, "Invalid enum"); + +// TODO(ckennelly): Add a static assertion to ensure that these masks do not +// conflict with wiretypes. + +// ParseTableField is kept small to help simplify instructions for computing +// offsets, as we will always need this information to parse a field. +// Additional data, needed for some types, is stored in +// AuxillaryParseTableField. +struct ParseTableField { + uint32 offset; + // The presence_index ordinarily represents a has_bit index, but for fields + // inside a oneof it represents the index in _oneof_case_. + uint32 presence_index; + unsigned char normal_wiretype; + unsigned char packed_wiretype; + + // processing_type is given by: + // (FieldDescriptor->type() << 1) | FieldDescriptor->is_packed() + unsigned char processing_type; + + unsigned char tag_size; +}; + +struct ParseTable; + +union AuxillaryParseTableField { + typedef bool (*EnumValidator)(int); + + // Enums + struct enum_aux { + EnumValidator validator; + }; + enum_aux enums; + // Group, messages + struct message_aux { + // ExplicitlyInitialized -> T requires a reinterpret_cast, which prevents + // the tables from being constructed as a constexpr. We use void to avoid + // the cast. + const void* default_message_void; + const MessageLite* default_message() const { + return static_cast(default_message_void); + } + }; + message_aux messages; + // Strings + struct string_aux { + const void* default_ptr; + const char* field_name; + }; + string_aux strings; + + struct map_aux { + bool (*parse_map)(io::CodedInputStream*, void*); + }; + map_aux maps; + + AuxillaryParseTableField() = default; + constexpr AuxillaryParseTableField(AuxillaryParseTableField::enum_aux e) + : enums(e) {} + constexpr AuxillaryParseTableField(AuxillaryParseTableField::message_aux m) + : messages(m) {} + constexpr AuxillaryParseTableField(AuxillaryParseTableField::string_aux s) + : strings(s) {} + constexpr AuxillaryParseTableField(AuxillaryParseTableField::map_aux m) + : maps(m) {} +}; + +struct ParseTable { + const ParseTableField* fields; + const AuxillaryParseTableField* aux; + int max_field_number; + // TODO(ckennelly): Do something with this padding. + + // TODO(ckennelly): Vet these for sign extension. 
+ int64 has_bits_offset; + int64 oneof_case_offset; + int64 extension_offset; + int64 arena_offset; + + // ExplicitlyInitialized -> T requires a reinterpret_cast, which prevents + // the tables from being constructed as a constexpr. We use void to avoid + // the cast. + const void* default_instance_void; + const MessageLite* default_instance() const { + return static_cast(default_instance_void); + } + + bool unknown_field_set; +}; + +static_assert(sizeof(ParseTableField) <= 16, "ParseTableField is too large"); +// The tables must be composed of POD components to ensure link-time +// initialization. +static_assert(std::is_pod::value, ""); +static_assert(std::is_pod::value, ""); +static_assert(std::is_pod::value, ""); +static_assert(std::is_pod::value, ""); +static_assert(std::is_pod::value, ""); + +#ifndef __NVCC__ // This assertion currently fails under NVCC. +static_assert(std::is_pod::value, ""); +#endif + +// TODO(ckennelly): Consolidate these implementations into a single one, using +// dynamic dispatch to the appropriate unknown field handler. +bool MergePartialFromCodedStream(MessageLite* msg, const ParseTable& table, + io::CodedInputStream* input); +bool MergePartialFromCodedStreamLite(MessageLite* msg, const ParseTable& table, + io::CodedInputStream* input); + +template +bool ParseMap(io::CodedInputStream* input, void* map_field) { + typedef typename MapEntryToMapField::MapFieldType MapFieldType; + typedef google::protobuf::Map + MapType; + typedef typename Entry::template Parser ParserType; + + ParserType parser(static_cast(map_field)); + return ::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(input, + &parser); +} + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven_lite.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven_lite.h new file mode 100644 index 0000000000000000000000000000000000000000..0d90fe33a6241a4f2eb04cb5728eac75009f1c81 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven_lite.h @@ -0,0 +1,873 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_LITE_H__ +#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_LITE_H__ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace google { +namespace protobuf { +namespace internal { + + +enum StringType { + StringType_STRING = 0, + StringType_INLINED = 3 +}; + +// Logically a superset of StringType, consisting of all field types that +// require special initialization. +enum ProcessingType { + ProcessingType_STRING = 0, + ProcessingType_CORD = 1, + ProcessingType_STRING_PIECE = 2, + ProcessingType_INLINED = 3, + ProcessingType_MESSAGE = 4, +}; + +enum Cardinality { + Cardinality_SINGULAR = 0, + Cardinality_REPEATED = 1, + Cardinality_ONEOF = 3 +}; + +template +inline Type* Raw(MessageLite* msg, int64 offset) { + return reinterpret_cast(reinterpret_cast(msg) + offset); +} + +template +inline const Type* Raw(const MessageLite* msg, int64 offset) { + return reinterpret_cast(reinterpret_cast(msg) + + offset); +} + +template +inline Arena* GetArena(MessageLite* msg, int64 arena_offset) { + if (GOOGLE_PREDICT_FALSE(arena_offset == -1)) { + return NULL; + } + + return Raw(msg, arena_offset)->arena(); +} + +inline ExtensionSet* GetExtensionSet(MessageLite* msg, int64 extension_offset) { + if (extension_offset == -1) { + return NULL; + } + + return Raw(msg, extension_offset); +} + +template +inline Type* AddField(MessageLite* msg, int64 offset) { + static_assert(std::is_pod::value || + std::is_same::value, + "Do not assign"); + + google::protobuf::RepeatedField* repeated = + Raw >(msg, offset); + return repeated->Add(); +} + +template <> +inline string* AddField(MessageLite* msg, int64 offset) { + google::protobuf::RepeatedPtrField* repeated = + Raw >(msg, offset); + return repeated->Add(); +} + + +template +inline void AddField(MessageLite* msg, int64 offset, Type value) { + static_assert(std::is_pod::value, + "Do not assign"); + *AddField(msg, offset) = value; +} + +inline void SetBit(uint32* has_bits, uint32 has_bit_index) { + GOOGLE_DCHECK(has_bits != nullptr); + + uint32 mask = static_cast(1u) << (has_bit_index % 32); + has_bits[has_bit_index / 32u] |= mask; +} + +template +inline Type* MutableField(MessageLite* msg, uint32* has_bits, + uint32 has_bit_index, int64 offset) { + SetBit(has_bits, has_bit_index); + return Raw(msg, offset); +} + +template +inline void SetField(MessageLite* msg, uint32* has_bits, uint32 has_bit_index, + int64 offset, Type value) { + static_assert(std::is_pod::value, + "Do not assign"); + *MutableField(msg, has_bits, has_bit_index, offset) = value; +} + +template +inline void SetOneofField(MessageLite* msg, uint32* oneof_case, + uint32 oneof_case_index, int64 offset, + int field_number, Type value) { + oneof_case[oneof_case_index] = field_number; + *Raw(msg, offset) = value; +} + +// Clears a oneof field. The field argument should correspond to the particular +// field that is currently set in the oneof. 
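// [Sketch] The Raw<Type>(msg, offset) idiom defined below, standalone:
// table-driven parsing addresses fields through byte offsets from the message
// base instead of through generated accessors, and records presence in a
// has-bits word. DemoMessage and its offsets are illustrative; real offset
// tables are emitted by the protobuf code generator.
#include <cassert>
#include <cstddef>
#include <cstdint>

struct DemoMessage {
  uint32_t has_bits[1] = {0};
  int32_t id = 0;
  double score = 0.0;
};

template <typename Type>
Type* Raw(DemoMessage* msg, std::ptrdiff_t offset) {
  return reinterpret_cast<Type*>(reinterpret_cast<char*>(msg) + offset);
}

void SetBit(uint32_t* has_bits, uint32_t index) {
  has_bits[index / 32] |= uint32_t{1} << (index % 32);
}

int main() {
  DemoMessage msg;
  // These offsets play the role of ParseTableField::offset.
  const std::ptrdiff_t kIdOffset = offsetof(DemoMessage, id);
  const std::ptrdiff_t kScoreOffset = offsetof(DemoMessage, score);

  *Raw<int32_t>(&msg, kIdOffset) = 7;      // "parse" field 1
  SetBit(msg.has_bits, 0);                 // record presence
  *Raw<double>(&msg, kScoreOffset) = 1.5;  // "parse" field 2
  SetBit(msg.has_bits, 1);

  assert(msg.id == 7 && msg.score == 1.5);
  assert(msg.has_bits[0] == 0x3);
}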
+inline void ClearOneofField(const ParseTableField& field, Arena* arena, + MessageLite* msg) { + switch (field.processing_type & kTypeMask) { + case WireFormatLite::TYPE_MESSAGE: + if (arena == NULL) { + delete *Raw(msg, field.offset); + } + break; + + case WireFormatLite::TYPE_STRING: + case WireFormatLite::TYPE_BYTES: + Raw(msg, field.offset) + ->Destroy(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), arena); + break; + + case TYPE_STRING_INLINED: + case TYPE_BYTES_INLINED: + Raw(msg, field.offset)->DestroyNoArena(NULL); + break; + + default: + // No cleanup needed. + break; + } +} + +// Clears and reinitializes a oneof field as necessary, in preparation for +// parsing a new value with type field_type and field number field_number. +// +// Note: the oneof_case argument should point directly to the _oneof_case_ +// element corresponding to this particular oneof, not to the beginning of the +// _oneof_case_ array. +template +inline void ResetOneofField(const ParseTable& table, int field_number, + Arena* arena, MessageLite* msg, uint32* oneof_case, + int64 offset, const void* default_ptr) { + if (*oneof_case == field_number) { + // The oneof is already set to the right type, so there is no need to clear + // it. + return; + } + + if (*oneof_case != 0) { + ClearOneofField(table.fields[*oneof_case], arena, msg); + } + *oneof_case = field_number; + + switch (field_type) { + case ProcessingType_STRING: + Raw(msg, offset) + ->UnsafeSetDefault(static_cast(default_ptr)); + break; + case ProcessingType_INLINED: + new (Raw(msg, offset)) + InlinedStringField(*static_cast(default_ptr)); + break; + case ProcessingType_MESSAGE: + MessageLite** submessage = Raw(msg, offset); + const MessageLite* prototype = + table.aux[field_number].messages.default_message(); + *submessage = prototype->New(arena); + break; + } +} + +template +static inline bool HandleString(io::CodedInputStream* input, MessageLite* msg, + Arena* arena, uint32* has_bits, + uint32 has_bit_index, int64 offset, + const void* default_ptr, + const char* field_name) { +#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + const char* sdata; + size_t size; +#endif + + switch (ctype) { + case StringType_INLINED: { + InlinedStringField* s; + switch (cardinality) { + case Cardinality_SINGULAR: + // TODO(ckennelly): Is this optimal? + s = MutableField( + msg, has_bits, has_bit_index, offset); + break; + case Cardinality_REPEATED: + s = AddField(msg, offset); + break; + case Cardinality_ONEOF: + s = Raw(msg, offset); + break; + } + GOOGLE_DCHECK(s != nullptr); + ::std::string* value = s->MutableNoArena(NULL); + + if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadString(input, value))) { + return false; + } + +#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + sdata = value->data(); + size = value->size(); +#endif + break; + } + case StringType_STRING: { + string* value; + switch (cardinality) { + case Cardinality_SINGULAR: + // TODO(ckennelly): Is this optimal? 
+ value = + MutableField(msg, has_bits, has_bit_index, offset) + ->Mutable(static_cast(default_ptr), arena); + break; + case Cardinality_REPEATED: + value = AddField(msg, offset); + break; + case Cardinality_ONEOF: + value = Raw(msg, offset) + ->Mutable(static_cast(default_ptr), arena); + break; + } + GOOGLE_DCHECK(value != nullptr); + + if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadString(input, value))) { + return false; + } + +#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + sdata = value->data(); + size = value->size(); +#endif + break; + } + } + +#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + if (validate) { + WireFormatLite::VerifyUtf8String(sdata, size, WireFormatLite::PARSE, + field_name); + } +#endif + + return true; +} + +template +inline bool HandleEnum(const ParseTable& table, io::CodedInputStream* input, + MessageLite* msg, uint32* presence, + uint32 presence_index, int64 offset, uint32 tag, + int field_number) { + int value; + if (GOOGLE_PREDICT_FALSE( + (!WireFormatLite::ReadPrimitive( + input, &value)))) { + return false; + } + + AuxillaryParseTableField::EnumValidator validator = + table.aux[field_number].enums.validator; + if (validator(value)) { + switch (cardinality) { + case Cardinality_SINGULAR: + SetField(msg, presence, presence_index, offset, value); + break; + case Cardinality_REPEATED: + AddField(msg, offset, value); + break; + case Cardinality_ONEOF: + ClearOneofField(table.fields[presence[presence_index]], + GetArena(msg, table.arena_offset), + msg); + SetOneofField(msg, presence, presence_index, offset, field_number, + value); + break; + } + } else { + UnknownFieldHandler::Varint(msg, table, tag, value); + } + + return true; +} + +// RepeatedMessageTypeHandler allows us to operate on RepeatedPtrField fields +// without instantiating the specific template. +class RepeatedMessageTypeHandler { + public: + typedef MessageLite Type; + typedef MessageLite WeakType; + static Arena* GetArena(Type* t) { return t->GetArena(); } + static void* GetMaybeArenaPointer(Type* t) { + return t->GetMaybeArenaPointer(); + } + static inline Type* NewFromPrototype(const Type* prototype, + Arena* arena = NULL) { + return prototype->New(arena); + } + static void Delete(Type* t, Arena* arena = NULL) { + if (arena == NULL) { + delete t; + } + } +}; + +class MergePartialFromCodedStreamHelper { + public: + static MessageLite* Add(RepeatedPtrFieldBase* field, + const MessageLite* prototype) { + return field->Add( + const_cast(prototype)); + } +}; + +template +bool MergePartialFromCodedStreamImpl(MessageLite* msg, const ParseTable& table, + io::CodedInputStream* input) { + // We require that has_bits are present, as to avoid having to check for them + // for every field. + // + // TODO(ckennelly): Make this a compile-time parameter with templates. + GOOGLE_DCHECK_GE(table.has_bits_offset, 0); + uint32* has_bits = Raw(msg, table.has_bits_offset); + GOOGLE_DCHECK(has_bits != NULL); + + while (true) { + uint32 tag = input->ReadTag(); + + const WireFormatLite::WireType wire_type = + WireFormatLite::GetTagWireType(tag); + const int field_number = WireFormatLite::GetTagFieldNumber(tag); + + if (field_number > table.max_field_number) { + // check for possible extensions + if (UnknownFieldHandler::ParseExtension(msg, table, input, tag)) { + // successfully parsed + continue; + } + + if (GOOGLE_PREDICT_FALSE( + !UnknownFieldHandler::Skip(msg, table, input, tag))) { + return false; + } + + continue; + } + + // We implicitly verify that data points to a valid field as we check the + // wire types. 
Entries in table.fields[i] that do not correspond to valid + // field numbers have their normal_wiretype and packed_wiretype fields set + // with the kInvalidMask value. As wire_type cannot take on that value, we + // will never match. + const ParseTableField* data = table.fields + field_number; + + // TODO(ckennelly): Avoid sign extension + const int64 presence_index = data->presence_index; + const int64 offset = data->offset; + const unsigned char processing_type = data->processing_type; + + if (data->normal_wiretype == static_cast(wire_type)) { + // TODO(ckennelly): Use a computed goto on GCC/LLVM or otherwise eliminate + // the bounds check on processing_type. + + switch (processing_type) { +#define HANDLE_TYPE(TYPE, CPPTYPE) \ + case (WireFormatLite::TYPE_##TYPE): { \ + CPPTYPE value; \ + if (GOOGLE_PREDICT_FALSE( \ + (!WireFormatLite::ReadPrimitive< \ + CPPTYPE, WireFormatLite::TYPE_##TYPE>(input, &value)))) { \ + return false; \ + } \ + SetField(msg, has_bits, presence_index, offset, value); \ + break; \ + } \ + case (WireFormatLite::TYPE_##TYPE) | kRepeatedMask: { \ + google::protobuf::RepeatedField* values = \ + Raw >(msg, offset); \ + if (GOOGLE_PREDICT_FALSE((!WireFormatLite::ReadRepeatedPrimitive< \ + CPPTYPE, WireFormatLite::TYPE_##TYPE>( \ + data->tag_size, tag, input, values)))) { \ + return false; \ + } \ + break; \ + } \ + case (WireFormatLite::TYPE_##TYPE) | kOneofMask: { \ + uint32* oneof_case = Raw(msg, table.oneof_case_offset); \ + CPPTYPE value; \ + if (GOOGLE_PREDICT_FALSE( \ + (!WireFormatLite::ReadPrimitive< \ + CPPTYPE, WireFormatLite::TYPE_##TYPE>(input, &value)))) { \ + return false; \ + } \ + ClearOneofField(table.fields[oneof_case[presence_index]], \ + GetArena(msg, table.arena_offset), msg); \ + SetOneofField(msg, oneof_case, presence_index, offset, field_number, \ + value); \ + break; \ + } + + HANDLE_TYPE(INT32, int32) + HANDLE_TYPE(INT64, int64) + HANDLE_TYPE(SINT32, int32) + HANDLE_TYPE(SINT64, int64) + HANDLE_TYPE(UINT32, uint32) + HANDLE_TYPE(UINT64, uint64) + + HANDLE_TYPE(FIXED32, uint32) + HANDLE_TYPE(FIXED64, uint64) + HANDLE_TYPE(SFIXED32, int32) + HANDLE_TYPE(SFIXED64, int64) + + HANDLE_TYPE(FLOAT, float) + HANDLE_TYPE(DOUBLE, double) + + HANDLE_TYPE(BOOL, bool) +#undef HANDLE_TYPE + case WireFormatLite::TYPE_BYTES: +#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + case WireFormatLite::TYPE_STRING: +#endif + { + Arena* const arena = + GetArena(msg, table.arena_offset); + const void* default_ptr = table.aux[field_number].strings.default_ptr; + + if (GOOGLE_PREDICT_FALSE(( + !HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, NULL)))) { + return false; + } + break; + } + case TYPE_BYTES_INLINED: +#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + case TYPE_STRING_INLINED: +#endif + { + Arena* const arena = + GetArena(msg, table.arena_offset); + const void* default_ptr = table.aux[field_number].strings.default_ptr; + + if (GOOGLE_PREDICT_FALSE((!HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, NULL)))) { + return false; + } + break; + } + case WireFormatLite::TYPE_BYTES | kOneofMask: +#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + case WireFormatLite::TYPE_STRING | kOneofMask: +#endif + { + Arena* const arena = + GetArena(msg, table.arena_offset); + uint32* oneof_case = Raw(msg, table.oneof_case_offset); + const void* default_ptr = table.aux[field_number].strings.default_ptr; + + ResetOneofField( + table, field_number, arena, msg, oneof_case + presence_index, + offset, 
default_ptr); + + if (GOOGLE_PREDICT_FALSE( + (!HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, NULL)))) { + return false; + } + break; + } + case (WireFormatLite::TYPE_BYTES) | kRepeatedMask: + case TYPE_BYTES_INLINED | kRepeatedMask: +#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + case (WireFormatLite::TYPE_STRING) | kRepeatedMask: + case TYPE_STRING_INLINED | kRepeatedMask: +#endif + { + Arena* const arena = + GetArena(msg, table.arena_offset); + const void* default_ptr = + table.aux[field_number].strings.default_ptr; + + if (GOOGLE_PREDICT_FALSE(( + !HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, NULL)))) { + return false; + } + break; + } +#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + case (WireFormatLite::TYPE_STRING): { + Arena* const arena = + GetArena(msg, table.arena_offset); + const void* default_ptr = table.aux[field_number].strings.default_ptr; + const char* field_name = table.aux[field_number].strings.field_name; + + if (GOOGLE_PREDICT_FALSE( + (!HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, field_name)))) { + return false; + } + break; + } + case TYPE_STRING_INLINED | kRepeatedMask: + case (WireFormatLite::TYPE_STRING) | kRepeatedMask: { + Arena* const arena = + GetArena(msg, table.arena_offset); + const void* default_ptr = table.aux[field_number].strings.default_ptr; + const char* field_name = table.aux[field_number].strings.field_name; + + if (GOOGLE_PREDICT_FALSE( + (!HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, field_name)))) { + return false; + } + break; + } + case (WireFormatLite::TYPE_STRING) | kOneofMask: { + Arena* const arena = + GetArena(msg, table.arena_offset); + uint32* oneof_case = Raw(msg, table.oneof_case_offset); + const void* default_ptr = table.aux[field_number].strings.default_ptr; + const char* field_name = table.aux[field_number].strings.field_name; + + ResetOneofField( + table, field_number, arena, msg, oneof_case + presence_index, + offset, default_ptr); + + if (GOOGLE_PREDICT_FALSE( + (!HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, field_name)))) { + return false; + } + break; + } +#endif + case WireFormatLite::TYPE_ENUM: { + if (GOOGLE_PREDICT_FALSE( + (!HandleEnum( + table, input, msg, has_bits, presence_index, offset, tag, + field_number)))) { + return false; + } + break; + } + case WireFormatLite::TYPE_ENUM | kRepeatedMask: { + if (GOOGLE_PREDICT_FALSE( + (!HandleEnum( + table, input, msg, has_bits, presence_index, offset, tag, + field_number)))) { + return false; + } + break; + } + case WireFormatLite::TYPE_ENUM | kOneofMask: { + uint32* oneof_case = Raw(msg, table.oneof_case_offset); + if (GOOGLE_PREDICT_FALSE( + (!HandleEnum(table, input, msg, oneof_case, + presence_index, offset, tag, + field_number)))) { + return false; + } + break; + } + case WireFormatLite::TYPE_GROUP: { + MessageLite** submsg_holder = + MutableField(msg, has_bits, presence_index, offset); + MessageLite* submsg = *submsg_holder; + + if (submsg == NULL) { + Arena* const arena = + GetArena(msg, table.arena_offset); + const MessageLite* prototype = + table.aux[field_number].messages.default_message(); + submsg = prototype->New(arena); + *submsg_holder = submsg; + } + + if (GOOGLE_PREDICT_FALSE( + !WireFormatLite::ReadGroup(field_number, input, submsg))) { + return false; + } + + break; + } + case WireFormatLite::TYPE_GROUP | kRepeatedMask: { + RepeatedPtrFieldBase* field = Raw(msg, 
offset); + const MessageLite* prototype = + table.aux[field_number].messages.default_message(); + GOOGLE_DCHECK(prototype != NULL); + + MessageLite* submsg = + MergePartialFromCodedStreamHelper::Add(field, prototype); + + if (GOOGLE_PREDICT_FALSE( + !WireFormatLite::ReadGroup(field_number, input, submsg))) { + return false; + } + + break; + } + case WireFormatLite::TYPE_MESSAGE: { + MessageLite** submsg_holder = + MutableField(msg, has_bits, presence_index, offset); + MessageLite* submsg = *submsg_holder; + + if (submsg == NULL) { + Arena* const arena = + GetArena(msg, table.arena_offset); + const MessageLite* prototype = + table.aux[field_number].messages.default_message(); + if (prototype == NULL) { + prototype = + ::google::protobuf::internal::ImplicitWeakMessage::default_instance(); + } + submsg = prototype->New(arena); + *submsg_holder = submsg; + } + + if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadMessage(input, submsg))) { + return false; + } + + break; + } + // TODO(ckennelly): Adapt ReadMessageNoVirtualNoRecursionDepth and + // manage input->IncrementRecursionDepth() here. + case WireFormatLite::TYPE_MESSAGE | kRepeatedMask: { + RepeatedPtrFieldBase* field = Raw(msg, offset); + const MessageLite* prototype = + table.aux[field_number].messages.default_message(); + if (prototype == NULL) { + prototype = + ::google::protobuf::internal::ImplicitWeakMessage::default_instance(); + } + + MessageLite* submsg = + MergePartialFromCodedStreamHelper::Add(field, prototype); + + if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadMessage(input, submsg))) { + return false; + } + + break; + } + case WireFormatLite::TYPE_MESSAGE | kOneofMask: { + Arena* const arena = + GetArena(msg, table.arena_offset); + uint32* oneof_case = Raw(msg, table.oneof_case_offset); + MessageLite** submsg_holder = Raw(msg, offset); + ResetOneofField( + table, field_number, arena, msg, oneof_case + presence_index, + offset, NULL); + MessageLite* submsg = *submsg_holder; + + if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadMessage(input, submsg))) { + return false; + } + + break; + } +#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + case TYPE_STRING_INLINED: { + Arena* const arena = + GetArena(msg, table.arena_offset); + const void* default_ptr = table.aux[field_number].strings.default_ptr; + const char* field_name = table.aux[field_number].strings.field_name; + + if (GOOGLE_PREDICT_FALSE(( + !HandleString( + input, msg, arena, has_bits, presence_index, offset, + default_ptr, field_name)))) { + return false; + } + break; + } +#endif // GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED + case TYPE_MAP: { + if (GOOGLE_PREDICT_FALSE(!(*table.aux[field_number].maps.parse_map)( + input, Raw(msg, offset)))) { + return false; + } + break; + } + case 0: { + // Done. + return true; + } + default: + break; + } + } else if (data->packed_wiretype == static_cast(wire_type)) { + // Non-packable fields have their packed_wiretype masked with + // kNotPackedMask, which is impossible to match here. + GOOGLE_DCHECK(processing_type & kRepeatedMask); + GOOGLE_DCHECK_NE(processing_type, kRepeatedMask); + GOOGLE_DCHECK_EQ(0, processing_type & kOneofMask); + + GOOGLE_DCHECK_NE(TYPE_BYTES_INLINED | kRepeatedMask, processing_type); + GOOGLE_DCHECK_NE(TYPE_STRING_INLINED | kRepeatedMask, processing_type); + + // TODO(ckennelly): Use a computed goto on GCC/LLVM. + // + // Mask out kRepeatedMask bit, allowing the jump table to be smaller. 
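+      // Editor's note, an illustrative sketch with assumed constants (the
+      // actual mask values live in the ParseTableField encoding and are not
+      // shown here): if, say, kRepeatedMask were 0x20, and given that
+      // WireFormatLite::TYPE_INT32 == 5, a repeated int32 field would store
+      // processing_type = 5 | 0x20 = 0x25, and processing_type ^ kRepeatedMask
+      // recovers WireFormatLite::TYPE_INT32. The switch below therefore only
+      // needs cases for the base (unrepeated) type codes.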
+ switch (static_cast( + processing_type ^ kRepeatedMask)) { +#define HANDLE_PACKED_TYPE(TYPE, CPPTYPE, CPPTYPE_METHOD) \ + case WireFormatLite::TYPE_##TYPE: { \ + google::protobuf::RepeatedField* values = \ + Raw >(msg, offset); \ + if (GOOGLE_PREDICT_FALSE( \ + (!WireFormatLite::ReadPackedPrimitive< \ + CPPTYPE, WireFormatLite::TYPE_##TYPE>(input, values)))) { \ + return false; \ + } \ + break; \ + } + + HANDLE_PACKED_TYPE(INT32, int32, Int32) + HANDLE_PACKED_TYPE(INT64, int64, Int64) + HANDLE_PACKED_TYPE(SINT32, int32, Int32) + HANDLE_PACKED_TYPE(SINT64, int64, Int64) + HANDLE_PACKED_TYPE(UINT32, uint32, UInt32) + HANDLE_PACKED_TYPE(UINT64, uint64, UInt64) + + HANDLE_PACKED_TYPE(FIXED32, uint32, UInt32) + HANDLE_PACKED_TYPE(FIXED64, uint64, UInt64) + HANDLE_PACKED_TYPE(SFIXED32, int32, Int32) + HANDLE_PACKED_TYPE(SFIXED64, int64, Int64) + + HANDLE_PACKED_TYPE(FLOAT, float, Float) + HANDLE_PACKED_TYPE(DOUBLE, double, Double) + + HANDLE_PACKED_TYPE(BOOL, bool, Bool) +#undef HANDLE_PACKED_TYPE + case WireFormatLite::TYPE_ENUM: { + // To avoid unnecessarily calling MutableUnknownFields (which mutates + // InternalMetadataWithArena) when all inputs in the repeated series + // are valid, we implement our own parser rather than call + // WireFormat::ReadPackedEnumPreserveUnknowns. + uint32 length; + if (GOOGLE_PREDICT_FALSE(!input->ReadVarint32(&length))) { + return false; + } + + AuxillaryParseTableField::EnumValidator validator = + table.aux[field_number].enums.validator; + google::protobuf::RepeatedField* values = + Raw >(msg, offset); + + io::CodedInputStream::Limit limit = input->PushLimit(length); + while (input->BytesUntilLimit() > 0) { + int value; + if (GOOGLE_PREDICT_FALSE( + (!google::protobuf::internal::WireFormatLite::ReadPrimitive< + int, WireFormatLite::TYPE_ENUM>(input, &value)))) { + return false; + } + + if (validator(value)) { + values->Add(value); + } else { + // TODO(ckennelly): Consider caching here. + UnknownFieldHandler::Varint(msg, table, tag, value); + } + } + input->PopLimit(limit); + + break; + } + case WireFormatLite::TYPE_STRING: + case WireFormatLite::TYPE_GROUP: + case WireFormatLite::TYPE_MESSAGE: + case WireFormatLite::TYPE_BYTES: + GOOGLE_DCHECK(false); + return false; + default: + break; + } + } else { + if (wire_type == WireFormatLite::WIRETYPE_END_GROUP) { + // Must be the end of the message. + return true; + } + + // check for possible extensions + if (UnknownFieldHandler::ParseExtension(msg, table, input, tag)) { + // successfully parsed + continue; + } + + // process unknown field. + if (GOOGLE_PREDICT_FALSE( + !UnknownFieldHandler::Skip(msg, table, input, tag))) { + return false; + } + } + } +} + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_LITE_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_util.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_util.h new file mode 100644 index 0000000000000000000000000000000000000000..04f68a6e0864a25507ba20adece87985d21bf58e --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_util.h @@ -0,0 +1,391 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// This file contains miscellaneous helper code used by generated code --
+// including lite types -- but which should not be used directly by users.
+
+#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__
+#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__
+
+#include <assert.h>
+#include <atomic>
+#include <climits>
+#include <string>
+#include <vector>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/once.h>
+#include <google/protobuf/stubs/port.h>  // Add direct dep on port for pb.cc
+#include <google/protobuf/has_bits.h>
+#include <google/protobuf/implicit_weak_message.h>
+#include <google/protobuf/message_lite.h>
+#include <google/protobuf/repeated_field.h>
+#include <google/protobuf/wire_format_lite.h>
+
+namespace google {
+
+namespace protobuf {
+
+class Arena;
+
+namespace io { class CodedInputStream; }
+
+namespace internal {
+
+
+// Annotation for the compiler to emit a deprecation message if a field marked
+// with option 'deprecated=true' is used in the code, or for other things in
+// generated code which are deprecated.
+//
+// For internal use in the pb.cc files, deprecation warnings are suppressed
+// there.
+#undef DEPRECATED_PROTOBUF_FIELD
+#define PROTOBUF_DEPRECATED
+
+#define GOOGLE_PROTOBUF_DEPRECATED_ATTR
+
+
+// Returns the offset of the given field within the given aggregate type.
+// This is equivalent to the ANSI C offsetof() macro. However, according
+// to the C++ standard, offsetof() only works on POD types, and GCC
+// enforces this requirement with a warning. In practice, this rule is
+// unnecessarily strict; there is probably no compiler or platform on
+// which the offsets of the direct fields of a class are non-constant.
+// Fields inherited from superclasses *can* have non-constant offsets,
+// but that's not what this macro will be used for.
+#if defined(__clang__)
+// For Clang we use __builtin_offsetof() and suppress the warning,
+// to avoid Control Flow Integrity and UBSan vptr sanitizers from
+// crashing while trying to validate the invalid reinterpret_casts.
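+// Editor's note, an illustrative example using a hypothetical struct: with
+//   struct S { int a; int b; };
+// GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(S, b), defined just below,
+// evaluates to 4 on a typical ABI with 4-byte int and no padding between
+// the two fields.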
+#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TYPE, FIELD) \
+  _Pragma("clang diagnostic push")                                  \
+  _Pragma("clang diagnostic ignored \"-Winvalid-offsetof\"")        \
+  __builtin_offsetof(TYPE, FIELD)                                   \
+  _Pragma("clang diagnostic pop")
+#else
+// Note that we calculate relative to the pointer value 16 here since if we
+// just use zero, GCC complains about dereferencing a NULL pointer. We
+// choose 16 rather than some other number just in case the compiler would
+// be confused by an unaligned pointer.
+#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TYPE, FIELD) \
+  static_cast< ::google::protobuf::uint32>(                         \
+      reinterpret_cast<const char*>(                                \
+          &reinterpret_cast<const TYPE*>(16)->FIELD) -              \
+      reinterpret_cast<const char*>(16))
+#endif
+
+// Constants for special floating point values.
+LIBPROTOBUF_EXPORT double Infinity();
+LIBPROTOBUF_EXPORT double NaN();
+
+LIBPROTOBUF_EXPORT void InitProtobufDefaults();
+
+// This is used by proto1.
+inline const std::string& GetEmptyString() {
+  InitProtobufDefaults();
+  return GetEmptyStringAlreadyInited();
+}
+
+// True if IsInitialized() is true for all elements of t. Type is expected
+// to be a RepeatedPtrField<some message type>. It's useful to have this
+// helper here to keep the protobuf compiler from ever having to emit loops in
+// IsInitialized() methods. We want the C++ compiler to inline this or not
+// as it sees fit.
+template <class Type> bool AllAreInitialized(const Type& t) {
+  for (int i = t.size(); --i >= 0; ) {
+    if (!t.Get(i).IsInitialized()) return false;
+  }
+  return true;
+}
+
+// "Weak" variant of AllAreInitialized, used to implement implicit weak fields.
+// This version operates on MessageLite to avoid introducing a dependency on
+// the concrete message type.
+template <class T>
+bool AllAreInitializedWeak(const ::google::protobuf::RepeatedPtrField<T>& t) {
+  for (int i = t.size(); --i >= 0;) {
+    if (!reinterpret_cast<const ::google::protobuf::internal::RepeatedPtrFieldBase&>(t)
+             .Get<::google::protobuf::internal::ImplicitWeakTypeHandler<T> >(i)
+             .IsInitialized()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+struct LIBPROTOBUF_EXPORT FieldMetadata {
+  uint32 offset;  // offset of this field in the struct
+  uint32 tag;     // field * 8 + wire_type
+  // byte offset * 8 + bit_offset;
+  // if the high bit is set then this is the byte offset of the oneof_case
+  // for this field.
+  uint32 has_offset;
+  uint32 type;      // the type of this field.
+  const void* ptr;  // auxiliary data
+
+  // From the serializer point of view each fundamental type can occur in
+  // 4 different ways. For simplicity we treat all combinations as a cartesian
+  // product although not all combinations are allowed.
+  enum FieldTypeClass {
+    kPresence,
+    kNoPresence,
+    kRepeated,
+    kPacked,
+    kOneOf,
+    kNumTypeClasses  // must be last enum
+  };
+  // C++ protobuf has 20 fundamental types, where we added Cord and StringPiece
+  // and also distinguish the same types if they have different wire formats.
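+  // For example (editor's sketch): a packed repeated fixed32 field pairs
+  // fundamental type TYPE_FIXED32 with type class kPacked, while a fixed32
+  // member of a oneof pairs it with kOneOf; CalculateType() below presumably
+  // folds the (fundamental type, type class) pair into the single 'type'
+  // code stored above.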
+ enum { + kCordType = 19, + kStringPieceType = 20, + kInlinedType = 21, + kNumTypes = 21, + kSpecial = kNumTypes * kNumTypeClasses, + }; + + static int CalculateType(int fundamental_type, FieldTypeClass type_class); +}; + +inline bool IsPresent(const void* base, uint32 hasbit) { + const uint32* has_bits_array = static_cast(base); + return (has_bits_array[hasbit / 32] & (1u << (hasbit & 31))) != 0; +} + +inline bool IsOneofPresent(const void* base, uint32 offset, uint32 tag) { + const uint32* oneof = + reinterpret_cast(static_cast(base) + offset); + return *oneof == tag >> 3; +} + +typedef void (*SpecialSerializer)(const uint8* base, uint32 offset, uint32 tag, + uint32 has_offset, + ::google::protobuf::io::CodedOutputStream* output); + +LIBPROTOBUF_EXPORT void ExtensionSerializer(const uint8* base, uint32 offset, uint32 tag, + uint32 has_offset, + ::google::protobuf::io::CodedOutputStream* output); +LIBPROTOBUF_EXPORT void UnknownFieldSerializerLite(const uint8* base, uint32 offset, uint32 tag, + uint32 has_offset, + ::google::protobuf::io::CodedOutputStream* output); + +struct SerializationTable { + int num_fields; + const FieldMetadata* field_table; +}; + +LIBPROTOBUF_EXPORT void SerializeInternal(const uint8* base, const FieldMetadata* table, + int num_fields, ::google::protobuf::io::CodedOutputStream* output); + +inline void TableSerialize(const ::google::protobuf::MessageLite& msg, + const SerializationTable* table, + ::google::protobuf::io::CodedOutputStream* output) { + const FieldMetadata* field_table = table->field_table; + int num_fields = table->num_fields - 1; + const uint8* base = reinterpret_cast(&msg); + // TODO(gerbens) This skips the first test if we could use the fast + // array serialization path, we should make this + // int cached_size = + // *reinterpret_cast(base + field_table->offset); + // SerializeWithCachedSize(msg, field_table + 1, num_fields, cached_size, ...) + // But we keep conformance with the old way for now. 
+ SerializeInternal(base, field_table + 1, num_fields, output); +} + +uint8* SerializeInternalToArray(const uint8* base, const FieldMetadata* table, + int num_fields, bool is_deterministic, + uint8* buffer); + +inline uint8* TableSerializeToArray(const ::google::protobuf::MessageLite& msg, + const SerializationTable* table, + bool is_deterministic, uint8* buffer) { + const uint8* base = reinterpret_cast(&msg); + const FieldMetadata* field_table = table->field_table + 1; + int num_fields = table->num_fields - 1; + return SerializeInternalToArray(base, field_table, num_fields, + is_deterministic, buffer); +} + +template +struct CompareHelper { + bool operator()(const T& a, const T& b) { return a < b; } +}; + +template <> +struct CompareHelper { + bool operator()(const ArenaStringPtr& a, const ArenaStringPtr& b) { + return a.Get() < b.Get(); + } +}; + +struct CompareMapKey { + template + bool operator()(const MapEntryHelper& a, const MapEntryHelper& b) { + return Compare(a.key_, b.key_); + } + template + bool Compare(const T& a, const T& b) { + return CompareHelper()(a, b); + } +}; + +template +void MapFieldSerializer(const uint8* base, uint32 offset, uint32 tag, + uint32 has_offset, + ::google::protobuf::io::CodedOutputStream* output) { + typedef MapEntryHelper Entry; + typedef typename MapFieldType::MapType::const_iterator Iter; + + const MapFieldType& map_field = + *reinterpret_cast(base + offset); + const SerializationTable* t = + table + + has_offset; // has_offset is overloaded for maps to mean table offset + if (!output->IsSerializationDeterministic()) { + for (Iter it = map_field.GetMap().begin(); it != map_field.GetMap().end(); + ++it) { + Entry map_entry(*it); + output->WriteVarint32(tag); + output->WriteVarint32(map_entry._cached_size_); + SerializeInternal(reinterpret_cast(&map_entry), + t->field_table, t->num_fields, output); + } + } else { + std::vector v; + for (Iter it = map_field.GetMap().begin(); it != map_field.GetMap().end(); + ++it) { + v.push_back(Entry(*it)); + } + std::sort(v.begin(), v.end(), CompareMapKey()); + for (int i = 0; i < v.size(); i++) { + output->WriteVarint32(tag); + output->WriteVarint32(v[i]._cached_size_); + SerializeInternal(reinterpret_cast(&v[i]), t->field_table, + t->num_fields, output); + } + } +} + +LIBPROTOBUF_EXPORT MessageLite* DuplicateIfNonNullInternal(MessageLite* message); +LIBPROTOBUF_EXPORT MessageLite* GetOwnedMessageInternal(Arena* message_arena, + MessageLite* submessage, + Arena* submessage_arena); + +template +T* DuplicateIfNonNull(T* message) { + // The casts must be reinterpret_cast<> because T might be a forward-declared + // type that the compiler doesn't know is related to MessageLite. + return reinterpret_cast( + DuplicateIfNonNullInternal(reinterpret_cast(message))); +} + +template +T* GetOwnedMessage(Arena* message_arena, T* submessage, + Arena* submessage_arena) { + // The casts must be reinterpret_cast<> because T might be a forward-declared + // type that the compiler doesn't know is related to MessageLite. + return reinterpret_cast(GetOwnedMessageInternal( + message_arena, reinterpret_cast(submessage), + submessage_arena)); +} + +// Hide atomic from the public header and allow easy change to regular int +// on platforms where the atomic might have a perf impact. 
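+// Usage sketch of the class below (editor's illustration, not generated
+// code; the surrounding calls are hypothetical):
+//   CachedSize cached_size_;
+//   cached_size_.Set(static_cast<int>(some_message.ByteSizeLong()));
+//   int size = cached_size_.Get();  // relaxed load; no synchronization implied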
+class LIBPROTOBUF_EXPORT CachedSize {
+ public:
+  int Get() const { return size_.load(std::memory_order_relaxed); }
+  void Set(int size) { size_.store(size, std::memory_order_relaxed); }
+
+ private:
+  std::atomic<int> size_{0};
+};
+
+// SCCInfo represents information of a strongly connected component of
+// mutual dependent messages.
+struct LIBPROTOBUF_EXPORT SCCInfoBase {
+  // We use 0 for the Initialized state, because test eax,eax, jnz is smaller
+  // and is subject to macro fusion.
+  enum {
+    kInitialized = 0,  // final state
+    kRunning = 1,
+    kUninitialized = -1,  // initial state
+  };
+#ifndef _MSC_VER
+  std::atomic<int> visit_status;
+#else
+  // MSVC doesn't make std::atomic constant initialized. This union trick
+  // makes it so.
+  union {
+    int visit_status_to_make_linker_init;
+    std::atomic<int> visit_status;
+  };
+#endif
+  int num_deps;
+  void (*init_func)();
+  // This is followed by an array of num_deps
+  //   const SCCInfoBase* deps[];
+};
+
+template <int N>
+struct SCCInfo {
+  SCCInfoBase base;
+  // Semantically this is const SCCInfo<T>*, which is a templated type.
+  // The obvious inheriting from SCCInfoBase mucks with struct initialization.
+  // Attempts showed the compiler was generating dynamic initialization code.
+  // Zero length arrays produce warnings with MSVC.
+  SCCInfoBase* deps[N ? N : 1];
+};
+
+LIBPROTOBUF_EXPORT void InitSCCImpl(SCCInfoBase* scc);
+
+inline void InitSCC(SCCInfoBase* scc) {
+  auto status = scc->visit_status.load(std::memory_order_acquire);
+  if (GOOGLE_PREDICT_FALSE(status != SCCInfoBase::kInitialized)) InitSCCImpl(scc);
+}
+
+LIBPROTOBUF_EXPORT void DestroyMessage(const void* message);
+LIBPROTOBUF_EXPORT void DestroyString(const void* s);
+// Destroy (not delete) the message
+inline void OnShutdownDestroyMessage(const void* ptr) {
+  OnShutdownRun(DestroyMessage, ptr);
+}
+// Destroy the string (call string destructor)
+inline void OnShutdownDestroyString(const std::string* ptr) {
+  OnShutdownRun(DestroyString, ptr);
+}
+
+}  // namespace internal
+}  // namespace protobuf
+
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/has_bits.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/has_bits.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3a0149a47598e2919a027e19f4ca491d46fc75c
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/has_bits.h
@@ -0,0 +1,105 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_HAS_BITS_H__ +#define GOOGLE_PROTOBUF_HAS_BITS_H__ + +#include +#include + +namespace google { +namespace protobuf { +namespace internal { + +template +class HasBits { + public: + HasBits() GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { Clear(); } + + void Clear() GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + memset(has_bits_, 0, sizeof(has_bits_)); + } + + ::google::protobuf::uint32& operator[](int index) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + return has_bits_[index]; + } + + const ::google::protobuf::uint32& operator[](int index) const + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + return has_bits_[index]; + } + + bool operator==(const HasBits& rhs) const { + return memcmp(has_bits_, rhs.has_bits_, sizeof(has_bits_)) == 0; + } + + bool operator!=(const HasBits& rhs) const { + return !(*this == rhs); + } + + bool empty() const; + + private: + ::google::protobuf::uint32 has_bits_[doublewords]; +}; + +template <> +inline bool HasBits<1>::empty() const { + return !has_bits_[0]; +} + +template <> +inline bool HasBits<2>::empty() const { + return !(has_bits_[0] | has_bits_[1]); +} + +template <> +inline bool HasBits<3>::empty() const { + return !(has_bits_[0] | has_bits_[1] | has_bits_[2]); +} + +template <> +inline bool HasBits<4>::empty() const { + return !(has_bits_[0] | has_bits_[1] | has_bits_[2] | has_bits_[3]); +} + +template +inline bool HasBits::empty() const { + for (size_t i = 0; i < doublewords; ++i) { + if (has_bits_[i]) return false; + } + return true; +} + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_HAS_BITS_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/implicit_weak_message.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/implicit_weak_message.h new file mode 100644 index 0000000000000000000000000000000000000000..3279bd17771e255f31942b3c5c01ef312fec360f --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/implicit_weak_message.h @@ -0,0 +1,135 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_IMPLICIT_WEAK_MESSAGE_H__ +#define GOOGLE_PROTOBUF_IMPLICIT_WEAK_MESSAGE_H__ + +#include +#include +#include + +// This file is logically internal-only and should only be used by protobuf +// generated code. + +namespace google { +namespace protobuf { +namespace internal { + +// An implementation of MessageLite that treats all data as unknown. This type +// acts as a placeholder for an implicit weak field in the case where the true +// message type does not get linked into the binary. +class LIBPROTOBUF_EXPORT ImplicitWeakMessage : public MessageLite { + public: + ImplicitWeakMessage() : arena_(NULL) {} + explicit ImplicitWeakMessage(Arena* arena) : arena_(arena) {} + + static const ImplicitWeakMessage* default_instance(); + + string GetTypeName() const { return ""; } + + MessageLite* New() const { return new ImplicitWeakMessage; } + MessageLite* New(Arena* arena) const { + return Arena::CreateMessage(arena); + } + + Arena* GetArena() const { return arena_; } + + void Clear() { data_.clear(); } + + bool IsInitialized() const { return true; } + + void CheckTypeAndMergeFrom(const MessageLite& other) { + data_.append(static_cast(other).data_); + } + + bool MergePartialFromCodedStream(io::CodedInputStream* input); + + size_t ByteSizeLong() const { return data_.size(); } + + void SerializeWithCachedSizes(io::CodedOutputStream* output) const { + output->WriteString(data_); + } + + int GetCachedSize() const { return static_cast(data_.size()); } + + typedef void InternalArenaConstructable_; + + private: + Arena* const arena_; + string data_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ImplicitWeakMessage); +}; + +// A type handler for use with implicit weak repeated message fields. +template +class ImplicitWeakTypeHandler { + public: + typedef ImplicitWeakType Type; + typedef ::google::protobuf::MessageLite WeakType; + static const bool Moveable = false; + + // With implicit weak fields, we need separate NewFromPrototype and + // NewFromPrototypeWeak functions. The former is used when we want to create a + // strong dependency on the message type, and it just delegates to the + // GenericTypeHandler. The latter avoids creating a strong dependency, by + // simply calling MessageLite::New. 
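+  // Editor's note (illustrative): because ImplicitWeakMessage above simply
+  // buffers the payload in a string, a field parsed through this handler
+  // still round-trips its bytes even when the real type is not linked in,
+  // presumably along these lines:
+  //   ImplicitWeakMessage m;               // placeholder instance
+  //   m.MergePartialFromCodedStream(in);   // stores the raw bytes
+  //   m.SerializeWithCachedSizes(out);     // re-emits the same bytes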
+ static inline ::google::protobuf::MessageLite* NewFromPrototype( + const ::google::protobuf::MessageLite* prototype, ::google::protobuf::Arena* arena = NULL) { + return prototype->New(arena); + } + + static inline void Delete(::google::protobuf::MessageLite* value, Arena* arena) { + if (arena == NULL) { + delete value; + } + } + static inline ::google::protobuf::Arena* GetArena(::google::protobuf::MessageLite* value) { + return value->GetArena(); + } + static inline void* GetMaybeArenaPointer(::google::protobuf::MessageLite* value) { + return value->GetArena(); + } + static inline void Clear(::google::protobuf::MessageLite* value) { + value->Clear(); + } + static void Merge(const ::google::protobuf::MessageLite& from, + ::google::protobuf::MessageLite* to) { + to->CheckTypeAndMergeFrom(from); + } + static inline size_t SpaceUsedLong(const Type& value) { + return value.SpaceUsedLong(); + } +}; + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_IMPLICIT_WEAK_MESSAGE_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/inlined_string_field.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/inlined_string_field.h new file mode 100644 index 0000000000000000000000000000000000000000..95d4687be33d674f8f3ddc74b1deda8d4a45e6a8 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/inlined_string_field.h @@ -0,0 +1,271 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef GOOGLE_PROTOBUF_INLINED_STRING_FIELD_H__ +#define GOOGLE_PROTOBUF_INLINED_STRING_FIELD_H__ + +#include + +#include +#include + +namespace google { +namespace protobuf { + +class Arena; + +namespace internal { + +// InlinedStringField wraps a ::std::string instance and exposes an API similar to +// ArenaStringPtr's wrapping of a ::std::string* instance. As ::std::string is never +// allocated on the Arena, we expose only the *NoArena methods of +// ArenaStringPtr. +// +// default_value parameters are taken for consistency with ArenaStringPtr, but +// are not used for most methods. With inlining, these should be removed from +// the generated binary. +class LIBPROTOBUF_EXPORT InlinedStringField { + public: + InlinedStringField() + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + explicit InlinedStringField(const ::std::string& default_value); + + void AssignWithDefault(const ::std::string* default_value, + const InlinedStringField& from) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + + void ClearToEmpty(const ::std::string* default_value, Arena* arena) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + ClearToEmptyNoArena(default_value); + } + void ClearNonDefaultToEmpty() GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + ClearNonDefaultToEmptyNoArena(); + } + void ClearToEmptyNoArena(const ::std::string* default_value) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + ClearNonDefaultToEmptyNoArena(); + } + void ClearNonDefaultToEmptyNoArena() + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + + void ClearToDefault(const ::std::string* default_value, Arena* arena) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + ClearToDefaultNoArena(default_value); + } + void ClearToDefaultNoArena(const ::std::string* default_value) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + + void Destroy(const ::std::string* default_value, Arena* arena) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + DestroyNoArena(default_value); + } + void DestroyNoArena(const ::std::string* default_value) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + + const ::std::string& Get() const GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + return GetNoArena(); + } + const ::std::string& GetNoArena() const GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + + ::std::string* Mutable(const ::std::string* default_value, Arena* arena) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + return MutableNoArena(default_value); + } + ::std::string* MutableNoArena(const ::std::string* default_value) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + + ::std::string* Release(const ::std::string* default_value, Arena* arena) { + return ReleaseNoArena(default_value); + } + ::std::string* ReleaseNonDefault(const ::std::string* default_value, Arena* arena) { + return ReleaseNonDefaultNoArena(default_value); + } + ::std::string* ReleaseNoArena(const ::std::string* default_value) { + return ReleaseNonDefaultNoArena(default_value); + } + ::std::string* ReleaseNonDefaultNoArena(const ::std::string* default_value); + + void Set(const ::std::string* default_value, + StringPiece value, + Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + SetNoArena(default_value, value); + } + void SetLite(const ::std::string* default_value, + StringPiece value, + Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + SetNoArena(default_value, value); + } + void SetNoArena(const ::std::string* default_value, + StringPiece value) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + + void Set(const ::std::string* default_value, + const ::std::string& value, + Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + 
SetNoArena(default_value, value); + } + void SetLite(const ::std::string* default_value, + const ::std::string& value, + Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { + SetNoArena(default_value, value); + } + void SetNoArena(const ::std::string* default_value, + const ::std::string& value) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + +#if LANG_CXX11 + void SetNoArena(const ::std::string* default_value, + ::std::string&& value) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; +#endif + void SetAllocated(const ::std::string* default_value, + ::std::string* value, + Arena* arena) { + SetAllocatedNoArena(default_value, value); + } + void SetAllocatedNoArena(const ::std::string* default_value, + ::std::string* value); + void Swap(InlinedStringField* from) + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE; + ::std::string* UnsafeMutablePointer(); + void UnsafeSetDefault(const ::std::string* default_value); + ::std::string* UnsafeArenaRelease(const ::std::string* default_value, Arena* arena); + void UnsafeArenaSetAllocated( + const ::std::string* default_value, ::std::string* value, Arena* arena); + + bool IsDefault(const ::std::string* default_value) { + return false; + } + private: + ::std::string value_; +}; + +inline InlinedStringField::InlinedStringField() {} + +inline InlinedStringField::InlinedStringField(const ::std::string& default_value) : + value_(default_value) {} + +inline void InlinedStringField::AssignWithDefault( + const ::std::string* default_value, const InlinedStringField& from) { + value_ = from.value_; +} + +inline const ::std::string& InlinedStringField::GetNoArena() const { + return value_; +} + +inline ::std::string* InlinedStringField::MutableNoArena(const ::std::string*) { + return &value_; +} + +inline void InlinedStringField::SetAllocatedNoArena( + const ::std::string* default_value, ::std::string* value) { + if (value == NULL) { + value_.assign(*default_value); + } else { +#if LANG_CXX11 + value_.assign(std::move(*value)); +#else + value_.swap(*value); +#endif + delete value; + } +} + +inline void InlinedStringField::DestroyNoArena(const ::std::string*) { + // This is invoked from the generated message's ArenaDtor, which is used to + // clean up objects not allocated on the Arena. 
+ this->~InlinedStringField(); +} + +inline void InlinedStringField::ClearNonDefaultToEmptyNoArena() { + value_.clear(); +} + +inline void InlinedStringField::ClearToDefaultNoArena( + const ::std::string* default_value) { + value_.assign(*default_value); +} + +inline ::std::string* InlinedStringField::ReleaseNonDefaultNoArena( + const ::std::string* default_value) { + ::std::string* released = new ::std::string(*default_value); + value_.swap(*released); + return released; +} + +inline void InlinedStringField::SetNoArena( + const ::std::string* default_value, StringPiece value) { + value_.assign(value.data(), value.length()); +} + +inline void InlinedStringField::SetNoArena( + const ::std::string* default_value, const ::std::string& value) { + value_.assign(value); +} + +#if LANG_CXX11 +inline void InlinedStringField::SetNoArena( + const ::std::string* default_value, ::std::string&& value) { + value_.assign(std::move(value)); +} +#endif + +inline void InlinedStringField::Swap(InlinedStringField* from) { + value_.swap(from->value_); +} + +inline ::std::string* InlinedStringField::UnsafeMutablePointer() { + return &value_; +} + +inline void InlinedStringField::UnsafeSetDefault( + const ::std::string* default_value) { + value_.assign(*default_value); +} + +inline ::std::string* InlinedStringField::UnsafeArenaRelease( + const ::std::string* default_value, Arena* arena) { + return ReleaseNoArena(default_value); +} + +inline void InlinedStringField::UnsafeArenaSetAllocated( + const ::std::string* default_value, ::std::string* value, Arena* arena) { + if (value == NULL) { + value_.assign(*default_value); + } else { + value_.assign(*value); + } +} + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_INLINED_STRING_FIELD_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream.h new file mode 100644 index 0000000000000000000000000000000000000000..0f70ecde4c3f0050d79d6eea5028f813d3b77e8f --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream.h @@ -0,0 +1,1400 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// This file contains the CodedInputStream and CodedOutputStream classes,
+// which wrap a ZeroCopyInputStream or ZeroCopyOutputStream, respectively,
+// and allow you to read or write individual pieces of data in various
+// formats. In particular, these implement the varint encoding for
+// integers, a simple variable-length encoding in which smaller numbers
+// take fewer bytes.
+//
+// Typically these classes will only be used internally by the protocol
+// buffer library in order to encode and decode protocol buffers. Clients
+// of the library only need to know about this class if they wish to write
+// custom message parsing or serialization procedures.
+//
+// CodedOutputStream example:
+//   // Write some data to "myfile". First we write a 4-byte "magic number"
+//   // to identify the file type, then write a length-delimited string. The
+//   // string is composed of a varint giving the length followed by the raw
+//   // bytes.
+//   int fd = open("myfile", O_CREAT | O_WRONLY);
+//   ZeroCopyOutputStream* raw_output = new FileOutputStream(fd);
+//   CodedOutputStream* coded_output = new CodedOutputStream(raw_output);
+//
+//   int magic_number = 1234;
+//   char text[] = "Hello world!";
+//   coded_output->WriteLittleEndian32(magic_number);
+//   coded_output->WriteVarint32(strlen(text));
+//   coded_output->WriteRaw(text, strlen(text));
+//
+//   delete coded_output;
+//   delete raw_output;
+//   close(fd);
+//
+// CodedInputStream example:
+//   // Read a file created by the above code.
+//   int fd = open("myfile", O_RDONLY);
+//   ZeroCopyInputStream* raw_input = new FileInputStream(fd);
+//   CodedInputStream* coded_input = new CodedInputStream(raw_input);
+//
+//   uint32 magic_number;
+//   coded_input->ReadLittleEndian32(&magic_number);
+//   if (magic_number != 1234) {
+//     cerr << "File not in expected format." << endl;
+//     return;
+//   }
+//
+//   uint32 size;
+//   coded_input->ReadVarint32(&size);
+//
+//   char* text = new char[size + 1];
+//   coded_input->ReadRaw(text, size);
+//   text[size] = '\0';
+//
+//   delete coded_input;
+//   delete raw_input;
+//   close(fd);
+//
+//   cout << "Text is: " << text << endl;
+//   delete [] text;
+//
+// For those who are interested, varint encoding is defined as follows:
+//
+// The encoding operates on unsigned integers of up to 64 bits in length.
+// Each byte of the encoded value has the format:
+// * bits 0-6: Seven bits of the number being encoded.
+// * bit 7: Zero if this is the last byte in the encoding (in which
+//   case all remaining bits of the number are zero) or 1 if
+//   more bytes follow.
+// The first byte contains the least-significant 7 bits of the number, the
+// second byte (if present) contains the next-least-significant 7 bits,
+// and so on. So, the binary number 1011000101011 would be encoded in two
+// bytes as "10101011 00101100".
+//
+// In theory, varint could be used to encode integers of any length.
+// However, for practicality we set a limit at 64 bits. The maximum encoded +// length of a number is thus 10 bytes. + +#ifndef GOOGLE_PROTOBUF_IO_CODED_STREAM_H__ +#define GOOGLE_PROTOBUF_IO_CODED_STREAM_H__ + +#include +#include +#include +#include +#include +#ifdef _MSC_VER + // Assuming windows is always little-endian. + #if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST) + #define PROTOBUF_LITTLE_ENDIAN 1 + #endif + #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER) + // If MSVC has "/RTCc" set, it will complain about truncating casts at + // runtime. This file contains some intentional truncating casts. + #pragma runtime_checks("c", off) + #endif +#else + #include // __BYTE_ORDER + #if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \ + (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN)) && \ + !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST) + #define PROTOBUF_LITTLE_ENDIAN 1 + #endif +#endif +#include +#include +#include + +namespace google { + +namespace protobuf { + +class DescriptorPool; +class MessageFactory; + +namespace internal { void MapTestForceDeterministic(); } + +namespace io { + +// Defined in this file. +class CodedInputStream; +class CodedOutputStream; + +// Defined in other files. +class ZeroCopyInputStream; // zero_copy_stream.h +class ZeroCopyOutputStream; // zero_copy_stream.h + +// Class which reads and decodes binary data which is composed of varint- +// encoded integers and fixed-width pieces. Wraps a ZeroCopyInputStream. +// Most users will not need to deal with CodedInputStream. +// +// Most methods of CodedInputStream that return a bool return false if an +// underlying I/O error occurs or if the data is malformed. Once such a +// failure occurs, the CodedInputStream is broken and is no longer useful. +class LIBPROTOBUF_EXPORT CodedInputStream { + public: + // Create a CodedInputStream that reads from the given ZeroCopyInputStream. + explicit CodedInputStream(ZeroCopyInputStream* input); + + // Create a CodedInputStream that reads from the given flat array. This is + // faster than using an ArrayInputStream. PushLimit(size) is implied by + // this constructor. + explicit CodedInputStream(const uint8* buffer, int size); + + // Destroy the CodedInputStream and position the underlying + // ZeroCopyInputStream at the first unread byte. If an error occurred while + // reading (causing a method to return false), then the exact position of + // the input stream may be anywhere between the last value that was read + // successfully and the stream's byte limit. + ~CodedInputStream(); + + // Return true if this CodedInputStream reads from a flat array instead of + // a ZeroCopyInputStream. + inline bool IsFlat() const; + + // Skips a number of bytes. Returns false if an underlying read error + // occurs. + inline bool Skip(int count); + + // Sets *data to point directly at the unread part of the CodedInputStream's + // underlying buffer, and *size to the size of that buffer, but does not + // advance the stream's current position. This will always either produce + // a non-empty buffer or return false. If the caller consumes any of + // this data, it should then call Skip() to skip over the consumed bytes. + // This may be useful for implementing external fast parsing routines for + // types of data not covered by the CodedInputStream interface. 
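+  // Usage sketch (editor's illustration; `bytes_consumed` stands in for
+  // however many bytes the caller actually processed):
+  //   const void* data;
+  //   int size;
+  //   if (coded_input->GetDirectBufferPointer(&data, &size)) {
+  //     // ... consume some prefix of the `size` bytes at `data` ...
+  //     coded_input->Skip(bytes_consumed);
+  //   }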
+ bool GetDirectBufferPointer(const void** data, int* size); + + // Like GetDirectBufferPointer, but this method is inlined, and does not + // attempt to Refresh() if the buffer is currently empty. + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE + void GetDirectBufferPointerInline(const void** data, int* size); + + // Read raw bytes, copying them into the given buffer. + bool ReadRaw(void* buffer, int size); + + // Like the above, with inlined optimizations. This should only be used + // by the protobuf implementation. + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE + bool InternalReadRawInline(void* buffer, int size); + + // Like ReadRaw, but reads into a string. + bool ReadString(string* buffer, int size); + // Like the above, with inlined optimizations. This should only be used + // by the protobuf implementation. + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE + bool InternalReadStringInline(string* buffer, int size); + + + // Read a 32-bit little-endian integer. + bool ReadLittleEndian32(uint32* value); + // Read a 64-bit little-endian integer. + bool ReadLittleEndian64(uint64* value); + + // These methods read from an externally provided buffer. The caller is + // responsible for ensuring that the buffer has sufficient space. + // Read a 32-bit little-endian integer. + static const uint8* ReadLittleEndian32FromArray(const uint8* buffer, + uint32* value); + // Read a 64-bit little-endian integer. + static const uint8* ReadLittleEndian64FromArray(const uint8* buffer, + uint64* value); + + // Read an unsigned integer with Varint encoding, truncating to 32 bits. + // Reading a 32-bit value is equivalent to reading a 64-bit one and casting + // it to uint32, but may be more efficient. + bool ReadVarint32(uint32* value); + // Read an unsigned integer with Varint encoding. + bool ReadVarint64(uint64* value); + + // Reads a varint off the wire into an "int". This should be used for reading + // sizes off the wire (sizes of strings, submessages, bytes fields, etc). + // + // The value from the wire is interpreted as unsigned. If its value exceeds + // the representable value of an integer on this platform, instead of + // truncating we return false. Truncating (as performed by ReadVarint32() + // above) is an acceptable approach for fields representing an integer, but + // when we are parsing a size from the wire, truncating the value would result + // in us misparsing the payload. + bool ReadVarintSizeAsInt(int* value); + + // Read a tag. This calls ReadVarint32() and returns the result, or returns + // zero (which is not a valid tag) if ReadVarint32() fails. Also, ReadTag + // (but not ReadTagNoLastTag) updates the last tag value, which can be checked + // with LastTagWas(). + // + // Always inline because this is only called in one place per parse loop + // but it is called for every iteration of said loop, so it should be fast. + // GCC doesn't want to inline this by default. + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE uint32 ReadTag() { + return last_tag_ = ReadTagNoLastTag(); + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE uint32 ReadTagNoLastTag(); + + + // This usually a faster alternative to ReadTag() when cutoff is a manifest + // constant. It does particularly well for cutoff >= 127. The first part + // of the return value is the tag that was read, though it can also be 0 in + // the cases where ReadTag() would return 0. If the second part is true + // then the tag is known to be in [0, cutoff]. If not, the tag either is + // above cutoff or is 0. 
(There's intentional wiggle room when tag is 0,
+ // because that can arise in several ways, and for best performance we want
+ // to avoid an extra "is tag == 0?" check here.)
+ GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
+ std::pair<uint32, bool> ReadTagWithCutoff(uint32 cutoff) {
+ std::pair<uint32, bool> result = ReadTagWithCutoffNoLastTag(cutoff);
+ last_tag_ = result.first;
+ return result;
+ }
+
+ GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
+ std::pair<uint32, bool> ReadTagWithCutoffNoLastTag(uint32 cutoff);
+
+ // Usually returns true if calling ReadVarint32() now would produce the given
+ // value. Will always return false if ReadVarint32() would not return the
+ // given value. If ExpectTag() returns true, it also advances past
+ // the varint. For best performance, use a compile-time constant as the
+ // parameter.
+ // Always inline because this collapses to a small number of instructions
+ // when given a constant parameter, but GCC doesn't want to inline by default.
+ GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool ExpectTag(uint32 expected);
+
+ // Like above, except this reads from the specified buffer. The caller is
+ // responsible for ensuring that the buffer is large enough to read a varint
+ // of the expected size. For best performance, use a compile-time constant as
+ // the expected tag parameter.
+ //
+ // Returns a pointer beyond the expected tag if it was found, or NULL if it
+ // was not.
+ GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
+ static const uint8* ExpectTagFromArray(const uint8* buffer, uint32 expected);
+
+ // Usually returns true if no more bytes can be read. Always returns false
+ // if more bytes can be read. If ExpectAtEnd() returns true, a subsequent
+ // call to LastTagWas() will act as if ReadTag() had been called and returned
+ // zero, and ConsumedEntireMessage() will return true.
+ bool ExpectAtEnd();
+
+ // If the last call to ReadTag() or ReadTagWithCutoff() returned the given
+ // value, returns true. Otherwise, returns false.
+ // ReadTagNoLastTag/ReadTagWithCutoffNoLastTag do not preserve the last
+ // returned value.
+ //
+ // This is needed because parsers for some types of embedded messages
+ // (with field type TYPE_GROUP) don't actually know that they've reached the
+ // end of a message until they see an ENDGROUP tag, which was actually part
+ // of the enclosing message. The enclosing message would like to check that
+ // tag to make sure it had the right number, so it calls LastTagWas() on
+ // return from the embedded parser to check.
+ bool LastTagWas(uint32 expected);
+ void SetLastTag(uint32 tag) { last_tag_ = tag; }
+
+ // When parsing a message (but NOT a group), this method must be called
+ // immediately after MergeFromCodedStream() returns (if it returns true)
+ // to further verify that the message ended in a legitimate way. For
+ // example, this verifies that parsing did not end on an end-group tag.
+ // It also checks for some cases where, due to optimizations,
+ // MergeFromCodedStream() can incorrectly return true.
+ bool ConsumedEntireMessage();
+
+ // Limits ----------------------------------------------------------
+ // Limits are used when parsing length-delimited embedded messages.
+ // After the message's length is read, PushLimit() is used to prevent
+ // the CodedInputStream from reading beyond that length. Once the
+ // embedded message has been parsed, PopLimit() is called to undo the
+ // limit.
+
+ // Opaque type used with PushLimit() and PopLimit(). Do not modify
+ // values of this type yourself.
The only reason that this isn't a + // struct with private internals is for efficiency. + typedef int Limit; + + // Places a limit on the number of bytes that the stream may read, + // starting from the current position. Once the stream hits this limit, + // it will act like the end of the input has been reached until PopLimit() + // is called. + // + // As the names imply, the stream conceptually has a stack of limits. The + // shortest limit on the stack is always enforced, even if it is not the + // top limit. + // + // The value returned by PushLimit() is opaque to the caller, and must + // be passed unchanged to the corresponding call to PopLimit(). + Limit PushLimit(int byte_limit); + + // Pops the last limit pushed by PushLimit(). The input must be the value + // returned by that call to PushLimit(). + void PopLimit(Limit limit); + + // Returns the number of bytes left until the nearest limit on the + // stack is hit, or -1 if no limits are in place. + int BytesUntilLimit() const; + + // Returns current position relative to the beginning of the input stream. + int CurrentPosition() const; + + // Total Bytes Limit ----------------------------------------------- + // To prevent malicious users from sending excessively large messages + // and causing memory exhaustion, CodedInputStream imposes a hard limit on + // the total number of bytes it will read. + + // Sets the maximum number of bytes that this CodedInputStream will read + // before refusing to continue. To prevent servers from allocating enormous + // amounts of memory to hold parsed messages, the maximum message length + // should be limited to the shortest length that will not harm usability. + // The default limit is INT_MAX (~2GB) and apps should set shorter limits + // if possible. An error will always be printed to stderr if the limit is + // reached. + // + // Note: setting a limit less than the current read position is interpreted + // as a limit on the current position. + // + // This is unrelated to PushLimit()/PopLimit(). + void SetTotalBytesLimit(int total_bytes_limit); + + PROTOBUF_RUNTIME_DEPRECATED( + "Please use the single parameter version of SetTotalBytesLimit(). The " + "second parameter is ignored.") + void SetTotalBytesLimit(int total_bytes_limit, int) { + SetTotalBytesLimit(total_bytes_limit); + } + + // The Total Bytes Limit minus the Current Position, or -1 if the total bytes + // limit is INT_MAX. + int BytesUntilTotalBytesLimit() const; + + // Recursion Limit ------------------------------------------------- + // To prevent corrupt or malicious messages from causing stack overflows, + // we must keep track of the depth of recursion when parsing embedded + // messages and groups. CodedInputStream keeps track of this because it + // is the only object that is passed down the stack during parsing. + + // Sets the maximum recursion depth. The default is 100. + void SetRecursionLimit(int limit); + + + // Increments the current recursion depth. Returns true if the depth is + // under the limit, false if it has gone over. + bool IncrementRecursionDepth(); + + // Decrements the recursion depth if possible. + void DecrementRecursionDepth(); + + // Decrements the recursion depth blindly. This is faster than + // DecrementRecursionDepth(). It should be used only if all previous + // increments to recursion depth were successful. + void UnsafeDecrementRecursionDepth(); + + // Shorthand for make_pair(PushLimit(byte_limit), --recursion_budget_). + // Using this can reduce code size and complexity in some cases. 
The caller
+ // is expected to check that the second part of the result is non-negative (to
+ // bail out if the depth of recursion is too high) and, if all is well, to
+ // later pass the first part of the result to PopLimit() or similar.
+ std::pair<Limit, int> IncrementRecursionDepthAndPushLimit(
+ int byte_limit);
+
+ // Shorthand for PushLimit(ReadVarint32(&length) ? length : 0).
+ Limit ReadLengthAndPushLimit();
+
+ // Helper that is equivalent to: {
+ // bool result = ConsumedEntireMessage();
+ // PopLimit(limit);
+ // UnsafeDecrementRecursionDepth();
+ // return result; }
+ // Using this can reduce code size and complexity in some cases.
+ // Do not use unless the current recursion depth is greater than zero.
+ bool DecrementRecursionDepthAndPopLimit(Limit limit);
+
+ // Helper that is equivalent to: {
+ // bool result = ConsumedEntireMessage();
+ // PopLimit(limit);
+ // return result; }
+ // Using this can reduce code size and complexity in some cases.
+ bool CheckEntireMessageConsumedAndPopLimit(Limit limit);
+
+ // Extension Registry ----------------------------------------------
+ // ADVANCED USAGE: 99.9% of people can ignore this section.
+ //
+ // By default, when parsing extensions, the parser looks for extension
+ // definitions in the pool which owns the outer message's Descriptor.
+ // However, you may call SetExtensionRegistry() to provide an alternative
+ // pool instead. This makes it possible, for example, to parse a message
+ // using a generated class, but represent some extensions using
+ // DynamicMessage.
+
+ // Set the pool used to look up extensions. Most users do not need to call
+ // this as the correct pool will be chosen automatically.
+ //
+ // WARNING: It is very easy to misuse this. Carefully read the requirements
+ // below. Do not use this unless you are sure you need it. Almost no one
+ // does.
+ //
+ // Let's say you are parsing a message into message object m, and you want
+ // to take advantage of SetExtensionRegistry(). You must follow these
+ // requirements:
+ //
+ // The given DescriptorPool must contain m->GetDescriptor(). It is not
+ // sufficient for it to simply contain a descriptor that has the same name
+ // and content -- it must be the *exact object*. In other words:
+ // assert(pool->FindMessageTypeByName(m->GetDescriptor()->full_name()) ==
+ // m->GetDescriptor());
+ // There are two ways to satisfy this requirement:
+ // 1) Use m->GetDescriptor()->pool() as the pool. This is generally useless
+ // because this is the pool that would be used anyway if you didn't call
+ // SetExtensionRegistry() at all.
+ // 2) Use a DescriptorPool which has m->GetDescriptor()->pool() as an
+ // "underlay". Read the documentation for DescriptorPool for more
+ // information about underlays.
+ //
+ // You must also provide a MessageFactory. This factory will be used to
+ // construct Message objects representing extensions. The factory's
+ // GetPrototype() MUST return non-NULL for any Descriptor which can be found
+ // through the provided pool.
+ //
+ // If the provided factory might return instances of protocol-compiler-
+ // generated (i.e. compiled-in) types, or if the outer message object m is
+ // a generated type, then the given factory MUST have this property: If
+ // GetPrototype() is given a Descriptor which resides in
+ // DescriptorPool::generated_pool(), the factory MUST return the same
+ // prototype which MessageFactory::generated_factory() would return.
That + // is, given a descriptor for a generated type, the factory must return an + // instance of the generated class (NOT DynamicMessage). However, when + // given a descriptor for a type that is NOT in generated_pool, the factory + // is free to return any implementation. + // + // The reason for this requirement is that generated sub-objects may be + // accessed via the standard (non-reflection) extension accessor methods, + // and these methods will down-cast the object to the generated class type. + // If the object is not actually of that type, the results would be undefined. + // On the other hand, if an extension is not compiled in, then there is no + // way the code could end up accessing it via the standard accessors -- the + // only way to access the extension is via reflection. When using reflection, + // DynamicMessage and generated messages are indistinguishable, so it's fine + // if these objects are represented using DynamicMessage. + // + // Using DynamicMessageFactory on which you have called + // SetDelegateToGeneratedFactory(true) should be sufficient to satisfy the + // above requirement. + // + // If either pool or factory is NULL, both must be NULL. + // + // Note that this feature is ignored when parsing "lite" messages as they do + // not have descriptors. + void SetExtensionRegistry(const DescriptorPool* pool, + MessageFactory* factory); + + // Get the DescriptorPool set via SetExtensionRegistry(), or NULL if no pool + // has been provided. + const DescriptorPool* GetExtensionPool(); + + // Get the MessageFactory set via SetExtensionRegistry(), or NULL if no + // factory has been provided. + MessageFactory* GetExtensionFactory(); + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedInputStream); + + const uint8* buffer_; + const uint8* buffer_end_; // pointer to the end of the buffer. + ZeroCopyInputStream* input_; + int total_bytes_read_; // total bytes read from input_, including + // the current buffer + + // If total_bytes_read_ surpasses INT_MAX, we record the extra bytes here + // so that we can BackUp() on destruction. + int overflow_bytes_; + + // LastTagWas() stuff. + uint32 last_tag_; // result of last ReadTag() or ReadTagWithCutoff(). + + // This is set true by ReadTag{Fallback/Slow}() if it is called when exactly + // at EOF, or by ExpectAtEnd() when it returns true. This happens when we + // reach the end of a message and attempt to read another tag. + bool legitimate_message_end_; + + // See EnableAliasing(). + bool aliasing_enabled_; + + // Limits + Limit current_limit_; // if position = -1, no limit is applied + + // For simplicity, if the current buffer crosses a limit (either a normal + // limit created by PushLimit() or the total bytes limit), buffer_size_ + // only tracks the number of bytes before that limit. This field + // contains the number of bytes after it. Note that this implies that if + // buffer_size_ == 0 and buffer_size_after_limit_ > 0, we know we've + // hit a limit. However, if both are zero, it doesn't necessarily mean + // we aren't at a limit -- the buffer may have ended exactly at the limit. + int buffer_size_after_limit_; + + // Maximum number of bytes to read, period. This is unrelated to + // current_limit_. Set using SetTotalBytesLimit(). + int total_bytes_limit_; + + // Current recursion budget, controlled by IncrementRecursionDepth() and + // similar. Starts at recursion_limit_ and goes down: if this reaches + // -1 we are over budget. + int recursion_budget_; + // Recursion depth limit, set by SetRecursionLimit(). 
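+ // (Sketch of the budget mechanics, using the default limit of 100: the
+ // budget starts at 100 and each IncrementRecursionDepth() call decrements
+ // it, so the 101st nested increment is the first to return false.)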
+ int recursion_limit_;
+
+ // See SetExtensionRegistry().
+ const DescriptorPool* extension_pool_;
+ MessageFactory* extension_factory_;
+
+ // Private member functions.
+
+ // Fallback when Skip() goes past the end of the current buffer.
+ bool SkipFallback(int count, int original_buffer_size);
+
+ // Advance the buffer by a given number of bytes.
+ void Advance(int amount);
+
+ // Back up input_ to the current buffer position.
+ void BackUpInputToCurrentPosition();
+
+ // Recomputes the value of buffer_size_after_limit_. Must be called after
+ // current_limit_ or total_bytes_limit_ changes.
+ void RecomputeBufferLimits();
+
+ // Writes an error message saying that we hit total_bytes_limit_.
+ void PrintTotalBytesLimitError();
+
+ // Called when the buffer runs out to request more data. Implies an
+ // Advance(BufferSize()).
+ bool Refresh();
+
+ // When parsing varints, we optimize for the common case of small values, and
+ // then optimize for the case when the varint fits within the current buffer
+ // piece. The Fallback method is used when we can't use the one-byte
+ // optimization. The Slow method is yet another fallback when the buffer is
+ // not large enough. Making the slow path out-of-line speeds up the common
+ // case by 10-15%. The slow path is fairly uncommon: it only triggers when a
+ // message crosses multiple buffers. Note: ReadVarint32Fallback() and
+ // ReadVarint64Fallback() are called frequently and generally not inlined, so
+ // they have been optimized to avoid "out" parameters. The former returns -1
+ // if it fails and the uint32 it read otherwise. The latter has a bool
+ // indicating success or failure as part of its return type.
+ int64 ReadVarint32Fallback(uint32 first_byte_or_zero);
+ int ReadVarintSizeAsIntFallback();
+ std::pair<uint64, bool> ReadVarint64Fallback();
+ bool ReadVarint32Slow(uint32* value);
+ bool ReadVarint64Slow(uint64* value);
+ int ReadVarintSizeAsIntSlow();
+ bool ReadLittleEndian32Fallback(uint32* value);
+ bool ReadLittleEndian64Fallback(uint64* value);
+
+ // Fallback/slow methods for reading tags. These do not update last_tag_,
+ // but will set legitimate_message_end_ if we are at the end of the input
+ // stream.
+ uint32 ReadTagFallback(uint32 first_byte_or_zero);
+ uint32 ReadTagSlow();
+ bool ReadStringFallback(string* buffer, int size);
+
+ // Return the size of the buffer.
+ int BufferSize() const;
+
+ static const int kDefaultTotalBytesLimit = INT_MAX;
+
+ static int default_recursion_limit_; // 100 by default.
+};
+
+// Class which encodes and writes binary data which is composed of varint-
+// encoded integers and fixed-width pieces. Wraps a ZeroCopyOutputStream.
+// Most users will not need to deal with CodedOutputStream.
+//
+// Most methods of CodedOutputStream which return a bool return false if an
+// underlying I/O error occurs. Once such a failure occurs, the
+// CodedOutputStream is broken and is no longer useful. The Write* methods do
+// not return the stream status, but will invalidate the stream if an error
+// occurs. The client can probe HadError() to determine the status.
+//
+// Note that every method of CodedOutputStream which writes some data has
+// a corresponding static "ToArray" version. These versions write directly
+// to the provided buffer, returning a pointer past the last written byte.
+// They require that the buffer has sufficient capacity for the encoded data.
+// This allows an optimization where we check if an output stream has enough
+// space for an entire message before we start writing and, if there is, we
+// call only the ToArray methods to avoid doing bound checks for each
+// individual value.
+// i.e., in the example above:
+//
+// CodedOutputStream* coded_output = new CodedOutputStream(raw_output);
+// int magic_number = 1234;
+// char text[] = "Hello world!";
+//
+// int coded_size = sizeof(magic_number) +
+// CodedOutputStream::VarintSize32(strlen(text)) +
+// strlen(text);
+//
+// uint8* buffer =
+// coded_output->GetDirectBufferForNBytesAndAdvance(coded_size);
+// if (buffer != NULL) {
+// // The output stream has enough space in the buffer: write directly to
+// // the array.
+// buffer = CodedOutputStream::WriteLittleEndian32ToArray(magic_number,
+// buffer);
+// buffer = CodedOutputStream::WriteVarint32ToArray(strlen(text), buffer);
+// buffer = CodedOutputStream::WriteRawToArray(text, strlen(text), buffer);
+// } else {
+// // Make bound-checked writes, which will ask the underlying stream for
+// // more space as needed.
+// coded_output->WriteLittleEndian32(magic_number);
+// coded_output->WriteVarint32(strlen(text));
+// coded_output->WriteRaw(text, strlen(text));
+// }
+//
+// delete coded_output;
+class LIBPROTOBUF_EXPORT CodedOutputStream {
+ public:
+ // Create a CodedOutputStream that writes to the given ZeroCopyOutputStream.
+ explicit CodedOutputStream(ZeroCopyOutputStream* output);
+ CodedOutputStream(ZeroCopyOutputStream* output, bool do_eager_refresh);
+
+ // Destroy the CodedOutputStream and position the underlying
+ // ZeroCopyOutputStream immediately after the last byte written.
+ ~CodedOutputStream();
+
+ // Trims any unused space in the underlying buffer so that its size matches
+ // the number of bytes written by this stream. The underlying buffer will
+ // automatically be trimmed when this stream is destroyed; this call is only
+ // necessary if the underlying buffer is accessed *before* the stream is
+ // destroyed.
+ void Trim();
+
+ // Skips a number of bytes, leaving the bytes unmodified in the underlying
+ // buffer. Returns false if an underlying write error occurs. This is
+ // mainly useful with GetDirectBufferPointer().
+ bool Skip(int count);
+
+ // Sets *data to point directly at the unwritten part of the
+ // CodedOutputStream's underlying buffer, and *size to the size of that
+ // buffer, but does not advance the stream's current position. This will
+ // always either produce a non-empty buffer or return false. If the caller
+ // writes any data to this buffer, it should then call Skip() to skip over
+ // the consumed bytes. This may be useful for implementing external fast
+ // serialization routines for types of data not covered by the
+ // CodedOutputStream interface.
+ bool GetDirectBufferPointer(void** data, int* size);
+
+ // If there are at least "size" bytes available in the current buffer,
+ // returns a pointer directly into the buffer and advances over these bytes.
+ // The caller may then write directly into this buffer (e.g. using the
+ // *ToArray static methods) rather than go through CodedOutputStream. If
+ // there are not enough bytes available, returns NULL. The return pointer is
+ // invalidated as soon as any other non-const method of CodedOutputStream
+ // is called.
+ inline uint8* GetDirectBufferForNBytesAndAdvance(int size);
+
+ // Write raw bytes, copying them from the given buffer.
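+ //
+ // For example, a length-delimited blob is conventionally written as its
+ // varint-encoded size followed by the raw bytes (blob_data and blob_size
+ // are placeholders of this sketch, not part of the API):
+ //
+ //   coded_output->WriteVarint32(blob_size);
+ //   coded_output->WriteRaw(blob_data, blob_size);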
+ void WriteRaw(const void* buffer, int size);
+ // Like WriteRaw() but will try to write aliased data if aliasing is
+ // turned on.
+ void WriteRawMaybeAliased(const void* data, int size);
+ // Like WriteRaw() but writing directly to the target array.
+ // This is _not_ inlined, as the compiler often optimizes memcpy into inline
+ // copy loops. Since this gets called by every field with string or bytes
+ // type, inlining may lead to a significant amount of code bloat, with only a
+ // minor performance gain.
+ static uint8* WriteRawToArray(const void* buffer, int size, uint8* target);
+
+ // Equivalent to WriteRaw(str.data(), str.size()).
+ void WriteString(const string& str);
+ // Like WriteString() but writing directly to the target array.
+ static uint8* WriteStringToArray(const string& str, uint8* target);
+ // Write the varint-encoded size of str followed by str.
+ static uint8* WriteStringWithSizeToArray(const string& str, uint8* target);
+
+
+ // Instructs the CodedOutputStream to allow the underlying
+ // ZeroCopyOutputStream to hold pointers to the original structure instead of
+ // copying, if it supports it (i.e. output->AllowsAliasing() is true). If the
+ // underlying stream does not support aliasing, then enabling it has no
+ // effect. For now, this only affects the behavior of
+ // WriteRawMaybeAliased().
+ //
+ // NOTE: It is caller's responsibility to ensure that the chunk of memory
+ // remains live until all of the data has been consumed from the stream.
+ void EnableAliasing(bool enabled);
+
+ // Write a 32-bit little-endian integer.
+ void WriteLittleEndian32(uint32 value);
+ // Like WriteLittleEndian32() but writing directly to the target array.
+ static uint8* WriteLittleEndian32ToArray(uint32 value, uint8* target);
+ // Write a 64-bit little-endian integer.
+ void WriteLittleEndian64(uint64 value);
+ // Like WriteLittleEndian64() but writing directly to the target array.
+ static uint8* WriteLittleEndian64ToArray(uint64 value, uint8* target);
+
+ // Write an unsigned integer with Varint encoding. Writing a 32-bit value
+ // is equivalent to casting it to uint64 and writing it as a 64-bit value,
+ // but may be more efficient.
+ void WriteVarint32(uint32 value);
+ // Like WriteVarint32() but writing directly to the target array.
+ static uint8* WriteVarint32ToArray(uint32 value, uint8* target);
+ // Write an unsigned integer with Varint encoding.
+ void WriteVarint64(uint64 value);
+ // Like WriteVarint64() but writing directly to the target array.
+ static uint8* WriteVarint64ToArray(uint64 value, uint8* target);
+
+ // Equivalent to WriteVarint32() except when the value is negative,
+ // in which case it must be sign-extended to a full 10 bytes.
+ void WriteVarint32SignExtended(int32 value);
+ // Like WriteVarint32SignExtended() but writing directly to the target array.
+ static uint8* WriteVarint32SignExtendedToArray(int32 value, uint8* target);
+
+ // This is identical to WriteVarint32(), but optimized for writing tags.
+ // In particular, if the input is a compile-time constant, this method
+ // compiles down to a couple instructions.
+ // Always inline because otherwise the aforementioned optimization can't work,
+ // but GCC by default doesn't want to inline this.
+ void WriteTag(uint32 value);
+ // Like WriteTag() but writing directly to the target array.
+ GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
+ static uint8* WriteTagToArray(uint32 value, uint8* target);
+
+ // Returns the number of bytes needed to encode the given value as a varint.
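+ //
+ // (For instance, at 7 payload bits per byte: values 0..127 take 1 byte,
+ // 128..16383 take 2 bytes, and 0xFFFFFFFF takes the full 5 bytes.)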
+ static size_t VarintSize32(uint32 value);
+ // Returns the number of bytes needed to encode the given value as a varint.
+ static size_t VarintSize64(uint64 value);
+
+ // If negative, 10 bytes. Otherwise, same as VarintSize32().
+ static size_t VarintSize32SignExtended(int32 value);
+
+ // Compile-time equivalent of VarintSize32().
+ template <uint32 Value>
+ struct StaticVarintSize32 {
+ static const size_t value =
+ (Value < (1 << 7))
+ ? 1
+ : (Value < (1 << 14))
+ ? 2
+ : (Value < (1 << 21))
+ ? 3
+ : (Value < (1 << 28))
+ ? 4
+ : 5;
+ };
+
+ // Returns the total number of bytes written since this object was created.
+ inline int ByteCount() const;
+
+ // Returns true if there was an underlying I/O error since this object was
+ // created.
+ bool HadError() const { return had_error_; }
+
+ // Deterministic serialization, if requested, guarantees that for a given
+ // binary, equal messages will always be serialized to the same bytes. This
+ // implies:
+ // . repeated serialization of a message will return the same bytes
+ // . different processes of the same binary (which may be executing on
+ // different machines) will serialize equal messages to the same bytes.
+ //
+ // Note the deterministic serialization is NOT canonical across languages; it
+ // is also unstable across different builds with schema changes due to unknown
+ // fields. Users who need canonical serialization, e.g., persistent storage in
+ // a canonical form, fingerprinting, etc., should define their own
+ // canonicalization specification and implement the serializer using
+ // reflection APIs rather than relying on this API.
+ //
+ // If deterministic serialization is requested, the serializer will
+ // sort map entries by keys in lexicographical order or numerical order.
+ // (This is an implementation detail and may be subject to change.)
+ //
+ // There are two ways to determine whether serialization should be
+ // deterministic for this CodedOutputStream. If SetSerializationDeterministic
+ // has not yet been called, then the default comes from the global default,
+ // which is false, until SetDefaultSerializationDeterministic has been called.
+ // Otherwise, SetSerializationDeterministic has been called, and the last
+ // value passed to it is all that matters.
+ void SetSerializationDeterministic(bool value) {
+ is_serialization_deterministic_ = value;
+ }
+ // See above. Also, note that users of this CodedOutputStream may need to
+ // call IsSerializationDeterministic() to serialize in the intended way. This
+ // CodedOutputStream cannot enforce a desire for deterministic serialization
+ // by itself.
+ bool IsSerializationDeterministic() const {
+ return is_serialization_deterministic_;
+ }
+
+ static bool IsDefaultSerializationDeterministic() {
+ return default_serialization_deterministic_.load(std::memory_order_relaxed) != 0;
+ }
+
+ private:
+ GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedOutputStream);
+
+ ZeroCopyOutputStream* output_;
+ uint8* buffer_;
+ int buffer_size_;
+ int total_bytes_; // Sum of sizes of all buffers seen so far.
+ bool had_error_; // Whether an error occurred during output.
+ bool aliasing_enabled_; // See EnableAliasing().
+ bool is_serialization_deterministic_;
+ static std::atomic<bool> default_serialization_deterministic_;
+
+ // Advance the buffer by a given number of bytes.
+ void Advance(int amount);
+
+ // Called when the buffer runs out to request more data. Implies an
+ // Advance(buffer_size_).
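+ // (On success it obtains a fresh buffer from the underlying
+ // ZeroCopyOutputStream via Next() and resets buffer_ / buffer_size_.)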
+ bool Refresh();
+
+ // Like WriteRaw() but may avoid copying if the underlying
+ // ZeroCopyOutputStream supports it.
+ void WriteAliasedRaw(const void* buffer, int size);
+
+ // If this write might cross the end of the buffer, we compose the bytes first
+ // then use WriteRaw().
+ void WriteVarint32SlowPath(uint32 value);
+ void WriteVarint64SlowPath(uint64 value);
+
+ // See above. Other projects may use "friend" to allow them to call this.
+ // After SetDefaultSerializationDeterministic() completes, all protocol
+ // buffer serializations will be deterministic by default. Thread safe.
+ // However, the meaning of "after" is subtle here: to be safe, each thread
+ // that wants deterministic serialization by default needs to call
+ // SetDefaultSerializationDeterministic() or ensure on its own that another
+ // thread has done so.
+ friend void ::google::protobuf::internal::MapTestForceDeterministic();
+ static void SetDefaultSerializationDeterministic() {
+ default_serialization_deterministic_.store(true, std::memory_order_relaxed);
+ }
+};
+
+// inline methods ====================================================
+// The vast majority of varints are only one byte. These inline
+// methods optimize for that case.
+
+inline bool CodedInputStream::ReadVarint32(uint32* value) {
+ uint32 v = 0;
+ if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
+ v = *buffer_;
+ if (v < 0x80) {
+ *value = v;
+ Advance(1);
+ return true;
+ }
+ }
+ int64 result = ReadVarint32Fallback(v);
+ *value = static_cast<uint32>(result);
+ return result >= 0;
+}
+
+inline bool CodedInputStream::ReadVarint64(uint64* value) {
+ if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_) && *buffer_ < 0x80) {
+ *value = *buffer_;
+ Advance(1);
+ return true;
+ }
+ std::pair<uint64, bool> p = ReadVarint64Fallback();
+ *value = p.first;
+ return p.second;
+}
+
+inline bool CodedInputStream::ReadVarintSizeAsInt(int* value) {
+ if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
+ int v = *buffer_;
+ if (v < 0x80) {
+ *value = v;
+ Advance(1);
+ return true;
+ }
+ }
+ *value = ReadVarintSizeAsIntFallback();
+ return *value >= 0;
+}
+
+// static
+inline const uint8* CodedInputStream::ReadLittleEndian32FromArray(
+ const uint8* buffer,
+ uint32* value) {
+#if defined(PROTOBUF_LITTLE_ENDIAN)
+ memcpy(value, buffer, sizeof(*value));
+ return buffer + sizeof(*value);
+#else
+ *value = (static_cast<uint32>(buffer[0]) ) |
+ (static_cast<uint32>(buffer[1]) << 8) |
+ (static_cast<uint32>(buffer[2]) << 16) |
+ (static_cast<uint32>(buffer[3]) << 24);
+ return buffer + sizeof(*value);
+#endif
+}
+// static
+inline const uint8* CodedInputStream::ReadLittleEndian64FromArray(
+ const uint8* buffer,
+ uint64* value) {
+#if defined(PROTOBUF_LITTLE_ENDIAN)
+ memcpy(value, buffer, sizeof(*value));
+ return buffer + sizeof(*value);
+#else
+ uint32 part0 = (static_cast<uint32>(buffer[0]) ) |
+ (static_cast<uint32>(buffer[1]) << 8) |
+ (static_cast<uint32>(buffer[2]) << 16) |
+ (static_cast<uint32>(buffer[3]) << 24);
+ uint32 part1 = (static_cast<uint32>(buffer[4]) ) |
+ (static_cast<uint32>(buffer[5]) << 8) |
+ (static_cast<uint32>(buffer[6]) << 16) |
+ (static_cast<uint32>(buffer[7]) << 24);
+ *value = static_cast<uint64>(part0) |
+ (static_cast<uint64>(part1) << 32);
+ return buffer + sizeof(*value);
+#endif
+}
+
+inline bool CodedInputStream::ReadLittleEndian32(uint32* value) {
+#if defined(PROTOBUF_LITTLE_ENDIAN)
+ if (GOOGLE_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
+ buffer_ = ReadLittleEndian32FromArray(buffer_, value);
+ return true;
+ } else {
+ return ReadLittleEndian32Fallback(value);
+ }
+#else
+ return ReadLittleEndian32Fallback(value);
+#endif
+}
+
+inline bool CodedInputStream::ReadLittleEndian64(uint64* value) {
+#if defined(PROTOBUF_LITTLE_ENDIAN)
+ if (GOOGLE_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
+ buffer_ = ReadLittleEndian64FromArray(buffer_, value);
+ return true;
+ } else {
+ return ReadLittleEndian64Fallback(value);
+ }
+#else
+ return ReadLittleEndian64Fallback(value);
+#endif
+}
+
+inline uint32 CodedInputStream::ReadTagNoLastTag() {
+ uint32 v = 0;
+ if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
+ v = *buffer_;
+ if (v < 0x80) {
+ Advance(1);
+ return v;
+ }
+ }
+ v = ReadTagFallback(v);
+ return v;
+}
+
+inline std::pair<uint32, bool> CodedInputStream::ReadTagWithCutoffNoLastTag(
+ uint32 cutoff) {
+ // In performance-sensitive code we can expect cutoff to be a compile-time
+ // constant, and things like "cutoff >= kMax1ByteVarint" to be evaluated at
+ // compile time.
+ uint32 first_byte_or_zero = 0;
+ if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
+ // Hot case: buffer_ non-empty, buffer_[0] in [1, 128).
+ // TODO(gpike): Is it worth rearranging this? E.g., if the number of fields
+ // is large enough then is it better to check for the two-byte case first?
+ first_byte_or_zero = buffer_[0];
+ if (static_cast<int8>(buffer_[0]) > 0) {
+ const uint32 kMax1ByteVarint = 0x7f;
+ uint32 tag = buffer_[0];
+ Advance(1);
+ return std::make_pair(tag, cutoff >= kMax1ByteVarint || tag <= cutoff);
+ }
+ // Other hot case: cutoff >= 0x80, buffer_ has at least two bytes available,
+ // and tag is two bytes. The latter is tested by bitwise-and-not of the
+ // first byte and the second byte.
+ if (cutoff >= 0x80 && GOOGLE_PREDICT_TRUE(buffer_ + 1 < buffer_end_) &&
+ GOOGLE_PREDICT_TRUE((buffer_[0] & ~buffer_[1]) >= 0x80)) {
+ const uint32 kMax2ByteVarint = (0x7f << 7) + 0x7f;
+ uint32 tag = (1u << 7) * buffer_[1] + (buffer_[0] - 0x80);
+ Advance(2);
+ // It might make sense to test for tag == 0 now, but it is so rare that
+ // we don't bother. A varint-encoded 0 should be one byte unless
+ // the encoder lost its mind. The second part of the return value of
+ // this function is allowed to be either true or false if the tag is 0,
+ // so we don't have to check for tag == 0. We may need to check whether
+ // it exceeds cutoff.
+ bool at_or_below_cutoff = cutoff >= kMax2ByteVarint || tag <= cutoff;
+ return std::make_pair(tag, at_or_below_cutoff);
+ }
+ }
+ // Slow path
+ const uint32 tag = ReadTagFallback(first_byte_or_zero);
+ return std::make_pair(tag, static_cast<uint32>(tag - 1) < cutoff);
+}
+
+inline bool CodedInputStream::LastTagWas(uint32 expected) {
+ return last_tag_ == expected;
+}
+
+inline bool CodedInputStream::ConsumedEntireMessage() {
+ return legitimate_message_end_;
+}
+
+inline bool CodedInputStream::ExpectTag(uint32 expected) {
+ if (expected < (1 << 7)) {
+ if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_) && buffer_[0] == expected) {
+ Advance(1);
+ return true;
+ } else {
+ return false;
+ }
+ } else if (expected < (1 << 14)) {
+ if (GOOGLE_PREDICT_TRUE(BufferSize() >= 2) &&
+ buffer_[0] == static_cast<uint8>(expected | 0x80) &&
+ buffer_[1] == static_cast<uint8>(expected >> 7)) {
+ Advance(2);
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ // Don't bother optimizing for larger values.
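+ // (A tag needing three or more varint bytes implies a field number of
+ // 2048 or greater, which is rare in practice, so we simply report a
+ // mismatch here.)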
+ return false;
+ }
+}
+
+inline const uint8* CodedInputStream::ExpectTagFromArray(
+ const uint8* buffer, uint32 expected) {
+ if (expected < (1 << 7)) {
+ if (buffer[0] == expected) {
+ return buffer + 1;
+ }
+ } else if (expected < (1 << 14)) {
+ if (buffer[0] == static_cast<uint8>(expected | 0x80) &&
+ buffer[1] == static_cast<uint8>(expected >> 7)) {
+ return buffer + 2;
+ }
+ }
+ return NULL;
+}
+
+inline void CodedInputStream::GetDirectBufferPointerInline(const void** data,
+ int* size) {
+ *data = buffer_;
+ *size = static_cast<int>(buffer_end_ - buffer_);
+}
+
+inline bool CodedInputStream::ExpectAtEnd() {
+ // If we are at a limit we know no more bytes can be read. Otherwise, it's
+ // hard to say without calling Refresh(), and we'd rather not do that.
+
+ if (buffer_ == buffer_end_ &&
+ ((buffer_size_after_limit_ != 0) ||
+ (total_bytes_read_ == current_limit_))) {
+ last_tag_ = 0; // Pretend we called ReadTag()...
+ legitimate_message_end_ = true; // ... and it hit EOF.
+ return true;
+ } else {
+ return false;
+ }
+}
+
+inline int CodedInputStream::CurrentPosition() const {
+ return total_bytes_read_ - (BufferSize() + buffer_size_after_limit_);
+}
+
+inline uint8* CodedOutputStream::GetDirectBufferForNBytesAndAdvance(int size) {
+ if (buffer_size_ < size) {
+ return NULL;
+ } else {
+ uint8* result = buffer_;
+ Advance(size);
+ return result;
+ }
+}
+
+inline uint8* CodedOutputStream::WriteVarint32ToArray(uint32 value,
+ uint8* target) {
+ while (value >= 0x80) {
+ *target = static_cast<uint8>(value | 0x80);
+ value >>= 7;
+ ++target;
+ }
+ *target = static_cast<uint8>(value);
+ return target + 1;
+}
+
+inline uint8* CodedOutputStream::WriteVarint64ToArray(uint64 value,
+ uint8* target) {
+ while (value >= 0x80) {
+ *target = static_cast<uint8>(value | 0x80);
+ value >>= 7;
+ ++target;
+ }
+ *target = static_cast<uint8>(value);
+ return target + 1;
+}
+
+inline void CodedOutputStream::WriteVarint32SignExtended(int32 value) {
+ WriteVarint64(static_cast<uint64>(value));
+}
+
+inline uint8* CodedOutputStream::WriteVarint32SignExtendedToArray(
+ int32 value, uint8* target) {
+ return WriteVarint64ToArray(static_cast<uint64>(value), target);
+}
+
+inline uint8* CodedOutputStream::WriteLittleEndian32ToArray(uint32 value,
+ uint8* target) {
+#if defined(PROTOBUF_LITTLE_ENDIAN)
+ memcpy(target, &value, sizeof(value));
+#else
+ target[0] = static_cast<uint8>(value);
+ target[1] = static_cast<uint8>(value >> 8);
+ target[2] = static_cast<uint8>(value >> 16);
+ target[3] = static_cast<uint8>(value >> 24);
+#endif
+ return target + sizeof(value);
+}
+
+inline uint8* CodedOutputStream::WriteLittleEndian64ToArray(uint64 value,
+ uint8* target) {
+#if defined(PROTOBUF_LITTLE_ENDIAN)
+ memcpy(target, &value, sizeof(value));
+#else
+ uint32 part0 = static_cast<uint32>(value);
+ uint32 part1 = static_cast<uint32>(value >> 32);
+
+ target[0] = static_cast<uint8>(part0);
+ target[1] = static_cast<uint8>(part0 >> 8);
+ target[2] = static_cast<uint8>(part0 >> 16);
+ target[3] = static_cast<uint8>(part0 >> 24);
+ target[4] = static_cast<uint8>(part1);
+ target[5] = static_cast<uint8>(part1 >> 8);
+ target[6] = static_cast<uint8>(part1 >> 16);
+ target[7] = static_cast<uint8>(part1 >> 24);
+#endif
+ return target + sizeof(value);
+}
+
+inline void CodedOutputStream::WriteVarint32(uint32 value) {
+ if (buffer_size_ >= 5) {
+ // Fast path: We have enough bytes left in the buffer to guarantee that
+ // this write won't cross the end, so we can skip the checks.
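+ // (A 32-bit varint needs at most 5 bytes, 7 payload bits per byte, so
+ // buffer_size_ >= 5 guarantees WriteVarint32ToArray() stays in bounds.)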
+ uint8* target = buffer_;
+ uint8* end = WriteVarint32ToArray(value, target);
+ int size = static_cast<int>(end - target);
+ Advance(size);
+ } else {
+ WriteVarint32SlowPath(value);
+ }
+}
+
+inline void CodedOutputStream::WriteVarint64(uint64 value) {
+ if (buffer_size_ >= 10) {
+ // Fast path: We have enough bytes left in the buffer to guarantee that
+ // this write won't cross the end, so we can skip the checks.
+ uint8* target = buffer_;
+ uint8* end = WriteVarint64ToArray(value, target);
+ int size = static_cast<int>(end - target);
+ Advance(size);
+ } else {
+ WriteVarint64SlowPath(value);
+ }
+}
+
+inline void CodedOutputStream::WriteTag(uint32 value) {
+ WriteVarint32(value);
+}
+
+inline uint8* CodedOutputStream::WriteTagToArray(
+ uint32 value, uint8* target) {
+ return WriteVarint32ToArray(value, target);
+}
+
+inline size_t CodedOutputStream::VarintSize32(uint32 value) {
+ // This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
+ // Use an explicit multiplication to implement the divide of
+ // a number in the 1..31 range.
+ // Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
+ // undefined.
+ uint32 log2value = Bits::Log2FloorNonZero(value | 0x1);
+ return static_cast<size_t>((log2value * 9 + 73) / 64);
+}
+
+inline size_t CodedOutputStream::VarintSize64(uint64 value) {
+ // This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
+ // Use an explicit multiplication to implement the divide of
+ // a number in the 1..63 range.
+ // Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
+ // undefined.
+ uint32 log2value = Bits::Log2FloorNonZero64(value | 0x1);
+ return static_cast<size_t>((log2value * 9 + 73) / 64);
+}
+
+inline size_t CodedOutputStream::VarintSize32SignExtended(int32 value) {
+ if (value < 0) {
+ return 10; // TODO(kenton): Make this a symbolic constant.
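+ // (A negative int32 is sign-extended to 64 bits on the wire, so it
+ // always occupies the maximum of 10 varint bytes.)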
+ } else {
+ return VarintSize32(static_cast<uint32>(value));
+ }
+}
+
+inline void CodedOutputStream::WriteString(const string& str) {
+ WriteRaw(str.data(), static_cast<int>(str.size()));
+}
+
+inline void CodedOutputStream::WriteRawMaybeAliased(
+ const void* data, int size) {
+ if (aliasing_enabled_) {
+ WriteAliasedRaw(data, size);
+ } else {
+ WriteRaw(data, size);
+ }
+}
+
+inline uint8* CodedOutputStream::WriteStringToArray(
+ const string& str, uint8* target) {
+ return WriteRawToArray(str.data(), static_cast<int>(str.size()), target);
+}
+
+inline int CodedOutputStream::ByteCount() const {
+ return total_bytes_ - buffer_size_;
+}
+
+inline void CodedInputStream::Advance(int amount) {
+ buffer_ += amount;
+}
+
+inline void CodedOutputStream::Advance(int amount) {
+ buffer_ += amount;
+ buffer_size_ -= amount;
+}
+
+inline void CodedInputStream::SetRecursionLimit(int limit) {
+ recursion_budget_ += limit - recursion_limit_;
+ recursion_limit_ = limit;
+}
+
+inline bool CodedInputStream::IncrementRecursionDepth() {
+ --recursion_budget_;
+ return recursion_budget_ >= 0;
+}
+
+inline void CodedInputStream::DecrementRecursionDepth() {
+ if (recursion_budget_ < recursion_limit_) ++recursion_budget_;
+}
+
+inline void CodedInputStream::UnsafeDecrementRecursionDepth() {
+ assert(recursion_budget_ < recursion_limit_);
+ ++recursion_budget_;
+}
+
+inline void CodedInputStream::SetExtensionRegistry(const DescriptorPool* pool,
+ MessageFactory* factory) {
+ extension_pool_ = pool;
+ extension_factory_ = factory;
+}
+
+inline const DescriptorPool* CodedInputStream::GetExtensionPool() {
+ return extension_pool_;
+}
+
+inline MessageFactory* CodedInputStream::GetExtensionFactory() {
+ return extension_factory_;
+}
+
+inline int CodedInputStream::BufferSize() const {
+ return static_cast<int>(buffer_end_ - buffer_);
+}
+
+inline CodedInputStream::CodedInputStream(ZeroCopyInputStream* input)
+ : buffer_(NULL),
+ buffer_end_(NULL),
+ input_(input),
+ total_bytes_read_(0),
+ overflow_bytes_(0),
+ last_tag_(0),
+ legitimate_message_end_(false),
+ aliasing_enabled_(false),
+ current_limit_(kint32max),
+ buffer_size_after_limit_(0),
+ total_bytes_limit_(kDefaultTotalBytesLimit),
+ recursion_budget_(default_recursion_limit_),
+ recursion_limit_(default_recursion_limit_),
+ extension_pool_(NULL),
+ extension_factory_(NULL) {
+ // Eagerly Refresh() so buffer space is immediately available.
+ Refresh();
+}
+
+inline CodedInputStream::CodedInputStream(const uint8* buffer, int size)
+ : buffer_(buffer),
+ buffer_end_(buffer + size),
+ input_(NULL),
+ total_bytes_read_(size),
+ overflow_bytes_(0),
+ last_tag_(0),
+ legitimate_message_end_(false),
+ aliasing_enabled_(false),
+ current_limit_(size),
+ buffer_size_after_limit_(0),
+ total_bytes_limit_(kDefaultTotalBytesLimit),
+ recursion_budget_(default_recursion_limit_),
+ recursion_limit_(default_recursion_limit_),
+ extension_pool_(NULL),
+ extension_factory_(NULL) {
+ // Note that setting current_limit_ == size is important to prevent some
+ // code paths from trying to access input_ and segfaulting.
+}
+
+inline bool CodedInputStream::IsFlat() const {
+ return input_ == NULL;
+}
+
+inline bool CodedInputStream::Skip(int count) {
+ if (count < 0) return false; // security: count is often user-supplied
+
+ const int original_buffer_size = BufferSize();
+
+ if (count <= original_buffer_size) {
+ // Just skipping within the current buffer. Easy.
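+ // (Here 0 <= count <= BufferSize(), so a pointer bump suffices and the
+ // underlying input_ is never touched.)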
+ Advance(count);
+ return true;
+ }
+
+ return SkipFallback(count, original_buffer_size);
+}
+
+} // namespace io
+} // namespace protobuf
+
+
+#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
+ #pragma runtime_checks("c", restore)
+#endif // _MSC_VER && !defined(__INTEL_COMPILER)
+
+} // namespace google
+#endif // GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream_inl.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream_inl.h
new file mode 100644
index 0000000000000000000000000000000000000000..d95b06e04e5310d25eb4294959bacd2479933e2f
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream_inl.h
@@ -0,0 +1,90 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: jasonh@google.com (Jason Hsueh)
+//
+// Implements methods of coded_stream.h that need to be inlined for performance
+// reasons, but should not be defined in a public header.
+
+#ifndef GOOGLE_PROTOBUF_IO_CODED_STREAM_INL_H__
+#define GOOGLE_PROTOBUF_IO_CODED_STREAM_INL_H__
+
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
+#include <string>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/stl_util.h>
+
+namespace google {
+namespace protobuf {
+namespace io {
+
+inline bool CodedInputStream::InternalReadStringInline(string* buffer,
+ int size) {
+ if (size < 0) return false; // security: size is often user-supplied
+
+ if (BufferSize() >= size) {
+ STLStringResizeUninitialized(buffer, size);
+ std::pair<char*, bool> z = as_string_data(buffer);
+ if (z.second) {
+ // Oddly enough, memcpy() requires its first two args to be non-NULL even
+ // if we copy 0 bytes. So, we have ensured that z.first is non-NULL here.
+ GOOGLE_DCHECK(z.first != NULL);
+ memcpy(z.first, buffer_, size);
+ Advance(size);
+ }
+ return true;
+ }
+
+ return ReadStringFallback(buffer, size);
+}
+
+inline bool CodedInputStream::InternalReadRawInline(void* buffer, int size) {
+ int current_buffer_size;
+ while ((current_buffer_size = BufferSize()) < size) {
+ // Reading past end of buffer. Copy what we have, then refresh.
+ memcpy(buffer, buffer_, current_buffer_size);
+ buffer = reinterpret_cast<uint8*>(buffer) + current_buffer_size;
+ size -= current_buffer_size;
+ Advance(current_buffer_size);
+ if (!Refresh()) return false;
+ }
+
+ memcpy(buffer, buffer_, size);
+ Advance(size);
+
+ return true;
+}
+
+} // namespace io
+} // namespace protobuf
+} // namespace google
+#endif // GOOGLE_PROTOBUF_IO_CODED_STREAM_INL_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream.h
new file mode 100644
index 0000000000000000000000000000000000000000..62ace7ae33f302db34a006ca70f16d633615cef9
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream.h
@@ -0,0 +1,248 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+//
+// This file contains the ZeroCopyInputStream and ZeroCopyOutputStream
+// interfaces, which represent abstract I/O streams to and from which
+// protocol buffers can be read and written. For a few simple
+// implementations of these interfaces, see zero_copy_stream_impl.h.
+//
+// These interfaces are different from classic I/O streams in that they
+// try to minimize the amount of data copying that needs to be done.
+// To accomplish this, responsibility for allocating buffers is moved to
+// the stream object, rather than being the responsibility of the caller.
+// So, the stream can return a buffer which actually points directly into
+// the final data structure where the bytes are to be stored, and the caller
+// can interact directly with that buffer, eliminating an intermediate copy
+// operation.
+//
+// As an example, consider the common case in which you are reading bytes
+// from an array that is already in memory (or perhaps an mmap()ed file).
+// With classic I/O streams, you would do something like:
+// char buffer[BUFFER_SIZE];
+// input->Read(buffer, BUFFER_SIZE);
+// DoSomething(buffer, BUFFER_SIZE);
+// Then, the stream basically just calls memcpy() to copy the data from
+// the array into your buffer. With a ZeroCopyInputStream, you would do
+// this instead:
+// const void* buffer;
+// int size;
+// input->Next(&buffer, &size);
+// DoSomething(buffer, size);
+// Here, no copy is performed. The input stream returns a pointer directly
+// into the backing array, and the caller ends up reading directly from it.
+//
+// If you want to be able to read the old-fashioned way, you can create
+// a CodedInputStream or CodedOutputStream wrapping these objects and use
+// their ReadRaw()/WriteRaw() methods. These will, of course, add a copy
+// step, but Coded*Stream will handle buffering so at least it will be
+// reasonably efficient.
+//
+// ZeroCopyInputStream example:
+// // Read in a file and print its contents to stdout.
+// int fd = open("myfile", O_RDONLY);
+// ZeroCopyInputStream* input = new FileInputStream(fd);
+//
+// const void* buffer;
+// int size;
+// while (input->Next(&buffer, &size)) {
+// cout.write(buffer, size);
+// }
+//
+// delete input;
+// close(fd);
+//
+// ZeroCopyOutputStream example:
+// // Copy the contents of "infile" to "outfile", using plain read() for
+// // "infile" but a ZeroCopyOutputStream for "outfile".
+// int infd = open("infile", O_RDONLY);
+// int outfd = open("outfile", O_WRONLY);
+// ZeroCopyOutputStream* output = new FileOutputStream(outfd);
+//
+// void* buffer;
+// int size;
+// while (output->Next(&buffer, &size)) {
+// int bytes = read(infd, buffer, size);
+// if (bytes < size) {
+// // Reached EOF.
+// output->BackUp(size - bytes);
+// break;
+// }
+// }
+//
+// delete output;
+// close(infd);
+// close(outfd);
+
+#ifndef GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__
+#define GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__
+
+#include <string>
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+
+namespace protobuf {
+namespace io {
+
+// Defined in this file.
+class ZeroCopyInputStream;
+class ZeroCopyOutputStream;
+
+// Abstract interface similar to an input stream but designed to minimize
+// copying.
+class LIBPROTOBUF_EXPORT ZeroCopyInputStream {
+ public:
+ ZeroCopyInputStream() {}
+ virtual ~ZeroCopyInputStream() {}
+
+ // Obtains a chunk of data from the stream.
+ //
+ // Preconditions:
+ // * "size" and "data" are not NULL.
+ //
+ // Postconditions:
+ // * If the returned value is false, there is no more data to return or
+ // an error occurred. All errors are permanent.
+ // * Otherwise, "size" points to the actual number of bytes read and "data"
+ // points to a pointer to a buffer containing these bytes.
+ // * Ownership of this buffer remains with the stream, and the buffer
+ // remains valid only until some other method of the stream is called
+ // or the stream is destroyed.
+ // * It is legal for the returned buffer to have zero size, as long + // as repeatedly calling Next() eventually yields a buffer with non-zero + // size. + virtual bool Next(const void** data, int* size) = 0; + + // Backs up a number of bytes, so that the next call to Next() returns + // data again that was already returned by the last call to Next(). This + // is useful when writing procedures that are only supposed to read up + // to a certain point in the input, then return. If Next() returns a + // buffer that goes beyond what you wanted to read, you can use BackUp() + // to return to the point where you intended to finish. + // + // Preconditions: + // * The last method called must have been Next(). + // * count must be less than or equal to the size of the last buffer + // returned by Next(). + // + // Postconditions: + // * The last "count" bytes of the last buffer returned by Next() will be + // pushed back into the stream. Subsequent calls to Next() will return + // the same data again before producing new data. + virtual void BackUp(int count) = 0; + + // Skips a number of bytes. Returns false if the end of the stream is + // reached or some input error occurred. In the end-of-stream case, the + // stream is advanced to the end of the stream (so ByteCount() will return + // the total size of the stream). + virtual bool Skip(int count) = 0; + + // Returns the total number of bytes read since this object was created. + virtual int64 ByteCount() const = 0; + + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ZeroCopyInputStream); +}; + +// Abstract interface similar to an output stream but designed to minimize +// copying. +class LIBPROTOBUF_EXPORT ZeroCopyOutputStream { + public: + ZeroCopyOutputStream() {} + virtual ~ZeroCopyOutputStream() {} + + // Obtains a buffer into which data can be written. Any data written + // into this buffer will eventually (maybe instantly, maybe later on) + // be written to the output. + // + // Preconditions: + // * "size" and "data" are not NULL. + // + // Postconditions: + // * If the returned value is false, an error occurred. All errors are + // permanent. + // * Otherwise, "size" points to the actual number of bytes in the buffer + // and "data" points to the buffer. + // * Ownership of this buffer remains with the stream, and the buffer + // remains valid only until some other method of the stream is called + // or the stream is destroyed. + // * Any data which the caller stores in this buffer will eventually be + // written to the output (unless BackUp() is called). + // * It is legal for the returned buffer to have zero size, as long + // as repeatedly calling Next() eventually yields a buffer with non-zero + // size. + virtual bool Next(void** data, int* size) = 0; + + // Backs up a number of bytes, so that the end of the last buffer returned + // by Next() is not actually written. This is needed when you finish + // writing all the data you want to write, but the last buffer was bigger + // than you needed. You don't want to write a bunch of garbage after the + // end of your data, so you use BackUp() to back up. + // + // Preconditions: + // * The last method called must have been Next(). + // * count must be less than or equal to the size of the last buffer + // returned by Next(). + // * The caller must not have written anything to the last "count" bytes + // of that buffer. + // + // Postconditions: + // * The last "count" bytes of the last buffer returned by Next() will be + // ignored. 
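+ //
+ // For example (sizes illustrative): if Next() returned an 8192-byte
+ // buffer but only 100 bytes were actually written into it, calling
+ // BackUp(8192 - 100) ensures that just those 100 bytes reach the output.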
+ virtual void BackUp(int count) = 0; + + // Returns the total number of bytes written since this object was created. + virtual int64 ByteCount() const = 0; + + // Write a given chunk of data to the output. Some output streams may + // implement this in a way that avoids copying. Check AllowsAliasing() before + // calling WriteAliasedRaw(). It will GOOGLE_CHECK fail if WriteAliasedRaw() is + // called on a stream that does not allow aliasing. + // + // NOTE: It is caller's responsibility to ensure that the chunk of memory + // remains live until all of the data has been consumed from the stream. + virtual bool WriteAliasedRaw(const void* data, int size); + virtual bool AllowsAliasing() const { return false; } + + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ZeroCopyOutputStream); +}; + +} // namespace io +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream_impl_lite.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream_impl_lite.h new file mode 100644 index 0000000000000000000000000000000000000000..29f63bf04a467720aa11bdb539cdc6702b2ea5bd --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream_impl_lite.h @@ -0,0 +1,383 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// This file contains common implementations of the interfaces defined in +// zero_copy_stream.h which are included in the "lite" protobuf library. 
+// These implementations cover I/O on raw arrays and strings, as well as
+// adaptors which make it easy to implement streams based on traditional
+// streams. Of course, many users will probably want to write their own
+// implementations of these interfaces specific to the particular I/O
+// abstractions they prefer to use, but these should cover the most common
+// cases.
+
+#ifndef GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__
+#define GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__
+
+#include <memory>
+#include <string>
+#include <iosfwd>
+#include <google/protobuf/stubs/callback.h>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/io/zero_copy_stream.h>
+#include <google/protobuf/stubs/stl_util.h>
+
+
+namespace google {
+namespace protobuf {
+namespace io {
+
+// ===================================================================
+
+// A ZeroCopyInputStream backed by an in-memory array of bytes.
+class LIBPROTOBUF_EXPORT ArrayInputStream : public ZeroCopyInputStream {
+ public:
+  // Create an InputStream that returns the bytes pointed to by "data".
+  // "data" remains the property of the caller but must remain valid until
+  // the stream is destroyed. If a block_size is given, calls to Next()
+  // will return data blocks no larger than the given size. Otherwise, the
+  // first call to Next() returns the entire array. block_size is mainly
+  // useful for testing; in production you would probably never want to set
+  // it.
+  ArrayInputStream(const void* data, int size, int block_size = -1);
+
+  // implements ZeroCopyInputStream ----------------------------------
+  bool Next(const void** data, int* size);
+  void BackUp(int count);
+  bool Skip(int count);
+  int64 ByteCount() const;
+
+
+ private:
+  const uint8* const data_;  // The byte array.
+  const int size_;           // Total size of the array.
+  const int block_size_;     // How many bytes to return at a time.
+
+  int position_;
+  int last_returned_size_;   // How many bytes we returned last time Next()
+                             // was called (used for error checking only).
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayInputStream);
+};
+
+// ===================================================================
+
+// A ZeroCopyOutputStream backed by an in-memory array of bytes.
+class LIBPROTOBUF_EXPORT ArrayOutputStream : public ZeroCopyOutputStream {
+ public:
+  // Create an OutputStream that writes to the bytes pointed to by "data".
+  // "data" remains the property of the caller but must remain valid until
+  // the stream is destroyed. If a block_size is given, calls to Next()
+  // will return data blocks no larger than the given size. Otherwise, the
+  // first call to Next() returns the entire array. block_size is mainly
+  // useful for testing; in production you would probably never want to set
+  // it.
+  ArrayOutputStream(void* data, int size, int block_size = -1);
+
+  // implements ZeroCopyOutputStream ---------------------------------
+  bool Next(void** data, int* size);
+  void BackUp(int count);
+  int64 ByteCount() const;
+
+ private:
+  uint8* const data_;        // The byte array.
+  const int size_;           // Total size of the array.
+  const int block_size_;     // How many bytes to return at a time.
+
+  int position_;
+  int last_returned_size_;   // How many bytes we returned last time Next()
+                             // was called (used for error checking only).
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayOutputStream);
+};
+
+// ===================================================================
+
+// A ZeroCopyOutputStream which appends bytes to a string.
+class LIBPROTOBUF_EXPORT StringOutputStream : public ZeroCopyOutputStream {
+ public:
+  // Create a StringOutputStream which appends bytes to the given string.
+ // The string remains property of the caller, but it is mutated in arbitrary + // ways and MUST NOT be accessed in any way until you're done with the + // stream. Either be sure there's no further usage, or (safest) destroy the + // stream before using the contents. + // + // Hint: If you call target->reserve(n) before creating the stream, + // the first call to Next() will return at least n bytes of buffer + // space. + explicit StringOutputStream(string* target); + + // implements ZeroCopyOutputStream --------------------------------- + bool Next(void** data, int* size); + void BackUp(int count); + int64 ByteCount() const; + + protected: + void SetString(string* target); + + private: + static const int kMinimumSize = 16; + + string* target_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StringOutputStream); +}; + +// Note: There is no StringInputStream. Instead, just create an +// ArrayInputStream as follows: +// ArrayInputStream input(str.data(), str.size()); + +// =================================================================== + +// A generic traditional input stream interface. +// +// Lots of traditional input streams (e.g. file descriptors, C stdio +// streams, and C++ iostreams) expose an interface where every read +// involves copying bytes into a buffer. If you want to take such an +// interface and make a ZeroCopyInputStream based on it, simply implement +// CopyingInputStream and then use CopyingInputStreamAdaptor. +// +// CopyingInputStream implementations should avoid buffering if possible. +// CopyingInputStreamAdaptor does its own buffering and will read data +// in large blocks. +class LIBPROTOBUF_EXPORT CopyingInputStream { + public: + virtual ~CopyingInputStream() {} + + // Reads up to "size" bytes into the given buffer. Returns the number of + // bytes read. Read() waits until at least one byte is available, or + // returns zero if no bytes will ever become available (EOF), or -1 if a + // permanent read error occurred. + virtual int Read(void* buffer, int size) = 0; + + // Skips the next "count" bytes of input. Returns the number of bytes + // actually skipped. This will always be exactly equal to "count" unless + // EOF was reached or a permanent read error occurred. + // + // The default implementation just repeatedly calls Read() into a scratch + // buffer. + virtual int Skip(int count); +}; + +// A ZeroCopyInputStream which reads from a CopyingInputStream. This is +// useful for implementing ZeroCopyInputStreams that read from traditional +// streams. Note that this class is not really zero-copy. +// +// If you want to read from file descriptors or C++ istreams, this is +// already implemented for you: use FileInputStream or IstreamInputStream +// respectively. +class LIBPROTOBUF_EXPORT CopyingInputStreamAdaptor : public ZeroCopyInputStream { + public: + // Creates a stream that reads from the given CopyingInputStream. + // If a block_size is given, it specifies the number of bytes that + // should be read and returned with each call to Next(). Otherwise, + // a reasonable default is used. The caller retains ownership of + // copying_stream unless SetOwnsCopyingStream(true) is called. + explicit CopyingInputStreamAdaptor(CopyingInputStream* copying_stream, + int block_size = -1); + ~CopyingInputStreamAdaptor(); + + // Call SetOwnsCopyingStream(true) to tell the CopyingInputStreamAdaptor to + // delete the underlying CopyingInputStream when it is destroyed. 
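+  //
+  // A hedged usage sketch (MyCopyingInputStream stands in for any concrete
+  // CopyingInputStream subclass; it is not part of this header):
+  //   CopyingInputStreamAdaptor adaptor(new MyCopyingInputStream(fd));
+  //   adaptor.SetOwnsCopyingStream(true);  // adaptor deletes it when done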
+  void SetOwnsCopyingStream(bool value) { owns_copying_stream_ = value; }
+
+  // implements ZeroCopyInputStream ----------------------------------
+  bool Next(const void** data, int* size);
+  void BackUp(int count);
+  bool Skip(int count);
+  int64 ByteCount() const;
+
+ private:
+  // Ensures that buffer_ is not NULL.
+  void AllocateBufferIfNeeded();
+  // Frees the buffer and resets buffer_used_.
+  void FreeBuffer();
+
+  // The underlying copying stream.
+  CopyingInputStream* copying_stream_;
+  bool owns_copying_stream_;
+
+  // True if we have seen a permanent error from the underlying stream.
+  bool failed_;
+
+  // The current position of copying_stream_, relative to the point where
+  // we started reading.
+  int64 position_;
+
+  // Data is read into this buffer. It may be NULL if no buffer is currently
+  // in use. Otherwise, it points to an array of size buffer_size_.
+  std::unique_ptr<uint8[]> buffer_;
+  const int buffer_size_;
+
+  // Number of valid bytes currently in the buffer (i.e. the size last
+  // returned by Next()). 0 <= buffer_used_ <= buffer_size_.
+  int buffer_used_;
+
+  // Number of bytes in the buffer which were backed up over by a call to
+  // BackUp(). These need to be returned again.
+  // 0 <= backup_bytes_ <= buffer_used_
+  int backup_bytes_;
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingInputStreamAdaptor);
+};
+
+// ===================================================================
+
+// A generic traditional output stream interface.
+//
+// Lots of traditional output streams (e.g. file descriptors, C stdio
+// streams, and C++ iostreams) expose an interface where every write
+// involves copying bytes from a buffer. If you want to take such an
+// interface and make a ZeroCopyOutputStream based on it, simply implement
+// CopyingOutputStream and then use CopyingOutputStreamAdaptor.
+//
+// CopyingOutputStream implementations should avoid buffering if possible.
+// CopyingOutputStreamAdaptor does its own buffering and will write data
+// in large blocks.
+class LIBPROTOBUF_EXPORT CopyingOutputStream {
+ public:
+  virtual ~CopyingOutputStream() {}
+
+  // Writes "size" bytes from the given buffer to the output. Returns true
+  // if successful, false on a write error.
+  virtual bool Write(const void* buffer, int size) = 0;
+};
+
+// A ZeroCopyOutputStream which writes to a CopyingOutputStream. This is
+// useful for implementing ZeroCopyOutputStreams that write to traditional
+// streams. Note that this class is not really zero-copy.
+//
+// If you want to write to file descriptors or C++ ostreams, this is
+// already implemented for you: use FileOutputStream or OstreamOutputStream
+// respectively.
+class LIBPROTOBUF_EXPORT CopyingOutputStreamAdaptor : public ZeroCopyOutputStream {
+ public:
+  // Creates a stream that writes to the given CopyingOutputStream.
+  // If a block_size is given, it specifies the size of the buffers
+  // that should be returned by Next(). Otherwise, a reasonable default
+  // is used.
+  explicit CopyingOutputStreamAdaptor(CopyingOutputStream* copying_stream,
+                                      int block_size = -1);
+  ~CopyingOutputStreamAdaptor();
+
+  // Writes all pending data to the underlying stream. Returns false if a
+  // write error occurred on the underlying stream. (The underlying
+  // stream itself is not necessarily flushed.)
+  bool Flush();
+
+  // Call SetOwnsCopyingStream(true) to tell the CopyingOutputStreamAdaptor to
+  // delete the underlying CopyingOutputStream when it is destroyed.
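+  //
+  // A hedged usage sketch (MyCopyingOutputStream stands in for any concrete
+  // CopyingOutputStream subclass; it is not part of this header):
+  //   CopyingOutputStreamAdaptor adaptor(new MyCopyingOutputStream(fd));
+  //   adaptor.SetOwnsCopyingStream(true);
+  //   // ... write via adaptor.Next() / adaptor.BackUp() ...
+  //   adaptor.Flush();  // push buffered bytes into the copying stream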
+  void SetOwnsCopyingStream(bool value) { owns_copying_stream_ = value; }
+
+  // implements ZeroCopyOutputStream ---------------------------------
+  bool Next(void** data, int* size);
+  void BackUp(int count);
+  int64 ByteCount() const;
+
+ private:
+  // Write the current buffer, if it is present.
+  bool WriteBuffer();
+  // Ensures that buffer_ is not NULL.
+  void AllocateBufferIfNeeded();
+  // Frees the buffer.
+  void FreeBuffer();
+
+  // The underlying copying stream.
+  CopyingOutputStream* copying_stream_;
+  bool owns_copying_stream_;
+
+  // True if we have seen a permanent error from the underlying stream.
+  bool failed_;
+
+  // The current position of copying_stream_, relative to the point where
+  // we started writing.
+  int64 position_;
+
+  // Data is written from this buffer. It may be NULL if no buffer is
+  // currently in use. Otherwise, it points to an array of size buffer_size_.
+  std::unique_ptr<uint8[]> buffer_;
+  const int buffer_size_;
+
+  // Number of valid bytes currently in the buffer (i.e. the size last
+  // returned by Next()). When BackUp() is called, we just reduce this.
+  // 0 <= buffer_used_ <= buffer_size_.
+  int buffer_used_;
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingOutputStreamAdaptor);
+};
+
+// ===================================================================
+
+// mutable_string_data() and as_string_data() are workarounds to improve
+// the performance of writing new data to an existing string. Unfortunately
+// the methods provided by the string class are suboptimal, and using memcpy()
+// is mildly annoying because it requires its pointer args to be non-NULL even
+// if we ask it to copy 0 bytes. Furthermore, string_as_array() has the
+// property that it always returns NULL if its arg is the empty string, exactly
+// what we want to avoid if we're using it in conjunction with memcpy()!
+// With C++11, the desired memcpy() boils down to memcpy(..., &(*s)[0], size),
+// where s is a string*. Without C++11, &(*s)[0] is not guaranteed to be safe,
+// so we use string_as_array(), and live with the extra logic that tests whether
+// *s is empty.
+
+// Return a pointer to mutable characters underlying the given string. The
+// return value is valid until the next time the string is resized. We
+// trust the caller to treat the return value as an array of length s->size().
+inline char* mutable_string_data(string* s) {
+#ifdef LANG_CXX11
+  // This should be simpler & faster than string_as_array() because the latter
+  // is guaranteed to return NULL when *s is empty, so it has to check for that.
+  return &(*s)[0];
+#else
+  return string_as_array(s);
+#endif
+}
+
+// as_string_data(s) is equivalent to
+//   ({ char* p = mutable_string_data(s); make_pair(p, p != NULL); })
+// Sometimes it's faster: in some scenarios p cannot be NULL, and then the
+// code can avoid that check.
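+//
+// A hedged usage sketch, filling a pre-sized string with raw bytes ("src"
+// and "n" are assumed caller-supplied):
+//   string s;
+//   s.resize(n);
+//   std::pair<char*, bool> p = as_string_data(&s);
+//   if (p.second) memcpy(p.first, src, n);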
+inline std::pair<char*, bool> as_string_data(string* s) {
+  char* p = mutable_string_data(s);
+#ifdef LANG_CXX11
+  return std::make_pair(p, true);
+#else
+  return std::make_pair(p, p != NULL);
+#endif
+}
+
+}  // namespace io
+}  // namespace protobuf
+
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map.h
new file mode 100644
index 0000000000000000000000000000000000000000..6463ac2e02b46aae10b7c080693a024bb44431f1
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map.h
@@ -0,0 +1,1219 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file defines the map container and its helpers to support protobuf maps.
+//
+// The Map and MapIterator types are provided by this header file.
+// Please avoid using other types defined here, unless they are public
+// types within Map or MapIterator, such as Map::value_type.
+
+#ifndef GOOGLE_PROTOBUF_MAP_H__
+#define GOOGLE_PROTOBUF_MAP_H__
+
+#include <initializer_list>
+#include <iterator>
+#include <limits>  // To support Visual Studio 2008
+#include <set>
+#include <utility>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/arena.h>
+#include <google/protobuf/generated_enum_util.h>
+#include <google/protobuf/map_type_handler.h>
+#include <google/protobuf/stubs/hash.h>
+
+namespace google {
+namespace protobuf {
+
+template <typename Key, typename T>
+class Map;
+
+class MapIterator;
+
+template <typename Enum> struct is_proto_enum;
+
+namespace internal {
+template <typename Derived, typename Key, typename T,
+          WireFormatLite::FieldType key_wire_type,
+          WireFormatLite::FieldType value_wire_type, int default_enum_value>
+class MapFieldLite;
+
+template <typename Derived, typename Key, typename T,
+          WireFormatLite::FieldType key_wire_type,
+          WireFormatLite::FieldType value_wire_type, int default_enum_value>
+class MapField;
+
+template <typename Key, typename T>
+class TypeDefinedMapFieldBase;
+
+class DynamicMapField;
+
+class GeneratedMessageReflection;
+}  // namespace internal
+
+// This is the class for google::protobuf::Map's internal value_type. Instead of using
+// std::pair as value_type, we use this class which provides us more control of
+// its process of construction and destruction.
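+//
+// Illustrative sketch (added here; not from the original header):
+// dereferencing a Map iterator yields a MapPair, so element access reads
+// much like std::map usage. Process() is a stand-in:
+//   Map<string, int> word_counts;
+//   word_counts["hello"] = 1;
+//   for (Map<string, int>::const_iterator it = word_counts.begin();
+//        it != word_counts.end(); ++it) {
+//     Process(it->first, it->second);
+//   }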
+template +class MapPair { + public: + typedef const Key first_type; + typedef T second_type; + + MapPair(const Key& other_first, const T& other_second) + : first(other_first), second(other_second) {} + explicit MapPair(const Key& other_first) : first(other_first), second() {} + MapPair(const MapPair& other) + : first(other.first), second(other.second) {} + + ~MapPair() {} + + // Implicitly convertible to std::pair of compatible types. + template + operator std::pair() const { + return std::pair(first, second); + } + + const Key first; + T second; + + private: + friend class ::google::protobuf::Arena; + friend class Map; +}; + +// google::protobuf::Map is an associative container type used to store protobuf map +// fields. Each Map instance may or may not use a different hash function, a +// different iteration order, and so on. E.g., please don't examine +// implementation details to decide if the following would work: +// Map m0, m1; +// m0[0] = m1[0] = m0[1] = m1[1] = 0; +// assert(m0.begin()->first == m1.begin()->first); // Bug! +// +// Map's interface is similar to std::unordered_map, except that Map is not +// designed to play well with exceptions. +template +class Map { + public: + typedef Key key_type; + typedef T mapped_type; + typedef MapPair value_type; + + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + + typedef size_t size_type; + typedef hash hasher; + + Map() : arena_(NULL), default_enum_value_(0) { Init(); } + explicit Map(Arena* arena) : arena_(arena), default_enum_value_(0) { Init(); } + + Map(const Map& other) + : arena_(NULL), default_enum_value_(other.default_enum_value_) { + Init(); + insert(other.begin(), other.end()); + } + + Map(Map&& other) noexcept : Map() { + if (other.arena_) { + *this = other; + } else { + swap(other); + } + } + Map& operator=(Map&& other) noexcept { + if (this != &other) { + if (arena_ != other.arena_) { + *this = other; + } else { + swap(other); + } + } + return *this; + } + + template + Map(const InputIt& first, const InputIt& last) + : arena_(NULL), default_enum_value_(0) { + Init(); + insert(first, last); + } + + ~Map() { + clear(); + if (arena_ == NULL) { + delete elements_; + } + } + + private: + void Init() { + elements_ = Arena::Create(arena_, 0u, hasher(), Allocator(arena_)); + } + + // re-implement std::allocator to use arena allocator for memory allocation. + // Used for google::protobuf::Map implementation. Users should not use this class + // directly. + template + class MapAllocator { + public: + typedef U value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + + MapAllocator() : arena_(NULL) {} + explicit MapAllocator(Arena* arena) : arena_(arena) {} + template + MapAllocator(const MapAllocator& allocator) + : arena_(allocator.arena()) {} + + pointer allocate(size_type n, const void* /* hint */ = 0) { + // If arena is not given, malloc needs to be called which doesn't + // construct element object. 
+ if (arena_ == NULL) { + return static_cast(::operator new(n * sizeof(value_type))); + } else { + return reinterpret_cast( + Arena::CreateArray(arena_, n * sizeof(value_type))); + } + } + + void deallocate(pointer p, size_type n) { + if (arena_ == NULL) { +#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation) + ::operator delete(p, n * sizeof(value_type)); +#else + (void)n; + ::operator delete(p); +#endif + } + } + +#if __cplusplus >= 201103L && !defined(GOOGLE_PROTOBUF_OS_APPLE) && \ + !defined(GOOGLE_PROTOBUF_OS_NACL) && \ + !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN) + template + void construct(NodeType* p, Args&&... args) { + // Clang 3.6 doesn't compile static casting to void* directly. (Issue + // #1266) According C++ standard 5.2.9/1: "The static_cast operator shall + // not cast away constness". So first the maybe const pointer is casted to + // const void* and after the const void* is const casted. + new (const_cast(static_cast(p))) + NodeType(std::forward(args)...); + } + + template + void destroy(NodeType* p) { + p->~NodeType(); + } +#else + void construct(pointer p, const_reference t) { new (p) value_type(t); } + + void destroy(pointer p) { p->~value_type(); } +#endif + + template + struct rebind { + typedef MapAllocator other; + }; + + template + bool operator==(const MapAllocator& other) const { + return arena_ == other.arena_; + } + + template + bool operator!=(const MapAllocator& other) const { + return arena_ != other.arena_; + } + + // To support Visual Studio 2008 + size_type max_size() const { + // parentheses around (std::...:max) prevents macro warning of max() + return (std::numeric_limits::max)(); + } + + // To support gcc-4.4, which does not properly + // support templated friend classes + Arena* arena() const { + return arena_; + } + + private: + typedef void DestructorSkippable_; + Arena* const arena_; + }; + + // InnerMap's key type is Key and its value type is value_type*. We use a + // custom class here and for Node, below, to ensure that k_ is at offset 0, + // allowing safe conversion from pointer to Node to pointer to Key, and vice + // versa when appropriate. + class KeyValuePair { + public: + KeyValuePair(const Key& k, value_type* v) : k_(k), v_(v) {} + + const Key& key() const { return k_; } + Key& key() { return k_; } + value_type* value() const { return v_; } + value_type*& value() { return v_; } + + private: + Key k_; + value_type* v_; + }; + + typedef MapAllocator Allocator; + + // InnerMap is a generic hash-based map. It doesn't contain any + // protocol-buffer-specific logic. It is a chaining hash map with the + // additional feature that some buckets can be converted to use an ordered + // container. This ensures O(lg n) bounds on find, insert, and erase, while + // avoiding the overheads of ordered containers most of the time. + // + // The implementation doesn't need the full generality of unordered_map, + // and it doesn't have it. More bells and whistles can be added as needed. + // Some implementation details: + // 1. The hash function has type hasher and the equality function + // equal_to. We inherit from hasher to save space + // (empty-base-class optimization). + // 2. The number of buckets is a power of two. + // 3. Buckets are converted to trees in pairs: if we convert bucket b then + // buckets b and b^1 will share a tree. Invariant: buckets b and b^1 have + // the same non-NULL value iff they are sharing a tree. (An alternative + // implementation strategy would be to have a tag bit per bucket.) + // 4. 
As is typical for hash_map and such, the Keys and Values are always + // stored in linked list nodes. Pointers to elements are never invalidated + // until the element is deleted. + // 5. The trees' payload type is pointer to linked-list node. Tree-converting + // a bucket doesn't copy Key-Value pairs. + // 6. Once we've tree-converted a bucket, it is never converted back. However, + // the items a tree contains may wind up assigned to trees or lists upon a + // rehash. + // 7. The code requires no C++ features from C++11 or later. + // 8. Mutations to a map do not invalidate the map's iterators, pointers to + // elements, or references to elements. + // 9. Except for erase(iterator), any non-const method can reorder iterators. + class InnerMap : private hasher { + public: + typedef value_type* Value; + + InnerMap(size_type n, hasher h, Allocator alloc) + : hasher(h), + num_elements_(0), + seed_(Seed()), + table_(NULL), + alloc_(alloc) { + n = TableSize(n); + table_ = CreateEmptyTable(n); + num_buckets_ = index_of_first_non_null_ = n; + } + + ~InnerMap() { + if (table_ != NULL) { + clear(); + Dealloc(table_, num_buckets_); + } + } + + private: + enum { kMinTableSize = 8 }; + + // Linked-list nodes, as one would expect for a chaining hash table. + struct Node { + KeyValuePair kv; + Node* next; + }; + + // This is safe only if the given pointer is known to point to a Key that is + // part of a Node. + static Node* NodePtrFromKeyPtr(Key* k) { + return reinterpret_cast(k); + } + + static Key* KeyPtrFromNodePtr(Node* node) { return &node->kv.key(); } + + // Trees. The payload type is pointer to Key, so that we can query the tree + // with Keys that are not in any particular data structure. When we insert, + // though, the pointer is always pointing to a Key that is inside a Node. + struct KeyCompare { + bool operator()(const Key* n0, const Key* n1) const { return *n0 < *n1; } + }; + typedef typename Allocator::template rebind::other KeyPtrAllocator; + typedef std::set Tree; + typedef typename Tree::iterator TreeIterator; + + // iterator and const_iterator are instantiations of iterator_base. + template + struct iterator_base { + typedef KeyValueType& reference; + typedef KeyValueType* pointer; + + // Invariants: + // node_ is always correct. This is handy because the most common + // operations are operator* and operator-> and they only use node_. + // When node_ is set to a non-NULL value, all the other non-const fields + // are updated to be correct also, but those fields can become stale + // if the underlying map is modified. When those fields are needed they + // are rechecked, and updated if necessary. + iterator_base() : node_(NULL), m_(NULL), bucket_index_(0) {} + + explicit iterator_base(const InnerMap* m) : m_(m) { + SearchFrom(m->index_of_first_non_null_); + } + + // Any iterator_base can convert to any other. This is overkill, and we + // rely on the enclosing class to use it wisely. The standard "iterator + // can convert to const_iterator" is OK but the reverse direction is not. + template + explicit iterator_base(const iterator_base& it) + : node_(it.node_), m_(it.m_), bucket_index_(it.bucket_index_) {} + + iterator_base(Node* n, const InnerMap* m, size_type index) + : node_(n), m_(m), bucket_index_(index) {} + + iterator_base(TreeIterator tree_it, const InnerMap* m, size_type index) + : node_(NodePtrFromKeyPtr(*tree_it)), m_(m), bucket_index_(index) { + // Invariant: iterators that use buckets with trees have an even + // bucket_index_. 
+ GOOGLE_DCHECK_EQ(bucket_index_ % 2, 0); + } + + // Advance through buckets, looking for the first that isn't empty. + // If nothing non-empty is found then leave node_ == NULL. + void SearchFrom(size_type start_bucket) { + GOOGLE_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ || + m_->table_[m_->index_of_first_non_null_] != NULL); + node_ = NULL; + for (bucket_index_ = start_bucket; bucket_index_ < m_->num_buckets_; + bucket_index_++) { + if (m_->TableEntryIsNonEmptyList(bucket_index_)) { + node_ = static_cast(m_->table_[bucket_index_]); + break; + } else if (m_->TableEntryIsTree(bucket_index_)) { + Tree* tree = static_cast(m_->table_[bucket_index_]); + GOOGLE_DCHECK(!tree->empty()); + node_ = NodePtrFromKeyPtr(*tree->begin()); + break; + } + } + } + + reference operator*() const { return node_->kv; } + pointer operator->() const { return &(operator*()); } + + friend bool operator==(const iterator_base& a, const iterator_base& b) { + return a.node_ == b.node_; + } + friend bool operator!=(const iterator_base& a, const iterator_base& b) { + return a.node_ != b.node_; + } + + iterator_base& operator++() { + if (node_->next == NULL) { + TreeIterator tree_it; + const bool is_list = revalidate_if_necessary(&tree_it); + if (is_list) { + SearchFrom(bucket_index_ + 1); + } else { + GOOGLE_DCHECK_EQ(bucket_index_ & 1, 0); + Tree* tree = static_cast(m_->table_[bucket_index_]); + if (++tree_it == tree->end()) { + SearchFrom(bucket_index_ + 2); + } else { + node_ = NodePtrFromKeyPtr(*tree_it); + } + } + } else { + node_ = node_->next; + } + return *this; + } + + iterator_base operator++(int /* unused */) { + iterator_base tmp = *this; + ++*this; + return tmp; + } + + // Assumes node_ and m_ are correct and non-NULL, but other fields may be + // stale. Fix them as needed. Then return true iff node_ points to a + // Node in a list. If false is returned then *it is modified to be + // a valid iterator for node_. + bool revalidate_if_necessary(TreeIterator* it) { + GOOGLE_DCHECK(node_ != NULL && m_ != NULL); + // Force bucket_index_ to be in range. + bucket_index_ &= (m_->num_buckets_ - 1); + // Common case: the bucket we think is relevant points to node_. + if (m_->table_[bucket_index_] == static_cast(node_)) + return true; + // Less common: the bucket is a linked list with node_ somewhere in it, + // but not at the head. + if (m_->TableEntryIsNonEmptyList(bucket_index_)) { + Node* l = static_cast(m_->table_[bucket_index_]); + while ((l = l->next) != NULL) { + if (l == node_) { + return true; + } + } + } + // Well, bucket_index_ still might be correct, but probably + // not. Revalidate just to be sure. This case is rare enough that we + // don't worry about potential optimizations, such as having a custom + // find-like method that compares Node* instead of const Key&. 
+ iterator_base i(m_->find(*KeyPtrFromNodePtr(node_), it)); + bucket_index_ = i.bucket_index_; + return m_->TableEntryIsList(bucket_index_); + } + + Node* node_; + const InnerMap* m_; + size_type bucket_index_; + }; + + public: + typedef iterator_base iterator; + typedef iterator_base const_iterator; + + iterator begin() { return iterator(this); } + iterator end() { return iterator(); } + const_iterator begin() const { return const_iterator(this); } + const_iterator end() const { return const_iterator(); } + + void clear() { + for (size_type b = 0; b < num_buckets_; b++) { + if (TableEntryIsNonEmptyList(b)) { + Node* node = static_cast(table_[b]); + table_[b] = NULL; + do { + Node* next = node->next; + DestroyNode(node); + node = next; + } while (node != NULL); + } else if (TableEntryIsTree(b)) { + Tree* tree = static_cast(table_[b]); + GOOGLE_DCHECK(table_[b] == table_[b + 1] && (b & 1) == 0); + table_[b] = table_[b + 1] = NULL; + typename Tree::iterator tree_it = tree->begin(); + do { + Node* node = NodePtrFromKeyPtr(*tree_it); + typename Tree::iterator next = tree_it; + ++next; + tree->erase(tree_it); + DestroyNode(node); + tree_it = next; + } while (tree_it != tree->end()); + DestroyTree(tree); + b++; + } + } + num_elements_ = 0; + index_of_first_non_null_ = num_buckets_; + } + + const hasher& hash_function() const { return *this; } + + static size_type max_size() { + return static_cast(1) << (sizeof(void**) >= 8 ? 60 : 28); + } + size_type size() const { return num_elements_; } + bool empty() const { return size() == 0; } + + iterator find(const Key& k) { return iterator(FindHelper(k).first); } + const_iterator find(const Key& k) const { return find(k, NULL); } + + // In traditional C++ style, this performs "insert if not present." + std::pair insert(const KeyValuePair& kv) { + std::pair p = FindHelper(kv.key()); + // Case 1: key was already present. + if (p.first.node_ != NULL) + return std::make_pair(iterator(p.first), false); + // Case 2: insert. + if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) { + p = FindHelper(kv.key()); + } + const size_type b = p.second; // bucket number + Node* node = Alloc(1); + alloc_.construct(&node->kv, kv); + iterator result = InsertUnique(b, node); + ++num_elements_; + return std::make_pair(result, true); + } + + // The same, but if an insertion is necessary then the value portion of the + // inserted key-value pair is left uninitialized. + std::pair insert(const Key& k) { + std::pair p = FindHelper(k); + // Case 1: key was already present. + if (p.first.node_ != NULL) + return std::make_pair(iterator(p.first), false); + // Case 2: insert. 
+ if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) { + p = FindHelper(k); + } + const size_type b = p.second; // bucket number + Node* node = Alloc(1); + typedef typename Allocator::template rebind::other KeyAllocator; + KeyAllocator(alloc_).construct(&node->kv.key(), k); + iterator result = InsertUnique(b, node); + ++num_elements_; + return std::make_pair(result, true); + } + + Value& operator[](const Key& k) { + KeyValuePair kv(k, Value()); + return insert(kv).first->value(); + } + + void erase(iterator it) { + GOOGLE_DCHECK_EQ(it.m_, this); + typename Tree::iterator tree_it; + const bool is_list = it.revalidate_if_necessary(&tree_it); + size_type b = it.bucket_index_; + Node* const item = it.node_; + if (is_list) { + GOOGLE_DCHECK(TableEntryIsNonEmptyList(b)); + Node* head = static_cast(table_[b]); + head = EraseFromLinkedList(item, head); + table_[b] = static_cast(head); + } else { + GOOGLE_DCHECK(TableEntryIsTree(b)); + Tree* tree = static_cast(table_[b]); + tree->erase(*tree_it); + if (tree->empty()) { + // Force b to be the minimum of b and b ^ 1. This is important + // only because we want index_of_first_non_null_ to be correct. + b &= ~static_cast(1); + DestroyTree(tree); + table_[b] = table_[b + 1] = NULL; + } + } + DestroyNode(item); + --num_elements_; + if (GOOGLE_PREDICT_FALSE(b == index_of_first_non_null_)) { + while (index_of_first_non_null_ < num_buckets_ && + table_[index_of_first_non_null_] == NULL) { + ++index_of_first_non_null_; + } + } + } + + private: + const_iterator find(const Key& k, TreeIterator* it) const { + return FindHelper(k, it).first; + } + std::pair FindHelper(const Key& k) const { + return FindHelper(k, NULL); + } + std::pair FindHelper(const Key& k, + TreeIterator* it) const { + size_type b = BucketNumber(k); + if (TableEntryIsNonEmptyList(b)) { + Node* node = static_cast(table_[b]); + do { + if (IsMatch(*KeyPtrFromNodePtr(node), k)) { + return std::make_pair(const_iterator(node, this, b), b); + } else { + node = node->next; + } + } while (node != NULL); + } else if (TableEntryIsTree(b)) { + GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]); + b &= ~static_cast(1); + Tree* tree = static_cast(table_[b]); + Key* key = const_cast(&k); + typename Tree::iterator tree_it = tree->find(key); + if (tree_it != tree->end()) { + if (it != NULL) *it = tree_it; + return std::make_pair(const_iterator(tree_it, this, b), b); + } + } + return std::make_pair(end(), b); + } + + // Insert the given Node in bucket b. If that would make bucket b too big, + // and bucket b is not a tree, create a tree for buckets b and b^1 to share. + // Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct + // bucket. num_elements_ is not modified. + iterator InsertUnique(size_type b, Node* node) { + GOOGLE_DCHECK(index_of_first_non_null_ == num_buckets_ || + table_[index_of_first_non_null_] != NULL); + // In practice, the code that led to this point may have already + // determined whether we are inserting into an empty list, a short list, + // or whatever. But it's probably cheap enough to recompute that here; + // it's likely that we're inserting into an empty or short list. 
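+      // (Illustrative note, added for clarity: the three paths below are
+      // empty bucket -> start a new list; non-empty list -> prepend, or
+      // tree-convert first if the chain is too long; existing tree ->
+      // insert into the tree shared by buckets b and b^1.)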
+ iterator result; + GOOGLE_DCHECK(find(*KeyPtrFromNodePtr(node)) == end()); + if (TableEntryIsEmpty(b)) { + result = InsertUniqueInList(b, node); + } else if (TableEntryIsNonEmptyList(b)) { + if (GOOGLE_PREDICT_FALSE(TableEntryIsTooLong(b))) { + TreeConvert(b); + result = InsertUniqueInTree(b, node); + GOOGLE_DCHECK_EQ(result.bucket_index_, b & ~static_cast(1)); + } else { + // Insert into a pre-existing list. This case cannot modify + // index_of_first_non_null_, so we skip the code to update it. + return InsertUniqueInList(b, node); + } + } else { + // Insert into a pre-existing tree. This case cannot modify + // index_of_first_non_null_, so we skip the code to update it. + return InsertUniqueInTree(b, node); + } + // parentheses around (std::min) prevents macro expansion of min(...) + index_of_first_non_null_ = + (std::min)(index_of_first_non_null_, result.bucket_index_); + return result; + } + + // Helper for InsertUnique. Handles the case where bucket b is a + // not-too-long linked list. + iterator InsertUniqueInList(size_type b, Node* node) { + node->next = static_cast(table_[b]); + table_[b] = static_cast(node); + return iterator(node, this, b); + } + + // Helper for InsertUnique. Handles the case where bucket b points to a + // Tree. + iterator InsertUniqueInTree(size_type b, Node* node) { + GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]); + // Maintain the invariant that node->next is NULL for all Nodes in Trees. + node->next = NULL; + return iterator(static_cast(table_[b]) + ->insert(KeyPtrFromNodePtr(node)) + .first, + this, b & ~static_cast(1)); + } + + // Returns whether it did resize. Currently this is only used when + // num_elements_ increases, though it could be used in other situations. + // It checks for load too low as well as load too high: because any number + // of erases can occur between inserts, the load could be as low as 0 here. + // Resizing to a lower size is not always helpful, but failing to do so can + // destroy the expected big-O bounds for some operations. By having the + // policy that sometimes we resize down as well as up, clients can easily + // keep O(size()) = O(number of buckets) if they want that. + bool ResizeIfLoadIsOutOfRange(size_type new_size) { + const size_type kMaxMapLoadTimes16 = 12; // controls RAM vs CPU tradeoff + const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16; + const size_type lo_cutoff = hi_cutoff / 4; + // We don't care how many elements are in trees. If a lot are, + // we may resize even though there are many empty buckets. In + // practice, this seems fine. + if (GOOGLE_PREDICT_FALSE(new_size >= hi_cutoff)) { + if (num_buckets_ <= max_size() / 2) { + Resize(num_buckets_ * 2); + return true; + } + } else if (GOOGLE_PREDICT_FALSE(new_size <= lo_cutoff && + num_buckets_ > kMinTableSize)) { + size_type lg2_of_size_reduction_factor = 1; + // It's possible we want to shrink a lot here... size() could even be 0. + // So, estimate how much to shrink by making sure we don't shrink so + // much that we would need to grow the table after a few inserts. + const size_type hypothetical_size = new_size * 5 / 4 + 1; + while ((hypothetical_size << lg2_of_size_reduction_factor) < + hi_cutoff) { + ++lg2_of_size_reduction_factor; + } + size_type new_num_buckets = std::max( + kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor); + if (new_num_buckets != num_buckets_) { + Resize(new_num_buckets); + return true; + } + } + return false; + } + + // Resize to the given number of buckets. 
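+    // (Illustrative arithmetic, added for clarity: in
+    // ResizeIfLoadIsOutOfRange above, with num_buckets_ == 16 the table
+    // grows when the 12th element arrives, since hi_cutoff == 16 * 12 / 16
+    // == 12, and may shrink once erasures bring the size down to
+    // lo_cutoff == 3.)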
+ void Resize(size_t new_num_buckets) { + GOOGLE_DCHECK_GE(new_num_buckets, kMinTableSize); + void** const old_table = table_; + const size_type old_table_size = num_buckets_; + num_buckets_ = new_num_buckets; + table_ = CreateEmptyTable(num_buckets_); + const size_type start = index_of_first_non_null_; + index_of_first_non_null_ = num_buckets_; + for (size_type i = start; i < old_table_size; i++) { + if (TableEntryIsNonEmptyList(old_table, i)) { + TransferList(old_table, i); + } else if (TableEntryIsTree(old_table, i)) { + TransferTree(old_table, i++); + } + } + Dealloc(old_table, old_table_size); + } + + void TransferList(void* const* table, size_type index) { + Node* node = static_cast(table[index]); + do { + Node* next = node->next; + InsertUnique(BucketNumber(*KeyPtrFromNodePtr(node)), node); + node = next; + } while (node != NULL); + } + + void TransferTree(void* const* table, size_type index) { + Tree* tree = static_cast(table[index]); + typename Tree::iterator tree_it = tree->begin(); + do { + Node* node = NodePtrFromKeyPtr(*tree_it); + InsertUnique(BucketNumber(**tree_it), node); + } while (++tree_it != tree->end()); + DestroyTree(tree); + } + + Node* EraseFromLinkedList(Node* item, Node* head) { + if (head == item) { + return head->next; + } else { + head->next = EraseFromLinkedList(item, head->next); + return head; + } + } + + bool TableEntryIsEmpty(size_type b) const { + return TableEntryIsEmpty(table_, b); + } + bool TableEntryIsNonEmptyList(size_type b) const { + return TableEntryIsNonEmptyList(table_, b); + } + bool TableEntryIsTree(size_type b) const { + return TableEntryIsTree(table_, b); + } + bool TableEntryIsList(size_type b) const { + return TableEntryIsList(table_, b); + } + static bool TableEntryIsEmpty(void* const* table, size_type b) { + return table[b] == NULL; + } + static bool TableEntryIsNonEmptyList(void* const* table, size_type b) { + return table[b] != NULL && table[b] != table[b ^ 1]; + } + static bool TableEntryIsTree(void* const* table, size_type b) { + return !TableEntryIsEmpty(table, b) && + !TableEntryIsNonEmptyList(table, b); + } + static bool TableEntryIsList(void* const* table, size_type b) { + return !TableEntryIsTree(table, b); + } + + void TreeConvert(size_type b) { + GOOGLE_DCHECK(!TableEntryIsTree(b) && !TableEntryIsTree(b ^ 1)); + typename Allocator::template rebind::other tree_allocator(alloc_); + Tree* tree = tree_allocator.allocate(1); + // We want to use the three-arg form of construct, if it exists, but we + // create a temporary and use the two-arg construct that's known to exist. + // It's clunky, but the compiler should be able to generate more-or-less + // the same code. + tree_allocator.construct(tree, + Tree(KeyCompare(), KeyPtrAllocator(alloc_))); + // Now the tree is ready to use. + size_type count = CopyListToTree(b, tree) + CopyListToTree(b ^ 1, tree); + GOOGLE_DCHECK_EQ(count, tree->size()); + table_[b] = table_[b ^ 1] = static_cast(tree); + } + + // Copy a linked list in the given bucket to a tree. + // Returns the number of things it copied. + size_type CopyListToTree(size_type b, Tree* tree) { + size_type count = 0; + Node* node = static_cast(table_[b]); + while (node != NULL) { + tree->insert(KeyPtrFromNodePtr(node)); + ++count; + Node* next = node->next; + node->next = NULL; + node = next; + } + return count; + } + + // Return whether table_[b] is a linked list that seems awfully long. + // Requires table_[b] to point to a non-empty linked list. 
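+    // (Illustrative note, added for clarity: with kMaxLength == 8 below, a
+    // chain that already holds 8 nodes reports true, and InsertUnique then
+    // converts buckets b and b^1 to a shared tree before the 9th colliding
+    // key is inserted.)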
+ bool TableEntryIsTooLong(size_type b) { + const size_type kMaxLength = 8; + size_type count = 0; + Node* node = static_cast(table_[b]); + do { + ++count; + node = node->next; + } while (node != NULL); + // Invariant: no linked list ever is more than kMaxLength in length. + GOOGLE_DCHECK_LE(count, kMaxLength); + return count >= kMaxLength; + } + + size_type BucketNumber(const Key& k) const { + // We inherit from hasher, so one-arg operator() provides a hash function. + size_type h = (*const_cast(this))(k); + return (h + seed_) & (num_buckets_ - 1); + } + + bool IsMatch(const Key& k0, const Key& k1) const { + return std::equal_to()(k0, k1); + } + + // Return a power of two no less than max(kMinTableSize, n). + // Assumes either n < kMinTableSize or n is a power of two. + size_type TableSize(size_type n) { + return n < static_cast(kMinTableSize) + ? static_cast(kMinTableSize) + : n; + } + + // Use alloc_ to allocate an array of n objects of type U. + template + U* Alloc(size_type n) { + typedef typename Allocator::template rebind::other alloc_type; + return alloc_type(alloc_).allocate(n); + } + + // Use alloc_ to deallocate an array of n objects of type U. + template + void Dealloc(U* t, size_type n) { + typedef typename Allocator::template rebind::other alloc_type; + alloc_type(alloc_).deallocate(t, n); + } + + void DestroyNode(Node* node) { + alloc_.destroy(&node->kv); + Dealloc(node, 1); + } + + void DestroyTree(Tree* tree) { + typename Allocator::template rebind::other tree_allocator(alloc_); + tree_allocator.destroy(tree); + tree_allocator.deallocate(tree, 1); + } + + void** CreateEmptyTable(size_type n) { + GOOGLE_DCHECK(n >= kMinTableSize); + GOOGLE_DCHECK_EQ(n & (n - 1), 0); + void** result = Alloc(n); + memset(result, 0, n * sizeof(result[0])); + return result; + } + + // Return a randomish value. 
+ size_type Seed() const { + size_type s = static_cast(reinterpret_cast(this)); +#if defined(__x86_64__) && defined(__GNUC__) + uint32 hi, lo; + asm("rdtsc" : "=a" (lo), "=d" (hi)); + s += ((static_cast(hi) << 32) | lo); +#endif + return s; + } + + size_type num_elements_; + size_type num_buckets_; + size_type seed_; + size_type index_of_first_non_null_; + void** table_; // an array with num_buckets_ entries + Allocator alloc_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(InnerMap); + }; // end of class InnerMap + + public: + // Iterators + class const_iterator { + typedef typename InnerMap::const_iterator InnerIt; + + public: + typedef std::forward_iterator_tag iterator_category; + typedef typename Map::value_type value_type; + typedef ptrdiff_t difference_type; + typedef const value_type* pointer; + typedef const value_type& reference; + + const_iterator() {} + explicit const_iterator(const InnerIt& it) : it_(it) {} + + const_reference operator*() const { + return *it_->value(); + } + const_pointer operator->() const { return &(operator*()); } + + const_iterator& operator++() { + ++it_; + return *this; + } + const_iterator operator++(int) { return const_iterator(it_++); } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.it_ == b.it_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + InnerIt it_; + }; + + class iterator { + typedef typename InnerMap::iterator InnerIt; + + public: + typedef std::forward_iterator_tag iterator_category; + typedef typename Map::value_type value_type; + typedef ptrdiff_t difference_type; + typedef value_type* pointer; + typedef value_type& reference; + + iterator() {} + explicit iterator(const InnerIt& it) : it_(it) {} + + reference operator*() const { return *it_->value(); } + pointer operator->() const { return &(operator*()); } + + iterator& operator++() { + ++it_; + return *this; + } + iterator operator++(int) { return iterator(it_++); } + + // Allow implicit conversion to const_iterator. 
+ operator const_iterator() const { + return const_iterator(typename InnerMap::const_iterator(it_)); + } + + friend bool operator==(const iterator& a, const iterator& b) { + return a.it_ == b.it_; + } + friend bool operator!=(const iterator& a, const iterator& b) { + return !(a == b); + } + + private: + friend class Map; + + InnerIt it_; + }; + + iterator begin() { return iterator(elements_->begin()); } + iterator end() { return iterator(elements_->end()); } + const_iterator begin() const { + return const_iterator(iterator(elements_->begin())); + } + const_iterator end() const { + return const_iterator(iterator(elements_->end())); + } + const_iterator cbegin() const { return begin(); } + const_iterator cend() const { return end(); } + + // Capacity + size_type size() const { return elements_->size(); } + bool empty() const { return size() == 0; } + + // Element access + T& operator[](const key_type& key) { + value_type** value = &(*elements_)[key]; + if (*value == NULL) { + *value = CreateValueTypeInternal(key); + internal::MapValueInitializer::value, + T>::Initialize((*value)->second, + default_enum_value_); + } + return (*value)->second; + } + const T& at(const key_type& key) const { + const_iterator it = find(key); + GOOGLE_CHECK(it != end()) << "key not found: " << key; + return it->second; + } + T& at(const key_type& key) { + iterator it = find(key); + GOOGLE_CHECK(it != end()) << "key not found: " << key; + return it->second; + } + + // Lookup + size_type count(const key_type& key) const { + const_iterator it = find(key); + GOOGLE_DCHECK(it == end() || key == it->first); + return it == end() ? 0 : 1; + } + const_iterator find(const key_type& key) const { + return const_iterator(iterator(elements_->find(key))); + } + iterator find(const key_type& key) { return iterator(elements_->find(key)); } + std::pair equal_range( + const key_type& key) const { + const_iterator it = find(key); + if (it == end()) { + return std::pair(it, it); + } else { + const_iterator begin = it++; + return std::pair(begin, it); + } + } + std::pair equal_range(const key_type& key) { + iterator it = find(key); + if (it == end()) { + return std::pair(it, it); + } else { + iterator begin = it++; + return std::pair(begin, it); + } + } + + // insert + std::pair insert(const value_type& value) { + std::pair p = + elements_->insert(value.first); + if (p.second) { + p.first->value() = CreateValueTypeInternal(value); + } + return std::pair(iterator(p.first), p.second); + } + template + void insert(InputIt first, InputIt last) { + for (InputIt it = first; it != last; ++it) { + iterator exist_it = find(it->first); + if (exist_it == end()) { + operator[](it->first) = it->second; + } + } + } + void insert(std::initializer_list values) { + insert(values.begin(), values.end()); + } + + // Erase and clear + size_type erase(const key_type& key) { + iterator it = find(key); + if (it == end()) { + return 0; + } else { + erase(it); + return 1; + } + } + iterator erase(iterator pos) { + if (arena_ == NULL) delete pos.operator->(); + iterator i = pos++; + elements_->erase(i.it_); + return pos; + } + void erase(iterator first, iterator last) { + while (first != last) { + first = erase(first); + } + } + void clear() { erase(begin(), end()); } + + // Assign + Map& operator=(const Map& other) { + if (this != &other) { + clear(); + insert(other.begin(), other.end()); + } + return *this; + } + + void swap(Map& other) { + if (arena_ == other.arena_) { + std::swap(default_enum_value_, other.default_enum_value_); + std::swap(elements_, 
other.elements_); + } else { + // TODO(zuguang): optimize this. The temporary copy can be allocated + // in the same arena as the other message, and the "other = copy" can + // be replaced with the fast-path swap above. + Map copy = *this; + *this = other; + other = copy; + } + } + + // Access to hasher. Currently this returns a copy, but it may + // be modified to return a const reference in the future. + hasher hash_function() const { return elements_->hash_function(); } + + private: + // Set default enum value only for proto2 map field whose value is enum type. + void SetDefaultEnumValue(int default_enum_value) { + default_enum_value_ = default_enum_value; + } + + value_type* CreateValueTypeInternal(const Key& key) { + if (arena_ == NULL) { + return new value_type(key); + } else { + value_type* value = reinterpret_cast( + Arena::CreateArray(arena_, sizeof(value_type))); + Arena::CreateInArenaStorage(const_cast(&value->first), arena_); + Arena::CreateInArenaStorage(&value->second, arena_); + const_cast(value->first) = key; + return value; + } + } + + value_type* CreateValueTypeInternal(const value_type& value) { + if (arena_ == NULL) { + return new value_type(value); + } else { + value_type* p = reinterpret_cast( + Arena::CreateArray(arena_, sizeof(value_type))); + Arena::CreateInArenaStorage(const_cast(&p->first), arena_); + Arena::CreateInArenaStorage(&p->second, arena_); + const_cast(p->first) = value.first; + p->second = value.second; + return p; + } + } + + Arena* arena_; + int default_enum_value_; + InnerMap* elements_; + + friend class ::google::protobuf::Arena; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + template + friend class internal::MapFieldLite; +}; + +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_MAP_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_entry_lite.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_entry_lite.h new file mode 100644 index 0000000000000000000000000000000000000000..85a0bed7139cda21cbeba201adfc2ed24e663ae5 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_entry_lite.h @@ -0,0 +1,671 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__
+#define GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace google {
+namespace protobuf {
+namespace internal {
+template <typename Derived, typename Key, typename Value,
+          WireFormatLite::FieldType kKeyFieldType,
+          WireFormatLite::FieldType kValueFieldType, int default_enum_value>
+class MapEntry;
+template <typename Derived, typename Key, typename Value,
+          WireFormatLite::FieldType kKeyFieldType,
+          WireFormatLite::FieldType kValueFieldType, int default_enum_value>
+class MapFieldLite;
+}  // namespace internal
+}  // namespace protobuf
+
+namespace protobuf {
+namespace internal {
+
+// MoveHelper::Move is used to set *dest. It copies *src, or moves it (in
+// the C++11 sense), or swaps it. *src is left in a sane state for
+// subsequent destruction, but shouldn't be used for anything.
+template <bool is_enum, bool is_message, bool is_stringlike, typename T>
+struct MoveHelper {  // primitives
+  static void Move(T* src, T* dest) { *dest = *src; }
+};
+
+template <bool is_message, bool is_stringlike, typename T>
+struct MoveHelper<true, is_message, is_stringlike, T> {  // enums
+  static void Move(T* src, T* dest) { *dest = *src; }
+  // T is an enum here, so allow conversions to and from int.
+  static void Move(T* src, int* dest) { *dest = static_cast<int>(*src); }
+  static void Move(int* src, T* dest) { *dest = static_cast<T>(*src); }
+};
+
+template <bool is_stringlike, typename T>
+struct MoveHelper<false, true, is_stringlike, T> {  // messages
+  static void Move(T* src, T* dest) { dest->Swap(src); }
+};
+
+template <typename T>
+struct MoveHelper<false, false, true, T> {  // strings and similar
+  static void Move(T* src, T* dest) {
+#if __cplusplus >= 201103L
+    *dest = std::move(*src);
+#else
+    dest->swap(*src);
+#endif
+  }
+};
+
+// MapEntryImpl is used to implement parsing and serialization of map entries.
+// It uses the Curiously Recurring Template Pattern (CRTP) to provide the type
+// of the eventual code to the template code.
+template <typename Derived, typename Base, typename Key, typename Value,
+          WireFormatLite::FieldType kKeyFieldType,
+          WireFormatLite::FieldType kValueFieldType, int default_enum_value>
+class MapEntryImpl : public Base {
+ protected:
+  // Provide utilities to parse/serialize key/value. Provide utilities to
+  // manipulate internal stored type.
+  typedef MapTypeHandler<kKeyFieldType, Key> KeyTypeHandler;
+  typedef MapTypeHandler<kValueFieldType, Value> ValueTypeHandler;
+
+  // Define internal memory layout. Strings and messages are stored as
+  // pointers, while other types are stored as values.
+  typedef typename KeyTypeHandler::TypeOnMemory KeyOnMemory;
+  typedef typename ValueTypeHandler::TypeOnMemory ValueOnMemory;
+
+  // Enum type cannot be used for MapTypeHandler::Read. Define a type
+  // which will replace Enum with int.
+  typedef typename KeyTypeHandler::MapEntryAccessorType KeyMapEntryAccessorType;
+  typedef typename ValueTypeHandler::MapEntryAccessorType
+      ValueMapEntryAccessorType;
+
+  // Constants for field number.
+  static const int kKeyFieldNumber = 1;
+  static const int kValueFieldNumber = 2;
+
+  // Constants for field tag.
+  static const uint8 kKeyTag = GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(
+      kKeyFieldNumber, KeyTypeHandler::kWireType);
+  static const uint8 kValueTag = GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(
+      kValueFieldNumber, ValueTypeHandler::kWireType);
+  static const size_t kTagSize = 1;
+
+ public:
+  // Work-around for a compiler bug (see repeated_field.h).
+ typedef void MapEntryHasMergeTypeTrait; + typedef Derived EntryType; + typedef Key EntryKeyType; + typedef Value EntryValueType; + static const WireFormatLite::FieldType kEntryKeyFieldType = kKeyFieldType; + static const WireFormatLite::FieldType kEntryValueFieldType = kValueFieldType; + static const int kEntryDefaultEnumValue = default_enum_value; + + MapEntryImpl() : arena_(NULL) { + KeyTypeHandler::Initialize(&key_, NULL); + ValueTypeHandler::InitializeMaybeByDefaultEnum(&value_, default_enum_value, + NULL); + _has_bits_[0] = 0; + } + + explicit MapEntryImpl(Arena* arena) : arena_(arena) { + KeyTypeHandler::Initialize(&key_, arena); + ValueTypeHandler::InitializeMaybeByDefaultEnum(&value_, default_enum_value, + arena); + _has_bits_[0] = 0; + } + + ~MapEntryImpl() { + if (GetArenaNoVirtual() != NULL) return; + KeyTypeHandler::DeleteNoArena(key_); + ValueTypeHandler::DeleteNoArena(value_); + } + + // accessors ====================================================== + + virtual inline const KeyMapEntryAccessorType& key() const { + return KeyTypeHandler::GetExternalReference(key_); + } + virtual inline const ValueMapEntryAccessorType& value() const { + return ValueTypeHandler::DefaultIfNotInitialized( + value_, Derived::internal_default_instance()->value_); + } + inline KeyMapEntryAccessorType* mutable_key() { + set_has_key(); + return KeyTypeHandler::EnsureMutable(&key_, GetArenaNoVirtual()); + } + inline ValueMapEntryAccessorType* mutable_value() { + set_has_value(); + return ValueTypeHandler::EnsureMutable(&value_, GetArenaNoVirtual()); + } + + // implements MessageLite ========================================= + + // MapEntryImpl is for implementation only and this function isn't called + // anywhere. Just provide a fake implementation here for MessageLite. + string GetTypeName() const { return ""; } + + void CheckTypeAndMergeFrom(const MessageLite& other) { + MergeFromInternal(*::google::protobuf::down_cast(&other)); + } + + bool MergePartialFromCodedStream(::google::protobuf::io::CodedInputStream* input) { + uint32 tag; + + for (;;) { + // 1) corrupted data: return false; + // 2) unknown field: skip without putting into unknown field set; + // 3) unknown enum value: keep it in parsing. In proto2, caller should + // check the value and put this entry into containing message's unknown + // field set if the value is an unknown enum. In proto3, caller doesn't + // need to care whether the value is unknown enum; + // 4) missing key/value: missed key/value will have default value. caller + // should take this entry as if key/value is set to default value. + tag = input->ReadTagNoLastTag(); + switch (tag) { + case kKeyTag: + if (!KeyTypeHandler::Read(input, mutable_key())) { + return false; + } + set_has_key(); + break; + + case kValueTag: + if (!ValueTypeHandler::Read(input, mutable_value())) { + return false; + } + set_has_value(); + if (input->ExpectAtEnd()) return true; + break; + + default: + if (tag == 0 || + WireFormatLite::GetTagWireType(tag) == + WireFormatLite::WIRETYPE_END_GROUP) { + return true; + } + if (!WireFormatLite::SkipField(input, tag)) return false; + break; + } + } + } + + size_t ByteSizeLong() const { + size_t size = 0; + size += has_key() ? + kTagSize + static_cast(KeyTypeHandler::ByteSize(key())) : 0; + size += has_value() ? 
+ kTagSize + static_cast(ValueTypeHandler::ByteSize(value())) : 0; + return size; + } + + void SerializeWithCachedSizes(::google::protobuf::io::CodedOutputStream* output) const { + KeyTypeHandler::Write(kKeyFieldNumber, key(), output); + ValueTypeHandler::Write(kValueFieldNumber, value(), output); + } + + ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(bool deterministic, + ::google::protobuf::uint8* output) const { + output = KeyTypeHandler::InternalWriteToArray(kKeyFieldNumber, key(), + deterministic, output); + output = ValueTypeHandler::InternalWriteToArray(kValueFieldNumber, value(), + deterministic, output); + return output; + } + + // Don't override SerializeWithCachedSizesToArray. Use MessageLite's. + + int GetCachedSize() const { + int size = 0; + size += has_key() + ? static_cast(kTagSize) + KeyTypeHandler::GetCachedSize(key()) + : 0; + size += has_value() + ? static_cast(kTagSize) + ValueTypeHandler::GetCachedSize(value()) + : 0; + return size; + } + + bool IsInitialized() const { return ValueTypeHandler::IsInitialized(value_); } + + Base* New() const { + Derived* entry = new Derived; + return entry; + } + + Base* New(Arena* arena) const { + Derived* entry = Arena::CreateMessage(arena); + return entry; + } + + size_t SpaceUsedLong() const { + size_t size = sizeof(Derived); + size += KeyTypeHandler::SpaceUsedInMapEntryLong(key_); + size += ValueTypeHandler::SpaceUsedInMapEntryLong(value_); + return size; + } + + protected: + // We can't declare this function directly here as it would hide the other + // overload (const Message&). + void MergeFromInternal(const MapEntryImpl& from) { + if (from._has_bits_[0]) { + if (from.has_key()) { + KeyTypeHandler::EnsureMutable(&key_, GetArenaNoVirtual()); + KeyTypeHandler::Merge(from.key(), &key_, GetArenaNoVirtual()); + set_has_key(); + } + if (from.has_value()) { + ValueTypeHandler::EnsureMutable(&value_, GetArenaNoVirtual()); + ValueTypeHandler::Merge(from.value(), &value_, GetArenaNoVirtual()); + set_has_value(); + } + } + } + + public: + void Clear() { + KeyTypeHandler::Clear(&key_, GetArenaNoVirtual()); + ValueTypeHandler::ClearMaybeByDefaultEnum( + &value_, GetArenaNoVirtual(), default_enum_value); + clear_has_key(); + clear_has_value(); + } + + static void InitAsDefaultInstance() { + Derived* d = const_cast(Derived::internal_default_instance()); + KeyTypeHandler::AssignDefaultValue(&d->key_); + ValueTypeHandler::AssignDefaultValue(&d->value_); + } + + Arena* GetArena() const { + return GetArenaNoVirtual(); + } + + // Create a MapEntryImpl for given key and value from google::protobuf::Map in + // serialization. This function is only called when value is enum. Enum is + // treated differently because its type in MapEntry is int and its type in + // google::protobuf::Map is enum. We cannot create a reference to int from an enum. + static Derived* EnumWrap(const Key& key, const Value value, Arena* arena) { + return Arena::CreateMessage(arena, key, value); + } + + // Like above, but for all the other types. This avoids value copy to create + // MapEntryImpl from google::protobuf::Map in serialization. + static Derived* Wrap(const Key& key, const Value& value, Arena* arena) { + return Arena::CreateMessage(arena, key, value); + } + + // Parsing using MergePartialFromCodedStream, above, is not as + // efficient as it could be. This helper class provides a speedier way. 
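+  // Illustrative sketch (not part of the original header): generated parsing
+  // code is expected to drive this helper roughly as follows, where
+  // MapFieldType, map_field_ and kMapEntryTag are placeholders standing in
+  // for names emitted by the proto compiler:
+  //
+  //   Parser<MapFieldType, Map<Key, Value> > parser(&map_field_);
+  //   do {
+  //     if (!parser.MergePartialFromCodedStream(input)) return false;
+  //   } while (input->ExpectTag(kMapEntryTag));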
+ template + class Parser { + public: + explicit Parser(MapField* mf) : mf_(mf), map_(mf->MutableMap()) {} + + // This does what the typical MergePartialFromCodedStream() is expected to + // do, with the additional side-effect that if successful (i.e., if true is + // going to be its return value) it inserts the key-value pair into map_. + bool MergePartialFromCodedStream(::google::protobuf::io::CodedInputStream* input) { + // Look for the expected thing: a key and then a value. If it fails, + // invoke the enclosing class's MergePartialFromCodedStream, or return + // false if that would be pointless. + if (input->ExpectTag(kKeyTag)) { + if (!KeyTypeHandler::Read(input, &key_)) { + return false; + } + // Peek at the next byte to see if it is kValueTag. If not, bail out. + const void* data; + int size; + input->GetDirectBufferPointerInline(&data, &size); + // We could use memcmp here, but we don't bother. The tag is one byte. + GOOGLE_COMPILE_ASSERT(kTagSize == 1, tag_size_error); + if (size > 0 && *reinterpret_cast(data) == kValueTag) { + typename Map::size_type map_size = map_->size(); + value_ptr_ = &(*map_)[key_]; + if (GOOGLE_PREDICT_TRUE(map_size != map_->size())) { + // We created a new key-value pair. Fill in the value. + typedef + typename MapIf::type T; + input->Skip(kTagSize); // Skip kValueTag. + if (!ValueTypeHandler::Read(input, + reinterpret_cast(value_ptr_))) { + map_->erase(key_); // Failure! Undo insertion. + return false; + } + if (input->ExpectAtEnd()) return true; + return ReadBeyondKeyValuePair(input); + } + } + } else { + key_ = Key(); + } + + entry_.reset(mf_->NewEntry()); + *entry_->mutable_key() = key_; + const bool result = entry_->MergePartialFromCodedStream(input); + if (result) UseKeyAndValueFromEntry(); + if (entry_->GetArena() != NULL) entry_.release(); + return result; + } + + const Key& key() const { return key_; } + const Value& value() const { return *value_ptr_; } + + private: + void UseKeyAndValueFromEntry() GOOGLE_PROTOBUF_ATTRIBUTE_COLD { + // Update key_ in case we need it later (because key() is called). + // This is potentially inefficient, especially if the key is + // expensive to copy (e.g., a long string), but this is a cold + // path, so it's not a big deal. + key_ = entry_->key(); + value_ptr_ = &(*map_)[key_]; + MoveHelper::Move(entry_->mutable_value(), value_ptr_); + } + + // After reading a key and value successfully, and inserting that data + // into map_, we are not at the end of the input. This is unusual, but + // allowed by the spec. + bool ReadBeyondKeyValuePair(::google::protobuf::io::CodedInputStream* input) + GOOGLE_PROTOBUF_ATTRIBUTE_COLD { + typedef MoveHelper KeyMover; + typedef MoveHelper ValueMover; + entry_.reset(mf_->NewEntry()); + ValueMover::Move(value_ptr_, entry_->mutable_value()); + map_->erase(key_); + KeyMover::Move(&key_, entry_->mutable_key()); + const bool result = entry_->MergePartialFromCodedStream(input); + if (result) UseKeyAndValueFromEntry(); + if (entry_->GetArena() != NULL) entry_.release(); + return result; + } + + MapField* const mf_; + Map* const map_; + Key key_; + Value* value_ptr_; + // On the fast path entry_ is not used. And, when entry_ is used, it's set + // to mf_->NewEntry(), so in the arena case we must call entry_.release. 
+ std::unique_ptr entry_; + }; + + protected: + void set_has_key() { _has_bits_[0] |= 0x00000001u; } + bool has_key() const { return (_has_bits_[0] & 0x00000001u) != 0; } + void clear_has_key() { _has_bits_[0] &= ~0x00000001u; } + void set_has_value() { _has_bits_[0] |= 0x00000002u; } + bool has_value() const { return (_has_bits_[0] & 0x00000002u) != 0; } + void clear_has_value() { _has_bits_[0] &= ~0x00000002u; } + + private: + // Serializing a generated message containing map field involves serializing + // key-value pairs from google::protobuf::Map. The wire format of each key-value pair + // after serialization should be the same as that of a MapEntry message + // containing the same key and value inside it. However, google::protobuf::Map doesn't + // store key and value as MapEntry message, which disables us to use existing + // code to serialize message. In order to use existing code to serialize + // message, we need to construct a MapEntry from key-value pair. But it + // involves copy of key and value to construct a MapEntry. In order to avoid + // this copy in constructing a MapEntry, we need the following class which + // only takes references of given key and value. + class MapEntryWrapper : public Derived { + typedef Derived BaseClass; + typedef typename BaseClass::KeyMapEntryAccessorType KeyMapEntryAccessorType; + typedef + typename BaseClass::ValueMapEntryAccessorType ValueMapEntryAccessorType; + + public: + MapEntryWrapper(Arena* arena, const Key& key, const Value& value) + : Derived(arena), key_(key), value_(value) { + BaseClass::set_has_key(); + BaseClass::set_has_value(); + } + inline const KeyMapEntryAccessorType& key() const { return key_; } + inline const ValueMapEntryAccessorType& value() const { return value_; } + + private: + const Key& key_; + const Value& value_; + + friend class ::google::protobuf::Arena; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + }; + + // Like above, but for enum value only, which stores value instead of + // reference of value field inside. This is needed because the type of value + // field in constructor is an enum, while we need to store it as an int. If we + // initialize a reference to int with a reference to enum, compiler will + // generate a temporary int from enum and initialize the reference to int with + // the temporary. 
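+  // An illustrative example of that pitfall (not from the original source):
+  //
+  //   enum Color { RED = 1 };
+  //   Color c = RED;
+  //   const int& r = c;  // r binds to a temporary int converted from c
+  //
+  // For a reference member initialized this way, the temporary dies at the
+  // end of the constructor's full-expression, leaving the member dangling;
+  // storing the converted value by value (value_ below) avoids this.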
+ class MapEnumEntryWrapper : public Derived { + typedef Derived BaseClass; + typedef typename BaseClass::KeyMapEntryAccessorType KeyMapEntryAccessorType; + typedef + typename BaseClass::ValueMapEntryAccessorType ValueMapEntryAccessorType; + + public: + MapEnumEntryWrapper(Arena* arena, const Key& key, const Value& value) + : Derived(arena), key_(key), value_(value) { + BaseClass::set_has_key(); + BaseClass::set_has_value(); + } + inline const KeyMapEntryAccessorType& key() const { return key_; } + inline const ValueMapEntryAccessorType& value() const { return value_; } + + private: + const KeyMapEntryAccessorType& key_; + const ValueMapEntryAccessorType value_; + + friend class google::protobuf::Arena; + typedef void DestructorSkippable_; + }; + + inline Arena* GetArenaNoVirtual() const { + return arena_; + } + + public: // Needed for constructing tables + KeyOnMemory key_; + ValueOnMemory value_; + Arena* arena_; + uint32 _has_bits_[1]; + + private: + friend class ::google::protobuf::Arena; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + template + friend class internal::MapEntry; + template + friend class internal::MapFieldLite; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MapEntryImpl); +}; + +template +class MapEntryLite + : public MapEntryImpl { + public: + typedef MapEntryImpl + SuperType; + MapEntryLite() {} + explicit MapEntryLite(Arena* arena) : SuperType(arena) {} + void MergeFrom(const MapEntryLite& other) { MergeFromInternal(other); } + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MapEntryLite); +}; +// The completely unprincipled and unwieldy use of template parameters in +// the map code necessitates wrappers to make the code a little bit more +// manageable. +template +struct DeconstructMapEntry; + +template +struct DeconstructMapEntry > { + typedef K Key; + typedef V Value; + static const WireFormatLite::FieldType kKeyFieldType = key; + static const WireFormatLite::FieldType kValueFieldType = value; + static const int default_enum_value = default_enum; +}; + +// Helpers for deterministic serialization ============================= + +// This struct can be used with any generic sorting algorithm. If the Key +// type is relatively small and easy to copy then copying Keys into an +// array of SortItems can be beneficial. Then all the data the sorting +// algorithm needs to touch is in that one array. 
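+// A sketch of the intended use (an assumption, not original code): copy keys
+// and pair pointers into a flat vector, sort it, then emit entries in key
+// order. MapPair stands in for the map's key-value pair type.
+//
+//   typedef SortItem<Key, const MapPair*> Item;
+//   std::vector<Item> items;
+//   items.reserve(map.size());
+//   for (const MapPair& kv : map) items.push_back(Item(&kv));
+//   std::sort(items.begin(), items.end(), CompareByFirstField<Item>());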
+template <typename Key, typename PtrToKeyValuePair> struct SortItem {
+  SortItem() {}
+  explicit SortItem(PtrToKeyValuePair p) : first(p->first), second(p) {}
+
+  Key first;
+  PtrToKeyValuePair second;
+};
+
+template <typename T> struct CompareByFirstField {
+  bool operator()(const T& a, const T& b) const {
+    return a.first < b.first;
+  }
+};
+
+template <typename T> struct CompareByDerefFirst {
+  bool operator()(const T& a, const T& b) const {
+    return a->first < b->first;
+  }
+};
+
+// Helper for table driven serialization
+
+template <WireFormatLite::FieldType field_type>
+struct FromHelper {
+  template <typename T>
+  static const T& From(const T& x) {
+    return x;
+  }
+};
+
+template <>
+struct FromHelper<WireFormatLite::TYPE_STRING> {
+  static ArenaStringPtr From(const string& x) {
+    ArenaStringPtr res;
+    TaggedPtr<::std::string> ptr;
+    ptr.Set(const_cast<::std::string*>(&x));
+    res.UnsafeSetTaggedPointer(ptr);
+    return res;
+  }
+};
+template <>
+struct FromHelper<WireFormatLite::TYPE_BYTES> {
+  static ArenaStringPtr From(const string& x) {
+    ArenaStringPtr res;
+    TaggedPtr<::std::string> ptr;
+    ptr.Set(const_cast<::std::string*>(&x));
+    res.UnsafeSetTaggedPointer(ptr);
+    return res;
+  }
+};
+template <>
+struct FromHelper<WireFormatLite::TYPE_MESSAGE> {
+  template <typename T>
+  static T* From(const T& x) {
+    return const_cast<T*>(&x);
+  }
+};
+
+template <typename T>
+struct MapEntryHelper;
+
+template <typename T, typename Key, typename Value,
+          WireFormatLite::FieldType kKeyFieldType,
+          WireFormatLite::FieldType kValueFieldType, int default_enum_value>
+struct MapEntryHelper<MapEntryLite<T, Key, Value, kKeyFieldType,
+                                   kValueFieldType, default_enum_value> > {
+  // Provide utilities to parse/serialize key/value. Provide utilities to
+  // manipulate internal stored type.
+  typedef MapTypeHandler<kKeyFieldType, Key> KeyTypeHandler;
+  typedef MapTypeHandler<kValueFieldType, Value> ValueTypeHandler;
+
+  // Define internal memory layout. Strings and messages are stored as
+  // pointers, while other types are stored as values.
+  typedef typename KeyTypeHandler::TypeOnMemory KeyOnMemory;
+  typedef typename ValueTypeHandler::TypeOnMemory ValueOnMemory;
+
+  explicit MapEntryHelper(const MapPair<Key, Value>& map_pair)
+      : _has_bits_(3),
+        _cached_size_(2 + KeyTypeHandler::GetCachedSize(map_pair.first) +
+                      ValueTypeHandler::GetCachedSize(map_pair.second)),
+        key_(FromHelper<kKeyFieldType>::From(map_pair.first)),
+        value_(FromHelper<kValueFieldType>::From(map_pair.second)) {}
+
+  // Purposely not following the style guide naming. These are the names
+  // the proto compiler would generate given the map entry descriptor.
+  // The proto compiler generates the offsets in this struct as if this was
+  // a regular message. This way the table driven code barely notices it's
+  // dealing with a map field.
+  uint32 _has_bits_;     // NOLINT
+  uint32 _cached_size_;  // NOLINT
+  KeyOnMemory key_;      // NOLINT
+  ValueOnMemory value_;  // NOLINT
+};
+
+}  // namespace internal
+}  // namespace protobuf
+
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_field_lite.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_field_lite.h
new file mode 100644
index 0000000000000000000000000000000000000000..f648b43034f6f1aae741ac89cb0773814b782f17
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_field_lite.h
@@ -0,0 +1,143 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__
+#define GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__
+
+#include
+#include
+#include
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// This class provides access to map field using generated API. It is used for
+// internal generated message implementation only. Users should never use this
+// directly.
+template <typename Derived, typename Key, typename T,
+          WireFormatLite::FieldType key_wire_type,
+          WireFormatLite::FieldType value_wire_type, int default_enum_value>
+class MapFieldLite {
+  // Define message type for internal repeated field.
+  typedef Derived EntryType;
+
+ public:
+  typedef Map<Key, T> MapType;
+  typedef EntryType EntryTypeTrait;
+
+  MapFieldLite() : arena_(NULL) { SetDefaultEnumValue(); }
+
+  explicit MapFieldLite(Arena* arena) : arena_(arena), map_(arena) {
+    SetDefaultEnumValue();
+  }
+
+  // Accessors
+  const Map<Key, T>& GetMap() const { return map_; }
+  Map<Key, T>* MutableMap() { return &map_; }
+
+  // Convenient methods for generated message implementation.
+  int size() const { return static_cast<int>(map_.size()); }
+  void Clear() { return map_.clear(); }
+  void MergeFrom(const MapFieldLite& other) {
+    for (typename Map<Key, T>::const_iterator it = other.map_.begin();
+         it != other.map_.end(); ++it) {
+      map_[it->first] = it->second;
+    }
+  }
+  void Swap(MapFieldLite* other) { map_.swap(other->map_); }
+
+  // Set default enum value only for proto2 map field whose value is enum type.
+  void SetDefaultEnumValue() {
+    MutableMap()->SetDefaultEnumValue(default_enum_value);
+  }
+
+  // Used in the implementation of parsing. Caller should take the ownership
+  // iff arena_ is NULL.
+  EntryType* NewEntry() const {
+    if (arena_ == NULL) {
+      return new EntryType();
+    } else {
+      return Arena::CreateMessage<EntryType>(arena_);
+    }
+  }
+  // Used in the implementation of serializing enum value type. Caller should
+  // take the ownership iff arena_ is NULL.
+  EntryType* NewEnumEntryWrapper(const Key& key, const T t) const {
+    return EntryType::EnumWrap(key, t, arena_);
+  }
+  // Used in the implementation of serializing other value types. Caller should
+  // take the ownership iff arena_ is NULL.
+  EntryType* NewEntryWrapper(const Key& key, const T& t) const {
+    return EntryType::Wrap(key, t, arena_);
+  }
+
+ private:
+  typedef void DestructorSkippable_;
+
+  Arena* arena_;
+  Map<Key, T> map_;
+
+  friend class ::google::protobuf::Arena;
+};
+
+// True if IsInitialized() is true for value field in all elements of t.
T is +// expected to be message. It's useful to have this helper here to keep the +// protobuf compiler from ever having to emit loops in IsInitialized() methods. +// We want the C++ compiler to inline this or not as it sees fit. +template +bool AllAreInitialized(const Map& t) { + for (typename Map::const_iterator it = t.begin(); it != t.end(); + ++it) { + if (!it->second.IsInitialized()) return false; + } + return true; +} + +template +struct MapEntryToMapField : MapEntryToMapField {}; + +template +struct MapEntryToMapField > { + typedef MapFieldLite, + Key, Value, kKeyFieldType, kValueFieldType, + default_enum_value> + MapFieldType; +}; + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_type_handler.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_type_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..7f7b1e0e1d007f32f06c0c28f46f2308d53f94c0 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_type_handler.h @@ -0,0 +1,739 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_TYPE_HANDLER_H__ +#define GOOGLE_PROTOBUF_TYPE_HANDLER_H__ + +#include +#include + +namespace google { +namespace protobuf { +namespace internal { + +// Used for compile time type selection. MapIf::type will be TrueType if Flag is +// true and FalseType otherwise. +template +struct MapIf; + +template +struct MapIf { + typedef TrueType type; +}; + +template +struct MapIf { + typedef FalseType type; +}; + +// In proto2 Map, enum needs to be initialized to given default value, while +// other types' default value can be inferred from the type. 
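+// For example (illustrative, not from the original source), with a proto2
+// definition such as
+//
+//   enum Status { UNKNOWN = 5; OK = 6; }
+//   message M { map<int32, Status> statuses = 1; }
+//
+// a freshly created value slot must read back as UNKNOWN (5), the first
+// listed enum value, which cannot be inferred from the C++ int that stores
+// it; hence the default_enum_value parameter threaded through below.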
+template +class MapValueInitializer { + public: + static inline void Initialize(Type& type, int default_enum_value); +}; + +template +class MapValueInitializer { + public: + static inline void Initialize(Type& value, int default_enum_value) { + value = static_cast(default_enum_value); + } +}; + +template +class MapValueInitializer { + public: + static inline void Initialize(Type& /* value */, int /* default_enum_value */) {} +}; + +template +class MapArenaMessageCreator { + public: + // Use arena to create message if Type is arena constructable. Otherwise, + // create the message on heap. + static inline Type* CreateMessage(Arena* arena); +}; +template +class MapArenaMessageCreator { + public: + static inline Type* CreateMessage(Arena* arena) { + return Arena::CreateMessage(arena); + } +}; +template +class MapArenaMessageCreator { + public: + static inline Type* CreateMessage(Arena* arena) { + return Arena::Create(arena); + } +}; + +// Define constants for given wire field type +template +class MapWireFieldTypeTraits {}; + +#define TYPE_TRAITS(FieldType, CType, WireFormatType, IsMessage, IsEnum) \ + template \ + class MapWireFieldTypeTraits { \ + public: \ + static const bool kIsMessage = IsMessage; \ + static const bool kIsEnum = IsEnum; \ + typedef typename MapIf::type TypeOnMemory; \ + typedef typename MapIf::type MapEntryAccessorType; \ + static const WireFormatLite::WireType kWireType = \ + WireFormatLite::WIRETYPE_##WireFormatType; \ + }; + +TYPE_TRAITS(MESSAGE , Type, LENGTH_DELIMITED, true, false) +TYPE_TRAITS(STRING , ArenaStringPtr, LENGTH_DELIMITED, false, false) +TYPE_TRAITS(BYTES , ArenaStringPtr , LENGTH_DELIMITED, false, false) +TYPE_TRAITS(INT64 , int64 , VARINT , false, false) +TYPE_TRAITS(UINT64 , uint64 , VARINT , false, false) +TYPE_TRAITS(INT32 , int32 , VARINT , false, false) +TYPE_TRAITS(UINT32 , uint32 , VARINT , false, false) +TYPE_TRAITS(SINT64 , int64 , VARINT , false, false) +TYPE_TRAITS(SINT32 , int32 , VARINT , false, false) +TYPE_TRAITS(ENUM , int , VARINT , false, true ) +TYPE_TRAITS(DOUBLE , double , FIXED64, false, false) +TYPE_TRAITS(FLOAT , float , FIXED32, false, false) +TYPE_TRAITS(FIXED64 , uint64 , FIXED64, false, false) +TYPE_TRAITS(FIXED32 , uint32 , FIXED32, false, false) +TYPE_TRAITS(SFIXED64, int64 , FIXED64, false, false) +TYPE_TRAITS(SFIXED32, int32 , FIXED32, false, false) +TYPE_TRAITS(BOOL , bool , VARINT , false, false) + +#undef TYPE_TRAITS + +template +class MapTypeHandler {}; + +template +class MapTypeHandler { + public: + // Enum type cannot be used for MapTypeHandler::Read. Define a type which will + // replace Enum with int. + typedef typename MapWireFieldTypeTraits::MapEntryAccessorType MapEntryAccessorType; + // Internal stored type in MapEntryLite for given wire field type. + typedef typename MapWireFieldTypeTraits::TypeOnMemory TypeOnMemory; + // Corresponding wire type for field type. + static const WireFormatLite::WireType kWireType = + MapWireFieldTypeTraits::kWireType; + // Whether wire type is for message. + static const bool kIsMessage = + MapWireFieldTypeTraits::kIsMessage; + // Whether wire type is for enum. + static const bool kIsEnum = + MapWireFieldTypeTraits::kIsEnum; + + // Functions used in parsing and serialization. 
=================== + static inline size_t ByteSize(const MapEntryAccessorType& value); + static inline int GetCachedSize(const MapEntryAccessorType& value); + static inline bool Read(io::CodedInputStream* input, + MapEntryAccessorType* value); + static inline void Write(int field, const MapEntryAccessorType& value, + io::CodedOutputStream* output); + static inline uint8* InternalWriteToArray(int field, + const MapEntryAccessorType& value, + bool deterministic, uint8* target); + static inline uint8* WriteToArray(int field, + const MapEntryAccessorType& value, + uint8* target); + + // Functions to manipulate data on memory. ======================== + static inline const Type& GetExternalReference(const Type* value); + static inline void DeleteNoArena(const Type* x); + static inline void Merge(const Type& from, Type** to, Arena* arena); + static inline void Clear(Type** value, Arena* arena); + static inline void ClearMaybeByDefaultEnum(Type** value, Arena* arena, + int default_enum_value); + static inline void Initialize(Type** x, Arena* arena); + + static inline void InitializeMaybeByDefaultEnum(Type** x, + int default_enum_value, + Arena* arena); + static inline Type* EnsureMutable(Type** value, Arena* arena); + // SpaceUsedInMapEntry: Return bytes used by value in MapEntry, excluding + // those already calculate in sizeof(MapField). + static inline size_t SpaceUsedInMapEntryLong(const Type* value); + // Return bytes used by value in Map. + static inline size_t SpaceUsedInMapLong(const Type& value); + // Assign default value to given instance. + static inline void AssignDefaultValue(Type** value); + // Return default instance if value is not initialized when calling const + // reference accessor. + static inline const Type& DefaultIfNotInitialized( + const Type* value, const Type* default_value); + // Check if all required fields have values set. 
+ static inline bool IsInitialized(Type* value); +}; + +#define MAP_HANDLER(FieldType) \ + template \ + class MapTypeHandler { \ + public: \ + typedef typename MapWireFieldTypeTraits::MapEntryAccessorType \ + MapEntryAccessorType; \ + typedef typename MapWireFieldTypeTraits::TypeOnMemory TypeOnMemory; \ + static const WireFormatLite::WireType kWireType = \ + MapWireFieldTypeTraits::kWireType; \ + static const bool kIsMessage = \ + MapWireFieldTypeTraits::kIsMessage; \ + static const bool kIsEnum = \ + MapWireFieldTypeTraits::kIsEnum; \ + static inline int ByteSize(const MapEntryAccessorType& value); \ + static inline int GetCachedSize(const MapEntryAccessorType& value); \ + static inline bool Read(io::CodedInputStream* input, \ + MapEntryAccessorType* value); \ + static inline void Write(int field, const MapEntryAccessorType& value, \ + io::CodedOutputStream* output); \ + static inline uint8* InternalWriteToArray( \ + int field, const MapEntryAccessorType& value, bool deterministic, \ + uint8* target); \ + static inline uint8* WriteToArray(int field, \ + const MapEntryAccessorType& value, \ + uint8* target) { \ + return InternalWriteToArray(field, value, false, target); \ + } \ + static inline const MapEntryAccessorType& GetExternalReference( \ + const TypeOnMemory& value); \ + static inline void DeleteNoArena(const TypeOnMemory& x); \ + static inline void Merge(const MapEntryAccessorType& from, \ + TypeOnMemory* to, Arena* arena); \ + static inline void Clear(TypeOnMemory* value, Arena* arena); \ + static inline void ClearMaybeByDefaultEnum(TypeOnMemory* value, \ + Arena* arena, \ + int default_enum); \ + static inline size_t SpaceUsedInMapEntryLong(const TypeOnMemory& value); \ + static inline size_t SpaceUsedInMapLong(const TypeOnMemory& value); \ + static inline size_t SpaceUsedInMapLong(const string& value); \ + static inline void AssignDefaultValue(TypeOnMemory* value); \ + static inline const MapEntryAccessorType& DefaultIfNotInitialized( \ + const TypeOnMemory& value, const TypeOnMemory& default_value); \ + static inline bool IsInitialized(const TypeOnMemory& value); \ + static void DeleteNoArena(TypeOnMemory& value); \ + static inline void Initialize(TypeOnMemory* value, Arena* arena); \ + static inline void InitializeMaybeByDefaultEnum(TypeOnMemory* value, \ + int default_enum_value, \ + Arena* arena); \ + static inline MapEntryAccessorType* EnsureMutable(TypeOnMemory* value, \ + Arena* arena); \ + }; +MAP_HANDLER(STRING) +MAP_HANDLER(BYTES) +MAP_HANDLER(INT64) +MAP_HANDLER(UINT64) +MAP_HANDLER(INT32) +MAP_HANDLER(UINT32) +MAP_HANDLER(SINT64) +MAP_HANDLER(SINT32) +MAP_HANDLER(ENUM) +MAP_HANDLER(DOUBLE) +MAP_HANDLER(FLOAT) +MAP_HANDLER(FIXED64) +MAP_HANDLER(FIXED32) +MAP_HANDLER(SFIXED64) +MAP_HANDLER(SFIXED32) +MAP_HANDLER(BOOL) +#undef MAP_HANDLER + +template +inline size_t +MapTypeHandler::ByteSize( + const MapEntryAccessorType& value) { + return WireFormatLite::MessageSizeNoVirtual(value); +} + +#define GOOGLE_PROTOBUF_BYTE_SIZE(FieldType, DeclaredType) \ + template \ + inline int MapTypeHandler::ByteSize( \ + const MapEntryAccessorType& value) { \ + return static_cast(WireFormatLite::DeclaredType##Size(value)); \ + } + +GOOGLE_PROTOBUF_BYTE_SIZE(STRING, String) +GOOGLE_PROTOBUF_BYTE_SIZE(BYTES , Bytes) +GOOGLE_PROTOBUF_BYTE_SIZE(INT64 , Int64) +GOOGLE_PROTOBUF_BYTE_SIZE(UINT64, UInt64) +GOOGLE_PROTOBUF_BYTE_SIZE(INT32 , Int32) +GOOGLE_PROTOBUF_BYTE_SIZE(UINT32, UInt32) +GOOGLE_PROTOBUF_BYTE_SIZE(SINT64, SInt64) +GOOGLE_PROTOBUF_BYTE_SIZE(SINT32, SInt32) 
+GOOGLE_PROTOBUF_BYTE_SIZE(ENUM , Enum) + +#undef GOOGLE_PROTOBUF_BYTE_SIZE + +#define FIXED_BYTE_SIZE(FieldType, DeclaredType) \ + template \ + inline int MapTypeHandler::ByteSize( \ + const MapEntryAccessorType& /* value */) { \ + return WireFormatLite::k##DeclaredType##Size; \ + } + +FIXED_BYTE_SIZE(DOUBLE , Double) +FIXED_BYTE_SIZE(FLOAT , Float) +FIXED_BYTE_SIZE(FIXED64 , Fixed64) +FIXED_BYTE_SIZE(FIXED32 , Fixed32) +FIXED_BYTE_SIZE(SFIXED64, SFixed64) +FIXED_BYTE_SIZE(SFIXED32, SFixed32) +FIXED_BYTE_SIZE(BOOL , Bool) + +#undef FIXED_BYTE_SIZE + +template +inline int +MapTypeHandler::GetCachedSize( + const MapEntryAccessorType& value) { + return static_cast( + WireFormatLite::LengthDelimitedSize( + static_cast(value.GetCachedSize()))); +} + +#define GET_CACHED_SIZE(FieldType, DeclaredType) \ + template \ + inline int \ + MapTypeHandler::GetCachedSize( \ + const MapEntryAccessorType& value) { \ + return static_cast(WireFormatLite::DeclaredType##Size(value)); \ + } + +GET_CACHED_SIZE(STRING, String) +GET_CACHED_SIZE(BYTES , Bytes) +GET_CACHED_SIZE(INT64 , Int64) +GET_CACHED_SIZE(UINT64, UInt64) +GET_CACHED_SIZE(INT32 , Int32) +GET_CACHED_SIZE(UINT32, UInt32) +GET_CACHED_SIZE(SINT64, SInt64) +GET_CACHED_SIZE(SINT32, SInt32) +GET_CACHED_SIZE(ENUM , Enum) + +#undef GET_CACHED_SIZE + +#define GET_FIXED_CACHED_SIZE(FieldType, DeclaredType) \ + template \ + inline int \ + MapTypeHandler::GetCachedSize( \ + const MapEntryAccessorType& /* value */) { \ + return WireFormatLite::k##DeclaredType##Size; \ + } + +GET_FIXED_CACHED_SIZE(DOUBLE , Double) +GET_FIXED_CACHED_SIZE(FLOAT , Float) +GET_FIXED_CACHED_SIZE(FIXED64 , Fixed64) +GET_FIXED_CACHED_SIZE(FIXED32 , Fixed32) +GET_FIXED_CACHED_SIZE(SFIXED64, SFixed64) +GET_FIXED_CACHED_SIZE(SFIXED32, SFixed32) +GET_FIXED_CACHED_SIZE(BOOL , Bool) + +#undef GET_FIXED_CACHED_SIZE + +template +inline void MapTypeHandler::Write( + int field, const MapEntryAccessorType& value, + io::CodedOutputStream* output) { + WireFormatLite::WriteMessageMaybeToArray(field, value, output); +} + +template +inline uint8* +MapTypeHandler::InternalWriteToArray( + int field, const MapEntryAccessorType& value, bool deterministic, + uint8* target) { + return WireFormatLite::InternalWriteMessageToArray(field, value, + deterministic, target); +} + +#define WRITE_METHOD(FieldType, DeclaredType) \ + template \ + inline void MapTypeHandler::Write( \ + int field, const MapEntryAccessorType& value, \ + io::CodedOutputStream* output) { \ + return WireFormatLite::Write##DeclaredType(field, value, output); \ + } \ + template \ + inline uint8* \ + MapTypeHandler::InternalWriteToArray( \ + int field, const MapEntryAccessorType& value, bool, uint8* target) { \ + return WireFormatLite::Write##DeclaredType##ToArray(field, value, target); \ + } + +WRITE_METHOD(STRING , String) +WRITE_METHOD(BYTES , Bytes) +WRITE_METHOD(INT64 , Int64) +WRITE_METHOD(UINT64 , UInt64) +WRITE_METHOD(INT32 , Int32) +WRITE_METHOD(UINT32 , UInt32) +WRITE_METHOD(SINT64 , SInt64) +WRITE_METHOD(SINT32 , SInt32) +WRITE_METHOD(ENUM , Enum) +WRITE_METHOD(DOUBLE , Double) +WRITE_METHOD(FLOAT , Float) +WRITE_METHOD(FIXED64 , Fixed64) +WRITE_METHOD(FIXED32 , Fixed32) +WRITE_METHOD(SFIXED64, SFixed64) +WRITE_METHOD(SFIXED32, SFixed32) +WRITE_METHOD(BOOL , Bool) + +#undef WRITE_METHOD + +template +inline bool MapTypeHandler::Read( + io::CodedInputStream* input, MapEntryAccessorType* value) { + return WireFormatLite::ReadMessageNoVirtual(input, value); +} + +template +inline bool MapTypeHandler::Read( + io::CodedInputStream* input, 
MapEntryAccessorType* value) { + return WireFormatLite::ReadString(input, value); +} + +template +inline bool MapTypeHandler::Read( + io::CodedInputStream* input, MapEntryAccessorType* value) { + return WireFormatLite::ReadBytes(input, value); +} + +#define READ_METHOD(FieldType) \ + template \ + inline bool MapTypeHandler::Read( \ + io::CodedInputStream* input, MapEntryAccessorType* value) { \ + return WireFormatLite::ReadPrimitive( \ + input, value); \ + } + +READ_METHOD(INT64) +READ_METHOD(UINT64) +READ_METHOD(INT32) +READ_METHOD(UINT32) +READ_METHOD(SINT64) +READ_METHOD(SINT32) +READ_METHOD(ENUM) +READ_METHOD(DOUBLE) +READ_METHOD(FLOAT) +READ_METHOD(FIXED64) +READ_METHOD(FIXED32) +READ_METHOD(SFIXED64) +READ_METHOD(SFIXED32) +READ_METHOD(BOOL) + +#undef READ_METHOD + +// Definition for message handler + +template +inline const Type& +MapTypeHandler::GetExternalReference(const Type* value) { + return *value; +} + +template +inline size_t MapTypeHandler::SpaceUsedInMapEntryLong(const Type* value) { + return value->SpaceUsedLong(); +} + +template +size_t MapTypeHandler::SpaceUsedInMapLong( + const Type& value) { + return value.SpaceUsedLong(); +} + +template +inline void MapTypeHandler::Clear( + Type** value, Arena* /* arena */) { + if (*value != NULL) (*value)->Clear(); +} +template +inline void +MapTypeHandler::ClearMaybeByDefaultEnum(Type** value, + Arena* /* arena */, + int /* default_enum_value */) { + if (*value != NULL) (*value)->Clear(); +} +template +inline void MapTypeHandler::Merge( + const Type& from, Type** to, Arena* /* arena */) { + (*to)->MergeFrom(from); +} + +template +void MapTypeHandler::DeleteNoArena( + const Type* ptr) { + delete ptr; +} + +template +inline void MapTypeHandler::AssignDefaultValue(Type** value) { + *value = const_cast(Type::internal_default_instance()); +} + +template +inline void MapTypeHandler::Initialize(Type** x, + Arena* /* arena */) { + *x = NULL; +} + +template +inline void MapTypeHandler:: + InitializeMaybeByDefaultEnum(Type** x, int /* default_enum_value */, + Arena* /* arena */) { + *x = NULL; +} + +template +inline Type* MapTypeHandler::EnsureMutable(Type** value, + Arena* arena) { + if (*value == NULL) { + *value = + MapArenaMessageCreator:: + type::value>::CreateMessage(arena); + } + return *value; +} + +template +inline const Type& MapTypeHandler:: + DefaultIfNotInitialized(const Type* value, const Type* default_value) { + return value != NULL ? 
*value : *default_value; +} + +template +inline bool MapTypeHandler::IsInitialized(Type* value) { + return value->IsInitialized(); +} + +// Definition for string/bytes handler + +#define STRING_OR_BYTES_HANDLER_FUNCTIONS(FieldType) \ + template \ + inline const typename MapTypeHandler::MapEntryAccessorType& \ + MapTypeHandler::GetExternalReference(const TypeOnMemory& value) { \ + return value.Get(); \ + } \ + template \ + inline size_t \ + MapTypeHandler::SpaceUsedInMapEntryLong(const TypeOnMemory& value) { \ + return sizeof(value); \ + } \ + template \ + inline size_t \ + MapTypeHandler::SpaceUsedInMapLong( \ + const TypeOnMemory& value) { \ + return sizeof(value); \ + } \ + template \ + inline size_t \ + MapTypeHandler::SpaceUsedInMapLong( \ + const string& value) { \ + return sizeof(value); \ + } \ + template \ + inline void MapTypeHandler::Clear( \ + TypeOnMemory* value, Arena* arena) { \ + value->ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), \ + arena); \ + } \ + template \ + inline void MapTypeHandler:: \ + ClearMaybeByDefaultEnum(TypeOnMemory* value, Arena* arena, \ + int /* default_enum */) { \ + Clear(value, arena); \ + } \ + template \ + inline void MapTypeHandler::Merge( \ + const MapEntryAccessorType& from, TypeOnMemory* to, Arena* arena) { \ + to->Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from, arena); \ + } \ + template \ + void MapTypeHandler::DeleteNoArena( \ + TypeOnMemory& value) { \ + value.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); \ + } \ + template \ + inline void MapTypeHandler::AssignDefaultValue(TypeOnMemory* /* value */) {} \ + template \ + inline void \ + MapTypeHandler::Initialize( \ + TypeOnMemory* value, Arena* /* arena */) { \ + value->UnsafeSetDefault( \ + &::google::protobuf::internal::GetEmptyStringAlreadyInited()); \ + } \ + template \ + inline void MapTypeHandler:: \ + InitializeMaybeByDefaultEnum(TypeOnMemory* value, \ + int /* default_enum_value */, \ + Arena* arena) { \ + Initialize(value, arena); \ + } \ + template \ + inline typename MapTypeHandler::MapEntryAccessorType* \ + MapTypeHandler::EnsureMutable( \ + TypeOnMemory* value, Arena* arena) { \ + return value->Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), \ + arena); \ + } \ + template \ + inline const typename MapTypeHandler::MapEntryAccessorType& \ + MapTypeHandler::DefaultIfNotInitialized(const TypeOnMemory& value, \ + const TypeOnMemory& \ + /* default_value */) { \ + return value.Get(); \ + } \ + template \ + inline bool MapTypeHandler::IsInitialized(const TypeOnMemory& /* value */) { \ + return true; \ + } +STRING_OR_BYTES_HANDLER_FUNCTIONS(STRING) +STRING_OR_BYTES_HANDLER_FUNCTIONS(BYTES) +#undef STRING_OR_BYTES_HANDLER_FUNCTIONS + +#define PRIMITIVE_HANDLER_FUNCTIONS(FieldType) \ + template \ + inline const typename MapTypeHandler::MapEntryAccessorType& \ + MapTypeHandler::GetExternalReference(const TypeOnMemory& value) { \ + return value; \ + } \ + template \ + inline size_t \ + MapTypeHandler::SpaceUsedInMapEntryLong(const TypeOnMemory& /* value */) { \ + return 0; \ + } \ + template \ + inline size_t \ + MapTypeHandler::SpaceUsedInMapLong( \ + const TypeOnMemory& /* value */) { \ + return sizeof(Type); \ + } \ + template \ + inline void MapTypeHandler::Clear( \ + TypeOnMemory* value, Arena* /* arena */) { \ + *value = 0; \ + } \ + template \ + inline void MapTypeHandler:: \ + ClearMaybeByDefaultEnum(TypeOnMemory* value, Arena* /* arena */, \ + int default_enum_value) { \ + 
*value = static_cast(default_enum_value); \ + } \ + template \ + inline void MapTypeHandler::Merge( \ + const MapEntryAccessorType& from, TypeOnMemory* to, \ + Arena* /* arena */) { \ + *to = from; \ + } \ + template \ + inline void MapTypeHandler::DeleteNoArena(TypeOnMemory& /* x */) {} \ + template \ + inline void MapTypeHandler::AssignDefaultValue(TypeOnMemory* /* value */) {} \ + template \ + inline void \ + MapTypeHandler::Initialize( \ + TypeOnMemory* value, Arena* /* arena */) { \ + *value = 0; \ + } \ + template \ + inline void MapTypeHandler:: \ + InitializeMaybeByDefaultEnum(TypeOnMemory* value, \ + int default_enum_value, \ + Arena* /* arena */) { \ + *value = static_cast(default_enum_value); \ + } \ + template \ + inline typename MapTypeHandler::MapEntryAccessorType* \ + MapTypeHandler::EnsureMutable( \ + TypeOnMemory* value, Arena* /* arena */) { \ + return value; \ + } \ + template \ + inline const typename MapTypeHandler::MapEntryAccessorType& \ + MapTypeHandler::DefaultIfNotInitialized(const TypeOnMemory& value, \ + const TypeOnMemory& \ + /* default_value */) { \ + return value; \ + } \ + template \ + inline bool MapTypeHandler::IsInitialized(const TypeOnMemory& /* value */) { \ + return true; \ + } +PRIMITIVE_HANDLER_FUNCTIONS(INT64) +PRIMITIVE_HANDLER_FUNCTIONS(UINT64) +PRIMITIVE_HANDLER_FUNCTIONS(INT32) +PRIMITIVE_HANDLER_FUNCTIONS(UINT32) +PRIMITIVE_HANDLER_FUNCTIONS(SINT64) +PRIMITIVE_HANDLER_FUNCTIONS(SINT32) +PRIMITIVE_HANDLER_FUNCTIONS(ENUM) +PRIMITIVE_HANDLER_FUNCTIONS(DOUBLE) +PRIMITIVE_HANDLER_FUNCTIONS(FLOAT) +PRIMITIVE_HANDLER_FUNCTIONS(FIXED64) +PRIMITIVE_HANDLER_FUNCTIONS(FIXED32) +PRIMITIVE_HANDLER_FUNCTIONS(SFIXED64) +PRIMITIVE_HANDLER_FUNCTIONS(SFIXED32) +PRIMITIVE_HANDLER_FUNCTIONS(BOOL) +#undef PRIMITIVE_HANDLER_FUNCTIONS + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_TYPE_HANDLER_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h new file mode 100644 index 0000000000000000000000000000000000000000..b8644142ae114a01bc196b34100f72e31407afba --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h @@ -0,0 +1,424 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Authors: wink@google.com (Wink Saville), +// kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Defines MessageLite, the abstract interface implemented by all (lite +// and non-lite) protocol message objects. + +#ifndef GOOGLE_PROTOBUF_MESSAGE_LITE_H__ +#define GOOGLE_PROTOBUF_MESSAGE_LITE_H__ + +#include +#include +#include +#include +#include +#include + +namespace google { +namespace protobuf { +template +class RepeatedPtrField; +namespace io { +class CodedInputStream; +class CodedOutputStream; +class ZeroCopyInputStream; +class ZeroCopyOutputStream; +} +namespace internal { + +class RepeatedPtrFieldBase; +class WireFormatLite; +class WeakFieldMap; + +#ifndef SWIG +// We compute sizes as size_t but cache them as int. This function converts a +// computed size to a cached size. Since we don't proceed with serialization +// if the total size was > INT_MAX, it is not important what this function +// returns for inputs > INT_MAX. However this case should not error or +// GOOGLE_CHECK-fail, because the full size_t resolution is still returned from +// ByteSizeLong() and checked against INT_MAX; we can catch the overflow +// there. +inline int ToCachedSize(size_t size) { return static_cast(size); } + +// We mainly calculate sizes in terms of size_t, but some functions that +// compute sizes return "int". These int sizes are expected to always be +// positive. This function is more efficient than casting an int to size_t +// directly on 64-bit platforms because it avoids making the compiler emit a +// sign extending instruction, which we don't want and don't want to pay for. +inline size_t FromIntSize(int size) { + // Convert to unsigned before widening so sign extension is not necessary. + return static_cast(size); +} + +// For cases where a legacy function returns an integer size. We GOOGLE_DCHECK() +// that the conversion will fit within an integer; if this is false then we +// are losing information. +inline int ToIntSize(size_t size) { + GOOGLE_DCHECK_LE(size, static_cast(INT_MAX)); + return static_cast(size); +} + +// This type wraps a variable whose constructor and destructor are explicitly +// called. It is particularly useful for a global variable, without its +// constructor and destructor run on start and end of the program lifetime. +// This circumvents the initial construction order fiasco, while keeping +// the address of the empty string a compile time constant. +// +// Pay special attention to the initialization state of the object. +// 1. The object is "uninitialized" to begin with. +// 2. Call DefaultConstruct() only if the object is uninitialized. +// After the call, the object becomes "initialized". +// 3. Call get() and get_mutable() only if the object is initialized. +// 4. Call Destruct() only if the object is initialized. +// After the call, the object becomes uninitialized. 
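+// Illustrative usage (a sketch, not part of the original header):
+//
+//   static ExplicitlyConstructed<std::string> g_empty;  // zero-initialized
+//   g_empty.DefaultConstruct();            // placement-new the std::string
+//   const std::string& s = g_empty.get();  // valid between the two calls
+//   g_empty.Destruct();                    // explicit destructor call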
+template +class ExplicitlyConstructed { + public: + void DefaultConstruct() { + new (&union_) T(); + } + + void Destruct() { + get_mutable()->~T(); + } + + constexpr const T& get() const { return reinterpret_cast(union_); } + T* get_mutable() { return reinterpret_cast(&union_); } + + private: + // Prefer c++14 aligned_storage, but for compatibility this will do. + union AlignedUnion { + char space[sizeof(T)]; + int64 align_to_int64; + void* align_to_ptr; + } union_; +}; + +// Default empty string object. Don't use this directly. Instead, call +// GetEmptyString() to get the reference. +LIBPROTOBUF_EXPORT extern ExplicitlyConstructed<::std::string> fixed_address_empty_string; + +LIBPROTOBUF_EXPORT inline const ::std::string& GetEmptyStringAlreadyInited() { + return fixed_address_empty_string.get(); +} + +LIBPROTOBUF_EXPORT size_t StringSpaceUsedExcludingSelfLong(const string& str); +#endif // SWIG +} // namespace internal + +// Interface to light weight protocol messages. +// +// This interface is implemented by all protocol message objects. Non-lite +// messages additionally implement the Message interface, which is a +// subclass of MessageLite. Use MessageLite instead when you only need +// the subset of features which it supports -- namely, nothing that uses +// descriptors or reflection. You can instruct the protocol compiler +// to generate classes which implement only MessageLite, not the full +// Message interface, by adding the following line to the .proto file: +// +// option optimize_for = LITE_RUNTIME; +// +// This is particularly useful on resource-constrained systems where +// the full protocol buffers runtime library is too big. +// +// Note that on non-constrained systems (e.g. servers) when you need +// to link in lots of protocol definitions, a better way to reduce +// total code footprint is to use optimize_for = CODE_SIZE. This +// will make the generated code smaller while still supporting all the +// same features (at the expense of speed). optimize_for = LITE_RUNTIME +// is best when you only have a small number of message types linked +// into your binary, in which case the size of the protocol buffers +// runtime itself is the biggest problem. +class LIBPROTOBUF_EXPORT MessageLite { + public: + inline MessageLite() {} + virtual ~MessageLite() {} + + // Basic Operations ------------------------------------------------ + + // Get the name of this message type, e.g. "foo.bar.BazProto". + virtual string GetTypeName() const = 0; + + // Construct a new instance of the same type. Ownership is passed to the + // caller. + virtual MessageLite* New() const = 0; + + // Construct a new instance on the arena. Ownership is passed to the caller + // if arena is a NULL. Default implementation for backwards compatibility. + virtual MessageLite* New(::google::protobuf::Arena* arena) const; + + // Get the arena, if any, associated with this message. Virtual method + // required for generic operations but most arena-related operations should + // use the GetArenaNoVirtual() generated-code method. Default implementation + // to reduce code size by avoiding the need for per-type implementations + // when types do not implement arena support. + virtual ::google::protobuf::Arena* GetArena() const { return NULL; } + + // Get a pointer that may be equal to this message's arena, or may not be. 
+ // If the value returned by this method is equal to some arena pointer, then + // this message is on that arena; however, if this message is on some arena, + // this method may or may not return that arena's pointer. As a tradeoff, + // this method may be more efficient than GetArena(). The intent is to allow + // underlying representations that use e.g. tagged pointers to sometimes + // store the arena pointer directly, and sometimes in a more indirect way, + // and allow a fastpath comparison against the arena pointer when it's easy + // to obtain. + virtual void* GetMaybeArenaPointer() const { return GetArena(); } + + // Clear all fields of the message and set them to their default values. + // Clear() avoids freeing memory, assuming that any memory allocated + // to hold parts of the message will be needed again to hold the next + // message. If you actually want to free the memory used by a Message, + // you must delete it. + virtual void Clear() = 0; + + // Quickly check if all required fields have values set. + virtual bool IsInitialized() const = 0; + + // This is not implemented for Lite messages -- it just returns "(cannot + // determine missing fields for lite message)". However, it is implemented + // for full messages. See message.h. + virtual string InitializationErrorString() const; + + // If |other| is the exact same class as this, calls MergeFrom(). Otherwise, + // results are undefined (probably crash). + virtual void CheckTypeAndMergeFrom(const MessageLite& other) = 0; + + // Parsing --------------------------------------------------------- + // Methods for parsing in protocol buffer format. Most of these are + // just simple wrappers around MergeFromCodedStream(). Clear() will be + // called before merging the input. + + // Fill the message with a protocol buffer parsed from the given input + // stream. Returns false on a read error or if the input is in the wrong + // format. A successful return does not indicate the entire input is + // consumed, ensure you call ConsumedEntireMessage() to check that if + // applicable. + bool ParseFromCodedStream(io::CodedInputStream* input); + // Like ParseFromCodedStream(), but accepts messages that are missing + // required fields. + bool ParsePartialFromCodedStream(io::CodedInputStream* input); + // Read a protocol buffer from the given zero-copy input stream. If + // successful, the entire input will be consumed. + bool ParseFromZeroCopyStream(io::ZeroCopyInputStream* input); + // Like ParseFromZeroCopyStream(), but accepts messages that are missing + // required fields. + bool ParsePartialFromZeroCopyStream(io::ZeroCopyInputStream* input); + // Read a protocol buffer from the given zero-copy input stream, expecting + // the message to be exactly "size" bytes long. If successful, exactly + // this many bytes will have been consumed from the input. + bool ParseFromBoundedZeroCopyStream(io::ZeroCopyInputStream* input, int size); + // Like ParseFromBoundedZeroCopyStream(), but accepts messages that are + // missing required fields. + bool ParsePartialFromBoundedZeroCopyStream(io::ZeroCopyInputStream* input, + int size); + // Parses a protocol buffer contained in a string. Returns true on success. + // This function takes a string in the (non-human-readable) binary wire + // format, matching the encoding output by MessageLite::SerializeToString(). + // If you'd like to convert a human-readable string into a protocol buffer + // object, see google::protobuf::TextFormat::ParseFromString(). 
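+  //
+  // Illustrative round trip (editor's sketch; MyMessage stands in for any
+  // generated message type):
+  //
+  //   MyMessage m;
+  //   m.set_id(42);
+  //   string wire;
+  //   if (!m.SerializeToString(&wire)) { /* handle error */ }
+  //   MyMessage parsed;
+  //   if (!parsed.ParseFromString(wire)) { /* handle malformed input */ }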
+  bool ParseFromString(const string& data);
+  // Like ParseFromString(), but accepts messages that are missing
+  // required fields.
+  bool ParsePartialFromString(const string& data);
+  // Parse a protocol buffer contained in an array of bytes.
+  bool ParseFromArray(const void* data, int size);
+  // Like ParseFromArray(), but accepts messages that are missing
+  // required fields.
+  bool ParsePartialFromArray(const void* data, int size);
+
+
+  // Reads a protocol buffer from the stream and merges it into this
+  // Message. Singular fields read from the input replace what is
+  // already in the Message and repeated fields are appended to those
+  // already present.
+  //
+  // It is the responsibility of the caller to call input->LastTagWas()
+  // (for groups) or input->ConsumedEntireMessage() (for non-groups) after
+  // this returns to verify that the message's end was delimited correctly.
+  //
+  // ParseFromCodedStream() is implemented as Clear() followed by
+  // MergeFromCodedStream().
+  bool MergeFromCodedStream(io::CodedInputStream* input);
+
+  // Like MergeFromCodedStream(), but succeeds even if required fields are
+  // missing in the input.
+  //
+  // MergeFromCodedStream() is just implemented as MergePartialFromCodedStream()
+  // followed by IsInitialized().
+  virtual bool MergePartialFromCodedStream(io::CodedInputStream* input) = 0;
+
+
+  // Serialization ---------------------------------------------------
+  // Methods for serializing in protocol buffer format. Most of these
+  // are just simple wrappers around ByteSize() and SerializeWithCachedSizes().
+
+  // Write a protocol buffer of this message to the given output. Returns
+  // false on a write error. If the message is missing required fields,
+  // this may GOOGLE_CHECK-fail.
+  bool SerializeToCodedStream(io::CodedOutputStream* output) const;
+  // Like SerializeToCodedStream(), but allows missing required fields.
+  bool SerializePartialToCodedStream(io::CodedOutputStream* output) const;
+  // Write the message to the given zero-copy output stream. All required
+  // fields must be set.
+  bool SerializeToZeroCopyStream(io::ZeroCopyOutputStream* output) const;
+  // Like SerializeToZeroCopyStream(), but allows missing required fields.
+  bool SerializePartialToZeroCopyStream(io::ZeroCopyOutputStream* output) const;
+  // Serialize the message and store it in the given string. All required
+  // fields must be set.
+  bool SerializeToString(string* output) const;
+  // Like SerializeToString(), but allows missing required fields.
+  bool SerializePartialToString(string* output) const;
+  // Serialize the message and store it in the given byte array. All required
+  // fields must be set.
+  bool SerializeToArray(void* data, int size) const;
+  // Like SerializeToArray(), but allows missing required fields.
+  bool SerializePartialToArray(void* data, int size) const;
+
+  // Make a string encoding the message. This is equivalent to calling
+  // SerializeToString() on a string and using that. Returns the empty
+  // string if SerializeToString() would have returned an error.
+  // Note: If you intend to generate many such strings, you may
+  // reduce heap fragmentation by instead re-using the same string
+  // object with calls to SerializeToString().
+  string SerializeAsString() const;
+  // Like SerializeAsString(), but allows missing required fields.
+  string SerializePartialAsString() const;
+
+  // Like SerializeToString(), but appends the data to the string's existing
+  // contents. All required fields must be set.
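+  //
+  // Editor's sketch of a typical AppendToString() use: writing several
+  // messages into one buffer with explicit length prefixes, since the wire
+  // format is not self-delimiting (WriteLengthPrefix is hypothetical):
+  //
+  //   string buf;
+  //   for (const MyMessage& m : msgs) {
+  //     WriteLengthPrefix(m.ByteSizeLong(), &buf);
+  //     m.AppendToString(&buf);
+  //   }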
+ bool AppendToString(string* output) const; + // Like AppendToString(), but allows missing required fields. + bool AppendPartialToString(string* output) const; + + // Computes the serialized size of the message. This recursively calls + // ByteSizeLong() on all embedded messages. + // + // ByteSizeLong() is generally linear in the number of fields defined for the + // proto. + virtual size_t ByteSizeLong() const = 0; + + // Legacy ByteSize() API. + PROTOBUF_RUNTIME_DEPRECATED("Please use ByteSizeLong() instead") + int ByteSize() const { + return internal::ToIntSize(ByteSizeLong()); + } + + // Serializes the message without recomputing the size. The message must not + // have changed since the last call to ByteSize(), and the value returned by + // ByteSize must be non-negative. Otherwise the results are undefined. + virtual void SerializeWithCachedSizes( + io::CodedOutputStream* output) const; + + // Functions below here are not part of the public interface. It isn't + // enforced, but they should be treated as private, and will be private + // at some future time. Unfortunately the implementation of the "friend" + // keyword in GCC is broken at the moment, but we expect it will be fixed. + + // Like SerializeWithCachedSizes, but writes directly to *target, returning + // a pointer to the byte immediately after the last byte written. "target" + // must point at a byte array of at least ByteSize() bytes. Whether to use + // deterministic serialization, e.g., maps in sorted order, is determined by + // CodedOutputStream::IsDefaultSerializationDeterministic(). + virtual uint8* SerializeWithCachedSizesToArray(uint8* target) const; + + // Returns the result of the last call to ByteSize(). An embedded message's + // size is needed both to serialize it (because embedded messages are + // length-delimited) and to compute the outer message's size. Caching + // the size avoids computing it multiple times. + // + // ByteSize() does not automatically use the cached size when available + // because this would require invalidating it every time the message was + // modified, which would be too hard and expensive. (E.g. if a deeply-nested + // sub-message is changed, all of its parents' cached sizes would need to be + // invalidated, which is too much work for an otherwise inlined setter + // method.) + virtual int GetCachedSize() const = 0; + + virtual uint8* InternalSerializeWithCachedSizesToArray(bool deterministic, + uint8* target) const; + + protected: + // CastToBase allows generated code to cast a RepeatedPtrField to + // RepeatedPtrFieldBase. We try to restrict access to RepeatedPtrFieldBase + // because it is an implementation detail that user code should not access + // directly. 
+ template + static ::google::protobuf::internal::RepeatedPtrFieldBase* CastToBase( + ::google::protobuf::RepeatedPtrField* repeated) { + return repeated; + } + template + static const ::google::protobuf::internal::RepeatedPtrFieldBase& CastToBase( + const ::google::protobuf::RepeatedPtrField& repeated) { + return repeated; + } + + template + static T* CreateMaybeMessage(Arena* arena) { + return Arena::CreateMaybeMessage(arena); + } + + private: + // TODO(gerbens) make this a pure abstract function + virtual const void* InternalGetTable() const { return NULL; } + + friend class internal::WireFormatLite; + friend class Message; + friend class internal::WeakFieldMap; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MessageLite); +}; + +namespace internal { + +extern bool LIBPROTOBUF_EXPORT proto3_preserve_unknown_; + +// DO NOT USE: For migration only. Will be removed when Proto3 defaults to +// preserve unknowns. +inline bool GetProto3PreserveUnknownsDefault() { + return proto3_preserve_unknown_; +} + +// DO NOT USE: For migration only. Will be removed when Proto3 defaults to +// preserve unknowns. +void LIBPROTOBUF_EXPORT SetProto3PreserveUnknownsDefault(bool preserve); +} // namespace internal + + +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_MESSAGE_LITE_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/metadata_lite.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/metadata_lite.h new file mode 100644 index 0000000000000000000000000000000000000000..454d088c965e8c95a327db9d8c405caa0fe48d15 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/metadata_lite.h @@ -0,0 +1,224 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef GOOGLE_PROTOBUF_METADATA_LITE_H__ +#define GOOGLE_PROTOBUF_METADATA_LITE_H__ + +#include +#include +#include +#include + +namespace google { +namespace protobuf { +namespace internal { + +// This is the representation for messages that support arena allocation. It +// uses a tagged pointer to either store the Arena pointer, if there are no +// unknown fields, or a pointer to a block of memory with both the Arena pointer +// and the UnknownFieldSet, if there are unknown fields. This optimization +// allows for "zero-overhead" storage of the Arena pointer, relative to the +// above baseline implementation. +// +// The tagged pointer uses the LSB to disambiguate cases, and uses bit 0 == 0 to +// indicate an arena pointer and bit 0 == 1 to indicate a UFS+Arena-container +// pointer. +template +class InternalMetadataWithArenaBase { + public: + InternalMetadataWithArenaBase() : ptr_(NULL) {} + explicit InternalMetadataWithArenaBase(Arena* arena) : ptr_(arena) {} + + ~InternalMetadataWithArenaBase() { + if (have_unknown_fields() && arena() == NULL) { + delete PtrValue(); + } + ptr_ = NULL; + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE const T& unknown_fields() const { + if (GOOGLE_PREDICT_FALSE(have_unknown_fields())) { + return PtrValue()->unknown_fields; + } else { + return Derived::default_instance(); + } + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* mutable_unknown_fields() { + if (GOOGLE_PREDICT_TRUE(have_unknown_fields())) { + return &PtrValue()->unknown_fields; + } else { + return mutable_unknown_fields_slow(); + } + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE Arena* arena() const { + if (GOOGLE_PREDICT_FALSE(have_unknown_fields())) { + return PtrValue()->arena; + } else { + return PtrValue(); + } + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool have_unknown_fields() const { + return PtrTag() == kTagContainer; + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Swap(Derived* other) { + // Semantics here are that we swap only the unknown fields, not the arena + // pointer. We cannot simply swap ptr_ with other->ptr_ because we need to + // maintain our own arena ptr. Also, our ptr_ and other's ptr_ may be in + // different states (direct arena pointer vs. container with UFS) so we + // cannot simply swap ptr_ and then restore the arena pointers. We reuse + // UFS's swap implementation instead. + if (have_unknown_fields() || other->have_unknown_fields()) { + static_cast(this)->DoSwap(other->mutable_unknown_fields()); + } + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void MergeFrom(const Derived& other) { + if (other.have_unknown_fields()) { + static_cast(this)->DoMergeFrom(other.unknown_fields()); + } + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Clear() { + if (have_unknown_fields()) { + static_cast(this)->DoClear(); + } + } + + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void* raw_arena_ptr() const { + return ptr_; + } + + private: + void* ptr_; + + // Tagged pointer implementation. + enum { + // ptr_ is an Arena*. + kTagArena = 0, + // ptr_ is a Container*. + kTagContainer = 1, + }; + static const intptr_t kPtrTagMask = 1; + static const intptr_t kPtrValueMask = ~kPtrTagMask; + + // Accessors for pointer tag and pointer value. + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE int PtrTag() const { + return reinterpret_cast(ptr_) & kPtrTagMask; + } + + template U* PtrValue() const { + return reinterpret_cast( + reinterpret_cast(ptr_) & kPtrValueMask); + } + + // If ptr_'s tag is kTagContainer, it points to an instance of this struct. 
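+  // Editor's illustration of the tag arithmetic above (1-bit tag in the
+  // LSB; not part of the upstream header):
+  //
+  //   void* p = raw_arena_ptr();
+  //   if ((reinterpret_cast<intptr_t>(p) & kPtrTagMask) == kTagContainer) {
+  //     Container* c = reinterpret_cast<Container*>(
+  //         reinterpret_cast<intptr_t>(p) & kPtrValueMask);
+  //     // c->unknown_fields and c->arena are both valid here.
+  //   } else {
+  //     Arena* a = reinterpret_cast<Arena*>(p);  // may be NULL (heap-owned)
+  //   }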
+ struct Container { + T unknown_fields; + Arena* arena; + }; + + GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE T* mutable_unknown_fields_slow() { + Arena* my_arena = arena(); + Container* container = Arena::Create(my_arena); + // Two-step assignment works around a bug in clang's static analyzer: + // https://bugs.llvm.org/show_bug.cgi?id=34198. + ptr_ = container; + ptr_ = reinterpret_cast( + reinterpret_cast(ptr_) | kTagContainer); + container->arena = my_arena; + return &(container->unknown_fields); + } +}; + +// We store unknown fields as a string right now, because there is currently no +// good interface for reading unknown fields into an ArenaString. We may want +// to revisit this to allow unknown fields to be parsed onto the Arena. +class InternalMetadataWithArenaLite + : public InternalMetadataWithArenaBase { + public: + InternalMetadataWithArenaLite() {} + + explicit InternalMetadataWithArenaLite(Arena* arena) + : InternalMetadataWithArenaBase(arena) {} + + void DoSwap(string* other) { + mutable_unknown_fields()->swap(*other); + } + + void DoMergeFrom(const string& other) { + mutable_unknown_fields()->append(other); + } + + void DoClear() { + mutable_unknown_fields()->clear(); + } + + static const string& default_instance() { + return GetEmptyStringAlreadyInited(); + } +}; + +// This helper RAII class is needed to efficiently parse unknown fields. We +// should only call mutable_unknown_fields if there are actual unknown fields. +// The obvious thing to just use a stack string and swap it at the end of the +// parse won't work, because the destructor of StringOutputStream needs to be +// called before we can modify the string (it check-fails). Using +// LiteUnknownFieldSetter setter(&_internal_metadata_); +// StringOutputStream stream(setter.buffer()); +// guarantees that the string is only swapped after stream is destroyed. +class LIBPROTOBUF_EXPORT LiteUnknownFieldSetter { + public: + explicit LiteUnknownFieldSetter(InternalMetadataWithArenaLite* metadata) + : metadata_(metadata) { + if (metadata->have_unknown_fields()) { + buffer_.swap(*metadata->mutable_unknown_fields()); + } + } + ~LiteUnknownFieldSetter() { + if (!buffer_.empty()) metadata_->mutable_unknown_fields()->swap(buffer_); + } + string* buffer() { return &buffer_; } + + private: + InternalMetadataWithArenaLite* metadata_; + string buffer_; +}; + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_METADATA_LITE_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/repeated_field.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/repeated_field.h new file mode 100644 index 0000000000000000000000000000000000000000..b47ea99484e0caed08cdb5e95cb27904bf9a2a7d --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/repeated_field.h @@ -0,0 +1,2630 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// RepeatedField and RepeatedPtrField are used by generated protocol message +// classes to manipulate repeated fields. These classes are very similar to +// STL's vector, but include a number of optimizations found to be useful +// specifically in the case of Protocol Buffers. RepeatedPtrField is +// particularly different from STL vector as it manages ownership of the +// pointers that it contains. +// +// Typically, clients should not need to access RepeatedField objects directly, +// but should instead use the accessor functions generated automatically by the +// protocol compiler. + +#ifndef GOOGLE_PROTOBUF_REPEATED_FIELD_H__ +#define GOOGLE_PROTOBUF_REPEATED_FIELD_H__ + +#ifdef _MSC_VER +// This is required for min/max on VS2013 only. +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// Forward-declare these so that we can make them friends. +namespace google { +namespace upb { +namespace google_opensource { +class GMR_Handlers; +} // namespace google_opensource +} // namespace upb + +namespace protobuf { + +class Message; + +namespace internal { + +class MergePartialFromCodedStreamHelper; + +static const int kMinRepeatedFieldAllocationSize = 4; + +// A utility function for logging that doesn't need any template types. +void LogIndexOutOfBounds(int index, int size); + +template +inline int CalculateReserve(Iter begin, Iter end, std::forward_iterator_tag) { + return static_cast(std::distance(begin, end)); +} + +template +inline int CalculateReserve(Iter /*begin*/, Iter /*end*/, + std::input_iterator_tag /*unused*/) { + return -1; +} + +template +inline int CalculateReserve(Iter begin, Iter end) { + typedef typename std::iterator_traits::iterator_category Category; + return CalculateReserve(begin, end, Category()); +} +} // namespace internal + + +// RepeatedField is used to represent repeated fields of a primitive type (in +// other words, everything except strings and nested Messages). Most users will +// not ever use a RepeatedField directly; they will use the get-by-index, +// set-by-index, and add accessors that are generated for all repeated fields. 
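+//
+// Editor's sketch of direct use (normally one goes through the generated
+// accessors such as msg.add_values(...)):
+//
+//   RepeatedField<int> field;
+//   field.Add(1);
+//   field.Add(2);
+//   field.Set(0, 10);
+//   int first = field.Get(0);        // 10
+//   for (int v : field) { ... }      // iterates 10, 2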
+template <typename Element>
+class RepeatedField final {
+ public:
+  RepeatedField();
+  explicit RepeatedField(Arena* arena);
+  RepeatedField(const RepeatedField& other);
+  template <typename Iter>
+  RepeatedField(Iter begin, const Iter& end);
+  ~RepeatedField();
+
+  RepeatedField& operator=(const RepeatedField& other);
+
+  RepeatedField(RepeatedField&& other) noexcept;
+  RepeatedField& operator=(RepeatedField&& other) noexcept;
+
+  bool empty() const;
+  int size() const;
+
+  const Element& Get(int index) const;
+  Element* Mutable(int index);
+
+  const Element& operator[](int index) const { return Get(index); }
+  Element& operator[](int index) { return *Mutable(index); }
+
+  void Set(int index, const Element& value);
+  void Add(const Element& value);
+  // Appends a new element and returns a pointer to it.
+  // The new element is uninitialized if |Element| is a POD type.
+  Element* Add();
+  // Remove the last element in the array.
+  void RemoveLast();
+
+  // Extract elements with indices in "[start .. start+num-1]".
+  // Copy them into "elements[0 .. num-1]" if "elements" is not NULL.
+  // Caution: implementation also moves elements with indices [start+num ..].
+  // Calling this routine inside a loop can cause quadratic behavior.
+  void ExtractSubrange(int start, int num, Element* elements);
+
+  void Clear();
+  void MergeFrom(const RepeatedField& other);
+  void CopyFrom(const RepeatedField& other);
+
+  // Reserve space to expand the field to at least the given size. If the
+  // array is grown, it will always be at least doubled in size.
+  void Reserve(int new_size);
+
+  // Resize the RepeatedField to a new, smaller size. This is O(1).
+  void Truncate(int new_size);
+
+  void AddAlreadyReserved(const Element& value);
+  // Appends a new element and returns a pointer to it.
+  // The new element is uninitialized if |Element| is a POD type.
+  // Should be called only if Capacity() > Size().
+  Element* AddAlreadyReserved();
+  Element* AddNAlreadyReserved(int elements);
+  int Capacity() const;
+
+  // Like STL resize. Uses value to fill appended elements.
+  // Like Truncate() if new_size <= size(), otherwise this is
+  // O(new_size - size()).
+  void Resize(int new_size, const Element& value);
+
+  // Gets the underlying array. This pointer is possibly invalidated by
+  // any add or remove operation.
+  Element* mutable_data();
+  const Element* data() const;
+
+  // Swap entire contents with "other". If they are on separate arenas, the
+  // data is copied between them.
+  void Swap(RepeatedField* other);
+
+  // Swap entire contents with "other". Should be called only if the caller can
+  // guarantee that both repeated fields are on the same arena or are on the
+  // heap. Swapping between different arenas is disallowed and caught by a
+  // GOOGLE_DCHECK (see API docs for details).
+  void UnsafeArenaSwap(RepeatedField* other);
+
+  // Swap two elements.
+ void SwapElements(int index1, int index2); + + // STL-like iterator support + typedef Element* iterator; + typedef const Element* const_iterator; + typedef Element value_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef int size_type; + typedef ptrdiff_t difference_type; + + iterator begin(); + const_iterator begin() const; + const_iterator cbegin() const; + iterator end(); + const_iterator end() const; + const_iterator cend() const; + + // Reverse iterator support + typedef std::reverse_iterator const_reverse_iterator; + typedef std::reverse_iterator reverse_iterator; + reverse_iterator rbegin() { + return reverse_iterator(end()); + } + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + reverse_iterator rend() { + return reverse_iterator(begin()); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // Returns the number of bytes used by the repeated field, excluding + // sizeof(*this) + size_t SpaceUsedExcludingSelfLong() const; + + int SpaceUsedExcludingSelf() const { + return internal::ToIntSize(SpaceUsedExcludingSelfLong()); + } + + // Removes the element referenced by position. + // + // Returns an iterator to the element immediately following the removed + // element. + // + // Invalidates all iterators at or after the removed element, including end(). + iterator erase(const_iterator position); + + // Removes the elements in the range [first, last). + // + // Returns an iterator to the element immediately following the removed range. + // + // Invalidates all iterators at or after the removed range, including end(). + iterator erase(const_iterator first, const_iterator last); + + // Get the Arena on which this RepeatedField stores its elements. + ::google::protobuf::Arena* GetArena() const { + return GetArenaNoVirtual(); + } + + // For internal use only. + // + // This is public due to it being called by generated code. + inline void InternalSwap(RepeatedField* other); + + private: + static const int kInitialSize = 0; + // A note on the representation here (see also comment below for + // RepeatedPtrFieldBase's struct Rep): + // + // We maintain the same sizeof(RepeatedField) as before we added arena support + // so that we do not degrade performance by bloating memory usage. Directly + // adding an arena_ element to RepeatedField is quite costly. By using + // indirection in this way, we keep the same size when the RepeatedField is + // empty (common case), and add only an 8-byte header to the elements array + // when non-empty. We make sure to place the size fields directly in the + // RepeatedField class to avoid costly cache misses due to the indirection. + int current_size_; + int total_size_; + struct Rep { + Arena* arena; + Element elements[1]; + }; + // We can not use sizeof(Rep) - sizeof(Element) due to the trailing padding on + // the struct. We can not use sizeof(Arena*) as well because there might be + // a "gap" after the field arena and before the field elements (e.g., when + // Element is double and pointer is 32bit). + static const size_t kRepHeaderSize; + + // We reuse the Rep* for an Arena* when total_size == 0, to avoid having to do + // an allocation in the constructor when we have an Arena. + union Pointer { + Pointer(Arena* a) : arena(a) {} + Arena* arena; // When total_size_ == 0. + Rep* rep; // When total_size_ != 0. 
+ } ptr_; + + Rep* rep() const { + GOOGLE_DCHECK_GT(total_size_, 0); + return ptr_.rep; + } + + friend class Arena; + typedef void InternalArenaConstructable_; + + + // Move the contents of |from| into |to|, possibly clobbering |from| in the + // process. For primitive types this is just a memcpy(), but it could be + // specialized for non-primitive types to, say, swap each element instead. + void MoveArray(Element* to, Element* from, int size); + + // Copy the elements of |from| into |to|. + void CopyArray(Element* to, const Element* from, int size); + + // Internal helper expected by Arena methods. + inline Arena* GetArenaNoVirtual() const { + return (total_size_ == 0) ? ptr_.arena : ptr_.rep->arena; + } + + // Internal helper to delete all elements and deallocate the storage. + // If Element has a trivial destructor (for example, if it's a fundamental + // type, like int32), the loop will be removed by the optimizer. + void InternalDeallocate(Rep* rep, int size) { + if (rep != NULL) { + Element* e = &rep->elements[0]; + Element* limit = &rep->elements[size]; + for (; e < limit; e++) { + e->~Element(); + } + if (rep->arena == NULL) { +#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation) + const size_t bytes = size * sizeof(*e) + kRepHeaderSize; + ::operator delete(static_cast(rep), bytes); +#else + ::operator delete(static_cast(rep)); +#endif + } + } + } + + friend class internal::WireFormatLite; + const Element* unsafe_data() const; +}; + +template +const size_t RepeatedField::kRepHeaderSize = + reinterpret_cast(&reinterpret_cast(16)->elements[0]) - 16; + +namespace internal { +template class RepeatedPtrIterator; +template class RepeatedPtrOverPtrsIterator; +} // namespace internal + +namespace internal { + +// This is a helper template to copy an array of elements efficiently when they +// have a trivial copy constructor, and correctly otherwise. This really +// shouldn't be necessary, but our compiler doesn't optimize std::copy very +// effectively. +template ::value> +struct ElementCopier { + void operator()(Element* to, const Element* from, int array_size); +}; + +} // namespace internal + +namespace internal { + +// type-traits helper for RepeatedPtrFieldBase: we only want to invoke +// arena-related "copy if on different arena" behavior if the necessary methods +// exist on the contained type. In particular, we rely on MergeFrom() existing +// as a general proxy for the fact that a copy will work, and we also provide a +// specific override for string*. +template +struct TypeImplementsMergeBehaviorProbeForMergeFrom { + typedef char HasMerge; + typedef long HasNoMerge; + + // We accept either of: + // - void MergeFrom(const T& other) + // - bool MergeFrom(const T& other) + // + // We mangle these names a bit to avoid compatibility issues in 'unclean' + // include environments that may have, e.g., "#define test ..." (yes, this + // exists). + template + struct CheckType; + template static HasMerge Check( + CheckType*); + template static HasMerge Check( + CheckType*); + template static HasNoMerge Check(...); + + // Resolves to either std::true_type or std::false_type. + typedef std::integral_constant(0)) == sizeof(HasMerge))> type; +}; + +template +struct TypeImplementsMergeBehavior : + TypeImplementsMergeBehaviorProbeForMergeFrom {}; + + +template <> +struct TypeImplementsMergeBehavior< ::std::string> { + typedef std::true_type type; +}; + +// This is the common base class for RepeatedPtrFields. It deals only in void* +// pointers. 
Users should not use this interface directly. +// +// The methods of this interface correspond to the methods of RepeatedPtrField, +// but may have a template argument called TypeHandler. Its signature is: +// class TypeHandler { +// public: +// typedef MyType Type; +// // WeakType is almost always the same as MyType, but we use it in +// // ImplicitWeakTypeHandler. +// typedef MyType WeakType; +// static Type* New(); +// static WeakType* NewFromPrototype(const WeakType* prototype, +// ::google::protobuf::Arena* arena); +// static void Delete(Type*); +// static void Clear(Type*); +// static void Merge(const Type& from, Type* to); +// +// // Only needs to be implemented if SpaceUsedExcludingSelf() is called. +// static int SpaceUsedLong(const Type&); +// }; +class LIBPROTOBUF_EXPORT RepeatedPtrFieldBase { + protected: + RepeatedPtrFieldBase(); + explicit RepeatedPtrFieldBase(::google::protobuf::Arena* arena); + ~RepeatedPtrFieldBase() {} + + // Must be called from destructor. + template + void Destroy(); + + bool empty() const; + int size() const; + + template + typename TypeHandler::Type* Mutable(int index); + template + void Delete(int index); + template + typename TypeHandler::Type* Add(typename TypeHandler::Type* prototype = NULL); + + public: + // The next few methods are public so that they can be called from generated + // code when implicit weak fields are used, but they should never be called by + // application code. + + template + const typename TypeHandler::WeakType& Get(int index) const; + + // Creates and adds an element using the given prototype, without introducing + // a link-time dependency on the concrete message type. This method is used to + // implement implicit weak fields. The prototype may be NULL, in which case an + // ImplicitWeakMessage will be used as a placeholder. + google::protobuf::MessageLite* AddWeak(const google::protobuf::MessageLite* prototype); + + template + void Clear(); + + template + void MergeFrom(const RepeatedPtrFieldBase& other); + + inline void InternalSwap(RepeatedPtrFieldBase* other); + + protected: + template + void Add(typename TypeHandler::Type&& value, + std::enable_if* dummy = NULL); + + template + void RemoveLast(); + template + void CopyFrom(const RepeatedPtrFieldBase& other); + + void CloseGap(int start, int num); + + void Reserve(int new_size); + + int Capacity() const; + + // Used for constructing iterators. + void* const* raw_data() const; + void** raw_mutable_data() const; + + template + typename TypeHandler::Type** mutable_data(); + template + const typename TypeHandler::Type* const* data() const; + + template GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE + void Swap(RepeatedPtrFieldBase* other); + + void SwapElements(int index1, int index2); + + template + size_t SpaceUsedExcludingSelfLong() const; + + // Advanced memory management -------------------------------------- + + // Like Add(), but if there are no cleared objects to use, returns NULL. + template + typename TypeHandler::Type* AddFromCleared(); + + template + void AddAllocated(typename TypeHandler::Type* value) { + typename TypeImplementsMergeBehavior::type t; + AddAllocatedInternal(value, t); + } + + template + void UnsafeArenaAddAllocated(typename TypeHandler::Type* value); + + template + typename TypeHandler::Type* ReleaseLast() { + typename TypeImplementsMergeBehavior::type t; + return ReleaseLastInternal(t); + } + + // Releases last element and returns it, but does not do out-of-arena copy. + // And just returns the raw pointer to the contained element in the arena. 
+ template + typename TypeHandler::Type* UnsafeArenaReleaseLast(); + + int ClearedCount() const; + template + void AddCleared(typename TypeHandler::Type* value); + template + typename TypeHandler::Type* ReleaseCleared(); + + template + void AddAllocatedInternal(typename TypeHandler::Type* value, std::true_type); + template + void AddAllocatedInternal(typename TypeHandler::Type* value, std::false_type); + + template GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE + void AddAllocatedSlowWithCopy(typename TypeHandler::Type* value, + Arena* value_arena, + Arena* my_arena); + template GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE + void AddAllocatedSlowWithoutCopy(typename TypeHandler::Type* value); + + template + typename TypeHandler::Type* ReleaseLastInternal(std::true_type); + template + typename TypeHandler::Type* ReleaseLastInternal(std::false_type); + + template GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE + void SwapFallback(RepeatedPtrFieldBase* other); + + inline Arena* GetArenaNoVirtual() const { + return arena_; + } + + private: + static const int kInitialSize = 0; + // A few notes on internal representation: + // + // We use an indirected approach, with struct Rep, to keep + // sizeof(RepeatedPtrFieldBase) equivalent to what it was before arena support + // was added, namely, 3 8-byte machine words on x86-64. An instance of Rep is + // allocated only when the repeated field is non-empty, and it is a + // dynamically-sized struct (the header is directly followed by elements[]). + // We place arena_ and current_size_ directly in the object to avoid cache + // misses due to the indirection, because these fields are checked frequently. + // Placing all fields directly in the RepeatedPtrFieldBase instance costs + // significant performance for memory-sensitive workloads. + Arena* arena_; + int current_size_; + int total_size_; + struct Rep { + int allocated_size; + void* elements[1]; + }; + static const size_t kRepHeaderSize = sizeof(Rep) - sizeof(void*); + // Contains arena ptr and the elements array. We also keep the invariant that + // if rep_ is NULL, then arena is NULL. + Rep* rep_; + + template + static inline typename TypeHandler::Type* cast(void* element) { + return reinterpret_cast(element); + } + template + static inline const typename TypeHandler::Type* cast(const void* element) { + return reinterpret_cast(element); + } + + // Non-templated inner function to avoid code duplication. Takes a function + // pointer to the type-specific (templated) inner allocate/merge loop. + void MergeFromInternal( + const RepeatedPtrFieldBase& other, + void (RepeatedPtrFieldBase::*inner_loop)(void**, void**, int, int)); + + template + void MergeFromInnerLoop( + void** our_elems, void** other_elems, int length, int already_allocated); + + // Internal helper: extend array space if necessary to contain |extend_amount| + // more elements, and return a pointer to the element immediately following + // the old list of elements. This interface factors out common behavior from + // Reserve() and MergeFrom() to reduce code size. |extend_amount| must be > 0. + void** InternalExtend(int extend_amount); + + // The reflection implementation needs to call protected methods directly, + // reinterpreting pointers as being to Message instead of a specific Message + // subclass. 
+ friend class GeneratedMessageReflection; + + // ExtensionSet stores repeated message extensions as + // RepeatedPtrField, but non-lite ExtensionSets need to implement + // SpaceUsedLong(), and thus need to call SpaceUsedExcludingSelfLong() + // reinterpreting MessageLite as Message. ExtensionSet also needs to make use + // of AddFromCleared(), which is not part of the public interface. + friend class ExtensionSet; + + // The MapFieldBase implementation needs to call protected methods directly, + // reinterpreting pointers as being to Message instead of a specific Message + // subclass. + friend class MapFieldBase; + + // The table-driven MergePartialFromCodedStream implementation needs to + // operate on RepeatedPtrField. + friend class MergePartialFromCodedStreamHelper; + + // To parse directly into a proto2 generated class, the upb class GMR_Handlers + // needs to be able to modify a RepeatedPtrFieldBase directly. + friend class upb::google_opensource::GMR_Handlers; + + friend class AccessorHelper; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(RepeatedPtrFieldBase); +}; + +template +class GenericTypeHandler { + public: + typedef GenericType Type; + typedef GenericType WeakType; + static const bool Moveable = false; + + static inline GenericType* New(Arena* arena) { + return ::google::protobuf::Arena::CreateMaybeMessage(arena); + } + static inline GenericType* NewFromPrototype( + const GenericType* prototype, ::google::protobuf::Arena* arena = NULL); + static inline void Delete(GenericType* value, Arena* arena) { + if (arena == NULL) { + delete value; + } + } + static inline ::google::protobuf::Arena* GetArena(GenericType* value) { + return ::google::protobuf::Arena::GetArena(value); + } + static inline void* GetMaybeArenaPointer(GenericType* value) { + return ::google::protobuf::Arena::GetArena(value); + } + + static inline void Clear(GenericType* value) { value->Clear(); } + GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE + static void Merge(const GenericType& from, GenericType* to); + static inline size_t SpaceUsedLong(const GenericType& value) { + return value.SpaceUsedLong(); + } +}; + +template +GenericType* GenericTypeHandler::NewFromPrototype( + const GenericType* /* prototype */, ::google::protobuf::Arena* arena) { + return New(arena); +} +template +void GenericTypeHandler::Merge(const GenericType& from, + GenericType* to) { + to->MergeFrom(from); +} + +// NewFromPrototype() and Merge() are not defined inline here, as we will need +// to do a virtual function dispatch anyways to go from Message* to call +// New/Merge. +template<> +MessageLite* GenericTypeHandler::NewFromPrototype( + const MessageLite* prototype, google::protobuf::Arena* arena); +template<> +inline google::protobuf::Arena* GenericTypeHandler::GetArena( + MessageLite* value) { + return value->GetArena(); +} +template<> +inline void* GenericTypeHandler::GetMaybeArenaPointer( + MessageLite* value) { + return value->GetMaybeArenaPointer(); +} +template <> +void GenericTypeHandler::Merge(const MessageLite& from, + MessageLite* to); +template<> +inline void GenericTypeHandler::Clear(string* value) { + value->clear(); +} +template<> +void GenericTypeHandler::Merge(const string& from, + string* to); + +// Declarations of the specialization as we cannot define them here, as the +// header that defines ProtocolMessage depends on types defined in this header. 
+#define DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES(TypeName) \ + template<> \ + TypeName* GenericTypeHandler::NewFromPrototype( \ + const TypeName* prototype, google::protobuf::Arena* arena); \ + template<> \ + google::protobuf::Arena* GenericTypeHandler::GetArena( \ + TypeName* value); \ + template<> \ + void* GenericTypeHandler::GetMaybeArenaPointer( \ + TypeName* value); + +// Message specialization bodies defined in message.cc. This split is necessary +// to allow proto2-lite (which includes this header) to be independent of +// Message. +DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES(Message) + + +#undef DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES + +class StringTypeHandler { + public: + typedef string Type; + typedef string WeakType; + static const bool Moveable = std::is_move_constructible::value && + std::is_move_assignable::value; + + static inline string* New(Arena* arena) { + return Arena::Create(arena); + } + static inline string* New(Arena* arena, string&& value) { + return Arena::Create(arena, std::move(value)); + } + static inline string* NewFromPrototype(const string*, + ::google::protobuf::Arena* arena) { + return New(arena); + } + static inline ::google::protobuf::Arena* GetArena(string*) { + return NULL; + } + static inline void* GetMaybeArenaPointer(string* /* value */) { + return NULL; + } + static inline void Delete(string* value, Arena* arena) { + if (arena == NULL) { + delete value; + } + } + static inline void Clear(string* value) { value->clear(); } + static inline void Merge(const string& from, string* to) { *to = from; } + static size_t SpaceUsedLong(const string& value) { + return sizeof(value) + StringSpaceUsedExcludingSelfLong(value); + } +}; + +} // namespace internal + +// RepeatedPtrField is like RepeatedField, but used for repeated strings or +// Messages. +template +class RepeatedPtrField final : private internal::RepeatedPtrFieldBase { + public: + RepeatedPtrField(); + explicit RepeatedPtrField(::google::protobuf::Arena* arena); + + RepeatedPtrField(const RepeatedPtrField& other); + template + RepeatedPtrField(Iter begin, const Iter& end); + ~RepeatedPtrField(); + + RepeatedPtrField& operator=(const RepeatedPtrField& other); + + RepeatedPtrField(RepeatedPtrField&& other) noexcept; + RepeatedPtrField& operator=(RepeatedPtrField&& other) noexcept; + + bool empty() const; + int size() const; + + const Element& Get(int index) const; + Element* Mutable(int index); + Element* Add(); + void Add(Element&& value); + + const Element& operator[](int index) const { return Get(index); } + Element& operator[](int index) { return *Mutable(index); } + + // Remove the last element in the array. + // Ownership of the element is retained by the array. + void RemoveLast(); + + // Delete elements with indices in the range [start .. start+num-1]. + // Caution: implementation moves all elements with indices [start+num .. ]. + // Calling this routine inside a loop can cause quadratic behavior. + void DeleteSubrange(int start, int num); + + void Clear(); + void MergeFrom(const RepeatedPtrField& other); + void CopyFrom(const RepeatedPtrField& other); + + // Reserve space to expand the field to at least the given size. This only + // resizes the pointer array; it doesn't allocate any objects. If the + // array is grown, it will always be at least doubled in size. + void Reserve(int new_size); + + int Capacity() const; + + // Gets the underlying array. This pointer is possibly invalidated by + // any add or remove operation. 
+ Element** mutable_data(); + const Element* const* data() const; + + // Swap entire contents with "other". If they are on separate arenas, then + // copies data. + void Swap(RepeatedPtrField* other); + + // Swap entire contents with "other". Caller should guarantee that either both + // fields are on the same arena or both are on the heap. Swapping between + // different arenas with this function is disallowed and is caught via + // GOOGLE_DCHECK. + void UnsafeArenaSwap(RepeatedPtrField* other); + + // Swap two elements. + void SwapElements(int index1, int index2); + + // STL-like iterator support + typedef internal::RepeatedPtrIterator iterator; + typedef internal::RepeatedPtrIterator const_iterator; + typedef Element value_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef int size_type; + typedef ptrdiff_t difference_type; + + iterator begin(); + const_iterator begin() const; + const_iterator cbegin() const; + iterator end(); + const_iterator end() const; + const_iterator cend() const; + + // Reverse iterator support + typedef std::reverse_iterator const_reverse_iterator; + typedef std::reverse_iterator reverse_iterator; + reverse_iterator rbegin() { + return reverse_iterator(end()); + } + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + reverse_iterator rend() { + return reverse_iterator(begin()); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // Custom STL-like iterator that iterates over and returns the underlying + // pointers to Element rather than Element itself. + typedef internal::RepeatedPtrOverPtrsIterator + pointer_iterator; + typedef internal::RepeatedPtrOverPtrsIterator + const_pointer_iterator; + pointer_iterator pointer_begin(); + const_pointer_iterator pointer_begin() const; + pointer_iterator pointer_end(); + const_pointer_iterator pointer_end() const; + + // Returns (an estimate of) the number of bytes used by the repeated field, + // excluding sizeof(*this). + size_t SpaceUsedExcludingSelfLong() const; + + int SpaceUsedExcludingSelf() const { + return internal::ToIntSize(SpaceUsedExcludingSelfLong()); + } + + // Advanced memory management -------------------------------------- + // When hardcore memory management becomes necessary -- as it sometimes + // does here at Google -- the following methods may be useful. + + // Add an already-allocated object, passing ownership to the + // RepeatedPtrField. + // + // Note that some special behavior occurs with respect to arenas: + // + // (i) if this field holds submessages, the new submessage will be copied if + // the original is in an arena and this RepeatedPtrField is either in a + // different arena, or on the heap. + // (ii) if this field holds strings, the passed-in string *must* be + // heap-allocated, not arena-allocated. There is no way to dynamically check + // this at runtime, so User Beware. + void AddAllocated(Element* value); + + // Remove the last element and return it, passing ownership to the caller. + // Requires: size() > 0 + // + // If this RepeatedPtrField is on an arena, an object copy is required to pass + // ownership back to the user (for compatible semantics). Use + // UnsafeArenaReleaseLast() if this behavior is undesired. + Element* ReleaseLast(); + + // Add an already-allocated object, skipping arena-ownership checks. 
The user + // must guarantee that the given object is in the same arena as this + // RepeatedPtrField. + // It is also useful in legacy code that uses temporary ownership to avoid + // copies. Example: + // RepeatedPtrField temp_field; + // temp_field.AddAllocated(new T); + // ... // Do something with temp_field + // temp_field.ExtractSubrange(0, temp_field.size(), nullptr); + // If you put temp_field on the arena this fails, because the ownership + // transfers to the arena at the "AddAllocated" call and is not released + // anymore causing a double delete. UnsafeArenaAddAllocated prevents this. + void UnsafeArenaAddAllocated(Element* value); + + // Remove the last element and return it. Works only when operating on an + // arena. The returned pointer is to the original object in the arena, hence + // has the arena's lifetime. + // Requires: current_size_ > 0 + Element* UnsafeArenaReleaseLast(); + + // Extract elements with indices in the range "[start .. start+num-1]". + // The caller assumes ownership of the extracted elements and is responsible + // for deleting them when they are no longer needed. + // If "elements" is non-NULL, then pointers to the extracted elements + // are stored in "elements[0 .. num-1]" for the convenience of the caller. + // If "elements" is NULL, then the caller must use some other mechanism + // to perform any further operations (like deletion) on these elements. + // Caution: implementation also moves elements with indices [start+num ..]. + // Calling this routine inside a loop can cause quadratic behavior. + // + // Memory copying behavior is identical to ReleaseLast(), described above: if + // this RepeatedPtrField is on an arena, an object copy is performed for each + // returned element, so that all returned element pointers are to + // heap-allocated copies. If this copy is not desired, the user should call + // UnsafeArenaExtractSubrange(). + void ExtractSubrange(int start, int num, Element** elements); + + // Identical to ExtractSubrange() described above, except that when this + // repeated field is on an arena, no object copies are performed. Instead, the + // raw object pointers are returned. Thus, if on an arena, the returned + // objects must not be freed, because they will not be heap-allocated objects. + void UnsafeArenaExtractSubrange(int start, int num, Element** elements); + + // When elements are removed by calls to RemoveLast() or Clear(), they + // are not actually freed. Instead, they are cleared and kept so that + // they can be reused later. This can save lots of CPU time when + // repeatedly reusing a protocol message for similar purposes. + // + // Hardcore programs may choose to manipulate these cleared objects + // to better optimize memory management using the following routines. + + // Get the number of cleared objects that are currently being kept + // around for reuse. + int ClearedCount() const; + // Add an element to the pool of cleared objects, passing ownership to + // the RepeatedPtrField. The element must be cleared prior to calling + // this method. + // + // This method cannot be called when the repeated field is on an arena or when + // |value| is; both cases will trigger a GOOGLE_DCHECK-failure. + void AddCleared(Element* value); + // Remove a single element from the cleared pool and return it, passing + // ownership to the caller. The element is guaranteed to be cleared. 
+ // Requires: ClearedCount() > 0 + // + // + // This method cannot be called when the repeated field is on an arena; doing + // so will trigger a GOOGLE_DCHECK-failure. + Element* ReleaseCleared(); + + // Removes the element referenced by position. + // + // Returns an iterator to the element immediately following the removed + // element. + // + // Invalidates all iterators at or after the removed element, including end(). + iterator erase(const_iterator position); + + // Removes the elements in the range [first, last). + // + // Returns an iterator to the element immediately following the removed range. + // + // Invalidates all iterators at or after the removed range, including end(). + iterator erase(const_iterator first, const_iterator last); + + // Gets the arena on which this RepeatedPtrField stores its elements. + ::google::protobuf::Arena* GetArena() const { + return GetArenaNoVirtual(); + } + + // For internal use only. + // + // This is public due to it being called by generated code. + using RepeatedPtrFieldBase::InternalSwap; + + private: + // Note: RepeatedPtrField SHOULD NOT be subclassed by users. + class TypeHandler; + + // Internal arena accessor expected by helpers in Arena. + inline Arena* GetArenaNoVirtual() const; + + // Implementations for ExtractSubrange(). The copying behavior must be + // included only if the type supports the necessary operations (e.g., + // MergeFrom()), so we must resolve this at compile time. ExtractSubrange() + // uses SFINAE to choose one of the below implementations. + void ExtractSubrangeInternal(int start, int num, Element** elements, + std::true_type); + void ExtractSubrangeInternal(int start, int num, Element** elements, + std::false_type); + + friend class Arena; + friend class MessageLite; + + typedef void InternalArenaConstructable_; + +}; + +// implementation ==================================================== + +template +inline RepeatedField::RepeatedField() + : current_size_(0), + total_size_(0), + ptr_(NULL) { +} + +template +inline RepeatedField::RepeatedField(Arena* arena) + : current_size_(0), + total_size_(0), + ptr_(arena) { +} + +template +inline RepeatedField::RepeatedField(const RepeatedField& other) + : current_size_(0), + total_size_(0), + ptr_(NULL) { + if (other.current_size_ != 0) { + Reserve(other.size()); + AddNAlreadyReserved(other.size()); + CopyArray(Mutable(0), &other.Get(0), other.size()); + } +} + +template +template +RepeatedField::RepeatedField(Iter begin, const Iter& end) + : current_size_(0), + total_size_(0), + ptr_(NULL) { + int reserve = internal::CalculateReserve(begin, end); + if (reserve != -1) { + Reserve(reserve); + for (; begin != end; ++begin) { + AddAlreadyReserved(*begin); + } + } else { + for (; begin != end; ++begin) { + Add(*begin); + } + } +} + +template +RepeatedField::~RepeatedField() { + if (total_size_ > 0) { + InternalDeallocate(rep(), total_size_); + } +} + +template +inline RepeatedField& +RepeatedField::operator=(const RepeatedField& other) { + if (this != &other) + CopyFrom(other); + return *this; +} + +template +inline RepeatedField::RepeatedField(RepeatedField&& other) noexcept + : RepeatedField() { + // We don't just call Swap(&other) here because it would perform 3 copies if + // the two fields are on different arenas. 
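+  // (Editor's note: when |other| is arena-owned we cannot steal its buffer,
+  // because the arena retains ownership, so the move falls back to a copy;
+  // for heap-backed fields InternalSwap() transfers the buffer in O(1).)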
+ if (other.GetArenaNoVirtual()) { + CopyFrom(other); + } else { + InternalSwap(&other); + } +} + +template +inline RepeatedField& RepeatedField::operator=( + RepeatedField&& other) noexcept { + // We don't just call Swap(&other) here because it would perform 3 copies if + // the two fields are on different arenas. + if (this != &other) { + if (this->GetArenaNoVirtual() != other.GetArenaNoVirtual()) { + CopyFrom(other); + } else { + InternalSwap(&other); + } + } + return *this; +} + +template +inline bool RepeatedField::empty() const { + return current_size_ == 0; +} + +template +inline int RepeatedField::size() const { + return current_size_; +} + +template +inline int RepeatedField::Capacity() const { + return total_size_; +} + +template +inline void RepeatedField::AddAlreadyReserved(const Element& value) { + GOOGLE_DCHECK_LT(current_size_, total_size_); + rep()->elements[current_size_++] = value; +} + +template +inline Element* RepeatedField::AddAlreadyReserved() { + GOOGLE_DCHECK_LT(current_size_, total_size_); + return &rep()->elements[current_size_++]; +} + +template +inline Element* RepeatedField::AddNAlreadyReserved(int elements) { + GOOGLE_DCHECK_LE(current_size_ + elements, total_size_); + // Warning: total_size_ can be NULL if elements == 0 && current_size_ == 0. + // Existing callers depend on this behavior. :( + Element* ret = &ptr_.rep->elements[current_size_]; + current_size_ += elements; + return ret; +} + +template +inline void RepeatedField::Resize(int new_size, const Element& value) { + GOOGLE_DCHECK_GE(new_size, 0); + if (new_size > current_size_) { + Reserve(new_size); + std::fill(&rep()->elements[current_size_], + &rep()->elements[new_size], value); + } + current_size_ = new_size; +} + +template +inline const Element& RepeatedField::Get(int index) const { + GOOGLE_DCHECK_GE(index, 0); + GOOGLE_DCHECK_LT(index, current_size_); + return rep()->elements[index]; +} + +template +inline Element* RepeatedField::Mutable(int index) { + GOOGLE_DCHECK_GE(index, 0); + GOOGLE_DCHECK_LT(index, current_size_); + return &rep()->elements[index]; +} + +template +inline void RepeatedField::Set(int index, const Element& value) { + GOOGLE_DCHECK_GE(index, 0); + GOOGLE_DCHECK_LT(index, current_size_); + rep()->elements[index] = value; +} + +template +inline void RepeatedField::Add(const Element& value) { + if (current_size_ == total_size_) Reserve(total_size_ + 1); + rep()->elements[current_size_++] = value; +} + +template +inline Element* RepeatedField::Add() { + if (current_size_ == total_size_) Reserve(total_size_ + 1); + return &rep()->elements[current_size_++]; +} + +template +inline void RepeatedField::RemoveLast() { + GOOGLE_DCHECK_GT(current_size_, 0); + current_size_--; +} + +template +void RepeatedField::ExtractSubrange( + int start, int num, Element* elements) { + GOOGLE_DCHECK_GE(start, 0); + GOOGLE_DCHECK_GE(num, 0); + GOOGLE_DCHECK_LE(start + num, this->current_size_); + + // Save the values of the removed elements if requested. + if (elements != NULL) { + for (int i = 0; i < num; ++i) + elements[i] = this->Get(i + start); + } + + // Slide remaining elements down to fill the gap. 
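+  // (Each call shifts every trailing element left by `num`, so repeatedly
+  // extracting from the front of a large field is quadratic overall.)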
+ if (num > 0) { + for (int i = start + num; i < this->current_size_; ++i) + this->Set(i - num, this->Get(i)); + this->Truncate(this->current_size_ - num); + } +} + +template +inline void RepeatedField::Clear() { + current_size_ = 0; +} + +template +inline void RepeatedField::MergeFrom(const RepeatedField& other) { + GOOGLE_DCHECK_NE(&other, this); + if (other.current_size_ != 0) { + int existing_size = size(); + Reserve(existing_size + other.size()); + AddNAlreadyReserved(other.size()); + CopyArray(Mutable(existing_size), &other.Get(0), other.size()); + } +} + +template +inline void RepeatedField::CopyFrom(const RepeatedField& other) { + if (&other == this) return; + Clear(); + MergeFrom(other); +} + +template +inline typename RepeatedField::iterator RepeatedField::erase( + const_iterator position) { + return erase(position, position + 1); +} + +template +inline typename RepeatedField::iterator RepeatedField::erase( + const_iterator first, const_iterator last) { + size_type first_offset = first - cbegin(); + if (first != last) { + Truncate(std::copy(last, cend(), begin() + first_offset) - cbegin()); + } + return begin() + first_offset; +} + +template +inline Element* RepeatedField::mutable_data() { + return total_size_ > 0 ? rep()->elements : NULL; +} + +template +inline const Element* RepeatedField::data() const { + return total_size_ > 0 ? rep()->elements : NULL; +} + +template +inline const Element* RepeatedField::unsafe_data() const { + return rep()->elements; +} + +template +inline void RepeatedField::InternalSwap(RepeatedField* other) { + GOOGLE_DCHECK(this != other); + GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual()); + + std::swap(ptr_, other->ptr_); + std::swap(current_size_, other->current_size_); + std::swap(total_size_, other->total_size_); +} + +template +void RepeatedField::Swap(RepeatedField* other) { + if (this == other) return; + if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) { + InternalSwap(other); + } else { + RepeatedField temp(other->GetArenaNoVirtual()); + temp.MergeFrom(*this); + CopyFrom(*other); + other->UnsafeArenaSwap(&temp); + } +} + +template +void RepeatedField::UnsafeArenaSwap(RepeatedField* other) { + if (this == other) return; + InternalSwap(other); +} + +template +void RepeatedField::SwapElements(int index1, int index2) { + using std::swap; // enable ADL with fallback + swap(rep()->elements[index1], rep()->elements[index2]); +} + +template +inline typename RepeatedField::iterator +RepeatedField::begin() { + return total_size_ > 0 ? rep()->elements : NULL; +} +template +inline typename RepeatedField::const_iterator +RepeatedField::begin() const { + return total_size_ > 0 ? rep()->elements : NULL; +} +template +inline typename RepeatedField::const_iterator +RepeatedField::cbegin() const { + return total_size_ > 0 ? rep()->elements : NULL; +} +template +inline typename RepeatedField::iterator +RepeatedField::end() { + return total_size_ > 0 ? rep()->elements + current_size_ : NULL; +} +template +inline typename RepeatedField::const_iterator +RepeatedField::end() const { + return total_size_ > 0 ? rep()->elements + current_size_ : NULL; +} +template +inline typename RepeatedField::const_iterator +RepeatedField::cend() const { + return total_size_ > 0 ? rep()->elements + current_size_ : NULL; +} + +template +inline size_t RepeatedField::SpaceUsedExcludingSelfLong() const { + return total_size_ > 0 ? 
+      (total_size_ * sizeof(Element) + kRepHeaderSize) : 0;
+}
+
+// Avoid inlining of Reserve(): new, copy, and delete[] lead to a significant
+// amount of code bloat.
+template <typename Element>
+void RepeatedField<Element>::Reserve(int new_size) {
+  if (total_size_ >= new_size) return;
+  Rep* old_rep = total_size_ > 0 ? rep() : NULL;
+  Arena* arena = GetArenaNoVirtual();
+  new_size = std::max(google::protobuf::internal::kMinRepeatedFieldAllocationSize,
+                      std::max(total_size_ * 2, new_size));
+  GOOGLE_DCHECK_LE(
+      static_cast<size_t>(new_size),
+      (std::numeric_limits<size_t>::max() - kRepHeaderSize) / sizeof(Element))
+      << "Requested size is too large to fit into size_t.";
+  size_t bytes =
+      kRepHeaderSize + sizeof(Element) * static_cast<size_t>(new_size);
+  if (arena == NULL) {
+    ptr_.rep = static_cast<Rep*>(::operator new(bytes));
+  } else {
+    ptr_.rep = reinterpret_cast<Rep*>(
+        ::google::protobuf::Arena::CreateArray<char>(arena, bytes));
+  }
+  ptr_.rep->arena = arena;
+  int old_total_size = total_size_;
+  total_size_ = new_size;
+  // Invoke placement-new on newly allocated elements. We shouldn't have to do
+  // this, since Element is supposed to be POD, but a previous version of this
+  // code allocated storage with "new Element[size]" and some code uses
+  // RepeatedField with non-POD types, relying on constructor invocation. If
+  // Element has a trivial constructor (e.g., int32), gcc (tested with -O2)
+  // completely removes this loop because the loop body is empty, so this has no
+  // effect unless its side-effects are required for correctness.
+  // Note that we do this before MoveArray() below because Element's copy
+  // assignment implementation will want an initialized instance first.
+  Element* e = &rep()->elements[0];
+  Element* limit = e + total_size_;
+  for (; e < limit; e++) {
+    new (e) Element;
+  }
+  if (current_size_ > 0) {
+    MoveArray(&rep()->elements[0], old_rep->elements, current_size_);
+  }
+
+  // Likewise, we need to invoke destructors on the old array.
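+  // (InternalDeallocate() is expected to run Element destructors on the old
+  // slots and to free the old block only when it was heap-allocated; blocks
+  // owned by an arena are reclaimed by the arena itself.)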
+  InternalDeallocate(old_rep, old_total_size);
+}
+
+template <typename Element>
+inline void RepeatedField<Element>::Truncate(int new_size) {
+  GOOGLE_DCHECK_LE(new_size, current_size_);
+  if (current_size_ > 0) {
+    current_size_ = new_size;
+  }
+}
+
+template <typename Element>
+inline void RepeatedField<Element>::MoveArray(
+    Element* to, Element* from, int array_size) {
+  CopyArray(to, from, array_size);
+}
+
+template <typename Element>
+inline void RepeatedField<Element>::CopyArray(
+    Element* to, const Element* from, int array_size) {
+  internal::ElementCopier<Element>()(to, from, array_size);
+}
+
+namespace internal {
+
+template <typename Element, bool HasTrivialCopy>
+void ElementCopier<Element, HasTrivialCopy>::operator()(
+    Element* to, const Element* from, int array_size) {
+  std::copy(from, from + array_size, to);
+}
+
+template <typename Element>
+struct ElementCopier<Element, true> {
+  void operator()(Element* to, const Element* from, int array_size) {
+    memcpy(to, from, static_cast<size_t>(array_size) * sizeof(Element));
+  }
+};
+
+}  // namespace internal
+
+
+// -------------------------------------------------------------------
+
+namespace internal {
+
+inline RepeatedPtrFieldBase::RepeatedPtrFieldBase()
+    : arena_(NULL),
+      current_size_(0),
+      total_size_(0),
+      rep_(NULL) {
+}
+
+inline RepeatedPtrFieldBase::RepeatedPtrFieldBase(::google::protobuf::Arena* arena)
+    : arena_(arena),
+      current_size_(0),
+      total_size_(0),
+      rep_(NULL) {
+}
+
+template <typename TypeHandler>
+void RepeatedPtrFieldBase::Destroy() {
+  if (rep_ != NULL && arena_ == NULL) {
+    int n = rep_->allocated_size;
+    void* const* elements = rep_->elements;
+    for (int i = 0; i < n; i++) {
+      TypeHandler::Delete(cast<TypeHandler>(elements[i]), NULL);
+    }
+#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
+    const size_t size = total_size_ * sizeof(elements[0]) + kRepHeaderSize;
+    ::operator delete(static_cast<void*>(rep_), size);
+#else
+    ::operator delete(static_cast<void*>(rep_));
+#endif
+  }
+  rep_ = NULL;
+}
+
+template <typename TypeHandler>
+inline void RepeatedPtrFieldBase::Swap(RepeatedPtrFieldBase* other) {
+  if (other->GetArenaNoVirtual() == GetArenaNoVirtual()) {
+    InternalSwap(other);
+  } else {
+    SwapFallback<TypeHandler>(other);
+  }
+}
+
+template <typename TypeHandler>
+void RepeatedPtrFieldBase::SwapFallback(RepeatedPtrFieldBase* other) {
+  GOOGLE_DCHECK(other->GetArenaNoVirtual() != GetArenaNoVirtual());
+
+  // Copy semantics in this case. We try to improve efficiency by placing the
+  // temporary on |other|'s arena so that messages are copied cross-arena only
+  // once, not twice.
+  RepeatedPtrFieldBase temp(other->GetArenaNoVirtual());
+  temp.MergeFrom<TypeHandler>(*this);
+  this->Clear<TypeHandler>();
+  this->MergeFrom<TypeHandler>(*other);
+  other->Clear<TypeHandler>();
+  other->InternalSwap(&temp);
+  temp.Destroy<TypeHandler>();  // Frees rep_ if `other` had no arena.
+} + +inline bool RepeatedPtrFieldBase::empty() const { + return current_size_ == 0; +} + +inline int RepeatedPtrFieldBase::size() const { + return current_size_; +} + +template +inline const typename TypeHandler::WeakType& +RepeatedPtrFieldBase::Get(int index) const { + GOOGLE_DCHECK_GE(index, 0); + GOOGLE_DCHECK_LT(index, current_size_); + return *cast(rep_->elements[index]); +} + +template +inline typename TypeHandler::Type* +RepeatedPtrFieldBase::Mutable(int index) { + GOOGLE_DCHECK_GE(index, 0); + GOOGLE_DCHECK_LT(index, current_size_); + return cast(rep_->elements[index]); +} + +template +inline void RepeatedPtrFieldBase::Delete(int index) { + GOOGLE_DCHECK_GE(index, 0); + GOOGLE_DCHECK_LT(index, current_size_); + TypeHandler::Delete(cast(rep_->elements[index]), arena_); +} + +template +inline typename TypeHandler::Type* RepeatedPtrFieldBase::Add( + typename TypeHandler::Type* prototype) { + if (rep_ != NULL && current_size_ < rep_->allocated_size) { + return cast(rep_->elements[current_size_++]); + } + if (!rep_ || rep_->allocated_size == total_size_) { + Reserve(total_size_ + 1); + } + ++rep_->allocated_size; + typename TypeHandler::Type* result = + TypeHandler::NewFromPrototype(prototype, arena_); + rep_->elements[current_size_++] = result; + return result; +} + +template +inline void RepeatedPtrFieldBase::Add( + typename TypeHandler::Type&& value, + std::enable_if*) { + if (rep_ != NULL && current_size_ < rep_->allocated_size) { + *cast(rep_->elements[current_size_++]) = std::move(value); + return; + } + if (!rep_ || rep_->allocated_size == total_size_) { + Reserve(total_size_ + 1); + } + ++rep_->allocated_size; + typename TypeHandler::Type* result = + TypeHandler::New(arena_, std::move(value)); + rep_->elements[current_size_++] = result; +} + +template +inline void RepeatedPtrFieldBase::RemoveLast() { + GOOGLE_DCHECK_GT(current_size_, 0); + TypeHandler::Clear(cast(rep_->elements[--current_size_])); +} + +template +void RepeatedPtrFieldBase::Clear() { + const int n = current_size_; + GOOGLE_DCHECK_GE(n, 0); + if (n > 0) { + void* const* elements = rep_->elements; + int i = 0; + do { + TypeHandler::Clear(cast(elements[i++])); + } while (i < n); + current_size_ = 0; + } +} + +// To avoid unnecessary code duplication and reduce binary size, we use a +// layered approach to implementing MergeFrom(). The toplevel method is +// templated, so we get a small thunk per concrete message type in the binary. +// This calls a shared implementation with most of the logic, passing a function +// pointer to another type-specific piece of code that calls the object-allocate +// and merge handlers. +template +inline void RepeatedPtrFieldBase::MergeFrom(const RepeatedPtrFieldBase& other) { + GOOGLE_DCHECK_NE(&other, this); + if (other.current_size_ == 0) return; + MergeFromInternal( + other, &RepeatedPtrFieldBase::MergeFromInnerLoop); +} + +inline void RepeatedPtrFieldBase::MergeFromInternal( + const RepeatedPtrFieldBase& other, + void (RepeatedPtrFieldBase::*inner_loop)(void**, void**, int, int)) { + // Note: wrapper has already guaranteed that other.rep_ != NULL here. 
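+  // (InternalExtend(n) is assumed here to grow the pointer array by at least
+  // n slots and return the address of the first slot at index current_size_.)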
+ int other_size = other.current_size_; + void** other_elements = other.rep_->elements; + void** new_elements = InternalExtend(other_size); + int allocated_elems = rep_->allocated_size - current_size_; + (this->*inner_loop)(new_elements, other_elements, + other_size, allocated_elems); + current_size_ += other_size; + if (rep_->allocated_size < current_size_) { + rep_->allocated_size = current_size_; + } +} + +// Merges other_elems to our_elems. +template +void RepeatedPtrFieldBase::MergeFromInnerLoop( + void** our_elems, void** other_elems, int length, int already_allocated) { + // Split into two loops, over ranges [0, allocated) and [allocated, length), + // to avoid a branch within the loop. + for (int i = 0; i < already_allocated && i < length; i++) { + // Already allocated: use existing element. + typename TypeHandler::WeakType* other_elem = + reinterpret_cast(other_elems[i]); + typename TypeHandler::WeakType* new_elem = + reinterpret_cast(our_elems[i]); + TypeHandler::Merge(*other_elem, new_elem); + } + Arena* arena = GetArenaNoVirtual(); + for (int i = already_allocated; i < length; i++) { + // Not allocated: alloc a new element first, then merge it. + typename TypeHandler::WeakType* other_elem = + reinterpret_cast(other_elems[i]); + typename TypeHandler::WeakType* new_elem = + TypeHandler::NewFromPrototype(other_elem, arena); + TypeHandler::Merge(*other_elem, new_elem); + our_elems[i] = new_elem; + } +} + +template +inline void RepeatedPtrFieldBase::CopyFrom(const RepeatedPtrFieldBase& other) { + if (&other == this) return; + RepeatedPtrFieldBase::Clear(); + RepeatedPtrFieldBase::MergeFrom(other); +} + +inline int RepeatedPtrFieldBase::Capacity() const { + return total_size_; +} + +inline void* const* RepeatedPtrFieldBase::raw_data() const { + return rep_ ? rep_->elements : NULL; +} + +inline void** RepeatedPtrFieldBase::raw_mutable_data() const { + return rep_ ? const_cast(rep_->elements) : NULL; +} + +template +inline typename TypeHandler::Type** RepeatedPtrFieldBase::mutable_data() { + // TODO(kenton): Breaks C++ aliasing rules. We should probably remove this + // method entirely. + return reinterpret_cast(raw_mutable_data()); +} + +template +inline const typename TypeHandler::Type* const* +RepeatedPtrFieldBase::data() const { + // TODO(kenton): Breaks C++ aliasing rules. We should probably remove this + // method entirely. + return reinterpret_cast(raw_data()); +} + +inline void RepeatedPtrFieldBase::SwapElements(int index1, int index2) { + using std::swap; // enable ADL with fallback + swap(rep_->elements[index1], rep_->elements[index2]); +} + +template +inline size_t RepeatedPtrFieldBase::SpaceUsedExcludingSelfLong() const { + size_t allocated_bytes = static_cast(total_size_) * sizeof(void*); + if (rep_ != NULL) { + for (int i = 0; i < rep_->allocated_size; ++i) { + allocated_bytes += TypeHandler::SpaceUsedLong( + *cast(rep_->elements[i])); + } + allocated_bytes += kRepHeaderSize; + } + return allocated_bytes; +} + +template +inline typename TypeHandler::Type* RepeatedPtrFieldBase::AddFromCleared() { + if (rep_ != NULL && current_size_ < rep_->allocated_size) { + return cast(rep_->elements[current_size_++]); + } else { + return NULL; + } +} + +// AddAllocated version that implements arena-safe copying behavior. 
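+// An illustrative sketch (not from the original header; MyMessage is a
+// hypothetical message type) of what this buys the caller:
+//
+//   RepeatedPtrField<MyMessage> field;    // heap-allocated field
+//   field.AddAllocated(new MyMessage);    // field takes ownership
+//
+// Had `field` lived on an arena, the heap pointer would have been handed to
+// the arena via Own(), and a pointer from a different arena would have been
+// copied into place, as the slow path below shows.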
+template +void RepeatedPtrFieldBase::AddAllocatedInternal( + typename TypeHandler::Type* value, + std::true_type) { + Arena* element_arena = reinterpret_cast( + TypeHandler::GetMaybeArenaPointer(value)); + Arena* arena = GetArenaNoVirtual(); + if (arena == element_arena && rep_ && + rep_->allocated_size < total_size_) { + // Fast path: underlying arena representation (tagged pointer) is equal to + // our arena pointer, and we can add to array without resizing it (at least + // one slot that is not allocated). + void** elems = rep_->elements; + if (current_size_ < rep_->allocated_size) { + // Make space at [current] by moving first allocated element to end of + // allocated list. + elems[rep_->allocated_size] = elems[current_size_]; + } + elems[current_size_] = value; + current_size_ = current_size_ + 1; + rep_->allocated_size = rep_->allocated_size + 1; + } else { + AddAllocatedSlowWithCopy( + value, TypeHandler::GetArena(value), arena); + } +} + +// Slowpath handles all cases, copying if necessary. +template +void RepeatedPtrFieldBase::AddAllocatedSlowWithCopy( + // Pass value_arena and my_arena to avoid duplicate virtual call (value) or + // load (mine). + typename TypeHandler::Type* value, Arena* value_arena, Arena* my_arena) { + // Ensure that either the value is in the same arena, or if not, we do the + // appropriate thing: Own() it (if it's on heap and we're in an arena) or copy + // it to our arena/heap (otherwise). + if (my_arena != NULL && value_arena == NULL) { + my_arena->Own(value); + } else if (my_arena != value_arena) { + typename TypeHandler::Type* new_value = + TypeHandler::NewFromPrototype(value, my_arena); + TypeHandler::Merge(*value, new_value); + TypeHandler::Delete(value, value_arena); + value = new_value; + } + + UnsafeArenaAddAllocated(value); +} + +// AddAllocated version that does not implement arena-safe copying behavior. +template +void RepeatedPtrFieldBase::AddAllocatedInternal( + typename TypeHandler::Type* value, + std::false_type) { + if (rep_ && rep_->allocated_size < total_size_) { + // Fast path: underlying arena representation (tagged pointer) is equal to + // our arena pointer, and we can add to array without resizing it (at least + // one slot that is not allocated). + void** elems = rep_->elements; + if (current_size_ < rep_->allocated_size) { + // Make space at [current] by moving first allocated element to end of + // allocated list. + elems[rep_->allocated_size] = elems[current_size_]; + } + elems[current_size_] = value; + current_size_ = current_size_ + 1; + ++rep_->allocated_size; + } else { + UnsafeArenaAddAllocated(value); + } +} + +template +void RepeatedPtrFieldBase::UnsafeArenaAddAllocated( + typename TypeHandler::Type* value) { + // Make room for the new pointer. + if (!rep_ || current_size_ == total_size_) { + // The array is completely full with no cleared objects, so grow it. + Reserve(total_size_ + 1); + ++rep_->allocated_size; + } else if (rep_->allocated_size == total_size_) { + // There is no more space in the pointer array because it contains some + // cleared objects awaiting reuse. We don't want to grow the array in this + // case because otherwise a loop calling AddAllocated() followed by Clear() + // would leak memory. + TypeHandler::Delete( + cast(rep_->elements[current_size_]), arena_); + } else if (current_size_ < rep_->allocated_size) { + // We have some cleared objects. We don't care about their order, so we + // can just move the first one to the end to make space. 
+    rep_->elements[rep_->allocated_size] = rep_->elements[current_size_];
+    ++rep_->allocated_size;
+  } else {
+    // There are no cleared objects.
+    ++rep_->allocated_size;
+  }
+
+  rep_->elements[current_size_++] = value;
+}
+
+// ReleaseLast() for types that implement merge/copy behavior.
+template <typename TypeHandler>
+inline typename TypeHandler::Type*
+RepeatedPtrFieldBase::ReleaseLastInternal(std::true_type) {
+  // First, release an element.
+  typename TypeHandler::Type* result = UnsafeArenaReleaseLast<TypeHandler>();
+  // Now perform a copy if we're on an arena.
+  Arena* arena = GetArenaNoVirtual();
+  if (arena == NULL) {
+    return result;
+  } else {
+    typename TypeHandler::Type* new_result =
+        TypeHandler::NewFromPrototype(result, NULL);
+    TypeHandler::Merge(*result, new_result);
+    return new_result;
+  }
+}
+
+// ReleaseLast() for types that *do not* implement merge/copy behavior -- this
+// is the same as UnsafeArenaReleaseLast(). Note that we GOOGLE_DCHECK-fail if we're on
+// an arena, since the user really should implement the copy operation in this
+// case.
+template <typename TypeHandler>
+inline typename TypeHandler::Type*
+RepeatedPtrFieldBase::ReleaseLastInternal(std::false_type) {
+  GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)
+      << "ReleaseLast() called on a RepeatedPtrField that is on an arena, "
+      << "with a type that does not implement MergeFrom. This is unsafe; "
+      << "please implement MergeFrom for your type.";
+  return UnsafeArenaReleaseLast<TypeHandler>();
+}
+
+template <typename TypeHandler>
+inline typename TypeHandler::Type*
+RepeatedPtrFieldBase::UnsafeArenaReleaseLast() {
+  GOOGLE_DCHECK_GT(current_size_, 0);
+  typename TypeHandler::Type* result =
+      cast<TypeHandler>(rep_->elements[--current_size_]);
+  --rep_->allocated_size;
+  if (current_size_ < rep_->allocated_size) {
+    // There are cleared elements on the end; replace the removed element
+    // with the last allocated element.
+    rep_->elements[current_size_] = rep_->elements[rep_->allocated_size];
+  }
+  return result;
+}
+
+inline int RepeatedPtrFieldBase::ClearedCount() const {
+  return rep_ ?
(rep_->allocated_size - current_size_) : 0; +} + +template +inline void RepeatedPtrFieldBase::AddCleared( + typename TypeHandler::Type* value) { + GOOGLE_DCHECK(GetArenaNoVirtual() == NULL) + << "AddCleared() can only be used on a RepeatedPtrField not on an arena."; + GOOGLE_DCHECK(TypeHandler::GetArena(value) == NULL) + << "AddCleared() can only accept values not on an arena."; + if (!rep_ || rep_->allocated_size == total_size_) { + Reserve(total_size_ + 1); + } + rep_->elements[rep_->allocated_size++] = value; +} + +template +inline typename TypeHandler::Type* RepeatedPtrFieldBase::ReleaseCleared() { + GOOGLE_DCHECK(GetArenaNoVirtual() == NULL) + << "ReleaseCleared() can only be used on a RepeatedPtrField not on " + << "an arena."; + GOOGLE_DCHECK(GetArenaNoVirtual() == NULL); + GOOGLE_DCHECK(rep_ != NULL); + GOOGLE_DCHECK_GT(rep_->allocated_size, current_size_); + return cast(rep_->elements[--rep_->allocated_size]); +} + +} // namespace internal + +// ------------------------------------------------------------------- + +template +class RepeatedPtrField::TypeHandler + : public internal::GenericTypeHandler { +}; + +template <> +class RepeatedPtrField::TypeHandler + : public internal::StringTypeHandler { +}; + +template +inline RepeatedPtrField::RepeatedPtrField() + : RepeatedPtrFieldBase() {} + +template +inline RepeatedPtrField::RepeatedPtrField(::google::protobuf::Arena* arena) : + RepeatedPtrFieldBase(arena) {} + +template +inline RepeatedPtrField::RepeatedPtrField( + const RepeatedPtrField& other) + : RepeatedPtrFieldBase() { + MergeFrom(other); +} + +template +template +inline RepeatedPtrField::RepeatedPtrField( + Iter begin, const Iter& end) { + int reserve = internal::CalculateReserve(begin, end); + if (reserve != -1) { + Reserve(reserve); + } + for (; begin != end; ++begin) { + *Add() = *begin; + } +} + +template +RepeatedPtrField::~RepeatedPtrField() { + Destroy(); +} + +template +inline RepeatedPtrField& RepeatedPtrField::operator=( + const RepeatedPtrField& other) { + if (this != &other) + CopyFrom(other); + return *this; +} + +template +inline RepeatedPtrField::RepeatedPtrField( + RepeatedPtrField&& other) noexcept + : RepeatedPtrField() { + // We don't just call Swap(&other) here because it would perform 3 copies if + // the two fields are on different arenas. + if (other.GetArenaNoVirtual()) { + CopyFrom(other); + } else { + InternalSwap(&other); + } +} + +template +inline RepeatedPtrField& RepeatedPtrField::operator=( + RepeatedPtrField&& other) noexcept { + // We don't just call Swap(&other) here because it would perform 3 copies if + // the two fields are on different arenas. 
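+  // (A same-arena move degrades to a cheap pointer swap; a cross-arena move
+  // falls back to an element-wise copy and leaves `other` unchanged.)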
+ if (this != &other) { + if (this->GetArenaNoVirtual() != other.GetArenaNoVirtual()) { + CopyFrom(other); + } else { + InternalSwap(&other); + } + } + return *this; +} + +template +inline bool RepeatedPtrField::empty() const { + return RepeatedPtrFieldBase::empty(); +} + +template +inline int RepeatedPtrField::size() const { + return RepeatedPtrFieldBase::size(); +} + +template +inline const Element& RepeatedPtrField::Get(int index) const { + return RepeatedPtrFieldBase::Get(index); +} + + +template +inline Element* RepeatedPtrField::Mutable(int index) { + return RepeatedPtrFieldBase::Mutable(index); +} + +template +inline Element* RepeatedPtrField::Add() { + return RepeatedPtrFieldBase::Add(); +} + +template +inline void RepeatedPtrField::Add(Element&& value) { + RepeatedPtrFieldBase::Add(std::move(value)); +} + +template +inline void RepeatedPtrField::RemoveLast() { + RepeatedPtrFieldBase::RemoveLast(); +} + +template +inline void RepeatedPtrField::DeleteSubrange(int start, int num) { + GOOGLE_DCHECK_GE(start, 0); + GOOGLE_DCHECK_GE(num, 0); + GOOGLE_DCHECK_LE(start + num, size()); + for (int i = 0; i < num; ++i) { + RepeatedPtrFieldBase::Delete(start + i); + } + ExtractSubrange(start, num, NULL); +} + +template +inline void RepeatedPtrField::ExtractSubrange( + int start, int num, Element** elements) { + typename internal::TypeImplementsMergeBehavior< + typename TypeHandler::Type>::type t; + ExtractSubrangeInternal(start, num, elements, t); +} + +// ExtractSubrange() implementation for types that implement merge/copy +// behavior. +template +inline void RepeatedPtrField::ExtractSubrangeInternal( + int start, int num, Element** elements, std::true_type) { + GOOGLE_DCHECK_GE(start, 0); + GOOGLE_DCHECK_GE(num, 0); + GOOGLE_DCHECK_LE(start + num, size()); + + if (num > 0) { + // Save the values of the removed elements if requested. + if (elements != NULL) { + if (GetArenaNoVirtual() != NULL) { + // If we're on an arena, we perform a copy for each element so that the + // returned elements are heap-allocated. + for (int i = 0; i < num; ++i) { + Element* element = RepeatedPtrFieldBase:: + Mutable(i + start); + typename TypeHandler::Type* new_value = + TypeHandler::NewFromPrototype(element, NULL); + TypeHandler::Merge(*element, new_value); + elements[i] = new_value; + } + } else { + for (int i = 0; i < num; ++i) { + elements[i] = RepeatedPtrFieldBase::Mutable(i + start); + } + } + } + CloseGap(start, num); + } +} + +// ExtractSubrange() implementation for types that do not implement merge/copy +// behavior. +template +inline void RepeatedPtrField::ExtractSubrangeInternal( + int start, int num, Element** elements, std::false_type) { + // This case is identical to UnsafeArenaExtractSubrange(). However, since + // ExtractSubrange() must return heap-allocated objects by contract, and we + // cannot fulfill this contract if we are an on arena, we must GOOGLE_DCHECK() that + // we are not on an arena. + GOOGLE_DCHECK(GetArenaNoVirtual() == NULL) + << "ExtractSubrange() when arena is non-NULL is only supported when " + << "the Element type supplies a MergeFrom() operation to make copies."; + UnsafeArenaExtractSubrange(start, num, elements); +} + +template +inline void RepeatedPtrField::UnsafeArenaExtractSubrange( + int start, int num, Element** elements) { + GOOGLE_DCHECK_GE(start, 0); + GOOGLE_DCHECK_GE(num, 0); + GOOGLE_DCHECK_LE(start + num, size()); + + if (num > 0) { + // Save the values of the removed elements if requested. 
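+    // (Unlike ExtractSubrange() above, no heap copies are made even when the
+    // field lives on an arena; the caller receives the raw in-arena pointers
+    // and must not delete them.)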
+ if (elements != NULL) { + for (int i = 0; i < num; ++i) { + elements[i] = RepeatedPtrFieldBase::Mutable(i + start); + } + } + CloseGap(start, num); + } +} + +template +inline void RepeatedPtrField::Clear() { + RepeatedPtrFieldBase::Clear(); +} + +template +inline void RepeatedPtrField::MergeFrom( + const RepeatedPtrField& other) { + RepeatedPtrFieldBase::MergeFrom(other); +} + +template +inline void RepeatedPtrField::CopyFrom( + const RepeatedPtrField& other) { + RepeatedPtrFieldBase::CopyFrom(other); +} + +template +inline typename RepeatedPtrField::iterator +RepeatedPtrField::erase(const_iterator position) { + return erase(position, position + 1); +} + +template +inline typename RepeatedPtrField::iterator +RepeatedPtrField::erase(const_iterator first, const_iterator last) { + size_type pos_offset = std::distance(cbegin(), first); + size_type last_offset = std::distance(cbegin(), last); + DeleteSubrange(pos_offset, last_offset - pos_offset); + return begin() + pos_offset; +} + +template +inline Element** RepeatedPtrField::mutable_data() { + return RepeatedPtrFieldBase::mutable_data(); +} + +template +inline const Element* const* RepeatedPtrField::data() const { + return RepeatedPtrFieldBase::data(); +} + +template +inline void RepeatedPtrField::Swap(RepeatedPtrField* other) { + if (this == other) + return; + RepeatedPtrFieldBase::Swap(other); +} + +template +inline void RepeatedPtrField::UnsafeArenaSwap( + RepeatedPtrField* other) { + if (this == other) + return; + RepeatedPtrFieldBase::InternalSwap(other); +} + +template +inline void RepeatedPtrField::SwapElements(int index1, int index2) { + RepeatedPtrFieldBase::SwapElements(index1, index2); +} + +template +inline Arena* RepeatedPtrField::GetArenaNoVirtual() const { + return RepeatedPtrFieldBase::GetArenaNoVirtual(); +} + +template +inline size_t RepeatedPtrField::SpaceUsedExcludingSelfLong() const { + return RepeatedPtrFieldBase::SpaceUsedExcludingSelfLong(); +} + +template +inline void RepeatedPtrField::AddAllocated(Element* value) { + RepeatedPtrFieldBase::AddAllocated(value); +} + +template +inline void RepeatedPtrField::UnsafeArenaAddAllocated(Element* value) { + RepeatedPtrFieldBase::UnsafeArenaAddAllocated(value); +} + +template +inline Element* RepeatedPtrField::ReleaseLast() { + return RepeatedPtrFieldBase::ReleaseLast(); +} + +template +inline Element* RepeatedPtrField::UnsafeArenaReleaseLast() { + return RepeatedPtrFieldBase::UnsafeArenaReleaseLast(); +} + +template +inline int RepeatedPtrField::ClearedCount() const { + return RepeatedPtrFieldBase::ClearedCount(); +} + +template +inline void RepeatedPtrField::AddCleared(Element* value) { + return RepeatedPtrFieldBase::AddCleared(value); +} + +template +inline Element* RepeatedPtrField::ReleaseCleared() { + return RepeatedPtrFieldBase::ReleaseCleared(); +} + +template +inline void RepeatedPtrField::Reserve(int new_size) { + return RepeatedPtrFieldBase::Reserve(new_size); +} + +template +inline int RepeatedPtrField::Capacity() const { + return RepeatedPtrFieldBase::Capacity(); +} + +// ------------------------------------------------------------------- + +namespace internal { + +// STL-like iterator implementation for RepeatedPtrField. You should not +// refer to this class directly; use RepeatedPtrField::iterator instead. 
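+//
+// An illustrative sketch (not from the original header; MyMessage and its
+// id() accessor are hypothetical):
+//
+//   RepeatedPtrField<MyMessage> field;
+//   for (const MyMessage& m : field) { /* inspect m */ }
+//   auto it = std::find_if(field.begin(), field.end(),
+//                          [](const MyMessage& m) { return m.id() == 7; });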
+// +// The iterator for RepeatedPtrField, RepeatedPtrIterator, is +// very similar to iterator_ptr in util/gtl/iterator_adaptors.h, +// but adds random-access operators and is modified to wrap a void** base +// iterator (since RepeatedPtrField stores its array as a void* array and +// casting void** to T** would violate C++ aliasing rules). +// +// This code based on net/proto/proto-array-internal.h by Jeffrey Yasskin +// (jyasskin@google.com). +template +class RepeatedPtrIterator + : public std::iterator< + std::random_access_iterator_tag, Element> { + public: + typedef RepeatedPtrIterator iterator; + typedef std::iterator< + std::random_access_iterator_tag, Element> superclass; + + // Shadow the value_type in std::iterator<> because const_iterator::value_type + // needs to be T, not const T. + typedef typename std::remove_const::type value_type; + + // Let the compiler know that these are type names, so we don't have to + // write "typename" in front of them everywhere. + typedef typename superclass::reference reference; + typedef typename superclass::pointer pointer; + typedef typename superclass::difference_type difference_type; + + RepeatedPtrIterator() : it_(NULL) {} + explicit RepeatedPtrIterator(void* const* it) : it_(it) {} + + // Allow "upcasting" from RepeatedPtrIterator to + // RepeatedPtrIterator. + template + RepeatedPtrIterator(const RepeatedPtrIterator& other) + : it_(other.it_) { + // Force a compiler error if the other type is not convertible to ours. + if (false) { + implicit_cast(static_cast(nullptr)); + } + } + + // dereferenceable + reference operator*() const { return *reinterpret_cast(*it_); } + pointer operator->() const { return &(operator*()); } + + // {inc,dec}rementable + iterator& operator++() { ++it_; return *this; } + iterator operator++(int) { return iterator(it_++); } + iterator& operator--() { --it_; return *this; } + iterator operator--(int) { return iterator(it_--); } + + // equality_comparable + bool operator==(const iterator& x) const { return it_ == x.it_; } + bool operator!=(const iterator& x) const { return it_ != x.it_; } + + // less_than_comparable + bool operator<(const iterator& x) const { return it_ < x.it_; } + bool operator<=(const iterator& x) const { return it_ <= x.it_; } + bool operator>(const iterator& x) const { return it_ > x.it_; } + bool operator>=(const iterator& x) const { return it_ >= x.it_; } + + // addable, subtractable + iterator& operator+=(difference_type d) { + it_ += d; + return *this; + } + friend iterator operator+(iterator it, const difference_type d) { + it += d; + return it; + } + friend iterator operator+(const difference_type d, iterator it) { + it += d; + return it; + } + iterator& operator-=(difference_type d) { + it_ -= d; + return *this; + } + friend iterator operator-(iterator it, difference_type d) { + it -= d; + return it; + } + + // indexable + reference operator[](difference_type d) const { return *(*this + d); } + + // random access iterator + difference_type operator-(const iterator& x) const { return it_ - x.it_; } + + private: + template + friend class RepeatedPtrIterator; + + // The internal iterator. + void* const* it_; +}; + +// Provide an iterator that operates on pointers to the underlying objects +// rather than the objects themselves as RepeatedPtrIterator does. +// Consider using this when working with stl algorithms that change +// the array. +// The VoidPtr template parameter holds the type-agnostic pointer value +// referenced by the iterator. 
It should either be "void *" for a mutable +// iterator, or "const void* const" for a constant iterator. +template +class RepeatedPtrOverPtrsIterator + : public std::iterator { + public: + typedef RepeatedPtrOverPtrsIterator iterator; + typedef std::iterator superclass; + + // Shadow the value_type in std::iterator<> because const_iterator::value_type + // needs to be T, not const T. + typedef typename std::remove_const::type value_type; + + // Let the compiler know that these are type names, so we don't have to + // write "typename" in front of them everywhere. + typedef typename superclass::reference reference; + typedef typename superclass::pointer pointer; + typedef typename superclass::difference_type difference_type; + + RepeatedPtrOverPtrsIterator() : it_(NULL) {} + explicit RepeatedPtrOverPtrsIterator(VoidPtr* it) : it_(it) {} + + // dereferenceable + reference operator*() const { return *reinterpret_cast(it_); } + pointer operator->() const { return &(operator*()); } + + // {inc,dec}rementable + iterator& operator++() { ++it_; return *this; } + iterator operator++(int) { return iterator(it_++); } + iterator& operator--() { --it_; return *this; } + iterator operator--(int) { return iterator(it_--); } + + // equality_comparable + bool operator==(const iterator& x) const { return it_ == x.it_; } + bool operator!=(const iterator& x) const { return it_ != x.it_; } + + // less_than_comparable + bool operator<(const iterator& x) const { return it_ < x.it_; } + bool operator<=(const iterator& x) const { return it_ <= x.it_; } + bool operator>(const iterator& x) const { return it_ > x.it_; } + bool operator>=(const iterator& x) const { return it_ >= x.it_; } + + // addable, subtractable + iterator& operator+=(difference_type d) { + it_ += d; + return *this; + } + friend iterator operator+(iterator it, difference_type d) { + it += d; + return it; + } + friend iterator operator+(difference_type d, iterator it) { + it += d; + return it; + } + iterator& operator-=(difference_type d) { + it_ -= d; + return *this; + } + friend iterator operator-(iterator it, difference_type d) { + it -= d; + return it; + } + + // indexable + reference operator[](difference_type d) const { return *(*this + d); } + + // random access iterator + difference_type operator-(const iterator& x) const { return it_ - x.it_; } + + private: + template + friend class RepeatedPtrIterator; + + // The internal iterator. 
+ VoidPtr* it_; +}; + +void RepeatedPtrFieldBase::InternalSwap(RepeatedPtrFieldBase* other) { + GOOGLE_DCHECK(this != other); + GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual()); + + std::swap(rep_, other->rep_); + std::swap(current_size_, other->current_size_); + std::swap(total_size_, other->total_size_); +} + +} // namespace internal + +template +inline typename RepeatedPtrField::iterator +RepeatedPtrField::begin() { + return iterator(raw_data()); +} +template +inline typename RepeatedPtrField::const_iterator +RepeatedPtrField::begin() const { + return iterator(raw_data()); +} +template +inline typename RepeatedPtrField::const_iterator +RepeatedPtrField::cbegin() const { + return begin(); +} +template +inline typename RepeatedPtrField::iterator +RepeatedPtrField::end() { + return iterator(raw_data() + size()); +} +template +inline typename RepeatedPtrField::const_iterator +RepeatedPtrField::end() const { + return iterator(raw_data() + size()); +} +template +inline typename RepeatedPtrField::const_iterator +RepeatedPtrField::cend() const { + return end(); +} + +template +inline typename RepeatedPtrField::pointer_iterator +RepeatedPtrField::pointer_begin() { + return pointer_iterator(raw_mutable_data()); +} +template +inline typename RepeatedPtrField::const_pointer_iterator +RepeatedPtrField::pointer_begin() const { + return const_pointer_iterator(const_cast(raw_data())); +} +template +inline typename RepeatedPtrField::pointer_iterator +RepeatedPtrField::pointer_end() { + return pointer_iterator(raw_mutable_data() + size()); +} +template +inline typename RepeatedPtrField::const_pointer_iterator +RepeatedPtrField::pointer_end() const { + return const_pointer_iterator( + const_cast(raw_data() + size())); +} + + +// Iterators and helper functions that follow the spirit of the STL +// std::back_insert_iterator and std::back_inserter but are tailor-made +// for RepeatedField and RepeatedPtrField. Typical usage would be: +// +// std::copy(some_sequence.begin(), some_sequence.end(), +// google::protobuf::RepeatedFieldBackInserter(proto.mutable_sequence())); +// +// Ported by johannes from util/gtl/proto-array-iterators.h + +namespace internal { +// A back inserter for RepeatedField objects. +template class RepeatedFieldBackInsertIterator + : public std::iterator { + public: + explicit RepeatedFieldBackInsertIterator( + RepeatedField* const mutable_field) + : field_(mutable_field) { + } + RepeatedFieldBackInsertIterator& operator=(const T& value) { + field_->Add(value); + return *this; + } + RepeatedFieldBackInsertIterator& operator*() { + return *this; + } + RepeatedFieldBackInsertIterator& operator++() { + return *this; + } + RepeatedFieldBackInsertIterator& operator++(int /* unused */) { + return *this; + } + + private: + RepeatedField* field_; +}; + +// A back inserter for RepeatedPtrField objects. 
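+//
+// For example (an illustrative sketch, not from the original header):
+//
+//   std::vector<std::string> src = {"a", "b"};
+//   RepeatedPtrField<std::string> dst;
+//   std::copy(src.begin(), src.end(), RepeatedPtrFieldBackInserter(&dst));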
+template class RepeatedPtrFieldBackInsertIterator + : public std::iterator { + public: + RepeatedPtrFieldBackInsertIterator( + RepeatedPtrField* const mutable_field) + : field_(mutable_field) { + } + RepeatedPtrFieldBackInsertIterator& operator=(const T& value) { + *field_->Add() = value; + return *this; + } + RepeatedPtrFieldBackInsertIterator& operator=( + const T* const ptr_to_value) { + *field_->Add() = *ptr_to_value; + return *this; + } + RepeatedPtrFieldBackInsertIterator& operator=(T&& value) { + *field_->Add() = std::move(value); + return *this; + } + RepeatedPtrFieldBackInsertIterator& operator*() { + return *this; + } + RepeatedPtrFieldBackInsertIterator& operator++() { + return *this; + } + RepeatedPtrFieldBackInsertIterator& operator++(int /* unused */) { + return *this; + } + + private: + RepeatedPtrField* field_; +}; + +// A back inserter for RepeatedPtrFields that inserts by transferring ownership +// of a pointer. +template class AllocatedRepeatedPtrFieldBackInsertIterator + : public std::iterator { + public: + explicit AllocatedRepeatedPtrFieldBackInsertIterator( + RepeatedPtrField* const mutable_field) + : field_(mutable_field) { + } + AllocatedRepeatedPtrFieldBackInsertIterator& operator=( + T* const ptr_to_value) { + field_->AddAllocated(ptr_to_value); + return *this; + } + AllocatedRepeatedPtrFieldBackInsertIterator& operator*() { + return *this; + } + AllocatedRepeatedPtrFieldBackInsertIterator& operator++() { + return *this; + } + AllocatedRepeatedPtrFieldBackInsertIterator& operator++( + int /* unused */) { + return *this; + } + + private: + RepeatedPtrField* field_; +}; + +// Almost identical to AllocatedRepeatedPtrFieldBackInsertIterator. This one +// uses the UnsafeArenaAddAllocated instead. +template +class UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator + : public std::iterator { + public: + explicit UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator( + ::google::protobuf::RepeatedPtrField* const mutable_field) + : field_(mutable_field) { + } + UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator& operator=( + T const* const ptr_to_value) { + field_->UnsafeArenaAddAllocated(const_cast(ptr_to_value)); + return *this; + } + UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator& operator*() { + return *this; + } + UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator& operator++() { + return *this; + } + UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator& operator++( + int /* unused */) { + return *this; + } + + private: + ::google::protobuf::RepeatedPtrField* field_; +}; + +} // namespace internal + +// Provides a back insert iterator for RepeatedField instances, +// similar to std::back_inserter(). +template internal::RepeatedFieldBackInsertIterator +RepeatedFieldBackInserter(RepeatedField* const mutable_field) { + return internal::RepeatedFieldBackInsertIterator(mutable_field); +} + +// Provides a back insert iterator for RepeatedPtrField instances, +// similar to std::back_inserter(). +template internal::RepeatedPtrFieldBackInsertIterator +RepeatedPtrFieldBackInserter(RepeatedPtrField* const mutable_field) { + return internal::RepeatedPtrFieldBackInsertIterator(mutable_field); +} + +// Special back insert iterator for RepeatedPtrField instances, just in +// case someone wants to write generic template code that can access both +// RepeatedFields and RepeatedPtrFields using a common name. 
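+//
+// For example, this illustrative template (not from the original header)
+// compiles for both RepeatedField<int> and RepeatedPtrField<std::string>:
+//
+//   template <typename Field, typename Iter>
+//   void AppendAll(Iter first, Iter last, Field* field) {
+//     std::copy(first, last, RepeatedFieldBackInserter(field));
+//   }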
+template internal::RepeatedPtrFieldBackInsertIterator +RepeatedFieldBackInserter(RepeatedPtrField* const mutable_field) { + return internal::RepeatedPtrFieldBackInsertIterator(mutable_field); +} + +// Provides a back insert iterator for RepeatedPtrField instances +// similar to std::back_inserter() which transfers the ownership while +// copying elements. +template internal::AllocatedRepeatedPtrFieldBackInsertIterator +AllocatedRepeatedPtrFieldBackInserter( + RepeatedPtrField* const mutable_field) { + return internal::AllocatedRepeatedPtrFieldBackInsertIterator( + mutable_field); +} + +// Similar to AllocatedRepeatedPtrFieldBackInserter, using +// UnsafeArenaAddAllocated instead of AddAllocated. +// This is slightly faster if that matters. It is also useful in legacy code +// that uses temporary ownership to avoid copies. Example: +// RepeatedPtrField temp_field; +// temp_field.AddAllocated(new T); +// ... // Do something with temp_field +// temp_field.ExtractSubrange(0, temp_field.size(), nullptr); +// If you put temp_field on the arena this fails, because the ownership +// transfers to the arena at the "AddAllocated" call and is not released anymore +// causing a double delete. Using UnsafeArenaAddAllocated prevents this. +template +internal::UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator +UnsafeArenaAllocatedRepeatedPtrFieldBackInserter( + ::google::protobuf::RepeatedPtrField* const mutable_field) { + return internal::UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator( + mutable_field); +} + +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_REPEATED_FIELD_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/bytestream.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/bytestream.h new file mode 100644 index 0000000000000000000000000000000000000000..86510d140bafc8256f894edccac50bef4b72b37f --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/bytestream.h @@ -0,0 +1,348 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file declares the ByteSink and ByteSource abstract interfaces. These +// interfaces represent objects that consume (ByteSink) or produce (ByteSource) +// a sequence of bytes. Using these abstract interfaces in your APIs can help +// make your code work with a variety of input and output types. +// +// This file also declares the following commonly used implementations of these +// interfaces. +// +// ByteSink: +// UncheckedArrayByteSink Writes to an array, without bounds checking +// CheckedArrayByteSink Writes to an array, with bounds checking +// GrowingArrayByteSink Allocates and writes to a growable buffer +// StringByteSink Writes to an STL string +// NullByteSink Consumes a never-ending stream of bytes +// +// ByteSource: +// ArrayByteSource Reads from an array or string/StringPiece +// LimitedByteSource Limits the number of bytes read from an + +#ifndef GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_ +#define GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_ + +#include +#include + +#include +#include + +class CordByteSink; +class MemBlock; + +namespace google { +namespace protobuf { +namespace strings { + +// An abstract interface for an object that consumes a sequence of bytes. This +// interface offers a way to append data as well as a Flush() function. +// +// Example: +// +// string my_data; +// ... +// ByteSink* sink = ... +// sink->Append(my_data.data(), my_data.size()); +// sink->Flush(); +// +class LIBPROTOBUF_EXPORT ByteSink { + public: + ByteSink() {} + virtual ~ByteSink() {} + + // Appends the "n" bytes starting at "bytes". + virtual void Append(const char* bytes, size_t n) = 0; + + // Flushes internal buffers. The default implemenation does nothing. ByteSink + // subclasses may use internal buffers that require calling Flush() at the end + // of the stream. + virtual void Flush(); + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSink); +}; + +// An abstract interface for an object that produces a fixed-size sequence of +// bytes. +// +// Example: +// +// ByteSource* source = ... +// while (source->Available() > 0) { +// StringPiece data = source->Peek(); +// ... do something with "data" ... +// source->Skip(data.length()); +// } +// +class LIBPROTOBUF_EXPORT ByteSource { + public: + ByteSource() {} + virtual ~ByteSource() {} + + // Returns the number of bytes left to read from the source. Available() + // should decrease by N each time Skip(N) is called. Available() may not + // increase. Available() returning 0 indicates that the ByteSource is + // exhausted. + // + // Note: Size() may have been a more appropriate name as it's more + // indicative of the fixed-size nature of a ByteSource. + virtual size_t Available() const = 0; + + // Returns a StringPiece of the next contiguous region of the source. Does not + // reposition the source. The returned region is empty iff Available() == 0. + // + // The returned region is valid until the next call to Skip() or until this + // object is destroyed, whichever occurs first. 
+ // + // The length of the returned StringPiece will be <= Available(). + virtual StringPiece Peek() = 0; + + // Skips the next n bytes. Invalidates any StringPiece returned by a previous + // call to Peek(). + // + // REQUIRES: Available() >= n + virtual void Skip(size_t n) = 0; + + // Writes the next n bytes in this ByteSource to the given ByteSink, and + // advances this ByteSource past the copied bytes. The default implementation + // of this method just copies the bytes normally, but subclasses might + // override CopyTo to optimize certain cases. + // + // REQUIRES: Available() >= n + virtual void CopyTo(ByteSink* sink, size_t n); + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSource); +}; + +// +// Some commonly used implementations of ByteSink +// + +// Implementation of ByteSink that writes to an unsized byte array. No +// bounds-checking is performed--it is the caller's responsibility to ensure +// that the destination array is large enough. +// +// Example: +// +// char buf[10]; +// UncheckedArrayByteSink sink(buf); +// sink.Append("hi", 2); // OK +// sink.Append(data, 100); // WOOPS! Overflows buf[10]. +// +class LIBPROTOBUF_EXPORT UncheckedArrayByteSink : public ByteSink { + public: + explicit UncheckedArrayByteSink(char* dest) : dest_(dest) {} + virtual void Append(const char* data, size_t n); + + // Returns the current output pointer so that a caller can see how many bytes + // were produced. + // + // Note: this method is not part of the ByteSink interface. + char* CurrentDestination() const { return dest_; } + + private: + char* dest_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(UncheckedArrayByteSink); +}; + +// Implementation of ByteSink that writes to a sized byte array. This sink will +// not write more than "capacity" bytes to outbuf. Once "capacity" bytes are +// appended, subsequent bytes will be ignored and Overflowed() will return true. +// Overflowed() does not cause a runtime error (i.e., it does not CHECK fail). +// +// Example: +// +// char buf[10]; +// CheckedArrayByteSink sink(buf, 10); +// sink.Append("hi", 2); // OK +// sink.Append(data, 100); // Will only write 8 more bytes +// +class LIBPROTOBUF_EXPORT CheckedArrayByteSink : public ByteSink { + public: + CheckedArrayByteSink(char* outbuf, size_t capacity); + virtual void Append(const char* bytes, size_t n); + + // Returns the number of bytes actually written to the sink. + size_t NumberOfBytesWritten() const { return size_; } + + // Returns true if any bytes were discarded, i.e., if there was an + // attempt to write more than 'capacity' bytes. + bool Overflowed() const { return overflowed_; } + + private: + char* outbuf_; + const size_t capacity_; + size_t size_; + bool overflowed_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CheckedArrayByteSink); +}; + +// Implementation of ByteSink that allocates an internal buffer (a char array) +// and expands it as needed to accommodate appended data (similar to a string), +// and allows the caller to take ownership of the internal buffer via the +// GetBuffer() method. The buffer returned from GetBuffer() must be deleted by +// the caller with delete[]. GetBuffer() also sets the internal buffer to be +// empty, and subsequent appends to the sink will create a new buffer. The +// destructor will free the internal buffer if GetBuffer() was not called. 
+// +// Example: +// +// GrowingArrayByteSink sink(10); +// sink.Append("hi", 2); +// sink.Append(data, n); +// const char* buf = sink.GetBuffer(); // Ownership transferred +// delete[] buf; +// +class LIBPROTOBUF_EXPORT GrowingArrayByteSink : public strings::ByteSink { + public: + explicit GrowingArrayByteSink(size_t estimated_size); + virtual ~GrowingArrayByteSink(); + virtual void Append(const char* bytes, size_t n); + + // Returns the allocated buffer, and sets nbytes to its size. The caller takes + // ownership of the buffer and must delete it with delete[]. + char* GetBuffer(size_t* nbytes); + + private: + void Expand(size_t amount); + void ShrinkToFit(); + + size_t capacity_; + char* buf_; + size_t size_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GrowingArrayByteSink); +}; + +// Implementation of ByteSink that appends to the given string. +// Existing contents of "dest" are not modified; new data is appended. +// +// Example: +// +// string dest = "Hello "; +// StringByteSink sink(&dest); +// sink.Append("World", 5); +// assert(dest == "Hello World"); +// +class LIBPROTOBUF_EXPORT StringByteSink : public ByteSink { + public: + explicit StringByteSink(string* dest) : dest_(dest) {} + virtual void Append(const char* data, size_t n); + + private: + string* dest_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StringByteSink); +}; + +// Implementation of ByteSink that discards all data. +// +// Example: +// +// NullByteSink sink; +// sink.Append(data, data.size()); // All data ignored. +// +class LIBPROTOBUF_EXPORT NullByteSink : public ByteSink { + public: + NullByteSink() {} + virtual void Append(const char *data, size_t n) {} + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(NullByteSink); +}; + +// +// Some commonly used implementations of ByteSource +// + +// Implementation of ByteSource that reads from a StringPiece. +// +// Example: +// +// string data = "Hello"; +// ArrayByteSource source(data); +// assert(source.Available() == 5); +// assert(source.Peek() == "Hello"); +// +class LIBPROTOBUF_EXPORT ArrayByteSource : public ByteSource { + public: + explicit ArrayByteSource(StringPiece s) : input_(s) {} + + virtual size_t Available() const; + virtual StringPiece Peek(); + virtual void Skip(size_t n); + + private: + StringPiece input_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayByteSource); +}; + +// Implementation of ByteSource that wraps another ByteSource, limiting the +// number of bytes returned. +// +// The caller maintains ownership of the underlying source, and may not use the +// underlying source while using the LimitByteSource object. The underlying +// source's pointer is advanced by n bytes every time this LimitByteSource +// object is advanced by n. +// +// Example: +// +// string data = "Hello World"; +// ArrayByteSource abs(data); +// assert(abs.Available() == data.size()); +// +// LimitByteSource limit(abs, 5); +// assert(limit.Available() == 5); +// assert(limit.Peek() == "Hello"); +// +class LIBPROTOBUF_EXPORT LimitByteSource : public ByteSource { + public: + // Returns at most "limit" bytes from "source". + LimitByteSource(ByteSource* source, size_t limit); + + virtual size_t Available() const; + virtual StringPiece Peek(); + virtual void Skip(size_t n); + + // We override CopyTo so that we can forward to the underlying source, in + // case it has an efficient implementation of CopyTo. 
+  virtual void CopyTo(ByteSink* sink, size_t n);
+
+ private:
+  ByteSource* source_;
+  size_t limit_;
+};
+
+}  // namespace strings
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/casts.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/casts.h
new file mode 100644
index 0000000000000000000000000000000000000000..35e2dba0585160b7d5820da2918916fd80362371
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/casts.h
@@ -0,0 +1,134 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2014 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_CASTS_H__
+#define GOOGLE_PROTOBUF_CASTS_H__
+
+#include <google/protobuf/stubs/common.h>
+
+#include <type_traits>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+// Use implicit_cast as a safe version of static_cast or const_cast
+// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
+// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
+// a const pointer to Foo).
+// When you use implicit_cast, the compiler checks that the cast is safe.
+// Such explicit implicit_casts are necessary in surprisingly many
+// situations where C++ demands an exact type match instead of an
+// argument type convertible to a target type.
+//
+// The From type can be inferred, so the preferred syntax for using
+// implicit_cast is the same as for static_cast etc.:
+//
+//   implicit_cast<ToType>(expr)
+//
+// implicit_cast would have been part of the C++ standard library,
+// but the proposal was submitted too late.  It will probably make
+// its way into the language in the future.
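+//
+// A minimal usage sketch (Base/Derived are hypothetical types):
+//
+//   class Base {};
+//   class Derived : public Base {};
+//   Derived d;
+//   Base* b = implicit_cast<Base*>(&d);  // upcast: checked at compile time
+//   // implicit_cast<Derived*>(b);       // would not compile: not an upcast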
+template<typename To, typename From>
+inline To implicit_cast(From const &f) {
+  return f;
+}
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use implicit_cast<>, since upcasts
+// always succeed.  When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo?  It
+// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,
+// when you downcast, you should use this macro.  In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not).  In normal mode, we do the efficient static_cast<>
+// instead.  Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+//    This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (eg code like this:
+//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+
+template<typename To, typename From>  // use like this: down_cast<T*>(foo);
+inline To down_cast(From* f) {        // so we only accept pointers
+  // Ensures that To is a sub-type of From *.  This test is here only
+  // for compile-time type checking, and has no overhead in an
+  // optimized build at run-time, as it will be optimized away
+  // completely.
+  if (false) {
+    implicit_cast<From*, To>(0);
+  }
+
+#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)
+  assert(f == NULL || dynamic_cast<To>(f) != NULL);  // RTTI: debug mode only!
+#endif
+  return static_cast<To>(f);
+}
+
+template<typename To, typename From>  // use like this: down_cast<T&>(foo);
+inline To down_cast(From& f) {
+  typedef typename std::remove_reference<To>::type* ToAsPointer;
+  // Ensures that To is a sub-type of From *.  This test is here only
+  // for compile-time type checking, and has no overhead in an
+  // optimized build at run-time, as it will be optimized away
+  // completely.
+  if (false) {
+    implicit_cast<From*, ToAsPointer>(0);
+  }
+
+#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)
+  // RTTI: debug mode only!
+  assert(dynamic_cast<ToAsPointer>(&f) != NULL);
+#endif
+  return *static_cast<ToAsPointer>(&f);
+}
+
+template<typename To, typename From>
+inline To bit_cast(const From& from) {
+  GOOGLE_COMPILE_ASSERT(sizeof(From) == sizeof(To),
+                        bit_cast_with_different_sizes);
+  To dest;
+  memcpy(&dest, &from, sizeof(dest));
+  return dest;
+}
+
+}  // namespace internal
+
+// We made these internal so that they would show up as such in the docs,
+// but we don't want to stick "internal::" in front of them everywhere.
+using internal::implicit_cast;
+using internal::down_cast;
+using internal::bit_cast;
+
+}  // namespace protobuf
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_CASTS_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/common.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/common.h
new file mode 100644
index 0000000000000000000000000000000000000000..d35377ae58f770bfd9b804bfbf85adbc6d298314
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/common.h
@@ -0,0 +1,242 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) and others +// +// Contains basic types and utilities used by the rest of the library. + +#ifndef GOOGLE_PROTOBUF_COMMON_H__ +#define GOOGLE_PROTOBUF_COMMON_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +// TODO(liujisi): Remove the following includes after the include clean-up. +#include +#include +#include + +#ifndef PROTOBUF_USE_EXCEPTIONS +#if defined(_MSC_VER) && defined(_CPPUNWIND) + #define PROTOBUF_USE_EXCEPTIONS 1 +#elif defined(__EXCEPTIONS) + #define PROTOBUF_USE_EXCEPTIONS 1 +#else + #define PROTOBUF_USE_EXCEPTIONS 0 +#endif +#endif + +#if PROTOBUF_USE_EXCEPTIONS +#include +#endif +#if defined(__APPLE__) +#include // for TARGET_OS_IPHONE +#endif + +#if defined(__ANDROID__) || defined(GOOGLE_PROTOBUF_OS_ANDROID) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(GOOGLE_PROTOBUF_OS_IPHONE) +#include +#endif + +#if defined(_WIN32) && defined(GetMessage) +// Allow GetMessage to be used as a valid method name in protobuf classes. +// windows.h defines GetMessage() as a macro. Let's re-define it as an inline +// function. The inline function should be equivalent for C++ users. +inline BOOL GetMessage_Win32( + LPMSG lpMsg, HWND hWnd, + UINT wMsgFilterMin, UINT wMsgFilterMax) { + return GetMessage(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax); +} +#undef GetMessage +inline BOOL GetMessage( + LPMSG lpMsg, HWND hWnd, + UINT wMsgFilterMin, UINT wMsgFilterMax) { + return GetMessage_Win32(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax); +} +#endif + +namespace std {} + +namespace google { +namespace protobuf { +namespace internal { + +// Some of these constants are macros rather than const ints so that they can +// be used in #if directives. 
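+//
+// For example, a build sketch that enforces a minimum header version at
+// preprocessing time (the version value is this header's own, below):
+//
+//   #if GOOGLE_PROTOBUF_VERSION < 3006001
+//   #error "This code requires protobuf headers 3.6.1 or newer."
+//   #endif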
+
+// The current version, represented as a single integer to make comparison
+// easier:  major * 10^6 + minor * 10^3 + micro
+#define GOOGLE_PROTOBUF_VERSION 3006001
+
+// A suffix string for alpha, beta or rc releases. Empty for stable releases.
+#define GOOGLE_PROTOBUF_VERSION_SUFFIX ""
+
+// The minimum library version which works with the current version of the
+// headers.
+#define GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION 3006001
+
+// The minimum header version which works with the current version of
+// the library.  This constant should only be used by protoc's C++ code
+// generator.
+static const int kMinHeaderVersionForLibrary = 3006001;
+
+// The minimum protoc version which works with the current version of the
+// headers.
+#define GOOGLE_PROTOBUF_MIN_PROTOC_VERSION 3006001
+
+// The minimum header version which works with the current version of
+// protoc.  This constant should only be used in VerifyVersion().
+static const int kMinHeaderVersionForProtoc = 3006001;
+
+// Verifies that the headers and libraries are compatible.  Use the macro
+// below to call this.
+void LIBPROTOBUF_EXPORT VerifyVersion(int headerVersion, int minLibraryVersion,
+                                      const char* filename);
+
+// Converts a numeric version number to a string.
+std::string LIBPROTOBUF_EXPORT VersionString(int version);
+
+}  // namespace internal
+
+// Place this macro in your main() function (or somewhere before you attempt
+// to use the protobuf library) to verify that the version you link against
+// matches the headers you compiled against.  If a version mismatch is
+// detected, the process will abort.
+#define GOOGLE_PROTOBUF_VERIFY_VERSION                               \
+  ::google::protobuf::internal::VerifyVersion(                      \
+    GOOGLE_PROTOBUF_VERSION, GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION,   \
+    __FILE__)
+
+
+// ===================================================================
+// from google3/util/utf8/public/unilib.h
+
+class StringPiece;
+namespace internal {
+
+// Checks if the buffer contains structurally-valid UTF-8.  Implemented in
+// structurally_valid.cc.
+LIBPROTOBUF_EXPORT bool IsStructurallyValidUTF8(const char* buf, int len);
+
+inline bool IsStructurallyValidUTF8(const std::string& str) {
+  return IsStructurallyValidUTF8(str.data(), static_cast<int>(str.length()));
+}
+
+// Returns the initial number of bytes of structurally valid UTF-8.
+LIBPROTOBUF_EXPORT int UTF8SpnStructurallyValid(const StringPiece& str);
+
+// Coerce UTF-8 byte string in src_str to be
+// a structurally-valid equal-length string by selectively
+// overwriting illegal bytes with replace_char (typically ' ' or '?').
+// replace_char must be legal printable 7-bit Ascii 0x20..0x7e.
+// src_str is read-only.
+//
+// Returns a pointer to the output buffer: src_str.data() if no changes were
+// made, or dst if some bytes were changed. dst is allocated by the caller
+// and must be at least as big as src_str.
+//
+// Optimized for the case where every byte is already structurally valid,
+// so that no byte copying needs to be done.
+//
+LIBPROTOBUF_EXPORT char* UTF8CoerceToStructurallyValid(
+    const StringPiece& str, char* dst, char replace_char);
+
+}  // namespace internal
+
+
+// ===================================================================
+// Shutdown support.
+
+// Shut down the entire protocol buffers library, deleting all static-duration
+// objects allocated by the library or by generated .pb.cc files.
+// +// There are two reasons you might want to call this: +// * You use a draconian definition of "memory leak" in which you expect +// every single malloc() to have a corresponding free(), even for objects +// which live until program exit. +// * You are writing a dynamically-loaded library which needs to clean up +// after itself when the library is unloaded. +// +// It is safe to call this multiple times. However, it is not safe to use +// any other part of the protocol buffers library after +// ShutdownProtobufLibrary() has been called. Furthermore this call is not +// thread safe, user needs to synchronize multiple calls. +LIBPROTOBUF_EXPORT void ShutdownProtobufLibrary(); + +namespace internal { + +// Register a function to be called when ShutdownProtocolBuffers() is called. +LIBPROTOBUF_EXPORT void OnShutdown(void (*func)()); +// Run an arbitrary function on an arg +LIBPROTOBUF_EXPORT void OnShutdownRun(void (*f)(const void*), const void* arg); + +template +T* OnShutdownDelete(T* p) { + OnShutdownRun([](const void* p) { delete static_cast(p); }, p); + return p; +} + +} // namespace internal + +#if PROTOBUF_USE_EXCEPTIONS +class FatalException : public std::exception { + public: + FatalException(const char* filename, int line, const std::string& message) + : filename_(filename), line_(line), message_(message) {} + virtual ~FatalException() throw(); + + virtual const char* what() const throw(); + + const char* filename() const { return filename_; } + int line() const { return line_; } + const std::string& message() const { return message_; } + + private: + const char* filename_; + const int line_; + const std::string message_; +}; +#endif + +// This is at the end of the file instead of the beginning to work around a bug +// in some versions of MSVC. +using std::string; + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_COMMON_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/fastmem.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/fastmem.h new file mode 100644 index 0000000000000000000000000000000000000000..1f1f6ed3df41a71d8f05677b5dde3626dd58563c --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/fastmem.h @@ -0,0 +1,153 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2014 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Fast memory copying and comparison routines.
+//   strings::fastmemcmp_inlined() replaces memcmp()
+//   strings::memcpy_inlined() replaces memcpy()
+//   strings::memeq(a, b, n) replaces memcmp(a, b, n) == 0
+//
+// strings::*_inlined() routines are inline versions of the
+// routines exported by this module.  Sometimes using the inlined
+// versions is faster.  Measure before using the inlined versions.
+//
+// Performance measurement:
+//   strings::fastmemcmp_inlined
+//     Analysis: memcmp, fastmemcmp_inlined, fastmemcmp
+//     2012-01-30
+
+#ifndef GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
+#define GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
+
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Return true if the n bytes at a equal the n bytes at b.
+// The regions are allowed to overlap.
+//
+// The performance is similar to the performance of memcmp(), but faster for
+// moderately-sized inputs, or inputs that share a common prefix and differ
+// somewhere in their last 8 bytes. Further optimizations can be added later
+// if it makes sense to do so.
+inline bool memeq(const char* a, const char* b, size_t n) {
+  size_t n_rounded_down = n & ~static_cast<size_t>(7);
+  if (GOOGLE_PREDICT_FALSE(n_rounded_down == 0)) {  // n <= 7
+    return memcmp(a, b, n) == 0;
+  }
+  // n >= 8
+  uint64 u = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
+  uint64 v =
+      GOOGLE_UNALIGNED_LOAD64(a + n - 8) ^ GOOGLE_UNALIGNED_LOAD64(b + n - 8);
+  if ((u | v) != 0) {  // The first or last 8 bytes differ.
+    return false;
+  }
+  a += 8;
+  b += 8;
+  n = n_rounded_down - 8;
+  if (n > 128) {
+    // As of 2012, memcmp on x86-64 uses a big unrolled loop with SSE2
+    // instructions, and while we could try to do something faster, it
+    // doesn't seem worth pursuing.
+    return memcmp(a, b, n) == 0;
+  }
+  for (; n >= 16; n -= 16) {
+    uint64 x = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
+    uint64 y = GOOGLE_UNALIGNED_LOAD64(a + 8) ^ GOOGLE_UNALIGNED_LOAD64(b + 8);
+    if ((x | y) != 0) {
+      return false;
+    }
+    a += 16;
+    b += 16;
+  }
+  // n must be 0 or 8 now because it was a multiple of 8 at the top of the loop.
+  return n == 0 || GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b);
+}
+
+inline int fastmemcmp_inlined(const char *a, const char *b, size_t n) {
+  if (n >= 64) {
+    return memcmp(a, b, n);
+  }
+  const char* a_limit = a + n;
+  while (a + sizeof(uint64) <= a_limit &&
+         GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b)) {
+    a += sizeof(uint64);
+    b += sizeof(uint64);
+  }
+  if (a + sizeof(uint32) <= a_limit &&
+      GOOGLE_UNALIGNED_LOAD32(a) == GOOGLE_UNALIGNED_LOAD32(b)) {
+    a += sizeof(uint32);
+    b += sizeof(uint32);
+  }
+  while (a < a_limit) {
+    int d = static_cast<int>(static_cast<uint32>(*a++) -
+                             static_cast<uint32>(*b++));
+    if (d) return d;
+  }
+  return 0;
+}
+
+// The standard memcpy operation is slow for variable small sizes.
+// This implementation inlines the optimal realization for sizes 1 to 16.
+// To avoid code bloat, don't use it in spots that are not
+// performance-critical, or when you don't expect values of
+// size <= 16 to be very frequent.
+inline void memcpy_inlined(char *dst, const char *src, size_t size) {
+  // Compiler inlines code with minimal amount of data movement when third
+  // parameter of memcpy is a constant.
+  switch (size) {
+    case 1: memcpy(dst, src, 1); break;
+    case 2: memcpy(dst, src, 2); break;
+    case 3: memcpy(dst, src, 3); break;
+    case 4: memcpy(dst, src, 4); break;
+    case 5: memcpy(dst, src, 5); break;
+    case 6: memcpy(dst, src, 6); break;
+    case 7: memcpy(dst, src, 7); break;
+    case 8: memcpy(dst, src, 8); break;
+    case 9: memcpy(dst, src, 9); break;
+    case 10: memcpy(dst, src, 10); break;
+    case 11: memcpy(dst, src, 11); break;
+    case 12: memcpy(dst, src, 12); break;
+    case 13: memcpy(dst, src, 13); break;
+    case 14: memcpy(dst, src, 14); break;
+    case 15: memcpy(dst, src, 15); break;
+    case 16: memcpy(dst, src, 16); break;
+    default: memcpy(dst, src, size); break;
+  }
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/hash.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/hash.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd8ba156aee9840dd99ff4b337b93104edffcf40
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/hash.h
@@ -0,0 +1,441 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// Deals with the fact that hash_map is not defined everywhere.
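+//
+// For illustration, a sketch of how the wrappers declared below are used
+// (they dispatch to the platform's unordered/hash map under the hood):
+//
+//   google::protobuf::hash_map<string, int> counts;
+//   counts["token"] += 1;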
+ +#ifndef GOOGLE_PROTOBUF_STUBS_HASH_H__ +#define GOOGLE_PROTOBUF_STUBS_HASH_H__ + +#include +#include + +#define GOOGLE_PROTOBUF_HAVE_HASH_MAP 1 +#define GOOGLE_PROTOBUF_HAVE_HASH_SET 1 + +// Use C++11 unordered_{map|set} if available. +#if ((defined(_LIBCPP_STD_VER) && _LIBCPP_STD_VER >= 11) || \ + (((__cplusplus >= 201103L) || defined(__GXX_EXPERIMENTAL_CXX0X)) && \ + (__GLIBCXX__ > 20090421))) +# define GOOGLE_PROTOBUF_HAS_CXX11_HASH + +// For XCode >= 4.6: the compiler is clang with libc++. +// For earlier XCode version: the compiler is gcc-4.2.1 with libstdc++. +// libc++ provides and friends even in non C++11 mode, +// and it does not provide the tr1 library. Therefore the following macro +// checks against this special case. +// Note that we should not test the __APPLE_CC__ version number or the +// __clang__ macro, since the new compiler can still use -stdlib=libstdc++, in +// which case is not compilable without -std=c++11 +#elif defined(__APPLE_CC__) +# if __GNUC__ >= 4 +# define GOOGLE_PROTOBUF_HAS_TR1 +# else +// Not tested for gcc < 4... These setting can compile under 4.2.1 though. +# define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set +# endif + +// Version checks for gcc. +#elif defined(__GNUC__) +// For GCC 4.x+, use tr1::unordered_map/set; otherwise, follow the +// instructions from: +// https://gcc.gnu.org/onlinedocs/libstdc++/manual/backwards.html +# if __GNUC__ >= 4 +# define GOOGLE_PROTOBUF_HAS_TR1 +# elif __GNUC__ >= 3 +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set +# if __GNUC__ == 3 && __GNUC_MINOR__ == 0 +# define GOOGLE_PROTOBUF_HASH_NAMESPACE std // GCC 3.0 +# else +# define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx // GCC 3.1 and later +# endif +# else +# define GOOGLE_PROTOBUF_HASH_NAMESPACE +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set +# endif + +// GCC <= 4.1 does not define std::tr1::hash for `long long int` or `long long unsigned int` +# if __GNUC__ == 4 && defined(__GNUC_MINOR__) && __GNUC_MINOR__ <= 1 +# undef GOOGLE_PROTOBUF_HAS_TR1 +# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP +# undef GOOGLE_PROTOBUF_HAVE_HASH_SET +# endif + +// Version checks for MSC. +// Apparently Microsoft decided to move hash_map *back* to the std namespace in +// MSVC 2010: +// http://blogs.msdn.com/vcblog/archive/2009/05/25/stl-breaking-changes-in-visual-studio-2010-beta-1.aspx +// And.. they are moved back to stdext in MSVC 2013 (haven't checked 2012). That +// said, use unordered_map for MSVC 2010 and beyond is our safest bet. 
+#elif defined(_MSC_VER) +# if _MSC_VER >= 1600 // Since Visual Studio 2010 +# define GOOGLE_PROTOBUF_HAS_CXX11_HASH +# define GOOGLE_PROTOBUF_HASH_COMPARE std::hash_compare +# elif _MSC_VER >= 1500 // Since Visual Studio 2008 +# define GOOGLE_PROTOBUF_HASH_NAMESPACE stdext +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set +# define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare +# define GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE +# elif _MSC_VER >= 1310 +# define GOOGLE_PROTOBUF_HASH_NAMESPACE stdext +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set +# define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare +# else +# define GOOGLE_PROTOBUF_HASH_NAMESPACE std +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set +# define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare +# endif + +// **ADD NEW COMPILERS SUPPORT HERE.** +// For other compilers, undefine the macro and fallback to use std::map, in +// google/protobuf/stubs/hash.h +#else +# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP +# undef GOOGLE_PROTOBUF_HAVE_HASH_SET +#endif + +#if defined(GOOGLE_PROTOBUF_HAS_CXX11_HASH) +# define GOOGLE_PROTOBUF_HASH_NAMESPACE std +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set +#elif defined(GOOGLE_PROTOBUF_HAS_TR1) +# define GOOGLE_PROTOBUF_HASH_NAMESPACE std::tr1 +# include +# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map +# include +# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set +#endif + +# define GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START \ + namespace google { \ + namespace protobuf { +# define GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END }} + +#undef GOOGLE_PROTOBUF_HAS_CXX11_HASH +#undef GOOGLE_PROTOBUF_HAS_TR1 + +#if defined(GOOGLE_PROTOBUF_HAVE_HASH_MAP) && \ + defined(GOOGLE_PROTOBUF_HAVE_HASH_SET) +#else +#define GOOGLE_PROTOBUF_MISSING_HASH +#include +#include +#endif + +namespace google { +namespace protobuf { + +#ifdef GOOGLE_PROTOBUF_MISSING_HASH +#undef GOOGLE_PROTOBUF_MISSING_HASH + +// This system doesn't have hash_map or hash_set. Emulate them using map and +// set. + +// Make hash be the same as less. Note that everywhere where custom +// hash functions are defined in the protobuf code, they are also defined such +// that they can be used as "less" functions, which is required by MSVC anyway. +template +struct hash { + // Dummy, just to make derivative hash functions compile. + int operator()(const Key& key) { + GOOGLE_LOG(FATAL) << "Should never be called."; + return 0; + } + + inline bool operator()(const Key& a, const Key& b) const { + return a < b; + } +}; + +// Make sure char* is compared by value. +template <> +struct hash { + // Dummy, just to make derivative hash functions compile. 
+ int operator()(const char* key) { + GOOGLE_LOG(FATAL) << "Should never be called."; + return 0; + } + + inline bool operator()(const char* a, const char* b) const { + return strcmp(a, b) < 0; + } +}; + +template , + typename EqualKey = std::equal_to, + typename Alloc = std::allocator< std::pair > > +class hash_map : public std::map { + typedef std::map BaseClass; + + public: + hash_map(int a = 0, const HashFcn& b = HashFcn(), + const EqualKey& c = EqualKey(), + const Alloc& d = Alloc()) : BaseClass(b, d) {} + + HashFcn hash_function() const { return HashFcn(); } +}; + +template , + typename EqualKey = std::equal_to > +class hash_set : public std::set { + public: + hash_set(int = 0) {} + + HashFcn hash_function() const { return HashFcn(); } +}; + +#elif defined(_MSC_VER) && !defined(_STLPORT_VERSION) && \ + !(defined(_LIBCPP_STD_VER) && _LIBCPP_STD_VER >= 11) + +template +struct hash : public GOOGLE_PROTOBUF_HASH_COMPARE { +}; + +// MSVC's hash_compare hashes based on the string contents but +// compares based on the string pointer. WTF? +class CstringLess { + public: + inline bool operator()(const char* a, const char* b) const { + return strcmp(a, b) < 0; + } +}; + +template <> +struct hash + : public GOOGLE_PROTOBUF_HASH_COMPARE {}; + +#ifdef GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE + +template +struct InternalHashCompare : public GOOGLE_PROTOBUF_HASH_COMPARE { + InternalHashCompare() {} + InternalHashCompare(HashFcn hashfcn, EqualKey equalkey) + : hashfcn_(hashfcn), equalkey_(equalkey) {} + size_t operator()(const Key& key) const { return hashfcn_(key); } + bool operator()(const Key& key1, const Key& key2) const { + return !equalkey_(key1, key2); + } + HashFcn hashfcn_; + EqualKey equalkey_; +}; + +template , + typename EqualKey = std::equal_to, + typename Alloc = std::allocator< std::pair > > +class hash_map + : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS< + Key, Data, InternalHashCompare, Alloc> { + typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS< + Key, Data, InternalHashCompare, Alloc> BaseClass; + + public: + hash_map(int a = 0, const HashFcn& b = HashFcn(), + const EqualKey& c = EqualKey(), const Alloc& d = Alloc()) + : BaseClass(InternalHashCompare(b, c), d) {} + + HashFcn hash_function() const { return HashFcn(); } +}; + +template , + typename EqualKey = std::equal_to > +class hash_set + : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS< + Key, InternalHashCompare > { + public: + hash_set(int = 0) {} + + HashFcn hash_function() const { return HashFcn(); } +}; + +#else // GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE + +template , + typename EqualKey = std::equal_to, + typename Alloc = std::allocator< std::pair > > +class hash_map + : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS< + Key, Data, HashFcn, EqualKey, Alloc> { + typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS< + Key, Data, HashFcn, EqualKey, Alloc> BaseClass; + + public: + hash_map(int a = 0, const HashFcn& b = HashFcn(), + const EqualKey& c = EqualKey(), + const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {} + + HashFcn hash_function() const { return HashFcn(); } +}; + +template , + typename EqualKey = std::equal_to > +class hash_set + : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS< + Key, HashFcn, EqualKey> { + public: + hash_set(int = 0) {} + + HashFcn hash_function() const { return HashFcn(); } +}; +#endif // GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE + +#else // 
defined(_MSC_VER) && !defined(_STLPORT_VERSION) + +template +struct hash : public GOOGLE_PROTOBUF_HASH_NAMESPACE::hash { +}; + +template +struct hash { + inline size_t operator()(const Key* key) const { + return reinterpret_cast(key); + } +}; + +// Unlike the old SGI version, the TR1 "hash" does not special-case char*. So, +// we go ahead and provide our own implementation. +template <> +struct hash { + inline size_t operator()(const char* str) const { + size_t result = 0; + for (; *str != '\0'; str++) { + result = 5 * result + static_cast(*str); + } + return result; + } +}; + +template<> +struct hash { + size_t operator()(bool x) const { + return static_cast(x); + } +}; + +template , + typename EqualKey = std::equal_to, + typename Alloc = std::allocator< std::pair > > +class hash_map + : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS< + Key, Data, HashFcn, EqualKey, Alloc> { + typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS< + Key, Data, HashFcn, EqualKey, Alloc> BaseClass; + + public: + hash_map(int a = 0, const HashFcn& b = HashFcn(), + const EqualKey& c = EqualKey(), + const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {} + + HashFcn hash_function() const { return HashFcn(); } +}; + +template , + typename EqualKey = std::equal_to > +class hash_set + : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS< + Key, HashFcn, EqualKey> { + public: + hash_set(int = 0) {} + + HashFcn hash_function() const { return HashFcn(); } +}; + +#endif // !GOOGLE_PROTOBUF_MISSING_HASH + +template <> +struct hash { + inline size_t operator()(const string& key) const { + return hash()(key.c_str()); + } + + static const size_t bucket_size = 4; + static const size_t min_buckets = 8; + inline bool operator()(const string& a, const string& b) const { + return a < b; + } +}; + +template +struct hash > { + inline size_t operator()(const std::pair& key) const { + size_t first_hash = hash()(key.first); + size_t second_hash = hash()(key.second); + + // FIXME(kenton): What is the best way to compute this hash? I have + // no idea! This seems a bit better than an XOR. + return first_hash * ((1 << 16) - 1) + second_hash; + } + + static const size_t bucket_size = 4; + static const size_t min_buckets = 8; + inline bool operator()(const std::pair& a, + const std::pair& b) const { + return a < b; + } +}; + +// Used by GCC/SGI STL only. (Why isn't this provided by the standard +// library? :( ) +struct streq { + inline bool operator()(const char* a, const char* b) const { + return strcmp(a, b) == 0; + } +}; + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_STUBS_HASH_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/int128.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/int128.h new file mode 100644 index 0000000000000000000000000000000000000000..1499bb76d5e0ba7959243ef462059f50f98831db --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/int128.h @@ -0,0 +1,383 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#ifndef GOOGLE_PROTOBUF_STUBS_INT128_H_ +#define GOOGLE_PROTOBUF_STUBS_INT128_H_ + +#include + +#include + +namespace google { +namespace protobuf { + +struct uint128_pod; + +// TODO(xiaofeng): Define GOOGLE_PROTOBUF_HAS_CONSTEXPR when constexpr is +// available. +#ifdef GOOGLE_PROTOBUF_HAS_CONSTEXPR +# define UINT128_CONSTEXPR constexpr +#else +# define UINT128_CONSTEXPR +#endif + +// An unsigned 128-bit integer type. Thread-compatible. +class LIBPROTOBUF_EXPORT uint128 { + public: + UINT128_CONSTEXPR uint128(); // Sets to 0, but don't trust on this behavior. + UINT128_CONSTEXPR uint128(uint64 top, uint64 bottom); +#ifndef SWIG + UINT128_CONSTEXPR uint128(int bottom); + UINT128_CONSTEXPR uint128(uint32 bottom); // Top 96 bits = 0 +#endif + UINT128_CONSTEXPR uint128(uint64 bottom); // hi_ = 0 + UINT128_CONSTEXPR uint128(const uint128_pod &val); + + // Trivial copy constructor, assignment operator and destructor. + + void Initialize(uint64 top, uint64 bottom); + + // Arithmetic operators. + uint128& operator+=(const uint128& b); + uint128& operator-=(const uint128& b); + uint128& operator*=(const uint128& b); + // Long division/modulo for uint128. + uint128& operator/=(const uint128& b); + uint128& operator%=(const uint128& b); + uint128 operator++(int); + uint128 operator--(int); + uint128& operator<<=(int); + uint128& operator>>=(int); + uint128& operator&=(const uint128& b); + uint128& operator|=(const uint128& b); + uint128& operator^=(const uint128& b); + uint128& operator++(); + uint128& operator--(); + + friend uint64 Uint128Low64(const uint128& v); + friend uint64 Uint128High64(const uint128& v); + + // We add "std::" to avoid including all of port.h. 
+ LIBPROTOBUF_EXPORT friend std::ostream& operator<<(std::ostream& o, + const uint128& b); + + private: + static void DivModImpl(uint128 dividend, uint128 divisor, + uint128* quotient_ret, uint128* remainder_ret); + + // Little-endian memory order optimizations can benefit from + // having lo_ first, hi_ last. + // See util/endian/endian.h and Load128/Store128 for storing a uint128. + uint64 lo_; + uint64 hi_; + + // Not implemented, just declared for catching automatic type conversions. + uint128(uint8); + uint128(uint16); + uint128(float v); + uint128(double v); +}; + +// This is a POD form of uint128 which can be used for static variables which +// need to be operated on as uint128. +struct uint128_pod { + // Note: The ordering of fields is different than 'class uint128' but the + // same as its 2-arg constructor. This enables more obvious initialization + // of static instances, which is the primary reason for this struct in the + // first place. This does not seem to defeat any optimizations wrt + // operations involving this struct. + uint64 hi; + uint64 lo; +}; + +LIBPROTOBUF_EXPORT extern const uint128_pod kuint128max; + +// allow uint128 to be logged +LIBPROTOBUF_EXPORT extern std::ostream& operator<<(std::ostream& o, + const uint128& b); + +// Methods to access low and high pieces of 128-bit value. +// Defined externally from uint128 to facilitate conversion +// to native 128-bit types when compilers support them. +inline uint64 Uint128Low64(const uint128& v) { return v.lo_; } +inline uint64 Uint128High64(const uint128& v) { return v.hi_; } + +// TODO: perhaps it would be nice to have int128, a signed 128-bit type? + +// -------------------------------------------------------------------------- +// Implementation details follow +// -------------------------------------------------------------------------- +inline bool operator==(const uint128& lhs, const uint128& rhs) { + return (Uint128Low64(lhs) == Uint128Low64(rhs) && + Uint128High64(lhs) == Uint128High64(rhs)); +} +inline bool operator!=(const uint128& lhs, const uint128& rhs) { + return !(lhs == rhs); +} + +inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {} +inline UINT128_CONSTEXPR uint128::uint128(uint64 top, uint64 bottom) + : lo_(bottom), hi_(top) {} +inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v) + : lo_(v.lo), hi_(v.hi) {} +inline UINT128_CONSTEXPR uint128::uint128(uint64 bottom) + : lo_(bottom), hi_(0) {} +#ifndef SWIG +inline UINT128_CONSTEXPR uint128::uint128(uint32 bottom) + : lo_(bottom), hi_(0) {} +inline UINT128_CONSTEXPR uint128::uint128(int bottom) + : lo_(bottom), hi_(static_cast((bottom < 0) ? -1 : 0)) {} +#endif + +#undef UINT128_CONSTEXPR + +inline void uint128::Initialize(uint64 top, uint64 bottom) { + hi_ = top; + lo_ = bottom; +} + +// Comparison operators. + +#define CMP128(op) \ +inline bool operator op(const uint128& lhs, const uint128& rhs) { \ + return (Uint128High64(lhs) == Uint128High64(rhs)) ? 
\ + (Uint128Low64(lhs) op Uint128Low64(rhs)) : \ + (Uint128High64(lhs) op Uint128High64(rhs)); \ +} + +CMP128(<) +CMP128(>) +CMP128(>=) +CMP128(<=) + +#undef CMP128 + +// Unary operators + +inline uint128 operator-(const uint128& val) { + const uint64 hi_flip = ~Uint128High64(val); + const uint64 lo_flip = ~Uint128Low64(val); + const uint64 lo_add = lo_flip + 1; + if (lo_add < lo_flip) { + return uint128(hi_flip + 1, lo_add); + } + return uint128(hi_flip, lo_add); +} + +inline bool operator!(const uint128& val) { + return !Uint128High64(val) && !Uint128Low64(val); +} + +// Logical operators. + +inline uint128 operator~(const uint128& val) { + return uint128(~Uint128High64(val), ~Uint128Low64(val)); +} + +#define LOGIC128(op) \ +inline uint128 operator op(const uint128& lhs, const uint128& rhs) { \ + return uint128(Uint128High64(lhs) op Uint128High64(rhs), \ + Uint128Low64(lhs) op Uint128Low64(rhs)); \ +} + +LOGIC128(|) +LOGIC128(&) +LOGIC128(^) + +#undef LOGIC128 + +#define LOGICASSIGN128(op) \ +inline uint128& uint128::operator op(const uint128& other) { \ + hi_ op other.hi_; \ + lo_ op other.lo_; \ + return *this; \ +} + +LOGICASSIGN128(|=) +LOGICASSIGN128(&=) +LOGICASSIGN128(^=) + +#undef LOGICASSIGN128 + +// Shift operators. + +inline uint128 operator<<(const uint128& val, int amount) { + // uint64 shifts of >= 64 are undefined, so we will need some special-casing. + if (amount < 64) { + if (amount == 0) { + return val; + } + uint64 new_hi = (Uint128High64(val) << amount) | + (Uint128Low64(val) >> (64 - amount)); + uint64 new_lo = Uint128Low64(val) << amount; + return uint128(new_hi, new_lo); + } else if (amount < 128) { + return uint128(Uint128Low64(val) << (amount - 64), 0); + } else { + return uint128(0, 0); + } +} + +inline uint128 operator>>(const uint128& val, int amount) { + // uint64 shifts of >= 64 are undefined, so we will need some special-casing. + if (amount < 64) { + if (amount == 0) { + return val; + } + uint64 new_hi = Uint128High64(val) >> amount; + uint64 new_lo = (Uint128Low64(val) >> amount) | + (Uint128High64(val) << (64 - amount)); + return uint128(new_hi, new_lo); + } else if (amount < 128) { + return uint128(0, Uint128High64(val) >> (amount - 64)); + } else { + return uint128(0, 0); + } +} + +inline uint128& uint128::operator<<=(int amount) { + // uint64 shifts of >= 64 are undefined, so we will need some special-casing. + if (amount < 64) { + if (amount != 0) { + hi_ = (hi_ << amount) | (lo_ >> (64 - amount)); + lo_ = lo_ << amount; + } + } else if (amount < 128) { + hi_ = lo_ << (amount - 64); + lo_ = 0; + } else { + hi_ = 0; + lo_ = 0; + } + return *this; +} + +inline uint128& uint128::operator>>=(int amount) { + // uint64 shifts of >= 64 are undefined, so we will need some special-casing. 
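+  // A concrete sketch: for v = uint128(1, 0) (hi_ = 1, lo_ = 0), v >>= 64
+  // must yield uint128(0, 1): the old high word becomes the new low word,
+  // which a plain 64-bit shift by amount >= 64 could not compute reliably.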
+ if (amount < 64) { + if (amount != 0) { + lo_ = (lo_ >> amount) | (hi_ << (64 - amount)); + hi_ = hi_ >> amount; + } + } else if (amount < 128) { + lo_ = hi_ >> (amount - 64); + hi_ = 0; + } else { + lo_ = 0; + hi_ = 0; + } + return *this; +} + +inline uint128 operator+(const uint128& lhs, const uint128& rhs) { + return uint128(lhs) += rhs; +} + +inline uint128 operator-(const uint128& lhs, const uint128& rhs) { + return uint128(lhs) -= rhs; +} + +inline uint128 operator*(const uint128& lhs, const uint128& rhs) { + return uint128(lhs) *= rhs; +} + +inline uint128 operator/(const uint128& lhs, const uint128& rhs) { + return uint128(lhs) /= rhs; +} + +inline uint128 operator%(const uint128& lhs, const uint128& rhs) { + return uint128(lhs) %= rhs; +} + +inline uint128& uint128::operator+=(const uint128& b) { + hi_ += b.hi_; + uint64 lolo = lo_ + b.lo_; + if (lolo < lo_) + ++hi_; + lo_ = lolo; + return *this; +} + +inline uint128& uint128::operator-=(const uint128& b) { + hi_ -= b.hi_; + if (b.lo_ > lo_) + --hi_; + lo_ -= b.lo_; + return *this; +} + +inline uint128& uint128::operator*=(const uint128& b) { + uint64 a96 = hi_ >> 32; + uint64 a64 = hi_ & 0xffffffffu; + uint64 a32 = lo_ >> 32; + uint64 a00 = lo_ & 0xffffffffu; + uint64 b96 = b.hi_ >> 32; + uint64 b64 = b.hi_ & 0xffffffffu; + uint64 b32 = b.lo_ >> 32; + uint64 b00 = b.lo_ & 0xffffffffu; + // multiply [a96 .. a00] x [b96 .. b00] + // terms higher than c96 disappear off the high side + // terms c96 and c64 are safe to ignore carry bit + uint64 c96 = a96 * b00 + a64 * b32 + a32 * b64 + a00 * b96; + uint64 c64 = a64 * b00 + a32 * b32 + a00 * b64; + this->hi_ = (c96 << 32) + c64; + this->lo_ = 0; + // add terms after this one at a time to capture carry + *this += uint128(a32 * b00) << 32; + *this += uint128(a00 * b32) << 32; + *this += a00 * b00; + return *this; +} + +inline uint128 uint128::operator++(int) { + uint128 tmp(*this); + *this += 1; + return tmp; +} + +inline uint128 uint128::operator--(int) { + uint128 tmp(*this); + *this -= 1; + return tmp; +} + +inline uint128& uint128::operator++() { + *this += 1; + return *this; +} + +inline uint128& uint128::operator--() { + *this -= 1; + return *this; +} + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_STUBS_INT128_H_ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/io_win32.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/io_win32.h new file mode 100644 index 0000000000000000000000000000000000000000..9e17d25304d2d65af03c0f6f2278a3e96685c397 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/io_win32.h @@ -0,0 +1,115 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: laszlocsomor@google.com (Laszlo Csomor) +// +// This file contains the declarations for Windows implementations of +// commonly used POSIX functions such as open(2) and access(2), as well +// as macro definitions for flags of these functions. +// +// By including this file you'll redefine open/access/etc. to +// ::google::protobuf::internal::win32::{open/access/etc.}. +// Make sure you don't include a header that attempts to redeclare or +// redefine these functions, that'll lead to confusing compilation +// errors. It's best to #include this file as the last one to ensure that. +// +// This file is only used on Windows, it's empty on other platforms. + +#ifndef GOOGLE_PROTOBUF_STUBS_IO_WIN32_H__ +#define GOOGLE_PROTOBUF_STUBS_IO_WIN32_H__ + +#if defined(_WIN32) + +#include +#include + +// Compilers on Windows other than MSVC (e.g. Cygwin, MinGW32) define the +// following functions already, except for mkdir. +namespace google { +namespace protobuf { +namespace internal { +namespace win32 { + +LIBPROTOBUF_EXPORT FILE* fopen(const char* path, const char* mode); +LIBPROTOBUF_EXPORT int access(const char* path, int mode); +LIBPROTOBUF_EXPORT int chdir(const char* path); +LIBPROTOBUF_EXPORT int close(int fd); +LIBPROTOBUF_EXPORT int dup(int fd); +LIBPROTOBUF_EXPORT int dup2(int fd1, int fd2); +LIBPROTOBUF_EXPORT int mkdir(const char* path, int _mode); +LIBPROTOBUF_EXPORT int open(const char* path, int flags, int mode = 0); +LIBPROTOBUF_EXPORT int read(int fd, void* buffer, size_t size); +LIBPROTOBUF_EXPORT int setmode(int fd, int mode); +LIBPROTOBUF_EXPORT int stat(const char* path, struct _stat* buffer); +LIBPROTOBUF_EXPORT int write(int fd, const void* buffer, size_t size); +LIBPROTOBUF_EXPORT std::wstring testonly_utf8_to_winpath(const char* path); + +namespace strings { + +// Convert from UTF-16 to Active-Code-Page-encoded or to UTF-8-encoded text. +LIBPROTOBUF_EXPORT bool wcs_to_mbs( + const wchar_t* s, std::string* out, bool outUtf8); + +// Convert from Active-Code-Page-encoded or UTF-8-encoded text to UTF-16. +LIBPROTOBUF_EXPORT bool mbs_to_wcs( + const char* s, std::wstring* out, bool inUtf8); + +// Convert from UTF-8-encoded text to UTF-16. +LIBPROTOBUF_EXPORT bool utf8_to_wcs(const char* input, std::wstring* out); + +// Convert from UTF-16-encoded text to UTF-8. 
+LIBPROTOBUF_EXPORT bool wcs_to_utf8(const wchar_t* input, std::string* out); + +} // namespace strings + +} // namespace win32 +} // namespace internal +} // namespace protobuf +} // namespace google + +#ifndef W_OK +#define W_OK 02 // not defined by MSVC for whatever reason +#endif + +#ifndef F_OK +#define F_OK 00 // not defined by MSVC for whatever reason +#endif + +#ifndef STDIN_FILENO +#define STDIN_FILENO 0 +#endif + +#ifndef STDOUT_FILENO +#define STDOUT_FILENO 1 +#endif + +#endif // defined(_WIN32) + +#endif // GOOGLE_PROTOBUF_STUBS_IO_WIN32_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/logging.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..f69605d9f6393735102e69873395f30188a1b2fd --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/logging.h @@ -0,0 +1,237 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_STUBS_LOGGING_H_ +#define GOOGLE_PROTOBUF_STUBS_LOGGING_H_ + +#include +#include + +// =================================================================== +// emulates google3/base/logging.h + +namespace google { +namespace protobuf { + +enum LogLevel { + LOGLEVEL_INFO, // Informational. This is never actually used by + // libprotobuf. + LOGLEVEL_WARNING, // Warns about issues that, although not technically a + // problem now, could cause problems in the future. For + // example, a // warning will be printed when parsing a + // message that is near the message size limit. + LOGLEVEL_ERROR, // An error occurred which should never happen during + // normal use. + LOGLEVEL_FATAL, // An error occurred from which the library cannot + // recover. 
This usually indicates a programming error + // in the code which calls the library, especially when + // compiled in debug mode. + +#ifdef NDEBUG + LOGLEVEL_DFATAL = LOGLEVEL_ERROR +#else + LOGLEVEL_DFATAL = LOGLEVEL_FATAL +#endif +}; + +class StringPiece; +namespace util { +class Status; +} +class uint128; +namespace internal { + +class LogFinisher; + +class LIBPROTOBUF_EXPORT LogMessage { + public: + LogMessage(LogLevel level, const char* filename, int line); + ~LogMessage(); + + LogMessage& operator<<(const std::string& value); + LogMessage& operator<<(const char* value); + LogMessage& operator<<(char value); + LogMessage& operator<<(int value); + LogMessage& operator<<(uint value); + LogMessage& operator<<(long value); + LogMessage& operator<<(unsigned long value); + LogMessage& operator<<(long long value); + LogMessage& operator<<(unsigned long long value); + LogMessage& operator<<(double value); + LogMessage& operator<<(void* value); + LogMessage& operator<<(const StringPiece& value); + LogMessage& operator<<(const ::google::protobuf::util::Status& status); + LogMessage& operator<<(const uint128& value); + + private: + friend class LogFinisher; + void Finish(); + + LogLevel level_; + const char* filename_; + int line_; + std::string message_; +}; + +// Used to make the entire "LOG(BLAH) << etc." expression have a void return +// type and print a newline after each message. +class LIBPROTOBUF_EXPORT LogFinisher { + public: + void operator=(LogMessage& other); +}; + +template +bool IsOk(T status) { return status.ok(); } +template<> +inline bool IsOk(bool status) { return status; } + +} // namespace internal + +// Undef everything in case we're being mixed with some other Google library +// which already defined them itself. Presumably all Google libraries will +// support the same syntax for these so it should not be a big deal if they +// end up using our definitions instead. +#undef GOOGLE_LOG +#undef GOOGLE_LOG_IF + +#undef GOOGLE_CHECK +#undef GOOGLE_CHECK_OK +#undef GOOGLE_CHECK_EQ +#undef GOOGLE_CHECK_NE +#undef GOOGLE_CHECK_LT +#undef GOOGLE_CHECK_LE +#undef GOOGLE_CHECK_GT +#undef GOOGLE_CHECK_GE +#undef GOOGLE_CHECK_NOTNULL + +#undef GOOGLE_DLOG +#undef GOOGLE_DCHECK +#undef GOOGLE_DCHECK_OK +#undef GOOGLE_DCHECK_EQ +#undef GOOGLE_DCHECK_NE +#undef GOOGLE_DCHECK_LT +#undef GOOGLE_DCHECK_LE +#undef GOOGLE_DCHECK_GT +#undef GOOGLE_DCHECK_GE + +#define GOOGLE_LOG(LEVEL) \ + ::google::protobuf::internal::LogFinisher() = \ + ::google::protobuf::internal::LogMessage( \ + ::google::protobuf::LOGLEVEL_##LEVEL, __FILE__, __LINE__) +#define GOOGLE_LOG_IF(LEVEL, CONDITION) \ + !(CONDITION) ? 
(void)0 : GOOGLE_LOG(LEVEL) + +#define GOOGLE_CHECK(EXPRESSION) \ + GOOGLE_LOG_IF(FATAL, !(EXPRESSION)) << "CHECK failed: " #EXPRESSION ": " +#define GOOGLE_CHECK_OK(A) GOOGLE_CHECK(::google::protobuf::internal::IsOk(A)) +#define GOOGLE_CHECK_EQ(A, B) GOOGLE_CHECK((A) == (B)) +#define GOOGLE_CHECK_NE(A, B) GOOGLE_CHECK((A) != (B)) +#define GOOGLE_CHECK_LT(A, B) GOOGLE_CHECK((A) < (B)) +#define GOOGLE_CHECK_LE(A, B) GOOGLE_CHECK((A) <= (B)) +#define GOOGLE_CHECK_GT(A, B) GOOGLE_CHECK((A) > (B)) +#define GOOGLE_CHECK_GE(A, B) GOOGLE_CHECK((A) >= (B)) + +namespace internal { +template +T* CheckNotNull(const char* /* file */, int /* line */, + const char* name, T* val) { + if (val == NULL) { + GOOGLE_LOG(FATAL) << name; + } + return val; +} +} // namespace internal +#define GOOGLE_CHECK_NOTNULL(A) \ + ::google::protobuf::internal::CheckNotNull(\ + __FILE__, __LINE__, "'" #A "' must not be NULL", (A)) + +#ifdef NDEBUG + +#define GOOGLE_DLOG(LEVEL) GOOGLE_LOG_IF(LEVEL, false) + +#define GOOGLE_DCHECK(EXPRESSION) while(false) GOOGLE_CHECK(EXPRESSION) +#define GOOGLE_DCHECK_OK(E) GOOGLE_DCHECK(::google::protobuf::internal::IsOk(E)) +#define GOOGLE_DCHECK_EQ(A, B) GOOGLE_DCHECK((A) == (B)) +#define GOOGLE_DCHECK_NE(A, B) GOOGLE_DCHECK((A) != (B)) +#define GOOGLE_DCHECK_LT(A, B) GOOGLE_DCHECK((A) < (B)) +#define GOOGLE_DCHECK_LE(A, B) GOOGLE_DCHECK((A) <= (B)) +#define GOOGLE_DCHECK_GT(A, B) GOOGLE_DCHECK((A) > (B)) +#define GOOGLE_DCHECK_GE(A, B) GOOGLE_DCHECK((A) >= (B)) + +#else // NDEBUG + +#define GOOGLE_DLOG GOOGLE_LOG + +#define GOOGLE_DCHECK GOOGLE_CHECK +#define GOOGLE_DCHECK_OK GOOGLE_CHECK_OK +#define GOOGLE_DCHECK_EQ GOOGLE_CHECK_EQ +#define GOOGLE_DCHECK_NE GOOGLE_CHECK_NE +#define GOOGLE_DCHECK_LT GOOGLE_CHECK_LT +#define GOOGLE_DCHECK_LE GOOGLE_CHECK_LE +#define GOOGLE_DCHECK_GT GOOGLE_CHECK_GT +#define GOOGLE_DCHECK_GE GOOGLE_CHECK_GE + +#endif // !NDEBUG + +typedef void LogHandler(LogLevel level, const char* filename, int line, + const std::string& message); + +// The protobuf library sometimes writes warning and error messages to +// stderr. These messages are primarily useful for developers, but may +// also help end users figure out a problem. If you would prefer that +// these messages be sent somewhere other than stderr, call SetLogHandler() +// to set your own handler. This returns the old handler. Set the handler +// to NULL to ignore log messages (but see also LogSilencer, below). +// +// Obviously, SetLogHandler is not thread-safe. You should only call it +// at initialization time, and probably not from library code. If you +// simply want to suppress log messages temporarily (e.g. because you +// have some code that tends to trigger them frequently and you know +// the warnings are not important to you), use the LogSilencer class +// below. +LIBPROTOBUF_EXPORT LogHandler* SetLogHandler(LogHandler* new_func); + +// Create a LogSilencer if you want to temporarily suppress all log +// messages. As long as any LogSilencer objects exist, non-fatal +// log messages will be discarded (the current LogHandler will *not* +// be called). Constructing a LogSilencer is thread-safe. You may +// accidentally suppress log messages occurring in another thread, but +// since messages are generally for debugging purposes only, this isn't +// a big deal. If you want to intercept log messages, use SetLogHandler(). 
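+//
+// An illustrative sketch of both mechanisms (MyHandler and ParseNoisyInput
+// are hypothetical user code, not part of this header):
+//
+//   void MyHandler(LogLevel level, const char* filename, int line,
+//                  const std::string& message) {
+//     fprintf(stderr, "[protobuf] %s:%d: %s\n", filename, line,
+//             message.c_str());
+//   }
+//
+//   LogHandler* old_handler = SetLogHandler(&MyHandler);  // install
+//   ...
+//   SetLogHandler(old_handler);  // restore the previous handler
+//
+//   {
+//     LogSilencer silencer;  // non-fatal messages dropped while alive
+//     ParseNoisyInput();
+//   }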
+class LIBPROTOBUF_EXPORT LogSilencer { + public: + LogSilencer(); + ~LogSilencer(); +}; + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_STUBS_LOGGING_H_ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/macros.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/macros.h new file mode 100644 index 0000000000000000000000000000000000000000..0e9a9ec198208fd253a48cf01dd87e3fb5f9fe06 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/macros.h @@ -0,0 +1,168 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_MACROS_H__ +#define GOOGLE_PROTOBUF_MACROS_H__ + +#include + +namespace google { +namespace protobuf { + +#undef GOOGLE_DISALLOW_EVIL_CONSTRUCTORS +#define GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TypeName) \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) + +#undef GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS +#define GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ + TypeName(); \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) + +// =================================================================== +// from google3/base/basictypes.h + +// The GOOGLE_ARRAYSIZE(arr) macro returns the # of elements in an array arr. +// The expression is a compile-time constant, and therefore can be +// used in defining new arrays, for example. +// +// GOOGLE_ARRAYSIZE catches a few type errors. If you see a compiler error +// +// "warning: division by zero in ..." +// +// when using GOOGLE_ARRAYSIZE, you are (wrongfully) giving it a pointer. +// You should only use GOOGLE_ARRAYSIZE on statically allocated arrays. +// +// The following comments are on the implementation details, and can +// be ignored by the users. 
+//
+// ARRAYSIZE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element).  If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array.  Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) % sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size.  Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 or greater than 4 will be (righteously) rejected.
+//
+// Kudos to Jorg Brown for this simple and elegant implementation.
+
+#undef GOOGLE_ARRAYSIZE
+#define GOOGLE_ARRAYSIZE(a) \
+  ((sizeof(a) / sizeof(*(a))) / \
+   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   COMPILE_ASSERT(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
+//                  content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+namespace internal {
+
+template <bool>
+struct CompileAssert {
+};
+
+}  // namespace internal
+
+#undef GOOGLE_COMPILE_ASSERT
+#if __cplusplus >= 201103L
+#define GOOGLE_COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+#else
+#define GOOGLE_COMPILE_ASSERT(expr, msg) \
+  ::google::protobuf::internal::CompileAssert<(bool(expr))> \
+      msg[bool(expr) ? 1 : -1]; \
+  (void)msg
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     COMPILE_ASSERT(foo, msg);  // not supposed to compile as foo is
+//                                // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     COMPILE_ASSERT(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
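+// An illustrative sketch of the two macros above (the array and message
+// names are hypothetical):
+//
+//   static const char* const kNames[] = {"in", "out", "err"};
+//   GOOGLE_COMPILE_ASSERT(GOOGLE_ARRAYSIZE(kNames) == 3, kNames_wrong_size);
+//   bool seen[GOOGLE_ARRAYSIZE(kNames)];  // size fixed at compile time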
+#endif // __cplusplus >= 201103L + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_MACROS_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/map_util.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/map_util.h new file mode 100644 index 0000000000000000000000000000000000000000..3e6d381f4a480955c1e577ea52bf280af6775198 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/map_util.h @@ -0,0 +1,771 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2014 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// from google3/util/gtl/map_util.h +// Author: Anton Carver + +#ifndef GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__ +#define GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__ + +#include +#include +#include +#include +#include + +#include + +namespace google { +namespace protobuf { +namespace internal { +// Local implementation of RemoveConst to avoid including base/type_traits.h. +template struct RemoveConst { typedef T type; }; +template struct RemoveConst : RemoveConst {}; +} // namespace internal + +// +// Find*() +// + +// Returns a const reference to the value associated with the given key if it +// exists. Crashes otherwise. +// +// This is intended as a replacement for operator[] as an rvalue (for reading) +// when the key is guaranteed to exist. +// +// operator[] for lookup is discouraged for several reasons: +// * It has a side-effect of inserting missing keys +// * It is not thread-safe (even when it is not inserting, it can still +// choose to resize the underlying storage) +// * It invalidates iterators (when it chooses to resize) +// * It default constructs a value object even if it doesn't need to +// +// This version assumes the key is printable, and includes it in the fatal log +// message. 
+template +const typename Collection::value_type::second_type& +FindOrDie(const Collection& collection, + const typename Collection::value_type::first_type& key) { + typename Collection::const_iterator it = collection.find(key); + GOOGLE_CHECK(it != collection.end()) << "Map key not found: " << key; + return it->second; +} + +// Same as above, but returns a non-const reference. +template +typename Collection::value_type::second_type& +FindOrDie(Collection& collection, // NOLINT + const typename Collection::value_type::first_type& key) { + typename Collection::iterator it = collection.find(key); + GOOGLE_CHECK(it != collection.end()) << "Map key not found: " << key; + return it->second; +} + +// Same as FindOrDie above, but doesn't log the key on failure. +template +const typename Collection::value_type::second_type& +FindOrDieNoPrint(const Collection& collection, + const typename Collection::value_type::first_type& key) { + typename Collection::const_iterator it = collection.find(key); + GOOGLE_CHECK(it != collection.end()) << "Map key not found"; + return it->second; +} + +// Same as above, but returns a non-const reference. +template +typename Collection::value_type::second_type& +FindOrDieNoPrint(Collection& collection, // NOLINT + const typename Collection::value_type::first_type& key) { + typename Collection::iterator it = collection.find(key); + GOOGLE_CHECK(it != collection.end()) << "Map key not found"; + return it->second; +} + +// Returns a const reference to the value associated with the given key if it +// exists, otherwise returns a const reference to the provided default value. +// +// WARNING: If a temporary object is passed as the default "value," +// this function will return a reference to that temporary object, +// which will be destroyed at the end of the statement. A common +// example: if you have a map with string values, and you pass a char* +// as the default "value," either use the returned value immediately +// or store it in a string (not string&). +// Details: http://go/findwithdefault +template +const typename Collection::value_type::second_type& +FindWithDefault(const Collection& collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& value) { + typename Collection::const_iterator it = collection.find(key); + if (it == collection.end()) { + return value; + } + return it->second; +} + +// Returns a pointer to the const value associated with the given key if it +// exists, or NULL otherwise. +template +const typename Collection::value_type::second_type* +FindOrNull(const Collection& collection, + const typename Collection::value_type::first_type& key) { + typename Collection::const_iterator it = collection.find(key); + if (it == collection.end()) { + return 0; + } + return &it->second; +} + +// Same as above but returns a pointer to the non-const value. +template +typename Collection::value_type::second_type* +FindOrNull(Collection& collection, // NOLINT + const typename Collection::value_type::first_type& key) { + typename Collection::iterator it = collection.find(key); + if (it == collection.end()) { + return 0; + } + return &it->second; +} + +// Returns the pointer value associated with the given key. If none is found, +// NULL is returned. The function is designed to be used with a map of keys to +// pointers. +// +// This function does not distinguish between a missing key and a key mapped +// to a NULL value. 
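+//
+// An illustrative sketch of the Find*() helpers above (the map and keys are
+// hypothetical):
+//
+//   std::map<std::string, int> counts;
+//   counts["a"] = 1;
+//   const int& a = FindOrDie(counts, std::string("a"));    // dies if absent
+//   int b = FindWithDefault(counts, std::string("b"), 0);  // 0 if absent
+//   const int* p = FindOrNull(counts, std::string("b"));   // NULL if absent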
+template +typename Collection::value_type::second_type +FindPtrOrNull(const Collection& collection, + const typename Collection::value_type::first_type& key) { + typename Collection::const_iterator it = collection.find(key); + if (it == collection.end()) { + return typename Collection::value_type::second_type(); + } + return it->second; +} + +// Same as above, except takes non-const reference to collection. +// +// This function is needed for containers that propagate constness to the +// pointee, such as boost::ptr_map. +template +typename Collection::value_type::second_type +FindPtrOrNull(Collection& collection, // NOLINT + const typename Collection::value_type::first_type& key) { + typename Collection::iterator it = collection.find(key); + if (it == collection.end()) { + return typename Collection::value_type::second_type(); + } + return it->second; +} + +// Finds the pointer value associated with the given key in a map whose values +// are linked_ptrs. Returns NULL if key is not found. +template +typename Collection::value_type::second_type::element_type* +FindLinkedPtrOrNull(const Collection& collection, + const typename Collection::value_type::first_type& key) { + typename Collection::const_iterator it = collection.find(key); + if (it == collection.end()) { + return 0; + } + // Since linked_ptr::get() is a const member returning a non const, + // we do not need a version of this function taking a non const collection. + return it->second.get(); +} + +// Same as above, but dies if the key is not found. +template +typename Collection::value_type::second_type::element_type& +FindLinkedPtrOrDie(const Collection& collection, + const typename Collection::value_type::first_type& key) { + typename Collection::const_iterator it = collection.find(key); + GOOGLE_CHECK(it != collection.end()) << "key not found: " << key; + // Since linked_ptr::operator*() is a const member returning a non const, + // we do not need a version of this function taking a non const collection. + return *it->second; +} + +// Finds the value associated with the given key and copies it to *value (if not +// NULL). Returns false if the key was not found, true otherwise. +template +bool FindCopy(const Collection& collection, + const Key& key, + Value* const value) { + typename Collection::const_iterator it = collection.find(key); + if (it == collection.end()) { + return false; + } + if (value) { + *value = it->second; + } + return true; +} + +// +// Contains*() +// + +// Returns true if and only if the given collection contains the given key. +template +bool ContainsKey(const Collection& collection, const Key& key) { + return collection.find(key) != collection.end(); +} + +// Returns true if and only if the given collection contains the given key-value +// pair. +template +bool ContainsKeyValuePair(const Collection& collection, + const Key& key, + const Value& value) { + typedef typename Collection::const_iterator const_iterator; + std::pair range = collection.equal_range(key); + for (const_iterator it = range.first; it != range.second; ++it) { + if (it->second == value) { + return true; + } + } + return false; +} + +// +// Insert*() +// + +// Inserts the given key-value pair into the collection. Returns true if and +// only if the key from the given pair didn't previously exist. Otherwise, the +// value in the map is replaced with the value from the given pair. 
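+//
+// Sketch (hypothetical map; the second call overwrites the first):
+//
+//   std::map<std::string, int> m;
+//   bool added = InsertOrUpdate(&m, std::string("a"), 1);  // true, inserted
+//   added = InsertOrUpdate(&m, std::string("a"), 2);       // false, updated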
+template +bool InsertOrUpdate(Collection* const collection, + const typename Collection::value_type& vt) { + std::pair ret = collection->insert(vt); + if (!ret.second) { + // update + ret.first->second = vt.second; + return false; + } + return true; +} + +// Same as above, except that the key and value are passed separately. +template +bool InsertOrUpdate(Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& value) { + return InsertOrUpdate( + collection, typename Collection::value_type(key, value)); +} + +// Inserts/updates all the key-value pairs from the range defined by the +// iterators "first" and "last" into the given collection. +template +void InsertOrUpdateMany(Collection* const collection, + InputIterator first, InputIterator last) { + for (; first != last; ++first) { + InsertOrUpdate(collection, *first); + } +} + +// Change the value associated with a particular key in a map or hash_map +// of the form map which owns the objects pointed to by the +// value pointers. If there was an existing value for the key, it is deleted. +// True indicates an insert took place, false indicates an update + delete. +template +bool InsertAndDeleteExisting( + Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& value) { + std::pair ret = + collection->insert(typename Collection::value_type(key, value)); + if (!ret.second) { + delete ret.first->second; + ret.first->second = value; + return false; + } + return true; +} + +// Inserts the given key and value into the given collection if and only if the +// given key did NOT already exist in the collection. If the key previously +// existed in the collection, the value is not changed. Returns true if the +// key-value pair was inserted; returns false if the key was already present. +template +bool InsertIfNotPresent(Collection* const collection, + const typename Collection::value_type& vt) { + return collection->insert(vt).second; +} + +// Same as above except the key and value are passed separately. +template +bool InsertIfNotPresent( + Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& value) { + return InsertIfNotPresent( + collection, typename Collection::value_type(key, value)); +} + +// Same as above except dies if the key already exists in the collection. +template +void InsertOrDie(Collection* const collection, + const typename Collection::value_type& value) { + GOOGLE_CHECK(InsertIfNotPresent(collection, value)) + << "duplicate value: " << value; +} + +// Same as above except doesn't log the value on error. +template +void InsertOrDieNoPrint(Collection* const collection, + const typename Collection::value_type& value) { + GOOGLE_CHECK(InsertIfNotPresent(collection, value)) << "duplicate value."; +} + +// Inserts the key-value pair into the collection. Dies if key was already +// present. +template +void InsertOrDie(Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& data) { + GOOGLE_CHECK(InsertIfNotPresent(collection, key, data)) + << "duplicate key: " << key; +} + +// Same as above except doesn't log the key on error. 
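+//
+// Sketch of the Insert*() variants above (names are hypothetical):
+//
+//   std::map<int, std::string> m;
+//   if (InsertIfNotPresent(&m, 1, std::string("one"))) {
+//     // first insertion of key 1
+//   }
+//   InsertOrDie(&m, 2, std::string("two"));  // dies if 2 is already present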
+template +void InsertOrDieNoPrint( + Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& data) { + GOOGLE_CHECK(InsertIfNotPresent(collection, key, data)) << "duplicate key."; +} + +// Inserts a new key and default-initialized value. Dies if the key was already +// present. Returns a reference to the value. Example usage: +// +// map m; +// SomeProto& proto = InsertKeyOrDie(&m, 3); +// proto.set_field("foo"); +template +typename Collection::value_type::second_type& InsertKeyOrDie( + Collection* const collection, + const typename Collection::value_type::first_type& key) { + typedef typename Collection::value_type value_type; + std::pair res = + collection->insert(value_type(key, typename value_type::second_type())); + GOOGLE_CHECK(res.second) << "duplicate key: " << key; + return res.first->second; +} + +// +// Lookup*() +// + +// Looks up a given key and value pair in a collection and inserts the key-value +// pair if it's not already present. Returns a reference to the value associated +// with the key. +template +typename Collection::value_type::second_type& +LookupOrInsert(Collection* const collection, + const typename Collection::value_type& vt) { + return collection->insert(vt).first->second; +} + +// Same as above except the key-value are passed separately. +template +typename Collection::value_type::second_type& +LookupOrInsert(Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& value) { + return LookupOrInsert( + collection, typename Collection::value_type(key, value)); +} + +// Counts the number of equivalent elements in the given "sequence", and stores +// the results in "count_map" with element as the key and count as the value. +// +// Example: +// vector v = {"a", "b", "c", "a", "b"}; +// map m; +// AddTokenCounts(v, 1, &m); +// assert(m["a"] == 2); +// assert(m["b"] == 2); +// assert(m["c"] == 1); +template +void AddTokenCounts( + const Sequence& sequence, + const typename Collection::value_type::second_type& increment, + Collection* const count_map) { + for (typename Sequence::const_iterator it = sequence.begin(); + it != sequence.end(); ++it) { + typename Collection::value_type::second_type& value = + LookupOrInsert(count_map, *it, + typename Collection::value_type::second_type()); + value += increment; + } +} + +// Returns a reference to the value associated with key. If not found, a value +// is default constructed on the heap and added to the map. +// +// This function is useful for containers of the form map, where +// inserting a new key, value pair involves constructing a new heap-allocated +// Value, and storing a pointer to that in the collection. +template +typename Collection::value_type::second_type& +LookupOrInsertNew(Collection* const collection, + const typename Collection::value_type::first_type& key) { + typedef typename std::iterator_traits< + typename Collection::value_type::second_type>::value_type Element; + std::pair ret = + collection->insert(typename Collection::value_type( + key, + static_cast(NULL))); + if (ret.second) { + ret.first->second = new Element(); + } + return ret.first->second; +} + +// Same as above but constructs the value using the single-argument constructor +// and the given "arg". 
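+//
+// Sketch of the single-argument variant (hypothetical map of owned
+// pointers; the value is heap-allocated on first use only):
+//
+//   std::map<std::string, std::string*> m;
+//   std::string*& s = LookupOrInsertNew(&m, std::string("k"),
+//                                       std::string("initial value"));
+//   // A second lookup of "k" returns the same heap-allocated string.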
+template +typename Collection::value_type::second_type& +LookupOrInsertNew(Collection* const collection, + const typename Collection::value_type::first_type& key, + const Arg& arg) { + typedef typename std::iterator_traits< + typename Collection::value_type::second_type>::value_type Element; + std::pair ret = + collection->insert(typename Collection::value_type( + key, + static_cast(NULL))); + if (ret.second) { + ret.first->second = new Element(arg); + } + return ret.first->second; +} + +// Lookup of linked/shared pointers is used in two scenarios: +// +// Use LookupOrInsertNewLinkedPtr if the container owns the elements. +// In this case it is fine working with the raw pointer as long as it is +// guaranteed that no other thread can delete/update an accessed element. +// A mutex will need to lock the container operation as well as the use +// of the returned elements. Finding an element may be performed using +// FindLinkedPtr*(). +// +// Use LookupOrInsertNewSharedPtr if the container does not own the elements +// for their whole lifetime. This is typically the case when a reader allows +// parallel updates to the container. In this case a Mutex only needs to lock +// container operations, but all element operations must be performed on the +// shared pointer. Finding an element must be performed using FindPtr*() and +// cannot be done with FindLinkedPtr*() even though it compiles. + +// Lookup a key in a map or hash_map whose values are linked_ptrs. If it is +// missing, set collection[key].reset(new Value::element_type) and return that. +// Value::element_type must be default constructable. +template +typename Collection::value_type::second_type::element_type* +LookupOrInsertNewLinkedPtr( + Collection* const collection, + const typename Collection::value_type::first_type& key) { + typedef typename Collection::value_type::second_type Value; + std::pair ret = + collection->insert(typename Collection::value_type(key, Value())); + if (ret.second) { + ret.first->second.reset(new typename Value::element_type); + } + return ret.first->second.get(); +} + +// A variant of LookupOrInsertNewLinkedPtr where the value is constructed using +// a single-parameter constructor. Note: the constructor argument is computed +// even if it will not be used, so only values cheap to compute should be passed +// here. On the other hand it does not matter how expensive the construction of +// the actual stored value is, as that only occurs if necessary. +template +typename Collection::value_type::second_type::element_type* +LookupOrInsertNewLinkedPtr( + Collection* const collection, + const typename Collection::value_type::first_type& key, + const Arg& arg) { + typedef typename Collection::value_type::second_type Value; + std::pair ret = + collection->insert(typename Collection::value_type(key, Value())); + if (ret.second) { + ret.first->second.reset(new typename Value::element_type(arg)); + } + return ret.first->second.get(); +} + +// Lookup a key in a map or hash_map whose values are shared_ptrs. If it is +// missing, set collection[key].reset(new Value::element_type). Unlike +// LookupOrInsertNewLinkedPtr, this function returns the shared_ptr instead of +// the raw pointer. Value::element_type must be default constructable. 
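+//
+// Sketch (Widget is a hypothetical default-constructible type):
+//
+//   std::map<int, std::shared_ptr<Widget> > m;
+//   std::shared_ptr<Widget>& w = LookupOrInsertNewSharedPtr(&m, 42);
+//   // w was allocated on the first call with key 42 and is reused after.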
+template +typename Collection::value_type::second_type& +LookupOrInsertNewSharedPtr( + Collection* const collection, + const typename Collection::value_type::first_type& key) { + typedef typename Collection::value_type::second_type SharedPtr; + typedef typename Collection::value_type::second_type::element_type Element; + std::pair ret = + collection->insert(typename Collection::value_type(key, SharedPtr())); + if (ret.second) { + ret.first->second.reset(new Element()); + } + return ret.first->second; +} + +// A variant of LookupOrInsertNewSharedPtr where the value is constructed using +// a single-parameter constructor. Note: the constructor argument is computed +// even if it will not be used, so only values cheap to compute should be passed +// here. On the other hand it does not matter how expensive the construction of +// the actual stored value is, as that only occurs if necessary. +template +typename Collection::value_type::second_type& +LookupOrInsertNewSharedPtr( + Collection* const collection, + const typename Collection::value_type::first_type& key, + const Arg& arg) { + typedef typename Collection::value_type::second_type SharedPtr; + typedef typename Collection::value_type::second_type::element_type Element; + std::pair ret = + collection->insert(typename Collection::value_type(key, SharedPtr())); + if (ret.second) { + ret.first->second.reset(new Element(arg)); + } + return ret.first->second; +} + +// +// Misc Utility Functions +// + +// Updates the value associated with the given key. If the key was not already +// present, then the key-value pair are inserted and "previous" is unchanged. If +// the key was already present, the value is updated and "*previous" will +// contain a copy of the old value. +// +// InsertOrReturnExisting has complementary behavior that returns the +// address of an already existing value, rather than updating it. +template +bool UpdateReturnCopy(Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& value, + typename Collection::value_type::second_type* previous) { + std::pair ret = + collection->insert(typename Collection::value_type(key, value)); + if (!ret.second) { + // update + if (previous) { + *previous = ret.first->second; + } + ret.first->second = value; + return true; + } + return false; +} + +// Same as above except that the key and value are passed as a pair. +template +bool UpdateReturnCopy(Collection* const collection, + const typename Collection::value_type& vt, + typename Collection::value_type::second_type* previous) { + std::pair ret = collection->insert(vt); + if (!ret.second) { + // update + if (previous) { + *previous = ret.first->second; + } + ret.first->second = vt.second; + return true; + } + return false; +} + +// Tries to insert the given key-value pair into the collection. Returns NULL if +// the insert succeeds. Otherwise, returns a pointer to the existing value. +// +// This complements UpdateReturnCopy in that it allows to update only after +// verifying the old value and still insert quickly without having to look up +// twice. Unlike UpdateReturnCopy this also does not come with the issue of an +// undefined previous* in case new data was inserted. +template +typename Collection::value_type::second_type* const +InsertOrReturnExisting(Collection* const collection, + const typename Collection::value_type& vt) { + std::pair ret = collection->insert(vt); + if (ret.second) { + return NULL; // Inserted, no existing previous value. 
+ } else { + return &ret.first->second; // Return address of already existing value. + } +} + +// Same as above, except for explicit key and data. +template +typename Collection::value_type::second_type* const +InsertOrReturnExisting( + Collection* const collection, + const typename Collection::value_type::first_type& key, + const typename Collection::value_type::second_type& data) { + return InsertOrReturnExisting(collection, + typename Collection::value_type(key, data)); +} + +// Erases the collection item identified by the given key, and returns the value +// associated with that key. It is assumed that the value (i.e., the +// mapped_type) is a pointer. Returns NULL if the key was not found in the +// collection. +// +// Examples: +// map my_map; +// +// One line cleanup: +// delete EraseKeyReturnValuePtr(&my_map, "abc"); +// +// Use returned value: +// std::unique_ptr value_ptr( +// EraseKeyReturnValuePtr(&my_map, "abc")); +// if (value_ptr.get()) +// value_ptr->DoSomething(); +// +template +typename Collection::value_type::second_type EraseKeyReturnValuePtr( + Collection* const collection, + const typename Collection::value_type::first_type& key) { + typename Collection::iterator it = collection->find(key); + if (it == collection->end()) { + return NULL; + } + typename Collection::value_type::second_type v = it->second; + collection->erase(it); + return v; +} + +// Inserts all the keys from map_container into key_container, which must +// support insert(MapContainer::key_type). +// +// Note: any initial contents of the key_container are not cleared. +template +void InsertKeysFromMap(const MapContainer& map_container, + KeyContainer* key_container) { + GOOGLE_CHECK(key_container != NULL); + for (typename MapContainer::const_iterator it = map_container.begin(); + it != map_container.end(); ++it) { + key_container->insert(it->first); + } +} + +// Appends all the keys from map_container into key_container, which must +// support push_back(MapContainer::key_type). +// +// Note: any initial contents of the key_container are not cleared. +template +void AppendKeysFromMap(const MapContainer& map_container, + KeyContainer* key_container) { + GOOGLE_CHECK(key_container != NULL); + for (typename MapContainer::const_iterator it = map_container.begin(); + it != map_container.end(); ++it) { + key_container->push_back(it->first); + } +} + +// A more specialized overload of AppendKeysFromMap to optimize reallocations +// for the common case in which we're appending keys to a vector and hence can +// (and sometimes should) call reserve() first. +// +// (It would be possible to play SFINAE games to call reserve() for any +// container that supports it, but this seems to get us 99% of what we need +// without the complexity of a SFINAE-based solution.) +template +void AppendKeysFromMap(const MapContainer& map_container, + std::vector* key_container) { + GOOGLE_CHECK(key_container != NULL); + // We now have the opportunity to call reserve(). Calling reserve() every + // time is a bad idea for some use cases: libstdc++'s implementation of + // vector<>::reserve() resizes the vector's backing store to exactly the + // given size (unless it's already at least that big). Because of this, + // the use case that involves appending a lot of small maps (total size + // N) one by one to a vector would be O(N^2). But never calling reserve() + // loses the opportunity to improve the use case of adding from a large + // map to an empty vector (this improves performance by up to 33%). 
A + // number of heuristics are possible; see the discussion in + // cl/34081696. Here we use the simplest one. + if (key_container->empty()) { + key_container->reserve(map_container.size()); + } + for (typename MapContainer::const_iterator it = map_container.begin(); + it != map_container.end(); ++it) { + key_container->push_back(it->first); + } +} + +// Inserts all the values from map_container into value_container, which must +// support push_back(MapContainer::mapped_type). +// +// Note: any initial contents of the value_container are not cleared. +template +void AppendValuesFromMap(const MapContainer& map_container, + ValueContainer* value_container) { + GOOGLE_CHECK(value_container != NULL); + for (typename MapContainer::const_iterator it = map_container.begin(); + it != map_container.end(); ++it) { + value_container->push_back(it->second); + } +} + +// A more specialized overload of AppendValuesFromMap to optimize reallocations +// for the common case in which we're appending values to a vector and hence +// can (and sometimes should) call reserve() first. +// +// (It would be possible to play SFINAE games to call reserve() for any +// container that supports it, but this seems to get us 99% of what we need +// without the complexity of a SFINAE-based solution.) +template +void AppendValuesFromMap(const MapContainer& map_container, + std::vector* value_container) { + GOOGLE_CHECK(value_container != NULL); + // See AppendKeysFromMap for why this is done. + if (value_container->empty()) { + value_container->reserve(map_container.size()); + } + for (typename MapContainer::const_iterator it = map_container.begin(); + it != map_container.end(); ++it) { + value_container->push_back(it->second); + } +} + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/mutex.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/mutex.h new file mode 100644 index 0000000000000000000000000000000000000000..b9b7d2e1be1f948fa93748b59db497a04a7549c3 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/mutex.h @@ -0,0 +1,130 @@ +// Copyright (c) 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_MUTEX_H_
+#define GOOGLE_PROTOBUF_STUBS_MUTEX_H_
+
+#include <mutex>
+
+#include <google/protobuf/stubs/macros.h>
+
+// ===================================================================
+// emulates google3/base/mutex.h
+namespace google {
+namespace protobuf {
+namespace internal {
+
+#define GOOGLE_PROTOBUF_LINKER_INITIALIZED
+
+// Mutex is a natural type to wrap. Both Google and other organizations have
+// specialized mutexes, and gRPC also provides an injection mechanism for
+// custom mutexes.
+class LIBPROTOBUF_EXPORT WrappedMutex {
+ public:
+  WrappedMutex() = default;
+  void Lock() { mu_.lock(); }
+  void Unlock() { mu_.unlock(); }
+  // Crash if this Mutex is not held exclusively by this thread.
+  // May fail to crash when it should; will never crash when it should not.
+  void AssertHeld() const {}
+
+ private:
+  std::mutex mu_;
+};
+
+using Mutex = WrappedMutex;
+
+// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
+class LIBPROTOBUF_EXPORT MutexLock {
+ public:
+  explicit MutexLock(Mutex *mu) : mu_(mu) { this->mu_->Lock(); }
+  ~MutexLock() { this->mu_->Unlock(); }
+ private:
+  Mutex *const mu_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLock);
+};
+
+// TODO(kenton):  Implement these?  Hard to implement portably.
+typedef MutexLock ReaderMutexLock;
+typedef MutexLock WriterMutexLock;
+
+// MutexLockMaybe is like MutexLock, but is a no-op when mu is NULL.
+class LIBPROTOBUF_EXPORT MutexLockMaybe {
+ public:
+  explicit MutexLockMaybe(Mutex *mu) :
+    mu_(mu) { if (this->mu_ != NULL) { this->mu_->Lock(); } }
+  ~MutexLockMaybe() { if (this->mu_ != NULL) { this->mu_->Unlock(); } }
+ private:
+  Mutex *const mu_;
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLockMaybe);
+};
+
+#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
+template<typename T>
+class ThreadLocalStorage {
+ public:
+  ThreadLocalStorage() {
+    pthread_key_create(&key_, &ThreadLocalStorage::Delete);
+  }
+  ~ThreadLocalStorage() {
+    pthread_key_delete(key_);
+  }
+  T* Get() {
+    T* result = static_cast<T*>(pthread_getspecific(key_));
+    if (result == NULL) {
+      result = new T();
+      pthread_setspecific(key_, result);
+    }
+    return result;
+  }
+ private:
+  static void Delete(void* value) {
+    delete static_cast<T*>(value);
+  }
+  pthread_key_t key_;
+
+  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadLocalStorage);
+};
+#endif
+
+}  // namespace internal
+
+// We made these internal so that they would show up as such in the docs,
+// but we don't want to stick "internal::" in front of them everywhere.
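+//
+// An illustrative sketch of the wrappers above (counter and Increment are
+// hypothetical user code):
+//
+//   static internal::Mutex counter_mutex;
+//   static int counter = 0;
+//
+//   void Increment() {
+//     internal::MutexLock lock(&counter_mutex);  // unlocked at scope exit
+//     ++counter;
+//   }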
+using internal::Mutex; +using internal::MutexLock; +using internal::ReaderMutexLock; +using internal::WriterMutexLock; +using internal::MutexLockMaybe; + + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_STUBS_MUTEX_H_ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h new file mode 100644 index 0000000000000000000000000000000000000000..4a18497182f35e46f41d7e1daa7f3803ce0e3fad --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h @@ -0,0 +1,155 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// emulates google3/base/once.h +// +// This header is intended to be included only by internal .cc files and +// generated .pb.cc files. Users should not use this directly. +// +// This is basically a portable version of pthread_once(). +// +// This header declares: +// * A type called ProtobufOnceType. +// * A macro GOOGLE_PROTOBUF_DECLARE_ONCE() which declares a variable of type +// ProtobufOnceType. This is the only legal way to declare such a variable. +// The macro may only be used at the global scope (you cannot create local or +// class member variables of this type). +// * A function GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()). +// This function, when invoked multiple times given the same ProtobufOnceType +// object, will invoke init_func on the first call only, and will make sure +// none of the calls return before that first call to init_func has finished. +// * The user can provide a parameter which GoogleOnceInit() forwards to the +// user-provided function when it is called. 
Usage example:
+//       int a = 10;
+//       GoogleOnceInit(&my_once, &MyFunctionExpectingIntArgument, &a);
+//   * This implementation guarantees that ProtobufOnceType is a POD (i.e. no
+//     static initializer generated).
+//
+// This implements a way to perform lazy initialization.  It's more efficient
+// than using mutexes as no lock is needed if initialization has already
+// happened.
+//
+// Example usage:
+//   void Init();
+//   GOOGLE_PROTOBUF_DECLARE_ONCE(once_init);
+//
+//   // Calls Init() exactly once.
+//   void InitOnce() {
+//     GoogleOnceInit(&once_init, &Init);
+//   }
+//
+// Note that if GoogleOnceInit() is called before main() has begun, it must
+// only be called by the thread that will eventually call main() -- that is,
+// the thread that performs dynamic initialization.  In general this is a safe
+// assumption since people don't usually construct threads before main()
+// starts, but it is technically not guaranteed.  Unfortunately, Win32
+// provides no way whatsoever to statically-initialize its synchronization
+// primitives, so our only choice is to assume that dynamic initialization
+// is single-threaded.

+#ifndef GOOGLE_PROTOBUF_STUBS_ONCE_H__
+#define GOOGLE_PROTOBUF_STUBS_ONCE_H__
+
+#include <atomic>
+#include <utility>
+#include
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+using once_flag = std::atomic<int>;
+
+template <typename Callable, typename... Args>
+void my_call_once(once_flag& once, Callable&& fn, Args&&... args) {
+  enum CallOnceState {
+    ONCE_INIT = 0,
+    ONCE_RUNNING = 1,
+    ONCE_DONE = 2,
+  };
+
+  int expected_state = ONCE_INIT;
+  if (once.compare_exchange_strong(expected_state, ONCE_RUNNING)) {
+    fn(std::forward<Args>(args)...);
+    once.store(ONCE_DONE);
+    return;
+  }
+
+  if (expected_state == ONCE_DONE) {
+    return;
+  }
+
+  while (once.load() == ONCE_RUNNING) {
+  }
+}
+
+template <typename... Args>
+void call_once(Args&&... args) {
+  my_call_once(std::forward<Args>(args)...);
+}
+}  // namespace internal
+
+// TODO(gerbens) remove this once third_party is fully extracted
+using ProtobufOnceType = internal::once_flag;
+
+inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {
+  internal::my_call_once(*once, init_func);
+}
+
+template <typename Arg>
+inline void GoogleOnceInitArg(ProtobufOnceType* once, void (*init_func)(Arg*),
+                              Arg* arg) {
+  internal::my_call_once(*once, init_func, arg);
+}
+
+class GoogleOnceDynamic {
+ public:
+  // If this->Init() has not been called before by any thread,
+  // execute (*func_with_arg)(arg) then return.
+  // Otherwise, wait until that prior invocation has finished
+  // executing its function, then return.
+  template <typename T>
+  void Init(void (*func_with_arg)(T*), T* arg) {
+    GoogleOnceInitArg(&this->state_, func_with_arg, arg);
+  }
+
+ private:
+  ProtobufOnceType state_;
+};
+
+#define GOOGLE_PROTOBUF_ONCE_TYPE ::google::protobuf::ProtobufOnceType
+#define GOOGLE_PROTOBUF_DECLARE_ONCE(NAME) \
+  ::google::protobuf::ProtobufOnceType NAME
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_ONCE_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h.org b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h.org
new file mode 100644
index 0000000000000000000000000000000000000000..f3835ccd0496cdc3bd8bd5e3df7817395072b433
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h.org
@@ -0,0 +1,130 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.
All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// emulates google3/base/once.h +// +// This header is intended to be included only by internal .cc files and +// generated .pb.cc files. Users should not use this directly. +// +// This is basically a portable version of pthread_once(). +// +// This header declares: +// * A type called ProtobufOnceType. +// * A macro GOOGLE_PROTOBUF_DECLARE_ONCE() which declares a variable of type +// ProtobufOnceType. This is the only legal way to declare such a variable. +// The macro may only be used at the global scope (you cannot create local or +// class member variables of this type). +// * A function GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()). +// This function, when invoked multiple times given the same ProtobufOnceType +// object, will invoke init_func on the first call only, and will make sure +// none of the calls return before that first call to init_func has finished. +// * The user can provide a parameter which GoogleOnceInit() forwards to the +// user-provided function when it is called. Usage example: +// int a = 10; +// GoogleOnceInit(&my_once, &MyFunctionExpectingIntArgument, &a); +// * This implementation guarantees that ProtobufOnceType is a POD (i.e. no +// static initializer generated). +// +// This implements a way to perform lazy initialization. It's more efficient +// than using mutexes as no lock is needed if initialization has already +// happened. +// +// Example usage: +// void Init(); +// GOOGLE_PROTOBUF_DECLARE_ONCE(once_init); +// +// // Calls Init() exactly once. +// void InitOnce() { +// GoogleOnceInit(&once_init, &Init); +// } +// +// Note that if GoogleOnceInit() is called before main() has begun, it must +// only be called by the thread that will eventually call main() -- that is, +// the thread that performs dynamic initialization. 
In general this is a safe
+// assumption since people don't usually construct threads before main()
+// starts, but it is technically not guaranteed.  Unfortunately, Win32
+// provides no way whatsoever to statically-initialize its synchronization
+// primitives, so our only choice is to assume that dynamic initialization
+// is single-threaded.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_ONCE_H__
+#define GOOGLE_PROTOBUF_STUBS_ONCE_H__
+
+#include <mutex>
+#include <utility>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+using once_flag = std::once_flag;
+template <typename... Args>
+void call_once(Args&&... args ) {
+  std::call_once(std::forward<Args>(args)...);
+}
+
+}  // namespace internal
+
+// TODO(gerbens) remove this once third_party is fully extracted
+using ProtobufOnceType = internal::once_flag;
+
+inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {
+  std::call_once(*once, init_func);
+}
+
+template <typename Arg>
+inline void GoogleOnceInitArg(ProtobufOnceType* once, void (*init_func)(Arg*),
+                              Arg* arg) {
+  std::call_once(*once, init_func, arg);
+}
+
+class GoogleOnceDynamic {
+ public:
+  // If this->Init() has not been called before by any thread,
+  // execute (*func_with_arg)(arg) then return.
+  // Otherwise, wait until that prior invocation has finished
+  // executing its function, then return.
+  template <typename T>
+  void Init(void (*func_with_arg)(T*), T* arg) {
+    GoogleOnceInitArg(&this->state_, func_with_arg, arg);
+  }
+ private:
+  ProtobufOnceType state_;
+};
+
+#define GOOGLE_PROTOBUF_ONCE_TYPE ::google::protobuf::ProtobufOnceType
+#define GOOGLE_PROTOBUF_DECLARE_ONCE(NAME) \
+  ::google::protobuf::ProtobufOnceType NAME
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_ONCE_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/platform_macros.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/platform_macros.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3a64dd2b38e6daa0b7546ab4febb46cab3a1ec9
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/platform_macros.h
@@ -0,0 +1,128 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2012 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_PLATFORM_MACROS_H_ +#define GOOGLE_PROTOBUF_PLATFORM_MACROS_H_ + +#define GOOGLE_PROTOBUF_PLATFORM_ERROR \ +#error "Host platform was not detected as supported by protobuf" + +// Processor architecture detection. For more info on what's defined, see: +// http://msdn.microsoft.com/en-us/library/b0084kay.aspx +// http://www.agner.org/optimize/calling_conventions.pdf +// or with gcc, run: "echo | gcc -E -dM -" +#if defined(_M_X64) || defined(__x86_64__) +#define GOOGLE_PROTOBUF_ARCH_X64 1 +#define GOOGLE_PROTOBUF_ARCH_64_BIT 1 +#elif defined(_M_IX86) || defined(__i386__) +#define GOOGLE_PROTOBUF_ARCH_IA32 1 +#define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +#elif defined(__QNX__) +#define GOOGLE_PROTOBUF_ARCH_ARM_QNX 1 +#define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +#elif defined(_M_ARM) || defined(__ARMEL__) +#define GOOGLE_PROTOBUF_ARCH_ARM 1 +#define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +#elif defined(_M_ARM64) +#define GOOGLE_PROTOBUF_ARCH_ARM 1 +#define GOOGLE_PROTOBUF_ARCH_64_BIT 1 +#elif defined(__aarch64__) +#define GOOGLE_PROTOBUF_ARCH_AARCH64 1 +#define GOOGLE_PROTOBUF_ARCH_64_BIT 1 +#elif defined(__MIPSEL__) +#if defined(__LP64__) +#define GOOGLE_PROTOBUF_ARCH_MIPS64 1 +#define GOOGLE_PROTOBUF_ARCH_64_BIT 1 +#else +#define GOOGLE_PROTOBUF_ARCH_MIPS 1 +#define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +#endif +#elif defined(__pnacl__) +#define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +#elif defined(sparc) +#define GOOGLE_PROTOBUF_ARCH_SPARC 1 +#if defined(__sparc_v9__) || defined(__sparcv9) || defined(__arch64__) +#define GOOGLE_PROTOBUF_ARCH_64_BIT 1 +#else +#define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +#endif +#elif defined(_POWER) || defined(__powerpc64__) || defined(__PPC64__) +#define GOOGLE_PROTOBUF_ARCH_POWER 1 +#define GOOGLE_PROTOBUF_ARCH_64_BIT 1 +#elif defined(__PPC__) +#define GOOGLE_PROTOBUF_ARCH_PPC 1 +#define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +#elif defined(__GNUC__) +# if (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4)) +// We fallback to the generic Clang/GCC >= 4.7 implementation in atomicops.h +# elif defined(__clang__) +# if !__has_extension(c_atomic) +GOOGLE_PROTOBUF_PLATFORM_ERROR +# endif +// We fallback to the generic Clang/GCC >= 4.7 implementation in atomicops.h +# endif +# if __LP64__ +# define GOOGLE_PROTOBUF_ARCH_64_BIT 1 +# else +# define GOOGLE_PROTOBUF_ARCH_32_BIT 1 +# endif +#else +GOOGLE_PROTOBUF_PLATFORM_ERROR +#endif + +#if defined(__APPLE__) +#define GOOGLE_PROTOBUF_OS_APPLE +#include +#if TARGET_OS_IPHONE +#define GOOGLE_PROTOBUF_OS_IPHONE +#endif +#elif defined(__EMSCRIPTEN__) +#define GOOGLE_PROTOBUF_OS_EMSCRIPTEN +#elif defined(__native_client__) +#define GOOGLE_PROTOBUF_OS_NACL +#elif defined(sun) +#define GOOGLE_PROTOBUF_OS_SOLARIS +#elif defined(_AIX) +#define GOOGLE_PROTOBUF_OS_AIX +#elif defined(__ANDROID__) +#define GOOGLE_PROTOBUF_OS_ANDROID +#endif + +#undef GOOGLE_PROTOBUF_PLATFORM_ERROR + +#if defined(GOOGLE_PROTOBUF_OS_ANDROID) || defined(GOOGLE_PROTOBUF_OS_IPHONE) || defined(__OpenBSD__) +// Android ndk does not 
support the __thread keyword very well yet.  Here
+// we use pthread_key_create()/pthread_getspecific()/... methods for
+// TLS support on android.
+// iOS and OpenBSD also do not support the __thread keyword.
+#define GOOGLE_PROTOBUF_NO_THREADLOCAL
+#endif
+
+#endif  // GOOGLE_PROTOBUF_PLATFORM_MACROS_H_
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/port.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/port.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b52305f9398ac9eb378c365f546967d195ec2b4
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/port.h
@@ -0,0 +1,542 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GOOGLE_PROTOBUF_STUBS_PORT_H_
+#define GOOGLE_PROTOBUF_STUBS_PORT_H_
+
+#include <assert.h>
+#include <stdlib.h>
+#include <cstddef>
+#include <string>
+#include <string.h>
+#if defined(__osf__)
+// Tru64 lacks stdint.h, but has inttypes.h which defines a superset of
+// what stdint.h would define.
+#include <inttypes.h>
+#elif !defined(_MSC_VER)
+#include <stdint.h>
+#endif
+
+#include <google/protobuf/stubs/platform_macros.h>
+
+#undef PROTOBUF_LITTLE_ENDIAN
+#ifdef _WIN32
+  // Assuming windows is always little-endian.
+  // TODO(xiaofeng): The PROTOBUF_LITTLE_ENDIAN is not only used for
+  // optimization but also for correctness. We should define a
+  // different macro to test the big-endian code path in coded_stream.
+  #if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
+    #define PROTOBUF_LITTLE_ENDIAN 1
+  #endif
+  #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
+    // If MSVC has "/RTCc" set, it will complain about truncating casts at
+    // runtime.  This file contains some intentional truncating casts.
+ #pragma runtime_checks("c", off) + #endif +#else + #include // __BYTE_ORDER + #if defined(__OpenBSD__) + #include + #endif + #if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \ + (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \ + (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN)) && \ + !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST) + #define PROTOBUF_LITTLE_ENDIAN 1 + #endif +#endif +#if defined(_MSC_VER) && defined(PROTOBUF_USE_DLLS) + #ifdef LIBPROTOBUF_EXPORTS + #define LIBPROTOBUF_EXPORT __declspec(dllexport) + #else + #define LIBPROTOBUF_EXPORT __declspec(dllimport) + #endif + #ifdef LIBPROTOC_EXPORTS + #define LIBPROTOC_EXPORT __declspec(dllexport) + #else + #define LIBPROTOC_EXPORT __declspec(dllimport) + #endif +#else + #define LIBPROTOBUF_EXPORT + #define LIBPROTOC_EXPORT +#endif + +// These #includes are for the byte swap functions declared later on. +#ifdef _MSC_VER +#include // NOLINT(build/include) +#include +#elif defined(__APPLE__) +#include +#elif defined(__GLIBC__) || defined(__BIONIC__) || defined(__CYGWIN__) +#include // IWYU pragma: export +#endif + +#define PROTOBUF_RUNTIME_DEPRECATED(message) + +// =================================================================== +// from google3/base/port.h + +#if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \ + (defined(_MSC_VER) && _MSC_VER >= 1900)) +// Define this to 1 if the code is compiled in C++11 mode; leave it +// undefined otherwise. Do NOT define it to 0 -- that causes +// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'. +#define LANG_CXX11 1 +#endif + +#if LANG_CXX11 && !defined(__NVCC__) +#define PROTOBUF_CXX11 1 +#else +#define PROTOBUF_CXX11 0 +#endif + +#if PROTOBUF_CXX11 +#define PROTOBUF_FINAL final +#else +#define PROTOBUF_FINAL +#endif + +namespace google { +namespace protobuf { + +typedef unsigned int uint; + +#ifdef _MSC_VER +typedef signed __int8 int8; +typedef __int16 int16; +typedef __int32 int32; +typedef __int64 int64; + +typedef unsigned __int8 uint8; +typedef unsigned __int16 uint16; +typedef unsigned __int32 uint32; +typedef unsigned __int64 uint64; +#else +typedef int8_t int8; +typedef int16_t int16; +typedef int32_t int32; +typedef int64_t int64; + +typedef uint8_t uint8; +typedef uint16_t uint16; +typedef uint32_t uint32; +typedef uint64_t uint64; +#endif + +// long long macros to be used because gcc and vc++ use different suffixes, +// and different size specifiers in format strings +#undef GOOGLE_LONGLONG +#undef GOOGLE_ULONGLONG +#undef GOOGLE_LL_FORMAT + +#ifdef _MSC_VER +#define GOOGLE_LONGLONG(x) x##I64 +#define GOOGLE_ULONGLONG(x) x##UI64 +#define GOOGLE_LL_FORMAT "I64" // As in printf("%I64d", ...) +#else +// By long long, we actually mean int64. +#define GOOGLE_LONGLONG(x) x##LL +#define GOOGLE_ULONGLONG(x) x##ULL +// Used to format real long long integers. +#define GOOGLE_LL_FORMAT "ll" // As in "%lld". Note that "q" is poor form also. +#endif + +static const int32 kint32max = 0x7FFFFFFF; +static const int32 kint32min = -kint32max - 1; +static const int64 kint64max = GOOGLE_LONGLONG(0x7FFFFFFFFFFFFFFF); +static const int64 kint64min = -kint64max - 1; +static const uint32 kuint32max = 0xFFFFFFFFu; +static const uint64 kuint64max = GOOGLE_ULONGLONG(0xFFFFFFFFFFFFFFFF); + +// ------------------------------------------------------------------- +// Annotations: Some parts of the code have been annotated in ways that might +// be useful to some compilers or tools, but are not supported universally. 
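A quick aside before the annotation section continues: the suffix and format macros above let the same 64-bit literal and printf call compile under both MSVC and gcc. A hedged sketch (kEpochMicros and PrintEpoch are illustrative names; int64 is the typedef from this header):

#include <stdio.h>

static const int64 kEpochMicros = GOOGLE_LONGLONG(1514764800000000);

void PrintEpoch() {
  // The format expands to "%I64d" under MSVC and "%lld" elsewhere.
  printf("%" GOOGLE_LL_FORMAT "d\n", kEpochMicros);
}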
+// You can #define these annotations yourself if the default implementation +// is not right for you. + +#ifndef GOOGLE_ATTRIBUTE_ALWAYS_INLINE +#if defined(__GNUC__) && (__GNUC__ > 3 ||(__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) +// For functions we want to force inline. +// Introduced in gcc 3.1. +#define GOOGLE_ATTRIBUTE_ALWAYS_INLINE __attribute__ ((always_inline)) +#else +// Other compilers will have to figure it out for themselves. +#define GOOGLE_ATTRIBUTE_ALWAYS_INLINE +#endif +#endif + +#define GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE GOOGLE_ATTRIBUTE_ALWAYS_INLINE + +#ifndef GOOGLE_ATTRIBUTE_NOINLINE +#if defined(__GNUC__) && (__GNUC__ > 3 ||(__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) +// For functions we want to force not inline. +// Introduced in gcc 3.1. +#define GOOGLE_ATTRIBUTE_NOINLINE __attribute__ ((noinline)) +#elif defined(_MSC_VER) && (_MSC_VER >= 1400) +// Seems to have been around since at least Visual Studio 2005 +#define GOOGLE_ATTRIBUTE_NOINLINE __declspec(noinline) +#else +// Other compilers will have to figure it out for themselves. +#define GOOGLE_ATTRIBUTE_NOINLINE +#endif +#endif + +#define GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE GOOGLE_ATTRIBUTE_NOINLINE + +#ifndef GOOGLE_ATTRIBUTE_FUNC_ALIGN +#if defined(__clang__) || \ + defined(__GNUC__) && (__GNUC__ > 4 ||(__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) +// Function alignment attribute introduced in gcc 4.3 +#define GOOGLE_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__ ((aligned(bytes))) +#else +#define GOOGLE_ATTRIBUTE_FUNC_ALIGN(bytes) +#endif +#endif + +#define GOOGLE_PROTOBUF_ATTRIBUTE_FUNC_ALIGN(bytes) \ + GOOGLE_ATTRIBUTE_FUNC_ALIGN(bytes) + +#ifndef GOOGLE_PREDICT_TRUE +#ifdef __GNUC__ +// Provided at least since GCC 3.0. +#define GOOGLE_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) +#else +#define GOOGLE_PREDICT_TRUE(x) (x) +#endif +#endif + +#ifndef GOOGLE_PREDICT_FALSE +#ifdef __GNUC__ +// Provided at least since GCC 3.0. +#define GOOGLE_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#else +#define GOOGLE_PREDICT_FALSE(x) (x) +#endif +#endif + +#ifndef GOOGLE_PROTOBUF_ATTRIBUTE_RETURNS_NONNULL +#ifdef __GNUC__ +#define GOOGLE_PROTOBUF_ATTRIBUTE_RETURNS_NONNULL \ + __attribute__((returns_nonnull)) +#endif +#endif + +// Delimits a block of code which may write to memory which is simultaneously +// written by other threads, but which has been determined to be thread-safe +// (e.g. because it is an idempotent write). 
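A brief hedged sketch of the inlining and branch-prediction annotations just defined (ParsePacket and ReportError are hypothetical):

GOOGLE_ATTRIBUTE_NOINLINE void ReportError();  // keep the cold path out of line

inline bool ParsePacket(const char* p, const char* end) {
  if (GOOGLE_PREDICT_FALSE(p >= end)) {  // failure is expected to be rare
    ReportError();
    return false;
  }
  // ... fast path ...
  return true;
}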
+#ifndef GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN +#define GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN() +#endif +#ifndef GOOGLE_SAFE_CONCURRENT_WRITES_END +#define GOOGLE_SAFE_CONCURRENT_WRITES_END() +#endif + +#define GOOGLE_GUARDED_BY(x) +#define GOOGLE_ATTRIBUTE_COLD + +#ifdef GOOGLE_PROTOBUF_DONT_USE_UNALIGNED +# define GOOGLE_PROTOBUF_USE_UNALIGNED 0 +#else +# if defined(_M_X64) || defined(__x86_64__) || defined(_M_IX86) || defined(__i386__) +# define GOOGLE_PROTOBUF_USE_UNALIGNED 1 +# else +# define GOOGLE_PROTOBUF_USE_UNALIGNED 0 +# endif +#endif + +#define GOOGLE_PROTOBUF_ATTRIBUTE_COLD GOOGLE_ATTRIBUTE_COLD + +#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\ + defined(MEMORY_SANITIZER) + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus +uint16_t __sanitizer_unaligned_load16(const void *p); +uint32_t __sanitizer_unaligned_load32(const void *p); +uint64_t __sanitizer_unaligned_load64(const void *p); +void __sanitizer_unaligned_store16(void *p, uint16_t v); +void __sanitizer_unaligned_store32(void *p, uint32_t v); +void __sanitizer_unaligned_store64(void *p, uint64_t v); +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +inline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) { + return __sanitizer_unaligned_load16(p); +} + +inline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) { + return __sanitizer_unaligned_load32(p); +} + +inline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) { + return __sanitizer_unaligned_load64(p); +} + +inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) { + __sanitizer_unaligned_store16(p, v); +} + +inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) { + __sanitizer_unaligned_store32(p, v); +} + +inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) { + __sanitizer_unaligned_store64(p, v); +} + +#elif GOOGLE_PROTOBUF_USE_UNALIGNED + +#define GOOGLE_UNALIGNED_LOAD16(_p) (*reinterpret_cast(_p)) +#define GOOGLE_UNALIGNED_LOAD32(_p) (*reinterpret_cast(_p)) +#define GOOGLE_UNALIGNED_LOAD64(_p) (*reinterpret_cast(_p)) + +#define GOOGLE_UNALIGNED_STORE16(_p, _val) (*reinterpret_cast(_p) = (_val)) +#define GOOGLE_UNALIGNED_STORE32(_p, _val) (*reinterpret_cast(_p) = (_val)) +#define GOOGLE_UNALIGNED_STORE64(_p, _val) (*reinterpret_cast(_p) = (_val)) + +#else +inline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) { + uint16 t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) { + uint32 t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) { + uint64 t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) { + memcpy(p, &v, sizeof v); +} + +inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) { + memcpy(p, &v, sizeof v); +} + +inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) { + memcpy(p, &v, sizeof v); +} +#endif + +#if defined(GOOGLE_PROTOBUF_OS_NACL) \ + || (defined(__ANDROID__) && defined(__clang__) \ + && (__clang_major__ == 3 && __clang_minor__ == 8) \ + && (__clang_patchlevel__ < 275480)) +# define GOOGLE_PROTOBUF_USE_PORTABLE_LOG2 +#endif + +#if defined(_MSC_VER) +#define GOOGLE_THREAD_LOCAL __declspec(thread) +#else +#define GOOGLE_THREAD_LOCAL __thread +#endif + +// The following guarantees declaration of the byte swap functions. 
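To illustrate the unaligned-access helpers defined above, a hedged sketch (ReadLengthPrefix is a hypothetical caller): reading a 4-byte value at an arbitrary buffer offset is a single load on platforms that permit it, and a well-defined memcpy otherwise.

inline uint32 ReadLengthPrefix(const char* buf) {
  // buf need not be 4-byte aligned; the macro/function picks the safe path.
  return GOOGLE_UNALIGNED_LOAD32(buf);
}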
+#ifdef _MSC_VER +#define bswap_16(x) _byteswap_ushort(x) +#define bswap_32(x) _byteswap_ulong(x) +#define bswap_64(x) _byteswap_uint64(x) + +#elif defined(__APPLE__) +// Mac OS X / Darwin features +#define bswap_16(x) OSSwapInt16(x) +#define bswap_32(x) OSSwapInt32(x) +#define bswap_64(x) OSSwapInt64(x) + +#elif !defined(__GLIBC__) && !defined(__BIONIC__) && !defined(__CYGWIN__) + +static inline uint16 bswap_16(uint16 x) { + return static_cast(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8)); +} +#define bswap_16(x) bswap_16(x) +static inline uint32 bswap_32(uint32 x) { + return (((x & 0xFF) << 24) | + ((x & 0xFF00) << 8) | + ((x & 0xFF0000) >> 8) | + ((x & 0xFF000000) >> 24)); +} +#define bswap_32(x) bswap_32(x) +static inline uint64 bswap_64(uint64 x) { + return (((x & GOOGLE_ULONGLONG(0xFF)) << 56) | + ((x & GOOGLE_ULONGLONG(0xFF00)) << 40) | + ((x & GOOGLE_ULONGLONG(0xFF0000)) << 24) | + ((x & GOOGLE_ULONGLONG(0xFF000000)) << 8) | + ((x & GOOGLE_ULONGLONG(0xFF00000000)) >> 8) | + ((x & GOOGLE_ULONGLONG(0xFF0000000000)) >> 24) | + ((x & GOOGLE_ULONGLONG(0xFF000000000000)) >> 40) | + ((x & GOOGLE_ULONGLONG(0xFF00000000000000)) >> 56)); +} +#define bswap_64(x) bswap_64(x) + +#endif + +// =================================================================== +// from google3/util/bits/bits.h + +class Bits { + public: + static uint32 Log2FloorNonZero(uint32 n) { +#if defined(__GNUC__) + return 31 ^ static_cast(__builtin_clz(n)); +#elif defined(_MSC_VER) + unsigned long where; + _BitScanReverse(&where, n); + return where; +#else + return Log2FloorNonZero_Portable(n); +#endif + } + + static uint32 Log2FloorNonZero64(uint64 n) { + // Older versions of clang run into an instruction-selection failure when + // it encounters __builtin_clzll: + // https://bugs.chromium.org/p/nativeclient/issues/detail?id=4395 + // This includes arm-nacl-clang and clang in older Android NDK versions. + // To work around this, when we build with those we use the portable + // implementation instead. 
+#if defined(__GNUC__) && !defined(GOOGLE_PROTOBUF_USE_PORTABLE_LOG2) + return 63 ^ static_cast(__builtin_clzll(n)); +#elif defined(_MSC_VER) && defined(_M_X64) + unsigned long where; + _BitScanReverse64(&where, n); + return where; +#else + return Log2FloorNonZero64_Portable(n); +#endif + } + private: + static int Log2FloorNonZero_Portable(uint32 n) { + if (n == 0) + return -1; + int log = 0; + uint32 value = n; + for (int i = 4; i >= 0; --i) { + int shift = (1 << i); + uint32 x = value >> shift; + if (x != 0) { + value = x; + log += shift; + } + } + assert(value == 1); + return log; + } + + static int Log2FloorNonZero64_Portable(uint64 n) { + const uint32 topbits = static_cast(n >> 32); + if (topbits == 0) { + // Top bits are zero, so scan in bottom bits + return static_cast(Log2FloorNonZero(static_cast(n))); + } else { + return 32 + static_cast(Log2FloorNonZero(topbits)); + } + } +}; + +// =================================================================== +// from google3/util/endian/endian.h +LIBPROTOBUF_EXPORT uint32 ghtonl(uint32 x); + +class BigEndian { + public: +#ifdef PROTOBUF_LITTLE_ENDIAN + + static uint16 FromHost16(uint16 x) { return bswap_16(x); } + static uint16 ToHost16(uint16 x) { return bswap_16(x); } + + static uint32 FromHost32(uint32 x) { return bswap_32(x); } + static uint32 ToHost32(uint32 x) { return bswap_32(x); } + + static uint64 FromHost64(uint64 x) { return bswap_64(x); } + static uint64 ToHost64(uint64 x) { return bswap_64(x); } + + static bool IsLittleEndian() { return true; } + +#else + + static uint16 FromHost16(uint16 x) { return x; } + static uint16 ToHost16(uint16 x) { return x; } + + static uint32 FromHost32(uint32 x) { return x; } + static uint32 ToHost32(uint32 x) { return x; } + + static uint64 FromHost64(uint64 x) { return x; } + static uint64 ToHost64(uint64 x) { return x; } + + static bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + + // Functions to do unaligned loads and stores in big-endian order. + static uint16 Load16(const void *p) { + return ToHost16(GOOGLE_UNALIGNED_LOAD16(p)); + } + + static void Store16(void *p, uint16 v) { + GOOGLE_UNALIGNED_STORE16(p, FromHost16(v)); + } + + static uint32 Load32(const void *p) { + return ToHost32(GOOGLE_UNALIGNED_LOAD32(p)); + } + + static void Store32(void *p, uint32 v) { + GOOGLE_UNALIGNED_STORE32(p, FromHost32(v)); + } + + static uint64 Load64(const void *p) { + return ToHost64(GOOGLE_UNALIGNED_LOAD64(p)); + } + + static void Store64(void *p, uint64 v) { + GOOGLE_UNALIGNED_STORE64(p, FromHost64(v)); + } +}; + +#ifndef GOOGLE_ATTRIBUTE_SECTION_VARIABLE +#define GOOGLE_ATTRIBUTE_SECTION_VARIABLE(name) +#endif + +#define GOOGLE_PROTOBUF_ATTRIBUTE_SECTION_VARIABLE(name) + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_STUBS_PORT_H_ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/status.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/status.h new file mode 100644 index 0000000000000000000000000000000000000000..c5d38f0b46bcb69255d7b3ecc2fa1a2a30e16286 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/status.h @@ -0,0 +1,116 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#ifndef GOOGLE_PROTOBUF_STUBS_STATUS_H_ +#define GOOGLE_PROTOBUF_STUBS_STATUS_H_ + +#include +#include + +#include +#include + +namespace google { +namespace protobuf { +namespace util { +namespace error { +// These values must match error codes defined in google/rpc/code.proto. +enum Code { + OK = 0, + CANCELLED = 1, + UNKNOWN = 2, + INVALID_ARGUMENT = 3, + DEADLINE_EXCEEDED = 4, + NOT_FOUND = 5, + ALREADY_EXISTS = 6, + PERMISSION_DENIED = 7, + UNAUTHENTICATED = 16, + RESOURCE_EXHAUSTED = 8, + FAILED_PRECONDITION = 9, + ABORTED = 10, + OUT_OF_RANGE = 11, + UNIMPLEMENTED = 12, + INTERNAL = 13, + UNAVAILABLE = 14, + DATA_LOSS = 15, +}; +} // namespace error + +class LIBPROTOBUF_EXPORT Status { + public: + // Creates a "successful" status. + Status(); + + // Create a status in the canonical error space with the specified + // code, and error message. If "code == 0", error_message is + // ignored and a Status object identical to Status::OK is + // constructed. + Status(error::Code error_code, StringPiece error_message); + Status(const Status&); + Status& operator=(const Status& x); + ~Status() {} + + // Some pre-defined Status objects + static const Status OK; // Identical to 0-arg constructor + static const Status CANCELLED; + static const Status UNKNOWN; + + // Accessor + bool ok() const { + return error_code_ == error::OK; + } + int error_code() const { + return error_code_; + } + StringPiece error_message() const { + return error_message_; + } + + bool operator==(const Status& x) const; + bool operator!=(const Status& x) const { + return !operator==(x); + } + + // Return a combination of the error code name and message. + string ToString() const; + + private: + error::Code error_code_; + string error_message_; +}; + +// Prints a human-readable representation of 'x' to 'os'. 
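Before the stream operator below, a hedged sketch of how the Status class above is typically used (ParseConfig is a hypothetical function, written as if inside namespace util):

Status ParseConfig(StringPiece text) {
  if (text.empty()) {
    return Status(error::INVALID_ARGUMENT, "config must not be empty");
  }
  // ... parse ...
  return Status::OK;
}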
+LIBPROTOBUF_EXPORT std::ostream& operator<<(std::ostream& os, const Status& x);
+
+#define EXPECT_OK(value) EXPECT_TRUE((value).ok())
+
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_STUBS_STATUS_H_
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/statusor.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/statusor.h
new file mode 100644
index 0000000000000000000000000000000000000000..29f869ad5e8d58a334c8cf7067e83027cf9b9e61
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/statusor.h
@@ -0,0 +1,259 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// StatusOr<T> is the union of a Status object and a T
+// object. StatusOr models the concept of an object that is either a
+// usable value, or an error Status explaining why such a value is
+// not present. To this end, StatusOr<T> does not allow its Status
+// value to be Status::OK. Further, StatusOr<T*> does not allow the
+// contained pointer to be NULL.
+//
+// The primary use-case for StatusOr<T> is as the return value of a
+// function which may fail.
+//
+// Example client usage for a StatusOr<T>, where T is not a pointer:
+//
+//  StatusOr<float> result = DoBigCalculationThatCouldFail();
+//  if (result.ok()) {
+//    float answer = result.ValueOrDie();
+//    printf("Big calculation yielded: %f", answer);
+//  } else {
+//    LOG(ERROR) << result.status();
+//  }
+//
+// Example client usage for a StatusOr<T*>:
+//
+//  StatusOr<Foo*> result = FooFactory::MakeNewFoo(arg);
+//  if (result.ok()) {
+//    std::unique_ptr<Foo> foo(result.ValueOrDie());
+//    foo->DoSomethingCool();
+//  } else {
+//    LOG(ERROR) << result.status();
+//  }
+//
+// Example client usage for a StatusOr<std::unique_ptr<T>>:
+//
+//  StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
+//  if (result.ok()) {
+//    std::unique_ptr<Foo> foo = result.ConsumeValueOrDie();
+//    foo->DoSomethingCool();
+//  } else {
+//    LOG(ERROR) << result.status();
+//  }
+//
+// Example factory implementation returning StatusOr<T*>:
+//
+//  StatusOr<Foo*> FooFactory::MakeNewFoo(int arg) {
+//    if (arg <= 0) {
+//      return ::util::Status(::util::error::INVALID_ARGUMENT,
+//                            "Arg must be positive");
+//    } else {
+//      return new Foo(arg);
+//    }
+//  }
+//
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STATUSOR_H_
+#define GOOGLE_PROTOBUF_STUBS_STATUSOR_H_
+
+#include <new>
+#include <string>
+#include <utility>
+
+#include <google/protobuf/stubs/status.h>
+
+namespace google {
+namespace protobuf {
+namespace util {
+
+template<typename T>
+class StatusOr {
+  template<typename U> friend class StatusOr;
+
+ public:
+  // Construct a new StatusOr with Status::UNKNOWN status
+  StatusOr();
+
+  // Construct a new StatusOr with the given non-ok status. After calling
+  // this constructor, calls to ValueOrDie() will CHECK-fail.
+  //
+  // NOTE: Not explicit - we want to use StatusOr<T> as a return
+  // value, so it is convenient and sensible to be able to do 'return
+  // Status()' when the return type is StatusOr<T>.
+  //
+  // REQUIRES: status != Status::OK. This requirement is DCHECKed.
+  // In optimized builds, passing Status::OK here will have the effect
+  // of passing PosixErrorSpace::EINVAL as a fallback.
+  StatusOr(const Status& status);  // NOLINT
+
+  // Construct a new StatusOr with the given value. If T is a plain pointer,
+  // value must not be NULL. After calling this constructor, calls to
+  // ValueOrDie() will succeed, and calls to status() will return OK.
+  //
+  // NOTE: Not explicit - we want to use StatusOr<T> as a return type
+  // so it is convenient and sensible to be able to do 'return T()'
+  // when the return type is StatusOr<T>.
+  //
+  // REQUIRES: if T is a plain pointer, value != NULL. This requirement is
+  // DCHECKed. In optimized builds, passing a NULL pointer here will have
+  // the effect of passing PosixErrorSpace::EINVAL as a fallback.
+  StatusOr(const T& value);  // NOLINT
+
+  // Copy constructor.
+  StatusOr(const StatusOr& other);
+
+  // Conversion copy constructor, T must be copy constructible from U
+  template<typename U>
+  StatusOr(const StatusOr<U>& other);
+
+  // Assignment operator.
+  StatusOr& operator=(const StatusOr& other);
+
+  // Conversion assignment operator, T must be assignable from U
+  template<typename U>
+  StatusOr& operator=(const StatusOr<U>& other);
+
+  // Returns a reference to our status. If this contains a T, then
+  // returns Status::OK.
+  const Status& status() const;
+
+  // Returns this->status().ok()
+  bool ok() const;
+
+  // Returns a reference to our current value, or CHECK-fails if !this->ok().
+  // If you need to initialize a T object from the stored value,
+  // ConsumeValueOrDie() may be more efficient.
+  const T& ValueOrDie() const;
+
+ private:
+  Status status_;
+  T value_;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation details for StatusOr<T>
+
+namespace internal {
+
+class LIBPROTOBUF_EXPORT StatusOrHelper {
+ public:
+  // Move type-agnostic error handling to the .cc.
+  static void Crash(const util::Status& status);
+
+  // Customized behavior for StatusOr<T> vs. StatusOr<T*>
+  template<typename T>
+  struct Specialize;
+};
+
+template<typename T>
+struct StatusOrHelper::Specialize {
+  // For non-pointer T, a reference can never be NULL.
+  static inline bool IsValueNull(const T& t) { return false; }
+};
+
+template<typename T>
+struct StatusOrHelper::Specialize<T*> {
+  static inline bool IsValueNull(const T* t) { return t == NULL; }
+};
+
+}  // namespace internal
+
+template<typename T>
+inline StatusOr<T>::StatusOr()
+    : status_(util::Status::UNKNOWN) {
+}
+
+template<typename T>
+inline StatusOr<T>::StatusOr(const Status& status) {
+  if (status.ok()) {
+    status_ = Status(error::INTERNAL, "Status::OK is not a valid argument.");
+  } else {
+    status_ = status;
+  }
+}
+
+template<typename T>
+inline StatusOr<T>::StatusOr(const T& value) {
+  if (internal::StatusOrHelper::Specialize<T>::IsValueNull(value)) {
+    status_ = Status(error::INTERNAL, "NULL is not a valid argument.");
+  } else {
+    status_ = Status::OK;
+    value_ = value;
+  }
+}
+
+template<typename T>
+inline StatusOr<T>::StatusOr(const StatusOr<T>& other)
+    : status_(other.status_), value_(other.value_) {
+}
+
+template<typename T>
+inline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<T>& other) {
+  status_ = other.status_;
+  value_ = other.value_;
+  return *this;
+}
+
+template<typename T>
+template<typename U>
+inline StatusOr<T>::StatusOr(const StatusOr<U>& other)
+    : status_(other.status_), value_(other.status_.ok() ? other.value_ : T()) {
+}
+
+template<typename T>
+template<typename U>
+inline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<U>& other) {
+  status_ = other.status_;
+  if (status_.ok()) value_ = other.value_;
+  return *this;
+}
+
+template<typename T>
+inline const Status& StatusOr<T>::status() const {
+  return status_;
+}
+
+template<typename T>
+inline bool StatusOr<T>::ok() const {
+  return status().ok();
+}
+
+template<typename T>
+inline const T& StatusOr<T>::ValueOrDie() const {
+  if (!status_.ok()) {
+    internal::StatusOrHelper::Crash(status_);
+  }
+  return value_;
+}
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STATUSOR_H_
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stl_util.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stl_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e4c82a4c3be35245cbeaf9e97afbdbba8ab4558
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stl_util.h
@@ -0,0 +1,121 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc.
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// from google3/util/gtl/stl_util.h + +#ifndef GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__ +#define GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__ + +#include + +namespace google { +namespace protobuf { + +// STLDeleteContainerPointers() +// For a range within a container of pointers, calls delete +// (non-array version) on these pointers. +// NOTE: for these three functions, we could just implement a DeleteObject +// functor and then call for_each() on the range and functor, but this +// requires us to pull in all of algorithm.h, which seems expensive. +// For hash_[multi]set, it is important that this deletes behind the iterator +// because the hash_set may call the hash function on the iterator when it is +// advanced, which could result in the hash function trying to deference a +// stale pointer. +template +void STLDeleteContainerPointers(ForwardIterator begin, + ForwardIterator end) { + while (begin != end) { + ForwardIterator temp = begin; + ++begin; + delete *temp; + } +} + +// Inside Google, this function implements a horrible, disgusting hack in which +// we reach into the string's private implementation and resize it without +// initializing the new bytes. In some cases doing this can significantly +// improve performance. However, since it's totally non-portable it has no +// place in open source code. Feel free to fill this function in with your +// own disgusting hack if you want the perf boost. +inline void STLStringResizeUninitialized(string* s, size_t new_size) { + s->resize(new_size); +} + +// Return a mutable char* pointing to a string's internal buffer, +// which may not be null-terminated. Writing through this pointer will +// modify the string. +// +// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the +// next call to a string method that invalidates iterators. +// +// As of 2006-04, there is no standard-blessed way of getting a +// mutable reference to a string's internal buffer. However, issue 530 +// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#530) +// proposes this as the method. According to Matt Austern, this should +// already work on all current implementations. +inline char* string_as_array(string* str) { + // DO NOT USE const_cast(str->data())! See the unittest for why. + return str->empty() ? NULL : &*str->begin(); +} + +// STLDeleteElements() deletes all the elements in an STL container and clears +// the container. This function is suitable for use with a vector, set, +// hash_set, or any other STL container which defines sensible begin(), end(), +// and clear() methods. 
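For instance, a minimal sketch of the deletion helpers documented here (Widget is a hypothetical element type):

#include <vector>

struct Widget { int id; };

void DrainWidgets() {
  std::vector<Widget*> widgets;
  widgets.push_back(new Widget{1});
  widgets.push_back(new Widget{2});
  STLDeleteElements(&widgets);  // deletes both Widgets, then clears the vector
}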
+// +// If container is NULL, this function is a no-op. +// +// As an alternative to calling STLDeleteElements() directly, consider +// ElementDeleter (defined below), which ensures that your container's elements +// are deleted when the ElementDeleter goes out of scope. +template +void STLDeleteElements(T *container) { + if (!container) return; + STLDeleteContainerPointers(container->begin(), container->end()); + container->clear(); +} + +// Given an STL container consisting of (key, value) pairs, STLDeleteValues +// deletes all the "value" components and clears the container. Does nothing +// in the case it's given a NULL pointer. + +template +void STLDeleteValues(T *v) { + if (!v) return; + for (typename T::iterator i = v->begin(); i != v->end(); ++i) { + delete i->second; + } + v->clear(); +} + +} // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stringpiece.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stringpiece.h new file mode 100644 index 0000000000000000000000000000000000000000..563ff75dcf446f69864313643d665a88ac1fc8a9 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stringpiece.h @@ -0,0 +1,487 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A StringPiece points to part or all of a string, Cord, double-quoted string +// literal, or other string-like object. A StringPiece does *not* own the +// string to which it points. A StringPiece is not null-terminated. +// +// You can use StringPiece as a function or method parameter. 
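A small sketch of that parameter-passing convention (HasPrefix is a hypothetical helper built on the starts_with() method declared later in this header):

bool HasPrefix(StringPiece s, StringPiece prefix) {
  return s.starts_with(prefix);
}

// All of these calls bind without copying character data:
//   HasPrefix("foobar", "foo");         // string literal
//   HasPrefix(some_std_string, "foo");  // std::string
//   HasPrefix(another_piece, "foo");    // StringPiece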
A StringPiece +// parameter can receive a double-quoted string literal argument, a "const +// char*" argument, a string argument, or a StringPiece argument with no data +// copying. Systematic use of StringPiece for arguments reduces data +// copies and strlen() calls. +// +// Prefer passing StringPieces by value: +// void MyFunction(StringPiece arg); +// If circumstances require, you may also pass by const reference: +// void MyFunction(const StringPiece& arg); // not preferred +// Both of these have the same lifetime semantics. Passing by value +// generates slightly smaller code. For more discussion, see the thread +// go/stringpiecebyvalue on c-users. +// +// StringPiece is also suitable for local variables if you know that +// the lifetime of the underlying object is longer than the lifetime +// of your StringPiece variable. +// +// Beware of binding a StringPiece to a temporary: +// StringPiece sp = obj.MethodReturningString(); // BAD: lifetime problem +// +// This code is okay: +// string str = obj.MethodReturningString(); // str owns its contents +// StringPiece sp(str); // GOOD, because str outlives sp +// +// StringPiece is sometimes a poor choice for a return value and usually a poor +// choice for a data member. If you do use a StringPiece this way, it is your +// responsibility to ensure that the object pointed to by the StringPiece +// outlives the StringPiece. +// +// A StringPiece may represent just part of a string; thus the name "Piece". +// For example, when splitting a string, vector is a natural data +// type for the output. For another example, a Cord is a non-contiguous, +// potentially very long string-like object. The Cord class has an interface +// that iteratively provides StringPiece objects that point to the +// successive pieces of a Cord object. +// +// A StringPiece is not null-terminated. If you write code that scans a +// StringPiece, you must check its length before reading any characters. +// Common idioms that work on null-terminated strings do not work on +// StringPiece objects. +// +// There are several ways to create a null StringPiece: +// StringPiece() +// StringPiece(NULL) +// StringPiece(NULL, 0) +// For all of the above, sp.data() == NULL, sp.length() == 0, +// and sp.empty() == true. Also, if you create a StringPiece with +// a non-NULL pointer then sp.data() != NULL. Once created, +// sp.data() will stay either NULL or not-NULL, except if you call +// sp.clear() or sp.set(). +// +// Thus, you can use StringPiece(NULL) to signal an out-of-band value +// that is different from other StringPiece values. This is similar +// to the way that const char* p1 = NULL; is different from +// const char* p2 = "";. +// +// There are many ways to create an empty StringPiece: +// StringPiece() +// StringPiece(NULL) +// StringPiece(NULL, 0) +// StringPiece("") +// StringPiece("", 0) +// StringPiece("abcdef", 0) +// StringPiece("abcdef"+6, 0) +// For all of the above, sp.length() will be 0 and sp.empty() will be true. +// For some empty StringPiece values, sp.data() will be NULL. +// For some empty StringPiece values, sp.data() will not be NULL. +// +// Be careful not to confuse: null StringPiece and empty StringPiece. +// The set of empty StringPieces properly includes the set of null StringPieces. +// That is, every null StringPiece is an empty StringPiece, +// but some non-null StringPieces are empty Stringpieces too. +// +// All empty StringPiece values compare equal to each other. 
+// Even a null StringPieces compares equal to a non-null empty StringPiece: +// StringPiece() == StringPiece("", 0) +// StringPiece(NULL) == StringPiece("abc", 0) +// StringPiece(NULL, 0) == StringPiece("abcdef"+6, 0) +// +// Look carefully at this example: +// StringPiece("") == NULL +// True or false? TRUE, because StringPiece::operator== converts +// the right-hand side from NULL to StringPiece(NULL), +// and then compares two zero-length spans of characters. +// However, we are working to make this example produce a compile error. +// +// Suppose you want to write: +// bool TestWhat?(StringPiece sp) { return sp == NULL; } // BAD +// Do not do that. Write one of these instead: +// bool TestNull(StringPiece sp) { return sp.data() == NULL; } +// bool TestEmpty(StringPiece sp) { return sp.empty(); } +// The intent of TestWhat? is unclear. Did you mean TestNull or TestEmpty? +// Right now, TestWhat? behaves likes TestEmpty. +// We are working to make TestWhat? produce a compile error. +// TestNull is good to test for an out-of-band signal. +// TestEmpty is good to test for an empty StringPiece. +// +// Caveats (again): +// (1) The lifetime of the pointed-to string (or piece of a string) +// must be longer than the lifetime of the StringPiece. +// (2) There may or may not be a '\0' character after the end of +// StringPiece data. +// (3) A null StringPiece is empty. +// An empty StringPiece may or may not be a null StringPiece. + +#ifndef GOOGLE_PROTOBUF_STUBS_STRINGPIECE_H_ +#define GOOGLE_PROTOBUF_STUBS_STRINGPIECE_H_ + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace google { +namespace protobuf { +// StringPiece has *two* size types. +// StringPiece::size_type +// is unsigned +// is 32 bits in LP32, 64 bits in LP64, 64 bits in LLP64 +// no future changes intended +// stringpiece_ssize_type +// is signed +// is 32 bits in LP32, 64 bits in LP64, 64 bits in LLP64 +// future changes intended: http://go/64BitStringPiece +// +typedef string::difference_type stringpiece_ssize_type; + +// STRINGPIECE_CHECK_SIZE protects us from 32-bit overflows. +// TODO(mec): delete this after stringpiece_ssize_type goes 64 bit. +#if !defined(NDEBUG) +#define STRINGPIECE_CHECK_SIZE 1 +#elif defined(_FORTIFY_SOURCE) && _FORTIFY_SOURCE > 0 +#define STRINGPIECE_CHECK_SIZE 1 +#else +#define STRINGPIECE_CHECK_SIZE 0 +#endif + +class LIBPROTOBUF_EXPORT StringPiece { + private: + const char* ptr_; + stringpiece_ssize_type length_; + + // Prevent overflow in debug mode or fortified mode. + // sizeof(stringpiece_ssize_type) may be smaller than sizeof(size_t). + static stringpiece_ssize_type CheckedSsizeTFromSizeT(size_t size) { +#if STRINGPIECE_CHECK_SIZE > 0 +#ifdef max +#undef max +#endif + if (size > static_cast( + std::numeric_limits::max())) { + // Some people grep for this message in logs + // so take care if you ever change it. + LogFatalSizeTooBig(size, "size_t to int conversion"); + } +#endif + return static_cast(size); + } + + // Out-of-line error path. + static void LogFatalSizeTooBig(size_t size, const char* details); + + public: + // We provide non-explicit singleton constructors so users can pass + // in a "const char*" or a "string" wherever a "StringPiece" is + // expected. 
+ // + // Style guide exception granted: + // http://goto/style-guide-exception-20978288 + StringPiece() : ptr_(NULL), length_(0) {} + + StringPiece(const char* str) // NOLINT(runtime/explicit) + : ptr_(str), length_(0) { + if (str != NULL) { + length_ = CheckedSsizeTFromSizeT(strlen(str)); + } + } + + template + StringPiece( // NOLINT(runtime/explicit) + const std::basic_string, Allocator>& str) + : ptr_(str.data()), length_(0) { + length_ = CheckedSsizeTFromSizeT(str.size()); + } + + StringPiece(const char* offset, stringpiece_ssize_type len) + : ptr_(offset), length_(len) { + assert(len >= 0); + } + + // Substring of another StringPiece. + // pos must be non-negative and <= x.length(). + StringPiece(StringPiece x, stringpiece_ssize_type pos); + // Substring of another StringPiece. + // pos must be non-negative and <= x.length(). + // len must be non-negative and will be pinned to at most x.length() - pos. + StringPiece(StringPiece x, + stringpiece_ssize_type pos, + stringpiece_ssize_type len); + + // data() may return a pointer to a buffer with embedded NULs, and the + // returned buffer may or may not be null terminated. Therefore it is + // typically a mistake to pass data() to a routine that expects a NUL + // terminated string. + const char* data() const { return ptr_; } + stringpiece_ssize_type size() const { return length_; } + stringpiece_ssize_type length() const { return length_; } + bool empty() const { return length_ == 0; } + + void clear() { + ptr_ = NULL; + length_ = 0; + } + + void set(const char* data, stringpiece_ssize_type len) { + assert(len >= 0); + ptr_ = data; + length_ = len; + } + + void set(const char* str) { + ptr_ = str; + if (str != NULL) + length_ = CheckedSsizeTFromSizeT(strlen(str)); + else + length_ = 0; + } + + void set(const void* data, stringpiece_ssize_type len) { + ptr_ = reinterpret_cast(data); + length_ = len; + } + + char operator[](stringpiece_ssize_type i) const { + assert(0 <= i); + assert(i < length_); + return ptr_[i]; + } + + void remove_prefix(stringpiece_ssize_type n) { + assert(length_ >= n); + ptr_ += n; + length_ -= n; + } + + void remove_suffix(stringpiece_ssize_type n) { + assert(length_ >= n); + length_ -= n; + } + + // returns {-1, 0, 1} + int compare(StringPiece x) const { + const stringpiece_ssize_type min_size = + length_ < x.length_ ? length_ : x.length_; + int r = memcmp(ptr_, x.ptr_, static_cast(min_size)); + if (r < 0) return -1; + if (r > 0) return 1; + if (length_ < x.length_) return -1; + if (length_ > x.length_) return 1; + return 0; + } + + string as_string() const { + return ToString(); + } + // We also define ToString() here, since many other string-like + // interfaces name the routine that converts to a C++ string + // "ToString", and it's confusing to have the method that does that + // for a StringPiece be called "as_string()". We also leave the + // "as_string()" method defined here for existing code. 
+ string ToString() const { + if (ptr_ == NULL) return string(); + return string(data(), static_cast(size())); + } + + operator string() const { + return ToString(); + } + + void CopyToString(string* target) const; + void AppendToString(string* target) const; + + bool starts_with(StringPiece x) const { + return (length_ >= x.length_) && + (memcmp(ptr_, x.ptr_, static_cast(x.length_)) == 0); + } + + bool ends_with(StringPiece x) const { + return ((length_ >= x.length_) && + (memcmp(ptr_ + (length_-x.length_), x.ptr_, + static_cast(x.length_)) == 0)); + } + + // Checks whether StringPiece starts with x and if so advances the beginning + // of it to past the match. It's basically a shortcut for starts_with + // followed by remove_prefix. + bool Consume(StringPiece x); + // Like above but for the end of the string. + bool ConsumeFromEnd(StringPiece x); + + // standard STL container boilerplate + typedef char value_type; + typedef const char* pointer; + typedef const char& reference; + typedef const char& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + static const size_type npos; + typedef const char* const_iterator; + typedef const char* iterator; + typedef std::reverse_iterator const_reverse_iterator; + typedef std::reverse_iterator reverse_iterator; + iterator begin() const { return ptr_; } + iterator end() const { return ptr_ + length_; } + const_reverse_iterator rbegin() const { + return const_reverse_iterator(ptr_ + length_); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(ptr_); + } + stringpiece_ssize_type max_size() const { return length_; } + stringpiece_ssize_type capacity() const { return length_; } + + // cpplint.py emits a false positive [build/include_what_you_use] + stringpiece_ssize_type copy(char* buf, size_type n, size_type pos = 0) const; // NOLINT + + bool contains(StringPiece s) const; + + stringpiece_ssize_type find(StringPiece s, size_type pos = 0) const; + stringpiece_ssize_type find(char c, size_type pos = 0) const; + stringpiece_ssize_type rfind(StringPiece s, size_type pos = npos) const; + stringpiece_ssize_type rfind(char c, size_type pos = npos) const; + + stringpiece_ssize_type find_first_of(StringPiece s, size_type pos = 0) const; + stringpiece_ssize_type find_first_of(char c, size_type pos = 0) const { + return find(c, pos); + } + stringpiece_ssize_type find_first_not_of(StringPiece s, + size_type pos = 0) const; + stringpiece_ssize_type find_first_not_of(char c, size_type pos = 0) const; + stringpiece_ssize_type find_last_of(StringPiece s, + size_type pos = npos) const; + stringpiece_ssize_type find_last_of(char c, size_type pos = npos) const { + return rfind(c, pos); + } + stringpiece_ssize_type find_last_not_of(StringPiece s, + size_type pos = npos) const; + stringpiece_ssize_type find_last_not_of(char c, size_type pos = npos) const; + + StringPiece substr(size_type pos, size_type n = npos) const; +}; + +// This large function is defined inline so that in a fairly common case where +// one of the arguments is a literal, the compiler can elide a lot of the +// following comparisons. 
+inline bool operator==(StringPiece x, StringPiece y) { + stringpiece_ssize_type len = x.size(); + if (len != y.size()) { + return false; + } + + return x.data() == y.data() || len <= 0 || + memcmp(x.data(), y.data(), static_cast(len)) == 0; +} + +inline bool operator!=(StringPiece x, StringPiece y) { + return !(x == y); +} + +inline bool operator<(StringPiece x, StringPiece y) { + const stringpiece_ssize_type min_size = + x.size() < y.size() ? x.size() : y.size(); + const int r = memcmp(x.data(), y.data(), static_cast(min_size)); + return (r < 0) || (r == 0 && x.size() < y.size()); +} + +inline bool operator>(StringPiece x, StringPiece y) { + return y < x; +} + +inline bool operator<=(StringPiece x, StringPiece y) { + return !(x > y); +} + +inline bool operator>=(StringPiece x, StringPiece y) { + return !(x < y); +} + +// allow StringPiece to be logged +extern std::ostream& operator<<(std::ostream& o, StringPiece piece); + +namespace internal { +// StringPiece is not a POD and can not be used in an union (pre C++11). We +// need a POD version of it. +struct StringPiecePod { + // Create from a StringPiece. + static StringPiecePod CreateFromStringPiece(StringPiece str) { + StringPiecePod pod; + pod.data_ = str.data(); + pod.size_ = str.size(); + return pod; + } + + // Cast to StringPiece. + operator StringPiece() const { return StringPiece(data_, size_); } + + bool operator==(const char* value) const { + return StringPiece(data_, size_) == StringPiece(value); + } + + char operator[](stringpiece_ssize_type i) const { + assert(0 <= i); + assert(i < size_); + return data_[i]; + } + + const char* data() const { return data_; } + + stringpiece_ssize_type size() const { + return size_; + } + + std::string ToString() const { + return std::string(data_, static_cast(size_)); + } + private: + const char* data_; + stringpiece_ssize_type size_; +}; + +} // namespace internal +} // namespace protobuf +} // namespace google + +GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START +template<> struct hash { + size_t operator()(const StringPiece& s) const { + size_t result = 0; + for (const char *str = s.data(), *end = str + s.size(); str < end; str++) { + result = 5 * result + static_cast(*str); + } + return result; + } +}; +GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END + +#endif // STRINGS_STRINGPIECE_H_ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stringprintf.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stringprintf.h new file mode 100644 index 0000000000000000000000000000000000000000..7183ec6a07e7845cf805ebfc03a448cb215977bc --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/stringprintf.h @@ -0,0 +1,76 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2012 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/base/stringprintf.h
+//
+// Printf variants that place their output in a C++ string.
+//
+// Usage:
+//      string result = StringPrintf("%d %s\n", 10, "hello");
+//      SStringPrintf(&result, "%d %s\n", 10, "hello");
+//      StringAppendF(&result, "%d %s\n", 20, "there");
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STRINGPRINTF_H
+#define GOOGLE_PROTOBUF_STUBS_STRINGPRINTF_H
+
+#include <stdarg.h>
+#include <string>
+#include <vector>
+
+#include <google/protobuf/stubs/common.h>
+
+namespace google {
+namespace protobuf {
+
+// Return a C++ string
+LIBPROTOBUF_EXPORT extern string StringPrintf(const char* format, ...);
+
+// Store result into a supplied string and return it
+LIBPROTOBUF_EXPORT extern const string& SStringPrintf(string* dst, const char* format, ...);
+
+// Append result to a supplied string
+LIBPROTOBUF_EXPORT extern void StringAppendF(string* dst, const char* format, ...);
+
+// Lower-level routine that takes a va_list and appends to a specified
+// string.  All other routines are just convenience wrappers around it.
+LIBPROTOBUF_EXPORT extern void StringAppendV(string* dst, const char* format, va_list ap);
+
+// The max arguments supported by StringPrintfVector
+LIBPROTOBUF_EXPORT extern const int kStringPrintfVectorMaxArgs;
+
+// You can use this version when all your arguments are strings, but
+// you don't know how many arguments you'll have at compile time.
+// StringPrintfVector will LOG(FATAL) if v.size() > kStringPrintfVectorMaxArgs
+LIBPROTOBUF_EXPORT extern string StringPrintfVector(const char* format, const std::vector<string>& v);
+
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_STRINGPRINTF_H
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/strutil.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/strutil.h
new file mode 100644
index 0000000000000000000000000000000000000000..a839b8b3ed45af791b80817c0b721fee52ffa57f
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/strutil.h
@@ -0,0 +1,878 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// from google3/strings/strutil.h
+
+#ifndef GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
+#define GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
+
+#include <stdlib.h>
+#include <vector>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/stringpiece.h>
+
+namespace google {
+namespace protobuf {
+
+#ifdef _MSC_VER
+#define strtoll  _strtoi64
+#define strtoull _strtoui64
+#elif defined(__DECCXX) && defined(__osf__)
+// HP C++ on Tru64 does not have strtoll, but strtol is already 64-bit.
+#define strtoll strtol
+#define strtoull strtoul
+#endif
+
+// ----------------------------------------------------------------------
+// ascii_isalnum()
+//    Check if an ASCII character is alphanumeric.  We can't use ctype's
+//    isalnum() because it is affected by locale.  This function is applied
+//    to identifiers in the protocol buffer language, not to natural-language
+//    strings, so locale should not be taken into account.
+// ascii_isdigit()
+//    Like above, but only accepts digits.
+// ascii_isspace()
+//    Check if the character is a space character.
+// ----------------------------------------------------------------------
+
+inline bool ascii_isalnum(char c) {
+  return ('a' <= c && c <= 'z') ||
+         ('A' <= c && c <= 'Z') ||
+         ('0' <= c && c <= '9');
+}
+
+inline bool ascii_isdigit(char c) {
+  return ('0' <= c && c <= '9');
+}
+
+inline bool ascii_isspace(char c) {
+  return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' ||
+      c == '\r';
+}
+
+inline bool ascii_isupper(char c) {
+  return c >= 'A' && c <= 'Z';
+}
+
+inline bool ascii_islower(char c) {
+  return c >= 'a' && c <= 'z';
+}
+
+inline char ascii_toupper(char c) {
+  return ascii_islower(c) ? c - ('a' - 'A') : c;
+}
+
+inline char ascii_tolower(char c) {
+  return ascii_isupper(c) ? c + ('a' - 'A') : c;
+}
+
+inline int hex_digit_to_int(char c) {
+  /* Assume ASCII.
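+     In ASCII, '0'..'9' are 0x30..0x39 and the low nibbles of 'a'..'f' and
+     'A'..'F' are 0x1..0x6, so for a letter the "+= 9" below turns that low
+     nibble into the digit value 0xA..0xF before the final "& 0xf" mask.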
+  */
+  int x = static_cast<unsigned char>(c);
+  if (x > '9') {
+    x += 9;
+  }
+  return x & 0xf;
+}
+
+// ----------------------------------------------------------------------
+// HasPrefixString()
+//    Check if a string begins with a given prefix.
+// StripPrefixString()
+//    Given a string and a putative prefix, returns the string minus the
+//    prefix string if the prefix matches, otherwise the original
+//    string.
+// ----------------------------------------------------------------------
+inline bool HasPrefixString(const string& str,
+                            const string& prefix) {
+  return str.size() >= prefix.size() &&
+         str.compare(0, prefix.size(), prefix) == 0;
+}
+
+inline string StripPrefixString(const string& str, const string& prefix) {
+  if (HasPrefixString(str, prefix)) {
+    return str.substr(prefix.size());
+  } else {
+    return str;
+  }
+}
+
+// ----------------------------------------------------------------------
+// HasSuffixString()
+//    Return true if str ends in suffix.
+// StripSuffixString()
+//    Given a string and a putative suffix, returns the string minus the
+//    suffix string if the suffix matches, otherwise the original
+//    string.
+// ----------------------------------------------------------------------
+inline bool HasSuffixString(const string& str,
+                            const string& suffix) {
+  return str.size() >= suffix.size() &&
+         str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
+}
+
+inline string StripSuffixString(const string& str, const string& suffix) {
+  if (HasSuffixString(str, suffix)) {
+    return str.substr(0, str.size() - suffix.size());
+  } else {
+    return str;
+  }
+}
+
+// ----------------------------------------------------------------------
+// ReplaceCharacters
+//    Replaces any occurrence of the character 'remove' (or the characters
+//    in 'remove') with the character 'replacewith'.
+//    Good for keeping html characters or protocol characters (\t) out
+//    of places where they might cause a problem.
+// StripWhitespace
+//    Removes whitespaces from both ends of the given string.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void ReplaceCharacters(string* s, const char* remove,
+                                          char replacewith);
+LIBPROTOBUF_EXPORT void StripString(string* s, const char* remove,
+                                    char replacewith);
+
+LIBPROTOBUF_EXPORT void StripWhitespace(string* s);
+
+
+// ----------------------------------------------------------------------
+// LowerString()
+// UpperString()
+// ToUpper()
+//    Convert the characters in "s" to lowercase or uppercase.  ASCII-only:
+//    these functions intentionally ignore locale because they are applied to
+//    identifiers used in the Protocol Buffer language, not to natural-language
+//    strings.
+// ----------------------------------------------------------------------
+
+inline void LowerString(string * s) {
+  string::iterator end = s->end();
+  for (string::iterator i = s->begin(); i != end; ++i) {
+    // tolower() changes based on locale.  We don't want this!
+    if ('A' <= *i && *i <= 'Z') *i += 'a' - 'A';
+  }
+}
+
+inline void UpperString(string * s) {
+  string::iterator end = s->end();
+  for (string::iterator i = s->begin(); i != end; ++i) {
+    // toupper() changes based on locale.  We don't want this!
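+    // Adding 'A' - 'a' (-32 in ASCII) maps 'a'..'z' onto 'A'..'Z'.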
+    if ('a' <= *i && *i <= 'z') *i += 'A' - 'a';
+  }
+}
+
+inline string ToUpper(const string& s) {
+  string out = s;
+  UpperString(&out);
+  return out;
+}
+
+// ----------------------------------------------------------------------
+// StringReplace()
+//    Give me a string and two patterns "old" and "new", and I replace
+//    the first instance of "old" in the string with "new", if it
+//    exists.  RETURN a new string, regardless of whether the replacement
+//    happened or not.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT string StringReplace(const string& s, const string& oldsub,
+                                        const string& newsub, bool replace_all);
+
+// ----------------------------------------------------------------------
+// SplitStringUsing()
+//    Split a string using a character delimiter. Append the components
+//    to 'result'.  If there are consecutive delimiters, this function skips
+//    over all of them.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void SplitStringUsing(const string& full, const char* delim,
+                                         std::vector<string>* res);
+
+// Split a string using one or more byte delimiters, presented
+// as a nul-terminated c string.  Append the components to 'result'.
+// If there are consecutive delimiters, this function will return
+// corresponding empty strings.  If you want to drop the empty
+// strings, try SplitStringUsing().
+//
+// If "full" is the empty string, yields an empty string as the only value.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void SplitStringAllowEmpty(const string& full,
+                                              const char* delim,
+                                              std::vector<string>* result);
+
+// ----------------------------------------------------------------------
+// Split()
+//    Split a string using a character delimiter.
+// ----------------------------------------------------------------------
+inline std::vector<string> Split(
+    const string& full, const char* delim, bool skip_empty = true) {
+  std::vector<string> result;
+  if (skip_empty) {
+    SplitStringUsing(full, delim, &result);
+  } else {
+    SplitStringAllowEmpty(full, delim, &result);
+  }
+  return result;
+}
+
+// ----------------------------------------------------------------------
+// JoinStrings()
+//    These methods concatenate a vector of strings into a C++ string, using
+//    the C-string "delim" as a separator between components. There are two
+//    flavors of the function, one flavor returns the concatenated string,
+//    another takes a pointer to the target string. In the latter case the
+//    target string is cleared and overwritten.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void JoinStrings(const std::vector<string>& components,
+                                    const char* delim, string* result);
+
+inline string JoinStrings(const std::vector<string>& components,
+                          const char* delim) {
+  string result;
+  JoinStrings(components, delim, &result);
+  return result;
+}
+
+// ----------------------------------------------------------------------
+// UnescapeCEscapeSequences()
+//    Copies "source" to "dest", rewriting C-style escape sequences
+//    -- '\n', '\r', '\\', '\ooo', etc -- to their ASCII
+//    equivalents.  "dest" must be sufficiently large to hold all
+//    the characters in the rewritten string (i.e. at least as large
+//    as strlen(source) + 1 should be safe, since the replacements
+//    are always shorter than the original escaped sequences).  It's
+//    safe for source and dest to be the same.  RETURNS the length
+//    of dest.
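+//
+//    For example, unescaping the 7-character source "ab\x41c" (where the
+//    backslash, 'x', '4' and '1' are literal characters) writes the 4 bytes
+//    "abAc" to dest and returns 4.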
+//
+//    It allows hex sequences \xhh, or generally \xhhhhh with an
+//    arbitrary number of hex digits, but all of them together must
+//    specify a value of a single byte (e.g. \x0045 is equivalent
+//    to \x45, and \x1234 is erroneous).
+//
+//    It also allows escape sequences of the form \uhhhh (exactly four
+//    hex digits, upper or lower case) or \Uhhhhhhhh (exactly eight
+//    hex digits, upper or lower case) to specify a Unicode code
+//    point. The dest array will contain the UTF8-encoded version of
+//    that code-point (e.g., if source contains \u2019, then dest will
+//    contain the three bytes 0xE2, 0x80, and 0x99).
+//
+//    Errors: In the first form of the call, errors are reported with
+//    LOG(ERROR). The same is true for the second form of the call if
+//    the pointer to the string std::vector is NULL; otherwise, error
+//    messages are stored in the std::vector. In either case, the effect on
+//    the dest array is not defined, but rest of the source will be
+//    processed.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest);
+LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest,
+                                                std::vector<string> *errors);
+
+// ----------------------------------------------------------------------
+// UnescapeCEscapeString()
+//    This does the same thing as UnescapeCEscapeSequences, but creates
+//    a new string. The caller does not need to worry about allocating
+//    a dest buffer. This should be used for non performance critical
+//    tasks such as printing debug messages. It is safe for src and dest
+//    to be the same.
+//
+//    The second call stores its errors in a supplied string vector.
+//    If the string vector pointer is NULL, it reports the errors with LOG().
+//
+//    In the first and second calls, the length of dest is returned. In the
+//    third call, the new string is returned.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest);
+LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest,
+                                             std::vector<string> *errors);
+LIBPROTOBUF_EXPORT string UnescapeCEscapeString(const string& src);
+
+// ----------------------------------------------------------------------
+// CEscape()
+//    Escapes 'src' using C-style escape sequences and returns the resulting
+//    string.
+//
+//    Escaped chars: \n, \r, \t, ", ', \, and !isprint().
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT string CEscape(const string& src);
+
+// ----------------------------------------------------------------------
+// CEscapeAndAppend()
+//    Escapes 'src' using C-style escape sequences, and appends the escaped
+//    string to 'dest'.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT void CEscapeAndAppend(StringPiece src, string* dest);
+
+namespace strings {
+// Like CEscape() but does not escape bytes with the upper bit set.
+LIBPROTOBUF_EXPORT string Utf8SafeCEscape(const string& src);
+
+// Like CEscape() but uses hex (\x) escapes instead of octals.
+LIBPROTOBUF_EXPORT string CHexEscape(const string& src);
+}  // namespace strings
+
+// ----------------------------------------------------------------------
+// strto32()
+// strtou32()
+// strto64()
+// strtou64()
+//    Architecture-neutral plug compatible replacements for strtol() and
+//    strtoul().
+//    Longs have different lengths on ILP-32 and LP-64
+//    platforms, so using these is safer, from the point of view of
+//    overflow behavior, than using the standard libc functions.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int32 strto32_adaptor(const char *nptr, char **endptr,
+                                         int base);
+LIBPROTOBUF_EXPORT uint32 strtou32_adaptor(const char *nptr, char **endptr,
+                                           int base);
+
+inline int32 strto32(const char *nptr, char **endptr, int base) {
+  if (sizeof(int32) == sizeof(long))
+    return strtol(nptr, endptr, base);
+  else
+    return strto32_adaptor(nptr, endptr, base);
+}
+
+inline uint32 strtou32(const char *nptr, char **endptr, int base) {
+  if (sizeof(uint32) == sizeof(unsigned long))
+    return strtoul(nptr, endptr, base);
+  else
+    return strtou32_adaptor(nptr, endptr, base);
+}
+
+// For now, long long is 64-bit on all the platforms we care about, so these
+// functions can simply pass the call to strto[u]ll.
+inline int64 strto64(const char *nptr, char **endptr, int base) {
+  GOOGLE_COMPILE_ASSERT(sizeof(int64) == sizeof(long long),
+                        sizeof_int64_is_not_sizeof_long_long);
+  return strtoll(nptr, endptr, base);
+}
+
+inline uint64 strtou64(const char *nptr, char **endptr, int base) {
+  GOOGLE_COMPILE_ASSERT(sizeof(uint64) == sizeof(unsigned long long),
+                        sizeof_uint64_is_not_sizeof_long_long);
+  return strtoull(nptr, endptr, base);
+}
+
+// ----------------------------------------------------------------------
+// safe_strtob()
+// safe_strto32()
+// safe_strtou32()
+// safe_strto64()
+// safe_strtou64()
+// safe_strtof()
+// safe_strtod()
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT bool safe_strtob(StringPiece str, bool* value);
+
+LIBPROTOBUF_EXPORT bool safe_strto32(const string& str, int32* value);
+LIBPROTOBUF_EXPORT bool safe_strtou32(const string& str, uint32* value);
+inline bool safe_strto32(const char* str, int32* value) {
+  return safe_strto32(string(str), value);
+}
+inline bool safe_strto32(StringPiece str, int32* value) {
+  return safe_strto32(str.ToString(), value);
+}
+inline bool safe_strtou32(const char* str, uint32* value) {
+  return safe_strtou32(string(str), value);
+}
+inline bool safe_strtou32(StringPiece str, uint32* value) {
+  return safe_strtou32(str.ToString(), value);
+}
+
+LIBPROTOBUF_EXPORT bool safe_strto64(const string& str, int64* value);
+LIBPROTOBUF_EXPORT bool safe_strtou64(const string& str, uint64* value);
+inline bool safe_strto64(const char* str, int64* value) {
+  return safe_strto64(string(str), value);
+}
+inline bool safe_strto64(StringPiece str, int64* value) {
+  return safe_strto64(str.ToString(), value);
+}
+inline bool safe_strtou64(const char* str, uint64* value) {
+  return safe_strtou64(string(str), value);
+}
+inline bool safe_strtou64(StringPiece str, uint64* value) {
+  return safe_strtou64(str.ToString(), value);
+}
+
+LIBPROTOBUF_EXPORT bool safe_strtof(const char* str, float* value);
+LIBPROTOBUF_EXPORT bool safe_strtod(const char* str, double* value);
+inline bool safe_strtof(const string& str, float* value) {
+  return safe_strtof(str.c_str(), value);
+}
+inline bool safe_strtod(const string& str, double* value) {
+  return safe_strtod(str.c_str(), value);
+}
+inline bool safe_strtof(StringPiece str, float* value) {
+  return safe_strtof(str.ToString(), value);
+}
+inline bool safe_strtod(StringPiece str, double* value) {
+  return safe_strtod(str.ToString(), value);
+}
+
+//
---------------------------------------------------------------------- +// FastIntToBuffer() +// FastHexToBuffer() +// FastHex64ToBuffer() +// FastHex32ToBuffer() +// FastTimeToBuffer() +// These are intended for speed. FastIntToBuffer() assumes the +// integer is non-negative. FastHexToBuffer() puts output in +// hex rather than decimal. FastTimeToBuffer() puts the output +// into RFC822 format. +// +// FastHex64ToBuffer() puts a 64-bit unsigned value in hex-format, +// padded to exactly 16 bytes (plus one byte for '\0') +// +// FastHex32ToBuffer() puts a 32-bit unsigned value in hex-format, +// padded to exactly 8 bytes (plus one byte for '\0') +// +// All functions take the output buffer as an arg. +// They all return a pointer to the beginning of the output, +// which may not be the beginning of the input buffer. +// ---------------------------------------------------------------------- + +// Suggested buffer size for FastToBuffer functions. Also works with +// DoubleToBuffer() and FloatToBuffer(). +static const int kFastToBufferSize = 32; + +LIBPROTOBUF_EXPORT char* FastInt32ToBuffer(int32 i, char* buffer); +LIBPROTOBUF_EXPORT char* FastInt64ToBuffer(int64 i, char* buffer); +char* FastUInt32ToBuffer(uint32 i, char* buffer); // inline below +char* FastUInt64ToBuffer(uint64 i, char* buffer); // inline below +LIBPROTOBUF_EXPORT char* FastHexToBuffer(int i, char* buffer); +LIBPROTOBUF_EXPORT char* FastHex64ToBuffer(uint64 i, char* buffer); +LIBPROTOBUF_EXPORT char* FastHex32ToBuffer(uint32 i, char* buffer); + +// at least 22 bytes long +inline char* FastIntToBuffer(int i, char* buffer) { + return (sizeof(i) == 4 ? + FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer)); +} +inline char* FastUIntToBuffer(unsigned int i, char* buffer) { + return (sizeof(i) == 4 ? + FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer)); +} +inline char* FastLongToBuffer(long i, char* buffer) { + return (sizeof(i) == 4 ? + FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer)); +} +inline char* FastULongToBuffer(unsigned long i, char* buffer) { + return (sizeof(i) == 4 ? + FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer)); +} + +// ---------------------------------------------------------------------- +// FastInt32ToBufferLeft() +// FastUInt32ToBufferLeft() +// FastInt64ToBufferLeft() +// FastUInt64ToBufferLeft() +// +// Like the Fast*ToBuffer() functions above, these are intended for speed. +// Unlike the Fast*ToBuffer() functions, however, these functions write +// their output to the beginning of the buffer (hence the name, as the +// output is left-aligned). The caller is responsible for ensuring that +// the buffer has enough space to hold the output. +// +// Returns a pointer to the end of the string (i.e. the null character +// terminating the string). +// ---------------------------------------------------------------------- + +LIBPROTOBUF_EXPORT char* FastInt32ToBufferLeft(int32 i, char* buffer); +LIBPROTOBUF_EXPORT char* FastUInt32ToBufferLeft(uint32 i, char* buffer); +LIBPROTOBUF_EXPORT char* FastInt64ToBufferLeft(int64 i, char* buffer); +LIBPROTOBUF_EXPORT char* FastUInt64ToBufferLeft(uint64 i, char* buffer); + +// Just define these in terms of the above. +inline char* FastUInt32ToBuffer(uint32 i, char* buffer) { + FastUInt32ToBufferLeft(i, buffer); + return buffer; +} +inline char* FastUInt64ToBuffer(uint64 i, char* buffer) { + FastUInt64ToBufferLeft(i, buffer); + return buffer; +} + +inline string SimpleBtoa(bool value) { + return value ? 
"true" : "false"; +} + +// ---------------------------------------------------------------------- +// SimpleItoa() +// Description: converts an integer to a string. +// +// Return value: string +// ---------------------------------------------------------------------- +LIBPROTOBUF_EXPORT string SimpleItoa(int i); +LIBPROTOBUF_EXPORT string SimpleItoa(unsigned int i); +LIBPROTOBUF_EXPORT string SimpleItoa(long i); +LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long i); +LIBPROTOBUF_EXPORT string SimpleItoa(long long i); +LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long long i); + +// ---------------------------------------------------------------------- +// SimpleDtoa() +// SimpleFtoa() +// DoubleToBuffer() +// FloatToBuffer() +// Description: converts a double or float to a string which, if +// passed to NoLocaleStrtod(), will produce the exact same original double +// (except in case of NaN; all NaNs are considered the same value). +// We try to keep the string short but it's not guaranteed to be as +// short as possible. +// +// DoubleToBuffer() and FloatToBuffer() write the text to the given +// buffer and return it. The buffer must be at least +// kDoubleToBufferSize bytes for doubles and kFloatToBufferSize +// bytes for floats. kFastToBufferSize is also guaranteed to be large +// enough to hold either. +// +// Return value: string +// ---------------------------------------------------------------------- +LIBPROTOBUF_EXPORT string SimpleDtoa(double value); +LIBPROTOBUF_EXPORT string SimpleFtoa(float value); + +LIBPROTOBUF_EXPORT char* DoubleToBuffer(double i, char* buffer); +LIBPROTOBUF_EXPORT char* FloatToBuffer(float i, char* buffer); + +// In practice, doubles should never need more than 24 bytes and floats +// should never need more than 14 (including null terminators), but we +// overestimate to be safe. +static const int kDoubleToBufferSize = 32; +static const int kFloatToBufferSize = 24; + +namespace strings { + +enum PadSpec { + NO_PAD = 1, + ZERO_PAD_2, + ZERO_PAD_3, + ZERO_PAD_4, + ZERO_PAD_5, + ZERO_PAD_6, + ZERO_PAD_7, + ZERO_PAD_8, + ZERO_PAD_9, + ZERO_PAD_10, + ZERO_PAD_11, + ZERO_PAD_12, + ZERO_PAD_13, + ZERO_PAD_14, + ZERO_PAD_15, + ZERO_PAD_16, +}; + +struct Hex { + uint64 value; + enum PadSpec spec; + template + explicit Hex(Int v, PadSpec s = NO_PAD) + : spec(s) { + // Prevent sign-extension by casting integers to + // their unsigned counterparts. +#ifdef LANG_CXX11 + static_assert( + sizeof(v) == 1 || sizeof(v) == 2 || sizeof(v) == 4 || sizeof(v) == 8, + "Unknown integer type"); +#endif + value = sizeof(v) == 1 ? static_cast(v) + : sizeof(v) == 2 ? static_cast(v) + : sizeof(v) == 4 ? static_cast(v) + : static_cast(v); + } +}; + +struct LIBPROTOBUF_EXPORT AlphaNum { + const char *piece_data_; // move these to string_ref eventually + size_t piece_size_; // move these to string_ref eventually + + char digits[kFastToBufferSize]; + + // No bool ctor -- bools convert to an integral type. + // A bool ctor would also convert incoming pointers (bletch). 
+
+  AlphaNum(int32 i32)
+      : piece_data_(digits),
+        piece_size_(FastInt32ToBufferLeft(i32, digits) - &digits[0]) {}
+  AlphaNum(uint32 u32)
+      : piece_data_(digits),
+        piece_size_(FastUInt32ToBufferLeft(u32, digits) - &digits[0]) {}
+  AlphaNum(int64 i64)
+      : piece_data_(digits),
+        piece_size_(FastInt64ToBufferLeft(i64, digits) - &digits[0]) {}
+  AlphaNum(uint64 u64)
+      : piece_data_(digits),
+        piece_size_(FastUInt64ToBufferLeft(u64, digits) - &digits[0]) {}
+
+  AlphaNum(float f)
+      : piece_data_(digits), piece_size_(strlen(FloatToBuffer(f, digits))) {}
+  AlphaNum(double f)
+      : piece_data_(digits), piece_size_(strlen(DoubleToBuffer(f, digits))) {}
+
+  AlphaNum(Hex hex);
+
+  AlphaNum(const char* c_str)
+      : piece_data_(c_str), piece_size_(strlen(c_str)) {}
+  // TODO: Add a string_ref constructor, eventually
+  // AlphaNum(const StringPiece &pc) : piece(pc) {}
+
+  AlphaNum(const string& str)
+      : piece_data_(str.data()), piece_size_(str.size()) {}
+
+  AlphaNum(StringPiece str)
+      : piece_data_(str.data()), piece_size_(str.size()) {}
+
+  AlphaNum(internal::StringPiecePod str)
+      : piece_data_(str.data()), piece_size_(str.size()) {}
+
+  size_t size() const { return piece_size_; }
+  const char *data() const { return piece_data_; }
+
+ private:
+  // Use ":" not ':'
+  AlphaNum(char c);  // NOLINT(runtime/explicit)
+
+  // Disallow copy and assign.
+  AlphaNum(const AlphaNum&);
+  void operator=(const AlphaNum&);
+};
+
+}  // namespace strings
+
+using strings::AlphaNum;
+
+// ----------------------------------------------------------------------
+// StrCat()
+//    This merges the given strings or numbers, with no delimiter.  This
+//    is designed to be the fastest possible way to construct a string out
+//    of a mix of raw C strings, strings, bool values,
+//    and numeric values.
+//
+//    Don't use this for user-visible strings.  The localization process
+//    works poorly on strings built up out of fragments.
+//
+//    For clarity and performance, don't use StrCat when appending to a
+//    string.  In particular, avoid using any of these (anti-)patterns:
+//      str.append(StrCat(...))
+//      str += StrCat(...)
+//      str = StrCat(str, ...)
+//    where the last is the worst, with the potential to change a loop
+//    from a linear time operation with O(1) dynamic allocations into a
+//    quadratic time operation with O(n) dynamic allocations.  StrAppend
+//    is a better choice than any of the above, subject to the restriction
+//    of StrAppend(&str, a, b, c, ...) that none of the a, b, c, ... may
+//    be a reference into str.
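+//
+//    Illustrative usage (names are hypothetical):
+//      string path = StrCat(dir, "/", base, ".", ext);  // one allocation
+//      for (int i = 0; i < n; ++i) {
+//        StrAppend(&out, items[i], "\n");               // linear, not quadratic
+//      }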
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f,
+                                 const AlphaNum& g);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f,
+                                 const AlphaNum& g, const AlphaNum& h);
+LIBPROTOBUF_EXPORT string StrCat(const AlphaNum& a, const AlphaNum& b,
+                                 const AlphaNum& c, const AlphaNum& d,
+                                 const AlphaNum& e, const AlphaNum& f,
+                                 const AlphaNum& g, const AlphaNum& h,
+                                 const AlphaNum& i);
+
+inline string StrCat(const AlphaNum& a) { return string(a.data(), a.size()); }
+
+// ----------------------------------------------------------------------
+// StrAppend()
+//    Same as above, but adds the output to the given string.
+//    WARNING: For speed, StrAppend does not try to check each of its input
+//    arguments to be sure that they are not a subset of the string being
+//    appended to.  That is, while this will work:
+//
+//    string s = "foo";
+//    s += s;
+//
+//    This will not (necessarily) work:
+//
+//    string s = "foo";
+//    StrAppend(&s, s);
+//
+//    Note: while StrCat supports appending up to 9 arguments, StrAppend
+//    is currently limited to 4.  That's rarely an issue except when
+//    automatically transforming StrCat to StrAppend, and can easily be
+//    worked around as consecutive calls to StrAppend are quite efficient.
+// ----------------------------------------------------------------------
+
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a);
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a,
+                                  const AlphaNum& b);
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a,
+                                  const AlphaNum& b, const AlphaNum& c);
+LIBPROTOBUF_EXPORT void StrAppend(string* dest, const AlphaNum& a,
+                                  const AlphaNum& b, const AlphaNum& c,
+                                  const AlphaNum& d);
+
+// ----------------------------------------------------------------------
+// Join()
+//    These methods concatenate a range of components into a C++ string, using
+//    the C-string "delim" as a separator between components.
+// ----------------------------------------------------------------------
+template <typename Iterator>
+void Join(Iterator start, Iterator end,
+          const char* delim, string* result) {
+  for (Iterator it = start; it != end; ++it) {
+    if (it != start) {
+      result->append(delim);
+    }
+    StrAppend(result, *it);
+  }
+}
+
+template <typename Range>
+string Join(const Range& components,
+            const char* delim) {
+  string result;
+  Join(components.begin(), components.end(), delim, &result);
+  return result;
+}
+
+// ----------------------------------------------------------------------
+// ToHex()
+//    Return a lower-case hex string representation of the given integer.
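+//    For example, ToHex(255) returns "ff" and ToHex(4096) returns "1000".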
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT string ToHex(uint64 num);
+
+// ----------------------------------------------------------------------
+// GlobalReplaceSubstring()
+//    Replaces all instances of a substring in a string.  Does nothing
+//    if 'substring' is empty.  Returns the number of replacements.
+//
+//    NOTE: The string pieces must not overlap s.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT int GlobalReplaceSubstring(const string& substring,
+                                              const string& replacement,
+                                              string* s);
+
+// ----------------------------------------------------------------------
+// Base64Unescape()
+//    Converts "src" which is encoded in Base64 to its binary equivalent and
+//    writes it to "dest". If src contains invalid characters, dest is cleared
+//    and the function returns false. Returns true on success.
+// ----------------------------------------------------------------------
+LIBPROTOBUF_EXPORT bool Base64Unescape(StringPiece src, string* dest);
+
+// ----------------------------------------------------------------------
+// WebSafeBase64Unescape()
+//    This is a variation of Base64Unescape which uses '-' instead of '+', and
+//    '_' instead of '/'. src is not null terminated, instead specify len. I
+//    recommend that slen < szdest, but we honor szdest anyway.
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+struct DateTime {
+  int year;
+  int month;
+  int day;
+  int hour;
+  int minute;
+  int second;
+};
+
+// Converts a timestamp (seconds elapsed since 1970-01-01T00:00:00, could be
+// negative to represent time before 1970-01-01) to DateTime. Returns false
+// if the timestamp is not in the range between 0001-01-01T00:00:00 and
+// 9999-12-31T23:59:59.
+bool LIBPROTOBUF_EXPORT SecondsToDateTime(int64 seconds, DateTime* time);
+// Converts DateTime to a timestamp (seconds since 1970-01-01T00:00:00).
+// Returns false if the DateTime is not valid or is not in the valid range.
+bool LIBPROTOBUF_EXPORT DateTimeToSeconds(const DateTime& time, int64* seconds);
+
+void LIBPROTOBUF_EXPORT GetCurrentTime(int64* seconds, int32* nanos);
+
+// Formats a time string in RFC3339 format.
+//
+// For example, "2015-05-20T13:29:35.120Z". For nanos, 0, 3, 6 or 9 fractional
+// digits will be used depending on how many are required to represent the
+// exact value.
+//
+// Note that "nanos" must be in the range of [0, 999999999].
+string LIBPROTOBUF_EXPORT FormatTime(int64 seconds, int32 nanos);
+// Parses a time string. This method accepts RFC3339 date/time string with UTC
+// offset. For example, "2015-05-20T13:29:35.120-08:00".
+bool LIBPROTOBUF_EXPORT ParseTime(const string& value, int64* seconds, int32* nanos);
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#endif  // GOOGLE_PROTOBUF_STUBS_TIME_H_
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/wire_format_lite.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/wire_format_lite.h
new file mode 100644
index 0000000000000000000000000000000000000000..77eaa9a68499e1f3ca44c04b95b3e96f9500b440
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/wire_format_lite.h
@@ -0,0 +1,893 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author:  kenton@google.com (Kenton Varda)
+//          atenasio@google.com (Chris Atenasio) (ZigZag transform)
+//          wink@google.com (Wink Saville) (refactored from wire_format.h)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+//
+// This header is logically internal, but is made public because it is used
+// from protocol-compiler-generated code, which may reside in other components.
+
+#ifndef GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_H__
+#define GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_H__
+
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/message_lite.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/repeated_field.h>
+
+// Do UTF-8 validation on string type in Debug build only
+#ifndef NDEBUG
+#define GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
+#endif
+
+// Avoid conflict with iOS where <ConditionalMacros.h> #defines TYPE_BOOL.
+//
+// If someone needs the macro TYPE_BOOL in a file that includes this header,
+// it's possible to bring it back using push/pop_macro as follows.
+//
+// #pragma push_macro("TYPE_BOOL")
+// #include this header and/or all headers that need the macro to be undefined.
+// #pragma pop_macro("TYPE_BOOL")
+#undef TYPE_BOOL
+
+namespace google {
+
+namespace protobuf {
+  template <typename Element> class RepeatedField;  // repeated_field.h
+}
+
+namespace protobuf {
+namespace internal {
+
+class StringPieceField;
+
+// This class is for internal use by the protocol buffer library and by
+// protocol-compiler-generated message classes.  It must not be called
+// directly by clients.
+//
+// This class contains helpers for implementing the binary protocol buffer
+// wire format without the need for reflection.  Use WireFormat when using
+// reflection.
+//
+// This class is really a namespace that contains only static methods.
+class LIBPROTOBUF_EXPORT WireFormatLite {
+ public:
+
+  // -----------------------------------------------------------------
+  // Helper constants and functions related to the format.  These are
+  // mostly meant for internal and generated code to use.
+ + // The wire format is composed of a sequence of tag/value pairs, each + // of which contains the value of one field (or one element of a repeated + // field). Each tag is encoded as a varint. The lower bits of the tag + // identify its wire type, which specifies the format of the data to follow. + // The rest of the bits contain the field number. Each type of field (as + // declared by FieldDescriptor::Type, in descriptor.h) maps to one of + // these wire types. Immediately following each tag is the field's value, + // encoded in the format specified by the wire type. Because the tag + // identifies the encoding of this data, it is possible to skip + // unrecognized fields for forwards compatibility. + + enum WireType { + WIRETYPE_VARINT = 0, + WIRETYPE_FIXED64 = 1, + WIRETYPE_LENGTH_DELIMITED = 2, + WIRETYPE_START_GROUP = 3, + WIRETYPE_END_GROUP = 4, + WIRETYPE_FIXED32 = 5, + }; + + // Lite alternative to FieldDescriptor::Type. Must be kept in sync. + enum FieldType { + TYPE_DOUBLE = 1, + TYPE_FLOAT = 2, + TYPE_INT64 = 3, + TYPE_UINT64 = 4, + TYPE_INT32 = 5, + TYPE_FIXED64 = 6, + TYPE_FIXED32 = 7, + TYPE_BOOL = 8, + TYPE_STRING = 9, + TYPE_GROUP = 10, + TYPE_MESSAGE = 11, + TYPE_BYTES = 12, + TYPE_UINT32 = 13, + TYPE_ENUM = 14, + TYPE_SFIXED32 = 15, + TYPE_SFIXED64 = 16, + TYPE_SINT32 = 17, + TYPE_SINT64 = 18, + MAX_FIELD_TYPE = 18, + }; + + // Lite alternative to FieldDescriptor::CppType. Must be kept in sync. + enum CppType { + CPPTYPE_INT32 = 1, + CPPTYPE_INT64 = 2, + CPPTYPE_UINT32 = 3, + CPPTYPE_UINT64 = 4, + CPPTYPE_DOUBLE = 5, + CPPTYPE_FLOAT = 6, + CPPTYPE_BOOL = 7, + CPPTYPE_ENUM = 8, + CPPTYPE_STRING = 9, + CPPTYPE_MESSAGE = 10, + MAX_CPPTYPE = 10, + }; + + // Helper method to get the CppType for a particular Type. + static CppType FieldTypeToCppType(FieldType type); + + // Given a FieldDescriptor::Type return its WireType + static inline WireFormatLite::WireType WireTypeForFieldType( + WireFormatLite::FieldType type) { + return kWireTypeForFieldType[type]; + } + + // Number of bits in a tag which identify the wire type. + static const int kTagTypeBits = 3; + // Mask for those bits. + static const uint32 kTagTypeMask = (1 << kTagTypeBits) - 1; + + // Helper functions for encoding and decoding tags. (Inlined below and in + // _inl.h) + // + // This is different from MakeTag(field->number(), field->type()) in the case + // of packed repeated fields. + static uint32 MakeTag(int field_number, WireType type); + static WireType GetTagWireType(uint32 tag); + static int GetTagFieldNumber(uint32 tag); + + // Compute the byte size of a tag. For groups, this includes both the start + // and end tags. + static inline size_t TagSize(int field_number, + WireFormatLite::FieldType type); + + // Skips a field value with the given tag. The input should start + // positioned immediately after the tag. Skipped values are simply discarded, + // not recorded anywhere. See WireFormat::SkipField() for a version that + // records to an UnknownFieldSet. + static bool SkipField(io::CodedInputStream* input, uint32 tag); + + // Skips a field value with the given tag. The input should start + // positioned immediately after the tag. Skipped values are recorded to a + // CodedOutputStream. + static bool SkipField(io::CodedInputStream* input, uint32 tag, + io::CodedOutputStream* output); + + // Reads and ignores a message from the input. Skipped values are simply + // discarded, not recorded anywhere. See WireFormat::SkipMessage() for a + // version that records to an UnknownFieldSet. 
+  static bool SkipMessage(io::CodedInputStream* input);
+
+  // Reads and ignores a message from the input.  Skipped values are recorded
+  // to a CodedOutputStream.
+  static bool SkipMessage(io::CodedInputStream* input,
+                          io::CodedOutputStream* output);
+
+// This macro does the same thing as WireFormatLite::MakeTag(), but the
+// result is usable as a compile-time constant, which makes it usable
+// as a switch case or a template input.  WireFormatLite::MakeTag() is more
+// type-safe, though, so prefer it if possible.
+#define GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(FIELD_NUMBER, TYPE)                  \
+  static_cast<uint32>(                                                   \
+    (static_cast<uint32>(FIELD_NUMBER) << ::google::protobuf::internal::WireFormatLite::kTagTypeBits) \
+      | (TYPE))
+
+  // These are the tags for the old MessageSet format, which was defined as:
+  //   message MessageSet {
+  //     repeated group Item = 1 {
+  //       required int32 type_id = 2;
+  //       required string message = 3;
+  //     }
+  //   }
+  static const int kMessageSetItemNumber = 1;
+  static const int kMessageSetTypeIdNumber = 2;
+  static const int kMessageSetMessageNumber = 3;
+  static const int kMessageSetItemStartTag =
+    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetItemNumber,
+                                         WireFormatLite::WIRETYPE_START_GROUP);
+  static const int kMessageSetItemEndTag =
+    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetItemNumber,
+                                         WireFormatLite::WIRETYPE_END_GROUP);
+  static const int kMessageSetTypeIdTag =
+    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetTypeIdNumber,
+                                         WireFormatLite::WIRETYPE_VARINT);
+  static const int kMessageSetMessageTag =
+    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetMessageNumber,
+                                         WireFormatLite::WIRETYPE_LENGTH_DELIMITED);
+
+  // Byte size of all tags of a MessageSet::Item combined.
+  static const size_t kMessageSetItemTagsSize;
+
+  // Helper functions for converting between floats/doubles and IEEE-754
+  // uint32s/uint64s so that they can be written.  (Assumes your platform
+  // uses IEEE-754 floats.)
+  static uint32 EncodeFloat(float value);
+  static float DecodeFloat(uint32 value);
+  static uint64 EncodeDouble(double value);
+  static double DecodeDouble(uint64 value);
+
+  // Helper functions for mapping signed integers to unsigned integers in
+  // such a way that numbers with small magnitudes will encode to smaller
+  // varints.  If you simply static_cast a negative number to an unsigned
+  // number and varint-encode it, it will always take 10 bytes, defeating
+  // the purpose of varint.  So, for the "sint32" and "sint64" field types,
+  // we ZigZag-encode the values.
+  static uint32 ZigZagEncode32(int32 n);
+  static int32 ZigZagDecode32(uint32 n);
+  static uint64 ZigZagEncode64(int64 n);
+  static int64 ZigZagDecode64(uint64 n);
+
+  // =================================================================
+  // Methods for reading/writing individual field.  The implementations
+  // of these methods are defined in wire_format_lite_inl.h; you must #include
+  // that file to use these.
+
+#ifdef NDEBUG
+#define INL GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
+#else
+// Avoid excessive inlining in non-optimized builds. Without other optimizations
+// the inlining is not going to provide benefits anyway and the huge resulting
+// functions, especially in the proto-generated serialization functions, produce
+// stack frames so large that many tests run into stack overflows (b/32192897).
+#define INL
+#endif
+
+  // Read fields, not including tags.  The assumption is that you already
+  // read the tag to determine what field to read.
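+  //
+  // For example, a hand-rolled parse loop might look roughly like this
+  // (a sketch only; generated code is more involved):
+  //
+  //   uint32 tag;
+  //   while ((tag = input->ReadTag()) != 0) {
+  //     switch (WireFormatLite::GetTagFieldNumber(tag)) {
+  //       case 1:  // e.g. an int32 field with number 1
+  //         WireFormatLite::ReadPrimitive<int32, WireFormatLite::TYPE_INT32>(
+  //             input, &my_value);
+  //         break;
+  //       default:
+  //         WireFormatLite::SkipField(input, tag);
+  //     }
+  //   }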
+
+  // For primitive fields, we just use a templatized routine parameterized by
+  // the represented type and the FieldType. These are specialized with the
+  // appropriate definition for each declared type.
+  template <typename CType, enum FieldType DeclaredType>
+  INL static bool ReadPrimitive(io::CodedInputStream* input, CType* value);
+
+  // Reads repeated primitive values, with optimizations for repeats.
+  // tag_size and tag should both be compile-time constants provided by the
+  // protocol compiler.
+  template <typename CType, enum FieldType DeclaredType>
+  INL static bool ReadRepeatedPrimitive(int tag_size, uint32 tag,
+                                        io::CodedInputStream* input,
+                                        RepeatedField<CType>* value);
+
+  // Identical to ReadRepeatedPrimitive, except will not inline the
+  // implementation.
+  template <typename CType, enum FieldType DeclaredType>
+  static bool ReadRepeatedPrimitiveNoInline(int tag_size, uint32 tag,
+                                            io::CodedInputStream* input,
+                                            RepeatedField<CType>* value);
+
+  // Reads a primitive value directly from the provided buffer. It returns a
+  // pointer past the segment of data that was read.
+  //
+  // This is only implemented for the types with fixed wire size, e.g.
+  // float, double, and the (s)fixed* types.
+  template <typename CType, enum FieldType DeclaredType> INL
+  static const uint8* ReadPrimitiveFromArray(const uint8* buffer, CType* value);
+
+  // Reads a primitive packed field.
+  //
+  // This is only implemented for packable types.
+  template <typename CType, enum FieldType DeclaredType>
+  INL static bool ReadPackedPrimitive(io::CodedInputStream* input,
+                                      RepeatedField<CType>* value);
+
+  // Identical to ReadPackedPrimitive, except will not inline the
+  // implementation.
+  template <typename CType, enum FieldType DeclaredType>
+  static bool ReadPackedPrimitiveNoInline(io::CodedInputStream* input,
+                                          RepeatedField<CType>* value);
+
+  // Read a packed enum field. If the is_valid function is not NULL, values for
+  // which is_valid(value) returns false are silently dropped.
+  static bool ReadPackedEnumNoInline(io::CodedInputStream* input,
+                                     bool (*is_valid)(int),
+                                     RepeatedField<int>* values);
+
+  // Read a packed enum field. If the is_valid function is not NULL, values for
+  // which is_valid(value) returns false are appended to unknown_fields_stream.
+  static bool ReadPackedEnumPreserveUnknowns(
+      io::CodedInputStream* input, int field_number, bool (*is_valid)(int),
+      io::CodedOutputStream* unknown_fields_stream, RepeatedField<int>* values);
+
+  // Read a string.  ReadString(..., string* value) requires an existing string.
+  static inline bool ReadString(io::CodedInputStream* input, string* value);
+  // ReadString(..., string** p) is internal-only, and should only be called
+  // from generated code. It starts by setting *p to "new string"
+  // if *p == &GetEmptyStringAlreadyInited().  It then invokes
+  // ReadString(io::CodedInputStream* input, *p).  This is useful for reducing
+  // code size.
+  static inline bool ReadString(io::CodedInputStream* input, string** p);
+  // Analogous to ReadString().
+  static bool ReadBytes(io::CodedInputStream* input, string* value);
+  static bool ReadBytes(io::CodedInputStream* input, string** p);
+
+  enum Operation {
+    PARSE = 0,
+    SERIALIZE = 1,
+  };
+
+  // Returns true if the data is valid UTF-8.
+  static bool VerifyUtf8String(const char* data, int size,
+                               Operation op,
+                               const char* field_name);
+
+  template<typename MessageType>
+  static inline bool ReadGroup(int field_number, io::CodedInputStream* input,
+                               MessageType* value);
+
+  template<typename MessageType>
+  static inline bool ReadMessage(io::CodedInputStream* input,
+                                 MessageType* value);
+
+  // Do not use.
+  template<typename MessageType>
+  static inline bool ReadGroupNoVirtual(int field_number,
+                                        io::CodedInputStream* input,
+                                        MessageType* value) {
+    return ReadGroup(field_number, input, value);
+  }
+
+  template<typename MessageType>
+  static inline bool ReadMessageNoVirtual(io::CodedInputStream* input,
+                                          MessageType* value) {
+    return ReadMessage(input, value);
+  }
+
+  // Write a tag.  The Write*() functions typically include the tag, so
+  // normally there's no need to call this unless using the Write*NoTag()
+  // variants.
+  INL static void WriteTag(int field_number, WireType type,
+                           io::CodedOutputStream* output);
+
+  // Write fields, without tags.
+  INL static void WriteInt32NoTag(int32 value, io::CodedOutputStream* output);
+  INL static void WriteInt64NoTag(int64 value, io::CodedOutputStream* output);
+  INL static void WriteUInt32NoTag(uint32 value, io::CodedOutputStream* output);
+  INL static void WriteUInt64NoTag(uint64 value, io::CodedOutputStream* output);
+  INL static void WriteSInt32NoTag(int32 value, io::CodedOutputStream* output);
+  INL static void WriteSInt64NoTag(int64 value, io::CodedOutputStream* output);
+  INL static void WriteFixed32NoTag(uint32 value,
+                                    io::CodedOutputStream* output);
+  INL static void WriteFixed64NoTag(uint64 value,
+                                    io::CodedOutputStream* output);
+  INL static void WriteSFixed32NoTag(int32 value,
+                                     io::CodedOutputStream* output);
+  INL static void WriteSFixed64NoTag(int64 value,
+                                     io::CodedOutputStream* output);
+  INL static void WriteFloatNoTag(float value, io::CodedOutputStream* output);
+  INL static void WriteDoubleNoTag(double value, io::CodedOutputStream* output);
+  INL static void WriteBoolNoTag(bool value, io::CodedOutputStream* output);
+  INL static void WriteEnumNoTag(int value, io::CodedOutputStream* output);
+
+  // Write array of primitive fields, without tags
+  static void WriteFloatArray(const float* a, int n,
+                              io::CodedOutputStream* output);
+  static void WriteDoubleArray(const double* a, int n,
+                               io::CodedOutputStream* output);
+  static void WriteFixed32Array(const uint32* a, int n,
+                                io::CodedOutputStream* output);
+  static void WriteFixed64Array(const uint64* a, int n,
+                                io::CodedOutputStream* output);
+  static void WriteSFixed32Array(const int32* a, int n,
+                                 io::CodedOutputStream* output);
+  static void WriteSFixed64Array(const int64* a, int n,
+                                 io::CodedOutputStream* output);
+  static void WriteBoolArray(const bool* a, int n,
+                             io::CodedOutputStream* output);
+
+  // Write fields, including tags.
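+  // For example, WriteInt32(1, -1, output) emits the tag byte 0x08
+  // (field number 1, WIRETYPE_VARINT) followed by the ten-byte varint
+  // encoding of -1 (nine 0xFF bytes, then 0x01).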
+  static void WriteInt32(int field_number, int32 value,
+                         io::CodedOutputStream* output);
+  static void WriteInt64(int field_number, int64 value,
+                         io::CodedOutputStream* output);
+  static void WriteUInt32(int field_number, uint32 value,
+                          io::CodedOutputStream* output);
+  static void WriteUInt64(int field_number, uint64 value,
+                          io::CodedOutputStream* output);
+  static void WriteSInt32(int field_number, int32 value,
+                          io::CodedOutputStream* output);
+  static void WriteSInt64(int field_number, int64 value,
+                          io::CodedOutputStream* output);
+  static void WriteFixed32(int field_number, uint32 value,
+                           io::CodedOutputStream* output);
+  static void WriteFixed64(int field_number, uint64 value,
+                           io::CodedOutputStream* output);
+  static void WriteSFixed32(int field_number, int32 value,
+                            io::CodedOutputStream* output);
+  static void WriteSFixed64(int field_number, int64 value,
+                            io::CodedOutputStream* output);
+  static void WriteFloat(int field_number, float value,
+                         io::CodedOutputStream* output);
+  static void WriteDouble(int field_number, double value,
+                          io::CodedOutputStream* output);
+  static void WriteBool(int field_number, bool value,
+                        io::CodedOutputStream* output);
+  static void WriteEnum(int field_number, int value,
+                        io::CodedOutputStream* output);
+
+  static void WriteString(int field_number, const string& value,
+                          io::CodedOutputStream* output);
+  static void WriteBytes(int field_number, const string& value,
+                         io::CodedOutputStream* output);
+  static void WriteStringMaybeAliased(int field_number, const string& value,
+                                      io::CodedOutputStream* output);
+  static void WriteBytesMaybeAliased(int field_number, const string& value,
+                                     io::CodedOutputStream* output);
+
+  static void WriteGroup(int field_number, const MessageLite& value,
+                         io::CodedOutputStream* output);
+  static void WriteMessage(int field_number, const MessageLite& value,
+                           io::CodedOutputStream* output);
+  // Like above, but these will check if the output stream has enough
+  // space to write directly to a flat array.
+  static void WriteGroupMaybeToArray(int field_number, const MessageLite& value,
+                                     io::CodedOutputStream* output);
+  static void WriteMessageMaybeToArray(int field_number,
+                                       const MessageLite& value,
+                                       io::CodedOutputStream* output);
+
+  // Like above, but de-virtualize the call to SerializeWithCachedSizes().  The
+  // pointer must point at an instance of MessageType, *not* a subclass (or
+  // the subclass must not override SerializeWithCachedSizes()).
+  template<typename MessageType>
+  static inline void WriteGroupNoVirtual(int field_number,
+                                         const MessageType& value,
+                                         io::CodedOutputStream* output);
+  template<typename MessageType>
+  static inline void WriteMessageNoVirtual(int field_number,
+                                           const MessageType& value,
+                                           io::CodedOutputStream* output);
+
+  // Like above, but use only *ToArray methods of CodedOutputStream.
+  INL static uint8* WriteTagToArray(int field_number, WireType type,
+                                    uint8* target);
+
+  // Write fields, without tags.
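+  // Each *ToArray writer returns a pointer one past the last byte it wrote,
+  // so calls chain naturally, e.g.:
+  //   target = WriteTagToArray(2, WIRETYPE_VARINT, target);
+  //   target = WriteInt32NoTagToArray(value, target);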
+  INL static uint8* WriteInt32NoTagToArray(int32 value, uint8* target);
+  INL static uint8* WriteInt64NoTagToArray(int64 value, uint8* target);
+  INL static uint8* WriteUInt32NoTagToArray(uint32 value, uint8* target);
+  INL static uint8* WriteUInt64NoTagToArray(uint64 value, uint8* target);
+  INL static uint8* WriteSInt32NoTagToArray(int32 value, uint8* target);
+  INL static uint8* WriteSInt64NoTagToArray(int64 value, uint8* target);
+  INL static uint8* WriteFixed32NoTagToArray(uint32 value, uint8* target);
+  INL static uint8* WriteFixed64NoTagToArray(uint64 value, uint8* target);
+  INL static uint8* WriteSFixed32NoTagToArray(int32 value, uint8* target);
+  INL static uint8* WriteSFixed64NoTagToArray(int64 value, uint8* target);
+  INL static uint8* WriteFloatNoTagToArray(float value, uint8* target);
+  INL static uint8* WriteDoubleNoTagToArray(double value, uint8* target);
+  INL static uint8* WriteBoolNoTagToArray(bool value, uint8* target);
+  INL static uint8* WriteEnumNoTagToArray(int value, uint8* target);
+
+  // Write fields, without tags.  These require that value.size() > 0.
+  template<typename T>
+  INL static uint8* WritePrimitiveNoTagToArray(
+      const RepeatedField<T>& value,
+      uint8* (*Writer)(T, uint8*), uint8* target);
+  template<typename T>
+  INL static uint8* WriteFixedNoTagToArray(
+      const RepeatedField<T>& value,
+      uint8* (*Writer)(T, uint8*), uint8* target);
+
+  INL static uint8* WriteInt32NoTagToArray(
+      const RepeatedField< int32>& value, uint8* output);
+  INL static uint8* WriteInt64NoTagToArray(
+      const RepeatedField< int64>& value, uint8* output);
+  INL static uint8* WriteUInt32NoTagToArray(
+      const RepeatedField<uint32>& value, uint8* output);
+  INL static uint8* WriteUInt64NoTagToArray(
+      const RepeatedField<uint64>& value, uint8* output);
+  INL static uint8* WriteSInt32NoTagToArray(
+      const RepeatedField< int32>& value, uint8* output);
+  INL static uint8* WriteSInt64NoTagToArray(
+      const RepeatedField< int64>& value, uint8* output);
+  INL static uint8* WriteFixed32NoTagToArray(
+      const RepeatedField<uint32>& value, uint8* output);
+  INL static uint8* WriteFixed64NoTagToArray(
+      const RepeatedField<uint64>& value, uint8* output);
+  INL static uint8* WriteSFixed32NoTagToArray(
+      const RepeatedField< int32>& value, uint8* output);
+  INL static uint8* WriteSFixed64NoTagToArray(
+      const RepeatedField< int64>& value, uint8* output);
+  INL static uint8* WriteFloatNoTagToArray(
+      const RepeatedField< float>& value, uint8* output);
+  INL static uint8* WriteDoubleNoTagToArray(
+      const RepeatedField<double>& value, uint8* output);
+  INL static uint8* WriteBoolNoTagToArray(
+      const RepeatedField< bool>& value, uint8* output);
+  INL static uint8* WriteEnumNoTagToArray(
+      const RepeatedField< int>& value, uint8* output);
+
+  // Write fields, including tags.
+ INL static uint8* WriteInt32ToArray(int field_number, int32 value, + uint8* target); + INL static uint8* WriteInt64ToArray(int field_number, int64 value, + uint8* target); + INL static uint8* WriteUInt32ToArray(int field_number, uint32 value, + uint8* target); + INL static uint8* WriteUInt64ToArray(int field_number, uint64 value, + uint8* target); + INL static uint8* WriteSInt32ToArray(int field_number, int32 value, + uint8* target); + INL static uint8* WriteSInt64ToArray(int field_number, int64 value, + uint8* target); + INL static uint8* WriteFixed32ToArray(int field_number, uint32 value, + uint8* target); + INL static uint8* WriteFixed64ToArray(int field_number, uint64 value, + uint8* target); + INL static uint8* WriteSFixed32ToArray(int field_number, int32 value, + uint8* target); + INL static uint8* WriteSFixed64ToArray(int field_number, int64 value, + uint8* target); + INL static uint8* WriteFloatToArray(int field_number, float value, + uint8* target); + INL static uint8* WriteDoubleToArray(int field_number, double value, + uint8* target); + INL static uint8* WriteBoolToArray(int field_number, bool value, + uint8* target); + INL static uint8* WriteEnumToArray(int field_number, int value, + uint8* target); + + template + INL static uint8* WritePrimitiveToArray( + int field_number, + const RepeatedField& value, + uint8* (*Writer)(int, T, uint8*), uint8* target); + + INL static uint8* WriteInt32ToArray( + int field_number, const RepeatedField< int32>& value, uint8* output); + INL static uint8* WriteInt64ToArray( + int field_number, const RepeatedField< int64>& value, uint8* output); + INL static uint8* WriteUInt32ToArray( + int field_number, const RepeatedField& value, uint8* output); + INL static uint8* WriteUInt64ToArray( + int field_number, const RepeatedField& value, uint8* output); + INL static uint8* WriteSInt32ToArray( + int field_number, const RepeatedField< int32>& value, uint8* output); + INL static uint8* WriteSInt64ToArray( + int field_number, const RepeatedField< int64>& value, uint8* output); + INL static uint8* WriteFixed32ToArray( + int field_number, const RepeatedField& value, uint8* output); + INL static uint8* WriteFixed64ToArray( + int field_number, const RepeatedField& value, uint8* output); + INL static uint8* WriteSFixed32ToArray( + int field_number, const RepeatedField< int32>& value, uint8* output); + INL static uint8* WriteSFixed64ToArray( + int field_number, const RepeatedField< int64>& value, uint8* output); + INL static uint8* WriteFloatToArray( + int field_number, const RepeatedField< float>& value, uint8* output); + INL static uint8* WriteDoubleToArray( + int field_number, const RepeatedField& value, uint8* output); + INL static uint8* WriteBoolToArray( + int field_number, const RepeatedField< bool>& value, uint8* output); + INL static uint8* WriteEnumToArray( + int field_number, const RepeatedField< int>& value, uint8* output); + + INL static uint8* WriteStringToArray(int field_number, const string& value, + uint8* target); + INL static uint8* WriteBytesToArray(int field_number, const string& value, + uint8* target); + + // Whether to serialize deterministically (e.g., map keys are + // sorted) is a property of a CodedOutputStream, and in the process + // of serialization, the "ToArray" variants may be invoked. But they don't + // have a CodedOutputStream available, so they get an additional parameter + // telling them whether to serialize deterministically. 
+ template + INL static uint8* InternalWriteGroupToArray(int field_number, + const MessageType& value, + bool deterministic, + uint8* target); + template + INL static uint8* InternalWriteMessageToArray(int field_number, + const MessageType& value, + bool deterministic, + uint8* target); + + // Like above, but de-virtualize the call to SerializeWithCachedSizes(). The + // pointer must point at an instance of MessageType, *not* a subclass (or + // the subclass must not override SerializeWithCachedSizes()). + template + INL static uint8* InternalWriteGroupNoVirtualToArray(int field_number, + const MessageType& value, + bool deterministic, + uint8* target); + template + INL static uint8* InternalWriteMessageNoVirtualToArray( + int field_number, const MessageType& value, bool deterministic, + uint8* target); + + // For backward-compatibility, the last four methods also have versions + // that are non-deterministic always. + INL static uint8* WriteGroupToArray(int field_number, + const MessageLite& value, uint8* target) { + return InternalWriteGroupToArray(field_number, value, false, target); + } + INL static uint8* WriteMessageToArray(int field_number, + const MessageLite& value, + uint8* target) { + return InternalWriteMessageToArray(field_number, value, false, target); + } + template + INL static uint8* WriteGroupNoVirtualToArray(int field_number, + const MessageType& value, + uint8* target) { + return InternalWriteGroupNoVirtualToArray(field_number, value, false, + target); + } + template + INL static uint8* WriteMessageNoVirtualToArray(int field_number, + const MessageType& value, + uint8* target) { + return InternalWriteMessageNoVirtualToArray(field_number, value, false, + target); + } + +#undef INL + + // Compute the byte size of a field. The XxSize() functions do NOT include + // the tag, so you must also call TagSize(). (This is because, for repeated + // fields, you should only call TagSize() once and multiply it by the element + // count, but you may have to call XxSize() for each individual element.) + static inline size_t Int32Size ( int32 value); + static inline size_t Int64Size ( int64 value); + static inline size_t UInt32Size (uint32 value); + static inline size_t UInt64Size (uint64 value); + static inline size_t SInt32Size ( int32 value); + static inline size_t SInt64Size ( int64 value); + static inline size_t EnumSize ( int value); + + static size_t Int32Size (const RepeatedField< int32>& value); + static size_t Int64Size (const RepeatedField< int64>& value); + static size_t UInt32Size(const RepeatedField& value); + static size_t UInt64Size(const RepeatedField& value); + static size_t SInt32Size(const RepeatedField< int32>& value); + static size_t SInt64Size(const RepeatedField< int64>& value); + static size_t EnumSize (const RepeatedField< int>& value); + + // These types always have the same size. + static const size_t kFixed32Size = 4; + static const size_t kFixed64Size = 8; + static const size_t kSFixed32Size = 4; + static const size_t kSFixed64Size = 8; + static const size_t kFloatSize = 4; + static const size_t kDoubleSize = 8; + static const size_t kBoolSize = 1; + + static inline size_t StringSize(const string& value); + static inline size_t BytesSize (const string& value); + + template + static inline size_t GroupSize (const MessageType& value); + template + static inline size_t MessageSize(const MessageType& value); + + // Like above, but de-virtualize the call to ByteSize(). 
The + // pointer must point at an instance of MessageType, *not* a subclass (or + // the subclass must not override ByteSize()). + template + static inline size_t GroupSizeNoVirtual (const MessageType& value); + template + static inline size_t MessageSizeNoVirtual(const MessageType& value); + + // Given the length of data, calculate the byte size of the data on the + // wire if we encode the data as a length delimited field. + static inline size_t LengthDelimitedSize(size_t length); + + private: + // A helper method for the repeated primitive reader. This method has + // optimizations for primitive types that have fixed size on the wire, and + // can be read using potentially faster paths. + template + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE + static bool ReadRepeatedFixedSizePrimitive( + int tag_size, + uint32 tag, + google::protobuf::io::CodedInputStream* input, + RepeatedField* value); + + // Like ReadRepeatedFixedSizePrimitive but for packed primitive fields. + template + GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE + static bool ReadPackedFixedSizePrimitive( + google::protobuf::io::CodedInputStream* input, RepeatedField* value); + + static const CppType kFieldTypeToCppTypeMap[]; + static const WireFormatLite::WireType kWireTypeForFieldType[]; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(WireFormatLite); +}; + +// A class which deals with unknown values. The default implementation just +// discards them. WireFormat defines a subclass which writes to an +// UnknownFieldSet. This class is used by ExtensionSet::ParseField(), since +// ExtensionSet is part of the lite library but UnknownFieldSet is not. +class LIBPROTOBUF_EXPORT FieldSkipper { + public: + FieldSkipper() {} + virtual ~FieldSkipper() {} + + // Skip a field whose tag has already been consumed. + virtual bool SkipField(io::CodedInputStream* input, uint32 tag); + + // Skip an entire message or group, up to an end-group tag (which is consumed) + // or end-of-stream. + virtual bool SkipMessage(io::CodedInputStream* input); + + // Deal with an already-parsed unrecognized enum value. The default + // implementation does nothing, but the UnknownFieldSet-based implementation + // saves it as an unknown varint. + virtual void SkipUnknownEnum(int field_number, int value); +}; + +// Subclass of FieldSkipper which saves skipped fields to a CodedOutputStream. 
+ +class LIBPROTOBUF_EXPORT CodedOutputStreamFieldSkipper : public FieldSkipper { + public: + explicit CodedOutputStreamFieldSkipper(io::CodedOutputStream* unknown_fields) + : unknown_fields_(unknown_fields) {} + virtual ~CodedOutputStreamFieldSkipper() {} + + // implements FieldSkipper ----------------------------------------- + virtual bool SkipField(io::CodedInputStream* input, uint32 tag); + virtual bool SkipMessage(io::CodedInputStream* input); + virtual void SkipUnknownEnum(int field_number, int value); + + protected: + io::CodedOutputStream* unknown_fields_; +}; + + +// inline methods ==================================================== + +inline WireFormatLite::CppType +WireFormatLite::FieldTypeToCppType(FieldType type) { + return kFieldTypeToCppTypeMap[type]; +} + +inline uint32 WireFormatLite::MakeTag(int field_number, WireType type) { + return GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(field_number, type); +} + +inline WireFormatLite::WireType WireFormatLite::GetTagWireType(uint32 tag) { + return static_cast(tag & kTagTypeMask); +} + +inline int WireFormatLite::GetTagFieldNumber(uint32 tag) { + return static_cast(tag >> kTagTypeBits); +} + +inline size_t WireFormatLite::TagSize(int field_number, + WireFormatLite::FieldType type) { + size_t result = io::CodedOutputStream::VarintSize32( + static_cast(field_number << kTagTypeBits)); + if (type == TYPE_GROUP) { + // Groups have both a start and an end tag. + return result * 2; + } else { + return result; + } +} + +inline uint32 WireFormatLite::EncodeFloat(float value) { + union {float f; uint32 i;}; + f = value; + return i; +} + +inline float WireFormatLite::DecodeFloat(uint32 value) { + union {float f; uint32 i;}; + i = value; + return f; +} + +inline uint64 WireFormatLite::EncodeDouble(double value) { + union {double f; uint64 i;}; + f = value; + return i; +} + +inline double WireFormatLite::DecodeDouble(uint64 value) { + union {double f; uint64 i;}; + i = value; + return f; +} + +// ZigZag Transform: Encodes signed integers so that they can be +// effectively used with varint encoding. +// +// varint operates on unsigned integers, encoding smaller numbers into +// fewer bytes. If you try to use it on a signed integer, it will treat +// this number as a very large unsigned integer, which means that even +// small signed numbers like -1 will take the maximum number of bytes +// (10) to encode. ZigZagEncode() maps signed integers to unsigned +// in such a way that those with a small absolute value will have smaller +// encoded values, making them appropriate for encoding using varint. +// +// int32 -> uint32 +// ------------------------- +// 0 -> 0 +// -1 -> 1 +// 1 -> 2 +// -2 -> 3 +// ... -> ... 
+// 2147483647 -> 4294967294
+// -2147483648 -> 4294967295
+//
+//        >> encode >>
+//        << decode <<

+inline uint32 WireFormatLite::ZigZagEncode32(int32 n) {
+  // Note: the right-shift must be arithmetic
+  // Note: left shift must be unsigned because of overflow
+  return (static_cast<uint32>(n) << 1) ^ static_cast<uint32>(n >> 31);
+}
+
+inline int32 WireFormatLite::ZigZagDecode32(uint32 n) {
+  // Note: using unsigned types prevents undefined behavior
+  return static_cast<int32>((n >> 1) ^ (~(n & 1) + 1));
+}
+
+inline uint64 WireFormatLite::ZigZagEncode64(int64 n) {
+  // Note: the right-shift must be arithmetic
+  // Note: left shift must be unsigned because of overflow
+  return (static_cast<uint64>(n) << 1) ^ static_cast<uint64>(n >> 63);
+}
+
+inline int64 WireFormatLite::ZigZagDecode64(uint64 n) {
+  // Note: using unsigned types prevents undefined behavior
+  return static_cast<int64>((n >> 1) ^ (~(n & 1) + 1));
+}
+
+// String is for UTF-8 text only, but, even so, ReadString() can simply
+// call ReadBytes().
+
+inline bool WireFormatLite::ReadString(io::CodedInputStream* input,
+                                       string* value) {
+  return ReadBytes(input, value);
+}
+
+inline bool WireFormatLite::ReadString(io::CodedInputStream* input,
+                                       string** p) {
+  return ReadBytes(input, p);
+}
+
+}  // namespace internal
+}  // namespace protobuf
+
+}  // namespace google
+#endif  // GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_H__
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/wire_format_lite_inl.h b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/wire_format_lite_inl.h
new file mode 100644
index 0000000000000000000000000000000000000000..6cd2c2fbe2858a04d7dd8afb93cc58e6107ef946
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/wire_format_lite_inl.h
@@ -0,0 +1,996 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
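[Editor's aside, illustration only and not part of the vendored header that ends above: a minimal, self-contained C++ sketch of the ZigZag mapping implemented by ZigZagEncode32()/ZigZagDecode32(). The names ZigZag32/UnZigZag32 are this sketch's own, not protobuf's; it exists purely to sanity-check the encode table in the comment above.]

```cpp
#include <cassert>
#include <cstdint>

// Mirrors WireFormatLite::ZigZagEncode32 (sketch only).
static uint32_t ZigZag32(int32_t n) {
  // The arithmetic right shift replicates the sign bit across all 32 bits.
  return (static_cast<uint32_t>(n) << 1) ^ static_cast<uint32_t>(n >> 31);
}

// Mirrors WireFormatLite::ZigZagDecode32 (sketch only).
static int32_t UnZigZag32(uint32_t n) {
  // (~(n & 1) + 1) is 0xFFFFFFFF for odd n (negative values), 0 for even n.
  return static_cast<int32_t>((n >> 1) ^ (~(n & 1) + 1));
}

int main() {
  // Small magnitudes map to small codes, as in the table above:
  assert(ZigZag32(0) == 0 && ZigZag32(-1) == 1);
  assert(ZigZag32(1) == 2 && ZigZag32(-2) == 3);
  // The extremes of the table round-trip as well.
  assert(ZigZag32(2147483647) == 4294967294u);
  assert(UnZigZag32(ZigZag32(-2147483647 - 1)) == -2147483647 - 1);
  return 0;
}
```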
+
+// Author: kenton@google.com (Kenton Varda)
+//         wink@google.com (Wink Saville) (refactored from wire_format.h)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+
+#ifndef GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_INL_H__
+#define GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_INL_H__
+
+#include <algorithm>
+#include <string>
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/message_lite.h>
+#include <google/protobuf/repeated_field.h>
+#include <google/protobuf/wire_format_lite.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/arenastring.h>
+
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Implementation details of ReadPrimitive.
+
+template <>
+inline bool WireFormatLite::ReadPrimitive<int32, WireFormatLite::TYPE_INT32>(
+    io::CodedInputStream* input,
+    int32* value) {
+  uint32 temp;
+  if (!input->ReadVarint32(&temp)) return false;
+  *value = static_cast<int32>(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<int64, WireFormatLite::TYPE_INT64>(
+    io::CodedInputStream* input,
+    int64* value) {
+  uint64 temp;
+  if (!input->ReadVarint64(&temp)) return false;
+  *value = static_cast<int64>(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<uint32, WireFormatLite::TYPE_UINT32>(
+    io::CodedInputStream* input,
+    uint32* value) {
+  return input->ReadVarint32(value);
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<uint64, WireFormatLite::TYPE_UINT64>(
+    io::CodedInputStream* input,
+    uint64* value) {
+  return input->ReadVarint64(value);
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<int32, WireFormatLite::TYPE_SINT32>(
+    io::CodedInputStream* input,
+    int32* value) {
+  uint32 temp;
+  if (!input->ReadVarint32(&temp)) return false;
+  *value = ZigZagDecode32(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<int64, WireFormatLite::TYPE_SINT64>(
+    io::CodedInputStream* input,
+    int64* value) {
+  uint64 temp;
+  if (!input->ReadVarint64(&temp)) return false;
+  *value = ZigZagDecode64(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<uint32, WireFormatLite::TYPE_FIXED32>(
+    io::CodedInputStream* input,
+    uint32* value) {
+  return input->ReadLittleEndian32(value);
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<uint64, WireFormatLite::TYPE_FIXED64>(
+    io::CodedInputStream* input,
+    uint64* value) {
+  return input->ReadLittleEndian64(value);
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<int32, WireFormatLite::TYPE_SFIXED32>(
+    io::CodedInputStream* input,
+    int32* value) {
+  uint32 temp;
+  if (!input->ReadLittleEndian32(&temp)) return false;
+  *value = static_cast<int32>(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<int64, WireFormatLite::TYPE_SFIXED64>(
+    io::CodedInputStream* input,
+    int64* value) {
+  uint64 temp;
+  if (!input->ReadLittleEndian64(&temp)) return false;
+  *value = static_cast<int64>(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<float, WireFormatLite::TYPE_FLOAT>(
+    io::CodedInputStream* input,
+    float* value) {
+  uint32 temp;
+  if (!input->ReadLittleEndian32(&temp)) return false;
+  *value = DecodeFloat(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<double, WireFormatLite::TYPE_DOUBLE>(
+    io::CodedInputStream* input,
+    double* value) {
+  uint64 temp;
+  if (!input->ReadLittleEndian64(&temp)) return false;
+  *value = DecodeDouble(temp);
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<bool, WireFormatLite::TYPE_BOOL>(
+    io::CodedInputStream* input,
+    bool* value) {
+  uint64 temp;
+  if (!input->ReadVarint64(&temp)) return false;
+  *value = temp != 0;
+  return true;
+}
+template <>
+inline bool WireFormatLite::ReadPrimitive<int, WireFormatLite::TYPE_ENUM>(
+    io::CodedInputStream* input,
+    int* value) {
+  uint32 temp;
+  if (!input->ReadVarint32(&temp)) return false;
+  *value = static_cast<int>(temp);
+  return true;
+}
+
+template <>
+inline const uint8* WireFormatLite::ReadPrimitiveFromArray<
+  uint32, WireFormatLite::TYPE_FIXED32>(
+    const uint8* buffer,
+    uint32* value) {
+  return io::CodedInputStream::ReadLittleEndian32FromArray(buffer, value);
+} +template <> +inline const uint8* WireFormatLite::ReadPrimitiveFromArray< + uint64, WireFormatLite::TYPE_FIXED64>( + const uint8* buffer, + uint64* value) { + return io::CodedInputStream::ReadLittleEndian64FromArray(buffer, value); +} +template <> +inline const uint8* WireFormatLite::ReadPrimitiveFromArray< + int32, WireFormatLite::TYPE_SFIXED32>( + const uint8* buffer, + int32* value) { + uint32 temp; + buffer = io::CodedInputStream::ReadLittleEndian32FromArray(buffer, &temp); + *value = static_cast(temp); + return buffer; +} +template <> +inline const uint8* WireFormatLite::ReadPrimitiveFromArray< + int64, WireFormatLite::TYPE_SFIXED64>( + const uint8* buffer, + int64* value) { + uint64 temp; + buffer = io::CodedInputStream::ReadLittleEndian64FromArray(buffer, &temp); + *value = static_cast(temp); + return buffer; +} +template <> +inline const uint8* WireFormatLite::ReadPrimitiveFromArray< + float, WireFormatLite::TYPE_FLOAT>( + const uint8* buffer, + float* value) { + uint32 temp; + buffer = io::CodedInputStream::ReadLittleEndian32FromArray(buffer, &temp); + *value = DecodeFloat(temp); + return buffer; +} +template <> +inline const uint8* WireFormatLite::ReadPrimitiveFromArray< + double, WireFormatLite::TYPE_DOUBLE>( + const uint8* buffer, + double* value) { + uint64 temp; + buffer = io::CodedInputStream::ReadLittleEndian64FromArray(buffer, &temp); + *value = DecodeDouble(temp); + return buffer; +} + +template +inline bool WireFormatLite::ReadRepeatedPrimitive( + int, // tag_size, unused. + uint32 tag, + io::CodedInputStream* input, + RepeatedField* values) { + CType value; + if (!ReadPrimitive(input, &value)) return false; + values->Add(value); + int elements_already_reserved = values->Capacity() - values->size(); + while (elements_already_reserved > 0 && input->ExpectTag(tag)) { + if (!ReadPrimitive(input, &value)) return false; + values->AddAlreadyReserved(value); + elements_already_reserved--; + } + return true; +} + +template +inline bool WireFormatLite::ReadRepeatedFixedSizePrimitive( + int tag_size, + uint32 tag, + io::CodedInputStream* input, + RepeatedField* values) { + GOOGLE_DCHECK_EQ(UInt32Size(tag), static_cast(tag_size)); + CType value; + if (!ReadPrimitive(input, &value)) + return false; + values->Add(value); + + // For fixed size values, repeated values can be read more quickly by + // reading directly from a raw array. + // + // We can get a tight loop by only reading as many elements as can be + // added to the RepeatedField without having to do any resizing. Additionally, + // we only try to read as many elements as are available from the current + // buffer space. Doing so avoids having to perform boundary checks when + // reading the value: the maximum number of elements that can be read is + // known outside of the loop. + const void* void_pointer; + int size; + input->GetDirectBufferPointerInline(&void_pointer, &size); + if (size > 0) { + const uint8* buffer = reinterpret_cast(void_pointer); + // The number of bytes each type occupies on the wire. + const int per_value_size = tag_size + static_cast(sizeof(value)); + + // parentheses around (std::min) prevents macro expansion of min(...) 
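+  // (Added note: <windows.h> defines min/max as function-like macros unless
+  // NOMINMAX is set; writing the name as (std::min) keeps the preprocessor
+  // from treating it as a macro invocation.)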
+ int elements_available = + (std::min)(values->Capacity() - values->size(), size / per_value_size); + int num_read = 0; + while (num_read < elements_available && + (buffer = io::CodedInputStream::ExpectTagFromArray( + buffer, tag)) != NULL) { + buffer = ReadPrimitiveFromArray(buffer, &value); + values->AddAlreadyReserved(value); + ++num_read; + } + const int read_bytes = num_read * per_value_size; + if (read_bytes > 0) { + input->Skip(read_bytes); + } + } + return true; +} + +// Specializations of ReadRepeatedPrimitive for the fixed size types, which use +// the optimized code path. +#define READ_REPEATED_FIXED_SIZE_PRIMITIVE(CPPTYPE, DECLARED_TYPE) \ +template <> \ +inline bool WireFormatLite::ReadRepeatedPrimitive< \ + CPPTYPE, WireFormatLite::DECLARED_TYPE>( \ + int tag_size, \ + uint32 tag, \ + io::CodedInputStream* input, \ + RepeatedField* values) { \ + return ReadRepeatedFixedSizePrimitive< \ + CPPTYPE, WireFormatLite::DECLARED_TYPE>( \ + tag_size, tag, input, values); \ +} + +READ_REPEATED_FIXED_SIZE_PRIMITIVE(uint32, TYPE_FIXED32) +READ_REPEATED_FIXED_SIZE_PRIMITIVE(uint64, TYPE_FIXED64) +READ_REPEATED_FIXED_SIZE_PRIMITIVE(int32, TYPE_SFIXED32) +READ_REPEATED_FIXED_SIZE_PRIMITIVE(int64, TYPE_SFIXED64) +READ_REPEATED_FIXED_SIZE_PRIMITIVE(float, TYPE_FLOAT) +READ_REPEATED_FIXED_SIZE_PRIMITIVE(double, TYPE_DOUBLE) + +#undef READ_REPEATED_FIXED_SIZE_PRIMITIVE + +template +bool WireFormatLite::ReadRepeatedPrimitiveNoInline( + int tag_size, + uint32 tag, + io::CodedInputStream* input, + RepeatedField* value) { + return ReadRepeatedPrimitive( + tag_size, tag, input, value); +} + +template +inline bool WireFormatLite::ReadPackedPrimitive(io::CodedInputStream* input, + RepeatedField* values) { + int length; + if (!input->ReadVarintSizeAsInt(&length)) return false; + io::CodedInputStream::Limit limit = input->PushLimit(length); + while (input->BytesUntilLimit() > 0) { + CType value; + if (!ReadPrimitive(input, &value)) return false; + values->Add(value); + } + input->PopLimit(limit); + return true; +} + +template +inline bool WireFormatLite::ReadPackedFixedSizePrimitive( + io::CodedInputStream* input, RepeatedField* values) { + int length; + if (!input->ReadVarintSizeAsInt(&length)) return false; + const int old_entries = values->size(); + const int new_entries = length / static_cast(sizeof(CType)); + const int new_bytes = new_entries * static_cast(sizeof(CType)); + if (new_bytes != length) return false; + // We would *like* to pre-allocate the buffer to write into (for + // speed), but *must* avoid performing a very large allocation due + // to a malicious user-supplied "length" above. So we have a fast + // path that pre-allocates when the "length" is less than a bound. + // We determine the bound by calling BytesUntilTotalBytesLimit() and + // BytesUntilLimit(). These return -1 to mean "no limit set". + // There are four cases: + // TotalBytesLimit Limit + // -1 -1 Use slow path. + // -1 >= 0 Use fast path if length <= Limit. + // >= 0 -1 Use slow path. + // >= 0 >= 0 Use fast path if length <= min(both limits). + int64 bytes_limit = input->BytesUntilTotalBytesLimit(); + if (bytes_limit == -1) { + bytes_limit = input->BytesUntilLimit(); + } else { + // parentheses around (std::min) prevents macro expansion of min(...) + bytes_limit = + (std::min)(bytes_limit, static_cast(input->BytesUntilLimit())); + } + if (bytes_limit >= new_bytes) { + // Fast-path that pre-allocates *values to the final size. 
+#if defined(PROTOBUF_LITTLE_ENDIAN) + values->Resize(old_entries + new_entries, 0); + // values->mutable_data() may change after Resize(), so do this after: + void* dest = reinterpret_cast(values->mutable_data() + old_entries); + if (!input->ReadRaw(dest, new_bytes)) { + values->Truncate(old_entries); + return false; + } +#else + values->Reserve(old_entries + new_entries); + CType value; + for (int i = 0; i < new_entries; ++i) { + if (!ReadPrimitive(input, &value)) return false; + values->AddAlreadyReserved(value); + } +#endif + } else { + // This is the slow-path case where "length" may be too large to + // safely allocate. We read as much as we can into *values + // without pre-allocating "length" bytes. + CType value; + for (int i = 0; i < new_entries; ++i) { + if (!ReadPrimitive(input, &value)) return false; + values->Add(value); + } + } + return true; +} + +// Specializations of ReadPackedPrimitive for the fixed size types, which use +// an optimized code path. +#define READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(CPPTYPE, DECLARED_TYPE) \ +template <> \ +inline bool WireFormatLite::ReadPackedPrimitive< \ + CPPTYPE, WireFormatLite::DECLARED_TYPE>( \ + io::CodedInputStream* input, \ + RepeatedField* values) { \ + return ReadPackedFixedSizePrimitive< \ + CPPTYPE, WireFormatLite::DECLARED_TYPE>(input, values); \ +} + +READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(uint32, TYPE_FIXED32) +READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(uint64, TYPE_FIXED64) +READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(int32, TYPE_SFIXED32) +READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(int64, TYPE_SFIXED64) +READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(float, TYPE_FLOAT) +READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(double, TYPE_DOUBLE) + +#undef READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE + +template +bool WireFormatLite::ReadPackedPrimitiveNoInline(io::CodedInputStream* input, + RepeatedField* values) { + return ReadPackedPrimitive(input, values); +} + + +template +inline bool WireFormatLite::ReadGroup( + int field_number, io::CodedInputStream* input, + MessageType* value) { + if (!input->IncrementRecursionDepth()) return false; + if (!value->MergePartialFromCodedStream(input)) return false; + input->UnsafeDecrementRecursionDepth(); + // Make sure the last thing read was an end tag for this group. + if (!input->LastTagWas(MakeTag(field_number, WIRETYPE_END_GROUP))) { + return false; + } + return true; +} +template +inline bool WireFormatLite::ReadMessage( + io::CodedInputStream* input, MessageType* value) { + int length; + if (!input->ReadVarintSizeAsInt(&length)) return false; + std::pair p = + input->IncrementRecursionDepthAndPushLimit(length); + if (p.second < 0 || !value->MergePartialFromCodedStream(input)) return false; + // Make sure that parsing stopped when the limit was hit, not at an endgroup + // tag. 
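+  // (Added note: DecrementRecursionDepthAndPopLimit() returns false when the
+  // nested parse stopped early, e.g. on a stray end-group tag, rather than
+  // by consuming exactly `length` bytes.)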
+ return input->DecrementRecursionDepthAndPopLimit(p.first); +} + +// =================================================================== + +inline void WireFormatLite::WriteTag(int field_number, WireType type, + io::CodedOutputStream* output) { + output->WriteTag(MakeTag(field_number, type)); +} + +inline void WireFormatLite::WriteInt32NoTag(int32 value, + io::CodedOutputStream* output) { + output->WriteVarint32SignExtended(value); +} +inline void WireFormatLite::WriteInt64NoTag(int64 value, + io::CodedOutputStream* output) { + output->WriteVarint64(static_cast(value)); +} +inline void WireFormatLite::WriteUInt32NoTag(uint32 value, + io::CodedOutputStream* output) { + output->WriteVarint32(value); +} +inline void WireFormatLite::WriteUInt64NoTag(uint64 value, + io::CodedOutputStream* output) { + output->WriteVarint64(value); +} +inline void WireFormatLite::WriteSInt32NoTag(int32 value, + io::CodedOutputStream* output) { + output->WriteVarint32(ZigZagEncode32(value)); +} +inline void WireFormatLite::WriteSInt64NoTag(int64 value, + io::CodedOutputStream* output) { + output->WriteVarint64(ZigZagEncode64(value)); +} +inline void WireFormatLite::WriteFixed32NoTag(uint32 value, + io::CodedOutputStream* output) { + output->WriteLittleEndian32(value); +} +inline void WireFormatLite::WriteFixed64NoTag(uint64 value, + io::CodedOutputStream* output) { + output->WriteLittleEndian64(value); +} +inline void WireFormatLite::WriteSFixed32NoTag(int32 value, + io::CodedOutputStream* output) { + output->WriteLittleEndian32(static_cast(value)); +} +inline void WireFormatLite::WriteSFixed64NoTag(int64 value, + io::CodedOutputStream* output) { + output->WriteLittleEndian64(static_cast(value)); +} +inline void WireFormatLite::WriteFloatNoTag(float value, + io::CodedOutputStream* output) { + output->WriteLittleEndian32(EncodeFloat(value)); +} +inline void WireFormatLite::WriteDoubleNoTag(double value, + io::CodedOutputStream* output) { + output->WriteLittleEndian64(EncodeDouble(value)); +} +inline void WireFormatLite::WriteBoolNoTag(bool value, + io::CodedOutputStream* output) { + output->WriteVarint32(value ? 1 : 0); +} +inline void WireFormatLite::WriteEnumNoTag(int value, + io::CodedOutputStream* output) { + output->WriteVarint32SignExtended(value); +} + +// See comment on ReadGroupNoVirtual to understand the need for this template +// parameter name. 
+template +inline void WireFormatLite::WriteGroupNoVirtual( + int field_number, const MessageType_WorkAroundCppLookupDefect& value, + io::CodedOutputStream* output) { + WriteTag(field_number, WIRETYPE_START_GROUP, output); + value.MessageType_WorkAroundCppLookupDefect::SerializeWithCachedSizes(output); + WriteTag(field_number, WIRETYPE_END_GROUP, output); +} +template +inline void WireFormatLite::WriteMessageNoVirtual( + int field_number, const MessageType_WorkAroundCppLookupDefect& value, + io::CodedOutputStream* output) { + WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output); + output->WriteVarint32( + value.MessageType_WorkAroundCppLookupDefect::GetCachedSize()); + value.MessageType_WorkAroundCppLookupDefect::SerializeWithCachedSizes(output); +} + +// =================================================================== + +inline uint8* WireFormatLite::WriteTagToArray(int field_number, + WireType type, + uint8* target) { + return io::CodedOutputStream::WriteTagToArray(MakeTag(field_number, type), + target); +} + +inline uint8* WireFormatLite::WriteInt32NoTagToArray(int32 value, + uint8* target) { + return io::CodedOutputStream::WriteVarint32SignExtendedToArray(value, target); +} +inline uint8* WireFormatLite::WriteInt64NoTagToArray(int64 value, + uint8* target) { + return io::CodedOutputStream::WriteVarint64ToArray( + static_cast(value), target); +} +inline uint8* WireFormatLite::WriteUInt32NoTagToArray(uint32 value, + uint8* target) { + return io::CodedOutputStream::WriteVarint32ToArray(value, target); +} +inline uint8* WireFormatLite::WriteUInt64NoTagToArray(uint64 value, + uint8* target) { + return io::CodedOutputStream::WriteVarint64ToArray(value, target); +} +inline uint8* WireFormatLite::WriteSInt32NoTagToArray(int32 value, + uint8* target) { + return io::CodedOutputStream::WriteVarint32ToArray(ZigZagEncode32(value), + target); +} +inline uint8* WireFormatLite::WriteSInt64NoTagToArray(int64 value, + uint8* target) { + return io::CodedOutputStream::WriteVarint64ToArray(ZigZagEncode64(value), + target); +} +inline uint8* WireFormatLite::WriteFixed32NoTagToArray(uint32 value, + uint8* target) { + return io::CodedOutputStream::WriteLittleEndian32ToArray(value, target); +} +inline uint8* WireFormatLite::WriteFixed64NoTagToArray(uint64 value, + uint8* target) { + return io::CodedOutputStream::WriteLittleEndian64ToArray(value, target); +} +inline uint8* WireFormatLite::WriteSFixed32NoTagToArray(int32 value, + uint8* target) { + return io::CodedOutputStream::WriteLittleEndian32ToArray( + static_cast(value), target); +} +inline uint8* WireFormatLite::WriteSFixed64NoTagToArray(int64 value, + uint8* target) { + return io::CodedOutputStream::WriteLittleEndian64ToArray( + static_cast(value), target); +} +inline uint8* WireFormatLite::WriteFloatNoTagToArray(float value, + uint8* target) { + return io::CodedOutputStream::WriteLittleEndian32ToArray(EncodeFloat(value), + target); +} +inline uint8* WireFormatLite::WriteDoubleNoTagToArray(double value, + uint8* target) { + return io::CodedOutputStream::WriteLittleEndian64ToArray(EncodeDouble(value), + target); +} +inline uint8* WireFormatLite::WriteBoolNoTagToArray(bool value, + uint8* target) { + return io::CodedOutputStream::WriteVarint32ToArray(value ? 
1 : 0, target); +} +inline uint8* WireFormatLite::WriteEnumNoTagToArray(int value, + uint8* target) { + return io::CodedOutputStream::WriteVarint32SignExtendedToArray(value, target); +} + +template +inline uint8* WireFormatLite::WritePrimitiveNoTagToArray( + const RepeatedField& value, + uint8* (*Writer)(T, uint8*), uint8* target) { + const int n = value.size(); + GOOGLE_DCHECK_GT(n, 0); + + const T* ii = value.unsafe_data(); + int i = 0; + do { + target = Writer(ii[i], target); + } while (++i < n); + + return target; +} + +template +inline uint8* WireFormatLite::WriteFixedNoTagToArray( + const RepeatedField& value, + uint8* (*Writer)(T, uint8*), uint8* target) { +#if defined(PROTOBUF_LITTLE_ENDIAN) + (void) Writer; + + const int n = value.size(); + GOOGLE_DCHECK_GT(n, 0); + + const T* ii = value.unsafe_data(); + const int bytes = n * static_cast(sizeof(ii[0])); + memcpy(target, ii, static_cast(bytes)); + return target + bytes; +#else + return WritePrimitiveNoTagToArray(value, Writer, target); +#endif +} + +inline uint8* WireFormatLite::WriteInt32NoTagToArray( + const RepeatedField< int32>& value, uint8* target) { + return WritePrimitiveNoTagToArray(value, WriteInt32NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteInt64NoTagToArray( + const RepeatedField< int64>& value, uint8* target) { + return WritePrimitiveNoTagToArray(value, WriteInt64NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteUInt32NoTagToArray( + const RepeatedField& value, uint8* target) { + return WritePrimitiveNoTagToArray(value, WriteUInt32NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteUInt64NoTagToArray( + const RepeatedField& value, uint8* target) { + return WritePrimitiveNoTagToArray(value, WriteUInt64NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteSInt32NoTagToArray( + const RepeatedField< int32>& value, uint8* target) { + return WritePrimitiveNoTagToArray(value, WriteSInt32NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteSInt64NoTagToArray( + const RepeatedField< int64>& value, uint8* target) { + return WritePrimitiveNoTagToArray(value, WriteSInt64NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteFixed32NoTagToArray( + const RepeatedField& value, uint8* target) { + return WriteFixedNoTagToArray(value, WriteFixed32NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteFixed64NoTagToArray( + const RepeatedField& value, uint8* target) { + return WriteFixedNoTagToArray(value, WriteFixed64NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteSFixed32NoTagToArray( + const RepeatedField< int32>& value, uint8* target) { + return WriteFixedNoTagToArray(value, WriteSFixed32NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteSFixed64NoTagToArray( + const RepeatedField< int64>& value, uint8* target) { + return WriteFixedNoTagToArray(value, WriteSFixed64NoTagToArray, target); +} +inline uint8* WireFormatLite::WriteFloatNoTagToArray( + const RepeatedField< float>& value, uint8* target) { + return WriteFixedNoTagToArray(value, WriteFloatNoTagToArray, target); +} +inline uint8* WireFormatLite::WriteDoubleNoTagToArray( + const RepeatedField& value, uint8* target) { + return WriteFixedNoTagToArray(value, WriteDoubleNoTagToArray, target); +} +inline uint8* WireFormatLite::WriteBoolNoTagToArray( + const RepeatedField< bool>& value, uint8* target) { + return WritePrimitiveNoTagToArray(value, WriteBoolNoTagToArray, target); +} +inline uint8* WireFormatLite::WriteEnumNoTagToArray( + const RepeatedField< int>& value, uint8* target) { + 
return WritePrimitiveNoTagToArray(value, WriteEnumNoTagToArray, target); +} + +inline uint8* WireFormatLite::WriteInt32ToArray(int field_number, + int32 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteInt32NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteInt64ToArray(int field_number, + int64 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteInt64NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteUInt32ToArray(int field_number, + uint32 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteUInt32NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteUInt64ToArray(int field_number, + uint64 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteUInt64NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteSInt32ToArray(int field_number, + int32 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteSInt32NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteSInt64ToArray(int field_number, + int64 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteSInt64NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteFixed32ToArray(int field_number, + uint32 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_FIXED32, target); + return WriteFixed32NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteFixed64ToArray(int field_number, + uint64 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_FIXED64, target); + return WriteFixed64NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteSFixed32ToArray(int field_number, + int32 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_FIXED32, target); + return WriteSFixed32NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteSFixed64ToArray(int field_number, + int64 value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_FIXED64, target); + return WriteSFixed64NoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteFloatToArray(int field_number, + float value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_FIXED32, target); + return WriteFloatNoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteDoubleToArray(int field_number, + double value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_FIXED64, target); + return WriteDoubleNoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteBoolToArray(int field_number, + bool value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteBoolNoTagToArray(value, target); +} +inline uint8* WireFormatLite::WriteEnumToArray(int field_number, + int value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_VARINT, target); + return WriteEnumNoTagToArray(value, target); +} + +template +inline uint8* WireFormatLite::WritePrimitiveToArray( + int field_number, + const RepeatedField& value, + uint8* (*Writer)(int, T, uint8*), uint8* target) { + const int n = value.size(); + if (n == 0) { + return target; + } + + const T* ii = value.unsafe_data(); + int i = 0; + do { + target = Writer(field_number, ii[i], target); + } while (++i < n); + + return 
target; +} + +inline uint8* WireFormatLite::WriteInt32ToArray( + int field_number, const RepeatedField< int32>& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteInt32ToArray, target); +} +inline uint8* WireFormatLite::WriteInt64ToArray( + int field_number, const RepeatedField< int64>& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteInt64ToArray, target); +} +inline uint8* WireFormatLite::WriteUInt32ToArray( + int field_number, const RepeatedField& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteUInt32ToArray, target); +} +inline uint8* WireFormatLite::WriteUInt64ToArray( + int field_number, const RepeatedField& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteUInt64ToArray, target); +} +inline uint8* WireFormatLite::WriteSInt32ToArray( + int field_number, const RepeatedField< int32>& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteSInt32ToArray, target); +} +inline uint8* WireFormatLite::WriteSInt64ToArray( + int field_number, const RepeatedField< int64>& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteSInt64ToArray, target); +} +inline uint8* WireFormatLite::WriteFixed32ToArray( + int field_number, const RepeatedField& value, uint8* target) { + return WritePrimitiveToArray( + field_number, value, WriteFixed32ToArray, target); +} +inline uint8* WireFormatLite::WriteFixed64ToArray( + int field_number, const RepeatedField& value, uint8* target) { + return WritePrimitiveToArray( + field_number, value, WriteFixed64ToArray, target); +} +inline uint8* WireFormatLite::WriteSFixed32ToArray( + int field_number, const RepeatedField< int32>& value, uint8* target) { + return WritePrimitiveToArray( + field_number, value, WriteSFixed32ToArray, target); +} +inline uint8* WireFormatLite::WriteSFixed64ToArray( + int field_number, const RepeatedField< int64>& value, uint8* target) { + return WritePrimitiveToArray( + field_number, value, WriteSFixed64ToArray, target); +} +inline uint8* WireFormatLite::WriteFloatToArray( + int field_number, const RepeatedField< float>& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteFloatToArray, target); +} +inline uint8* WireFormatLite::WriteDoubleToArray( + int field_number, const RepeatedField& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteDoubleToArray, target); +} +inline uint8* WireFormatLite::WriteBoolToArray( + int field_number, const RepeatedField< bool>& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteBoolToArray, target); +} +inline uint8* WireFormatLite::WriteEnumToArray( + int field_number, const RepeatedField< int>& value, uint8* target) { + return WritePrimitiveToArray(field_number, value, WriteEnumToArray, target); +} +inline uint8* WireFormatLite::WriteStringToArray(int field_number, + const string& value, + uint8* target) { + // String is for UTF-8 text only + // WARNING: In wire_format.cc, both strings and bytes are handled by + // WriteString() to avoid code duplication. If the implementations become + // different, you will need to update that usage. 
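+  // (Added note: no UTF-8 check happens at this point; when enabled,
+  // validation is performed separately via WireFormatLite::VerifyUtf8String().)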
+ target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target); + return io::CodedOutputStream::WriteStringWithSizeToArray(value, target); +} +inline uint8* WireFormatLite::WriteBytesToArray(int field_number, + const string& value, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target); + return io::CodedOutputStream::WriteStringWithSizeToArray(value, target); +} + + +template +inline uint8* WireFormatLite::InternalWriteGroupToArray( + int field_number, const MessageType& value, bool deterministic, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_START_GROUP, target); + target = value.InternalSerializeWithCachedSizesToArray(deterministic, target); + return WriteTagToArray(field_number, WIRETYPE_END_GROUP, target); +} +template +inline uint8* WireFormatLite::InternalWriteMessageToArray( + int field_number, const MessageType& value, bool deterministic, + uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target); + target = io::CodedOutputStream::WriteVarint32ToArray( + static_cast(value.GetCachedSize()), target); + return value.InternalSerializeWithCachedSizesToArray(deterministic, target); +} + +// See comment on ReadGroupNoVirtual to understand the need for this template +// parameter name. +template +inline uint8* WireFormatLite::InternalWriteGroupNoVirtualToArray( + int field_number, const MessageType_WorkAroundCppLookupDefect& value, + bool deterministic, uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_START_GROUP, target); + target = value.MessageType_WorkAroundCppLookupDefect:: + InternalSerializeWithCachedSizesToArray(deterministic, target); + return WriteTagToArray(field_number, WIRETYPE_END_GROUP, target); +} +template +inline uint8* WireFormatLite::InternalWriteMessageNoVirtualToArray( + int field_number, const MessageType_WorkAroundCppLookupDefect& value, + bool deterministic, uint8* target) { + target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target); + target = io::CodedOutputStream::WriteVarint32ToArray( + static_cast( + value.MessageType_WorkAroundCppLookupDefect::GetCachedSize()), + target); + return value.MessageType_WorkAroundCppLookupDefect:: + InternalSerializeWithCachedSizesToArray(deterministic, target); +} + +// =================================================================== + +inline size_t WireFormatLite::Int32Size(int32 value) { + return io::CodedOutputStream::VarintSize32SignExtended(value); +} +inline size_t WireFormatLite::Int64Size(int64 value) { + return io::CodedOutputStream::VarintSize64(static_cast(value)); +} +inline size_t WireFormatLite::UInt32Size(uint32 value) { + return io::CodedOutputStream::VarintSize32(value); +} +inline size_t WireFormatLite::UInt64Size(uint64 value) { + return io::CodedOutputStream::VarintSize64(value); +} +inline size_t WireFormatLite::SInt32Size(int32 value) { + return io::CodedOutputStream::VarintSize32(ZigZagEncode32(value)); +} +inline size_t WireFormatLite::SInt64Size(int64 value) { + return io::CodedOutputStream::VarintSize64(ZigZagEncode64(value)); +} +inline size_t WireFormatLite::EnumSize(int value) { + return io::CodedOutputStream::VarintSize32SignExtended(value); +} + +inline size_t WireFormatLite::StringSize(const string& value) { + return LengthDelimitedSize(value.size()); +} +inline size_t WireFormatLite::BytesSize(const string& value) { + return LengthDelimitedSize(value.size()); +} + + +template +inline size_t WireFormatLite::GroupSize(const MessageType& value) { + 
return value.ByteSizeLong(); +} +template +inline size_t WireFormatLite::MessageSize(const MessageType& value) { + return LengthDelimitedSize(value.ByteSizeLong()); +} + +// See comment on ReadGroupNoVirtual to understand the need for this template +// parameter name. +template +inline size_t WireFormatLite::GroupSizeNoVirtual( + const MessageType_WorkAroundCppLookupDefect& value) { + return value.MessageType_WorkAroundCppLookupDefect::ByteSizeLong(); +} +template +inline size_t WireFormatLite::MessageSizeNoVirtual( + const MessageType_WorkAroundCppLookupDefect& value) { + return LengthDelimitedSize( + value.MessageType_WorkAroundCppLookupDefect::ByteSizeLong()); +} + +inline size_t WireFormatLite::LengthDelimitedSize(size_t length) { + // The static_cast here prevents an error in certain compiler configurations + // but is not technically correct--if length is too large to fit in a uint32 + // then it will be silently truncated. We will need to fix this if we ever + // decide to start supporting serialized messages greater than 2 GiB in size. + return length + io::CodedOutputStream::VarintSize32( + static_cast(length)); +} + +} // namespace internal +} // namespace protobuf + +} // namespace google +#endif // GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_INL_H__ diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/implicit_weak_message.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/implicit_weak_message.cc new file mode 100644 index 0000000000000000000000000000000000000000..7a1d4446bf32d00bfae74c6ee8f32546336ca141 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/implicit_weak_message.cc @@ -0,0 +1,63 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
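[Editor's aside, illustration only and not part of the vendored sources: a self-contained sketch of the size arithmetic used in the header above. VarintSize32, TagSize, LengthDelimitedSize, and RepeatedUInt32FieldSize are this sketch's own standalone names mirroring the corresponding WireFormatLite helpers; the last one shows why TagSize() is computed once and multiplied by the element count for a non-packed repeated field, per the comment on the XxSize() declarations.]

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Bytes needed to varint-encode `value` (1-5 bytes for 32-bit inputs).
static size_t VarintSize32(uint32_t value) {
  size_t size = 1;
  while (value >= 0x80) { value >>= 7; ++size; }
  return size;
}

// A tag is (field_number << 3) | wire_type, itself varint-encoded; the wire
// type lives in the low 3 bits, so it never changes the encoded length.
static size_t TagSize(int field_number) {
  return VarintSize32(static_cast<uint32_t>(field_number) << 3);
}

// Length-delimited fields (strings, bytes, submessages) cost the payload
// length plus the varint announcing it -- the same arithmetic as
// WireFormatLite::LengthDelimitedSize() above.
static size_t LengthDelimitedSize(size_t length) {
  return length + VarintSize32(static_cast<uint32_t>(length));
}

// Non-packed repeated varint field: one tag per element, so the tag size is
// computed once and multiplied by the element count.
static size_t RepeatedUInt32FieldSize(int field_number,
                                      const std::vector<uint32_t>& values) {
  size_t size = TagSize(field_number) * values.size();
  for (uint32_t v : values) size += VarintSize32(v);
  return size;
}
```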
+
+#include <google/protobuf/implicit_weak_message.h>
+
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
+#include <google/protobuf/wire_format_lite.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+bool ImplicitWeakMessage::MergePartialFromCodedStream(
+    io::CodedInputStream* input) {
+  io::StringOutputStream string_stream(&data_);
+  io::CodedOutputStream coded_stream(&string_stream, false);
+  return WireFormatLite::SkipMessage(input, &coded_stream);
+}
+
+::google::protobuf::internal::ExplicitlyConstructed<ImplicitWeakMessage>
+    implicit_weak_message_default_instance;
+GOOGLE_PROTOBUF_DECLARE_ONCE(implicit_weak_message_once_init_);
+
+void InitImplicitWeakMessageDefaultInstance() {
+  implicit_weak_message_default_instance.DefaultConstruct();
+}
+
+const ImplicitWeakMessage* ImplicitWeakMessage::default_instance() {
+  ::google::protobuf::GoogleOnceInit(&implicit_weak_message_once_init_,
+                                     &InitImplicitWeakMessageDefaultInstance);
+  return &implicit_weak_message_default_instance.get();
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/int128.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/int128.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a50908014800902db399290e531d5777a7b40346
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/int128.cc
@@ -0,0 +1,201 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/stubs/int128.h>
+
+#include <iomanip>
+#include <ostream>  // NOLINT(readability/streams)
+#include <sstream>
+
+namespace google {
+namespace protobuf {
+
+const uint128_pod kuint128max = {
+    static_cast<uint64>(GOOGLE_LONGLONG(0xFFFFFFFFFFFFFFFF)),
+    static_cast<uint64>(GOOGLE_LONGLONG(0xFFFFFFFFFFFFFFFF))
+};
+
+// Returns the 0-based position of the last set bit (i.e., most significant bit)
+// in the given uint64. The argument may not be 0.
+// +// For example: +// Given: 5 (decimal) == 101 (binary) +// Returns: 2 +#define STEP(T, n, pos, sh) \ + do { \ + if ((n) >= (static_cast(1) << (sh))) { \ + (n) = (n) >> (sh); \ + (pos) |= (sh); \ + } \ + } while (0) +static inline int Fls64(uint64 n) { + GOOGLE_DCHECK_NE(0, n); + int pos = 0; + STEP(uint64, n, pos, 0x20); + uint32 n32 = n; + STEP(uint32, n32, pos, 0x10); + STEP(uint32, n32, pos, 0x08); + STEP(uint32, n32, pos, 0x04); + return pos + ((GOOGLE_ULONGLONG(0x3333333322221100) >> (n32 << 2)) & 0x3); +} +#undef STEP + +// Like Fls64() above, but returns the 0-based position of the last set bit +// (i.e., most significant bit) in the given uint128. The argument may not be 0. +static inline int Fls128(uint128 n) { + if (uint64 hi = Uint128High64(n)) { + return Fls64(hi) + 64; + } + return Fls64(Uint128Low64(n)); +} + +// Long division/modulo for uint128 implemented using the shift-subtract +// division algorithm adapted from: +// http://stackoverflow.com/questions/5386377/division-without-using +void uint128::DivModImpl(uint128 dividend, uint128 divisor, + uint128* quotient_ret, uint128* remainder_ret) { + if (divisor == 0) { + GOOGLE_LOG(FATAL) << "Division or mod by zero: dividend.hi=" << dividend.hi_ + << ", lo=" << dividend.lo_; + } + + if (divisor > dividend) { + *quotient_ret = 0; + *remainder_ret = dividend; + return; + } + + if (divisor == dividend) { + *quotient_ret = 1; + *remainder_ret = 0; + return; + } + + uint128 denominator = divisor; + uint128 position = 1; + uint128 quotient = 0; + + // Left aligns the MSB of the denominator and the dividend. + int shift = Fls128(dividend) - Fls128(denominator); + denominator <<= shift; + position <<= shift; + + // Uses shift-subtract algorithm to divide dividend by denominator. The + // remainder will be left in dividend. + while (position > 0) { + if (dividend >= denominator) { + dividend -= denominator; + quotient |= position; + } + position >>= 1; + denominator >>= 1; + } + + *quotient_ret = quotient; + *remainder_ret = dividend; +} + +uint128& uint128::operator/=(const uint128& divisor) { + uint128 quotient = 0; + uint128 remainder = 0; + DivModImpl(*this, divisor, "ient, &remainder); + *this = quotient; + return *this; +} +uint128& uint128::operator%=(const uint128& divisor) { + uint128 quotient = 0; + uint128 remainder = 0; + DivModImpl(*this, divisor, "ient, &remainder); + *this = remainder; + return *this; +} + +std::ostream& operator<<(std::ostream& o, const uint128& b) { + std::ios_base::fmtflags flags = o.flags(); + + // Select a divisor which is the largest power of the base < 2^64. + uint128 div; + std::streamsize div_base_log; + switch (flags & std::ios::basefield) { + case std::ios::hex: + div = static_cast(GOOGLE_ULONGLONG(0x1000000000000000)); // 16^15 + div_base_log = 15; + break; + case std::ios::oct: + div = static_cast(GOOGLE_ULONGLONG(01000000000000000000000)); // 8^21 + div_base_log = 21; + break; + default: // std::ios::dec + div = static_cast(GOOGLE_ULONGLONG(10000000000000000000)); // 10^19 + div_base_log = 19; + break; + } + + // Now piece together the uint128 representation from three chunks of + // the original value, each less than "div" and therefore representable + // as a uint64. 
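+  // (Added note: the two DivModImpl() calls below peel off the low and middle
+  // chunks, i.e. b == (high * div + mid) * div + low.)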
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/io_win32.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/io_win32.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4407facb40fe7f608bb3ddf782ca62d6a6369976
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/io_win32.cc
@@ -0,0 +1,414 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: laszlocsomor@google.com (Laszlo Csomor)
+//
+// Implementation for long-path-aware open/mkdir/access/etc. on Windows, as well
+// as for the supporting utility functions.
+//
+// These functions convert the input path to an absolute Windows path
+// with "\\?\" prefix, then pass that to _wopen/_wmkdir/_waccess/etc.
+// (declared in <io.h>) respectively.
This allows working with files/directories +// whose paths are longer than MAX_PATH (260 chars). +// +// This file is only used on Windows, it's empty on other platforms. + +#if defined(_WIN32) + +// Comment this out to fall back to using the ANSI versions (open, mkdir, ...) +// instead of the Unicode ones (_wopen, _wmkdir, ...). Doing so can be useful to +// debug failing tests if that's caused by the long path support. +#define SUPPORT_LONGPATHS + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace google { +namespace protobuf { +namespace internal { +namespace win32 { +namespace { + +using std::string; +using std::wstring; + +template +struct CharTraits { + static bool is_alpha(char_type ch); +}; + +template <> +struct CharTraits { + static bool is_alpha(char ch) { return isalpha(ch); } +}; + +template <> +struct CharTraits { + static bool is_alpha(wchar_t ch) { return iswalpha(ch); } +}; + +template +bool null_or_empty(const char_type* s) { + return s == NULL || *s == 0; +} + +// Returns true if the path starts with a drive letter, e.g. "c:". +// Note that this won't check for the "\" after the drive letter, so this also +// returns true for "c:foo" (which is "c:\${PWD}\foo"). +// This check requires that a path not have a longpath prefix ("\\?\"). +template +bool has_drive_letter(const char_type* ch) { + return CharTraits::is_alpha(ch[0]) && ch[1] == ':'; +} + +// Returns true if the path starts with a longpath prefix ("\\?\"). +template +bool has_longpath_prefix(const char_type* path) { + return path[0] == '\\' && path[1] == '\\' && path[2] == '?' && + path[3] == '\\'; +} + +template +bool is_separator(char_type c) { + return c == '/' || c == '\\'; +} + +// Returns true if the path starts with a drive specifier (e.g. "c:\"). +template +bool is_path_absolute(const char_type* path) { + return has_drive_letter(path) && is_separator(path[2]); +} + +template +bool is_drive_relative(const char_type* path) { + return has_drive_letter(path) && (path[2] == 0 || !is_separator(path[2])); +} + +wstring join_paths(const wstring& path1, const wstring& path2) { + if (path1.empty() || is_path_absolute(path2.c_str()) || + has_longpath_prefix(path2.c_str())) { + return path2; + } + if (path2.empty()) { + return path1; + } + + if (is_separator(path1[path1.size() - 1])) { + return is_separator(path2[0]) ? (path1 + path2.substr(1)) + : (path1 + path2); + } else { + return is_separator(path2[0]) ? (path1 + path2) + : (path1 + L'\\' + path2); + } +} + +wstring normalize(wstring path) { + if (has_longpath_prefix(path.c_str())) { + path = path.substr(4); + } + + static const wstring dot(L"."); + static const wstring dotdot(L".."); + const WCHAR* p = path.c_str(); + + std::vector segments; + int segment_start = -1; + // Find the path segments in `path` (separated by "/"). + for (int i = 0;; ++i) { + if (!is_separator(p[i]) && p[i] != L'\0') { + // The current character does not end a segment, so start one unless it's + // already started. + if (segment_start < 0) { + segment_start = i; + } + } else if (segment_start >= 0 && i > segment_start) { + // The current character is "/" or "\0", so this ends a segment. + // Add that to `segments` if there's anything to add; handle "." and "..". 
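+      // [Editor's note] Worked example of this segment handling, assuming an
+      // input of L"C:\foo\.\bar\..\baz":
+      //   "C:"  -> pushed         segments = {"C:"}
+      //   "foo" -> pushed         segments = {"C:", "foo"}
+      //   "."   -> dropped        segments = {"C:", "foo"}
+      //   "bar" -> pushed         segments = {"C:", "foo", "bar"}
+      //   ".."  -> pops "bar"     segments = {"C:", "foo"}
+      //   "baz" -> pushed         segments = {"C:", "foo", "baz"}
+      // yielding L"C:\foo\baz". Note that ".." never pops the drive segment
+      // itself, per the has_drive_letter() guard below.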
+ wstring segment(p, segment_start, i - segment_start); + segment_start = -1; + if (segment == dotdot) { + if (!segments.empty() && + (!has_drive_letter(segments[0].c_str()) || segments.size() > 1)) { + segments.pop_back(); + } + } else if (segment != dot && !segment.empty()) { + segments.push_back(segment); + } + } + if (p[i] == L'\0') { + break; + } + } + + // Handle the case when `path` is just a drive specifier (or some degenerate + // form of it, e.g. "c:\.."). + if (segments.size() == 1 && segments[0].size() == 2 && + has_drive_letter(segments[0].c_str())) { + return segments[0] + L'\\'; + } + + // Join all segments. + bool first = true; + std::wstringstream result; + for (int i = 0; i < segments.size(); ++i) { + if (!first) { + result << L'\\'; + } + first = false; + result << segments[i]; + } + // Preserve trailing separator if the input contained it. + if (!path.empty() && is_separator(p[path.size() - 1])) { + result << L'\\'; + } + return result.str(); +} + +bool as_windows_path(const char* path, wstring* result) { + if (null_or_empty(path)) { + result->clear(); + return true; + } + wstring wpath; + if (!strings::utf8_to_wcs(path, &wpath)) { + return false; + } + if (has_longpath_prefix(wpath.c_str())) { + *result = wpath; + return true; + } + if (is_separator(path[0]) || is_drive_relative(path)) { + return false; + } + + + if (!is_path_absolute(wpath.c_str())) { + int size = ::GetCurrentDirectoryW(0, NULL); + if (size == 0 && GetLastError() != ERROR_INSUFFICIENT_BUFFER) { + return false; + } + std::unique_ptr wcwd(new WCHAR[size]); + ::GetCurrentDirectoryW(size, wcwd.get()); + wpath = join_paths(wcwd.get(), wpath); + } + wpath = normalize(wpath); + if (!has_longpath_prefix(wpath.c_str())) { + // Add the "\\?\" prefix unconditionally. This way we prevent the Win32 API + // from processing the path and "helpfully" removing trailing dots from the + // path, for example. 
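+    // [Editor's note] For example (assuming the current directory is C:\work),
+    // a relative input "foo/bar.txt" becomes the absolute path
+    // L"C:\work\foo\bar.txt" via join_paths() + normalize() above, and then
+    // L"\\?\C:\work\foo\bar.txt" here; paths carrying that prefix are passed
+    // through by Win32 without further path munging.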
+ // See https://github.com/bazelbuild/bazel/issues/2935 + wpath = wstring(L"\\\\?\\") + wpath; + } + *result = wpath; + return true; +} + +} // namespace + +int open(const char* path, int flags, int mode) { +#ifdef SUPPORT_LONGPATHS + wstring wpath; + if (!as_windows_path(path, &wpath)) { + errno = ENOENT; + return -1; + } + return ::_wopen(wpath.c_str(), flags, mode); +#else + return ::_open(path, flags, mode); +#endif +} + +int mkdir(const char* path, int _mode) { +#ifdef SUPPORT_LONGPATHS + wstring wpath; + if (!as_windows_path(path, &wpath)) { + errno = ENOENT; + return -1; + } + return ::_wmkdir(wpath.c_str()); +#else // not SUPPORT_LONGPATHS + return ::_mkdir(path); +#endif // not SUPPORT_LONGPATHS +} + +int access(const char* path, int mode) { +#ifdef SUPPORT_LONGPATHS + wstring wpath; + if (!as_windows_path(path, &wpath)) { + errno = ENOENT; + return -1; + } + return ::_waccess(wpath.c_str(), mode); +#else + return ::_access(path, mode); +#endif +} + +int chdir(const char* path) { +#ifdef SUPPORT_LONGPATHS + wstring wpath; + if (!as_windows_path(path, &wpath)) { + errno = ENOENT; + return -1; + } + return ::_wchdir(wpath.c_str()); +#else + return ::_chdir(path); +#endif +} + +int stat(const char* path, struct _stat* buffer) { +#ifdef SUPPORT_LONGPATHS + wstring wpath; + if (!as_windows_path(path, &wpath)) { + errno = ENOENT; + return -1; + } + return ::_wstat(wpath.c_str(), buffer); +#else // not SUPPORT_LONGPATHS + return ::_stat(path, buffer); +#endif // not SUPPORT_LONGPATHS +} + +FILE* fopen(const char* path, const char* mode) { +#ifdef SUPPORT_LONGPATHS + if (null_or_empty(path)) { + errno = EINVAL; + return NULL; + } + wstring wpath; + if (!as_windows_path(path, &wpath)) { + errno = ENOENT; + return NULL; + } + wstring wmode; + if (!strings::utf8_to_wcs(mode, &wmode)) { + errno = EINVAL; + return NULL; + } + return ::_wfopen(wpath.c_str(), wmode.c_str()); +#else + return ::fopen(path, mode); +#endif +} + +int close(int fd) { return ::close(fd); } + +int dup(int fd) { return ::_dup(fd); } + +int dup2(int fd1, int fd2) { return ::_dup2(fd1, fd2); } + +int read(int fd, void* buffer, size_t size) { + return ::_read(fd, buffer, size); +} + +int setmode(int fd, int mode) { return ::_setmode(fd, mode); } + +int write(int fd, const void* buffer, size_t size) { + return ::_write(fd, buffer, size); +} + +wstring testonly_utf8_to_winpath(const char* path) { + wstring wpath; + return as_windows_path(path, &wpath) ? wpath : wstring(); +} + +namespace strings { + +bool wcs_to_mbs(const WCHAR* s, string* out, bool outUtf8) { + if (null_or_empty(s)) { + out->clear(); + return true; + } + BOOL usedDefaultChar = FALSE; + SetLastError(0); + int size = WideCharToMultiByte( + outUtf8 ? CP_UTF8 : CP_ACP, 0, s, -1, NULL, 0, NULL, + outUtf8 ? NULL : &usedDefaultChar); + if ((size == 0 && GetLastError() != ERROR_INSUFFICIENT_BUFFER) + || usedDefaultChar) { + return false; + } + std::unique_ptr astr(new CHAR[size]); + WideCharToMultiByte( + outUtf8 ? CP_UTF8 : CP_ACP, 0, s, -1, astr.get(), size, NULL, NULL); + out->assign(astr.get()); + return true; +} + +bool mbs_to_wcs(const char* s, wstring* out, bool inUtf8) { + if (null_or_empty(s)) { + out->clear(); + return true; + } + + SetLastError(0); + int size = + MultiByteToWideChar(inUtf8 ? CP_UTF8 : CP_ACP, 0, s, -1, NULL, 0); + if (size == 0 && GetLastError() != ERROR_INSUFFICIENT_BUFFER) { + return false; + } + std::unique_ptr wstr(new WCHAR[size]); + MultiByteToWideChar( + inUtf8 ? 
CP_UTF8 : CP_ACP, 0, s, -1, wstr.get(), size + 1); + out->assign(wstr.get()); + return true; +} + +bool utf8_to_wcs(const char* input, wstring* out) { + return mbs_to_wcs(input, out, true); +} + +bool wcs_to_utf8(const wchar_t* input, string* out) { + return wcs_to_mbs(input, out, true); +} + +} // namespace strings +} // namespace win32 +} // namespace internal +} // namespace protobuf +} // namespace google + +#endif // defined(_WIN32) diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/message_lite.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/message_lite.cc new file mode 100644 index 0000000000000000000000000000000000000000..123b142d090549723156ed61d1de6bd02b7c69f5 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/message_lite.cc @@ -0,0 +1,407 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Authors: wink@google.com (Wink Saville), +// kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace google { +namespace protobuf { + +string MessageLite::InitializationErrorString() const { + return "(cannot determine missing fields for lite message)"; +} + +namespace { + +// When serializing, we first compute the byte size, then serialize the message. +// If serialization produces a different number of bytes than expected, we +// call this function, which crashes. The problem could be due to a bug in the +// protobuf implementation but is more likely caused by concurrent modification +// of the message. This function attempts to distinguish between the two and +// provide a useful error message. 
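+// Concretely: if ByteSizeLong() first returns 10, a 10-byte buffer is
+// reserved, and another thread then sets a field before serialization runs,
+// the serializer may produce more than 10 bytes. Recomputing the byte size
+// afterwards distinguishes "size changed under us" (concurrent modification)
+// from "same size, different byte count written" (likely a protobuf bug).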
+void ByteSizeConsistencyError(size_t byte_size_before_serialization, + size_t byte_size_after_serialization, + size_t bytes_produced_by_serialization, + const MessageLite& message) { + GOOGLE_CHECK_EQ(byte_size_before_serialization, byte_size_after_serialization) + << message.GetTypeName() + << " was modified concurrently during serialization."; + GOOGLE_CHECK_EQ(bytes_produced_by_serialization, byte_size_before_serialization) + << "Byte size calculation and serialization were inconsistent. This " + "may indicate a bug in protocol buffers or it may be caused by " + "concurrent modification of " << message.GetTypeName() << "."; + GOOGLE_LOG(FATAL) << "This shouldn't be called if all the sizes are equal."; +} + +string InitializationErrorMessage(const char* action, + const MessageLite& message) { + // Note: We want to avoid depending on strutil in the lite library, otherwise + // we'd use: + // + // return strings::Substitute( + // "Can't $0 message of type \"$1\" because it is missing required " + // "fields: $2", + // action, message.GetTypeName(), + // message.InitializationErrorString()); + + string result; + result += "Can't "; + result += action; + result += " message of type \""; + result += message.GetTypeName(); + result += "\" because it is missing required fields: "; + result += message.InitializationErrorString(); + return result; +} + +// Several of the Parse methods below just do one thing and then call another +// method. In a naive implementation, we might have ParseFromString() call +// ParseFromArray() which would call ParseFromZeroCopyStream() which would call +// ParseFromCodedStream() which would call MergeFromCodedStream() which would +// call MergePartialFromCodedStream(). However, when parsing very small +// messages, every function call introduces significant overhead. To avoid +// this without reproducing code, we use these forced-inline helpers. 
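+// [Editor's note] An illustrative sketch of that pattern with hypothetical
+// names (not the protobuf API): a single always-inline worker shared by thin
+// entry points, so each public entry costs one call instead of a chain.
+//
+//   GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
+//   bool InlineDoParse(Input* in, Msg* out) { /* shared parse body */ }
+//   bool Msg::ParseFast(Input* in)  { return InlineDoParse(in, this); }
+//   bool Msg::ParseSlow(Buffer buf) { Input in(buf); return InlineDoParse(&in, this); }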
+GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool InlineMergeFromCodedStream( + io::CodedInputStream* input, MessageLite* message); +GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool InlineParseFromCodedStream( + io::CodedInputStream* input, MessageLite* message); +GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool InlineParsePartialFromCodedStream( + io::CodedInputStream* input, MessageLite* message); +GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool InlineParseFromArray( + const void* data, int size, MessageLite* message); +GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool InlineParsePartialFromArray( + const void* data, int size, MessageLite* message); + +inline bool InlineMergeFromCodedStream(io::CodedInputStream* input, + MessageLite* message) { + if (!message->MergePartialFromCodedStream(input)) return false; + if (!message->IsInitialized()) { + GOOGLE_LOG(ERROR) << InitializationErrorMessage("parse", *message); + return false; + } + return true; +} + +inline bool InlineParseFromCodedStream(io::CodedInputStream* input, + MessageLite* message) { + message->Clear(); + return InlineMergeFromCodedStream(input, message); +} + +inline bool InlineParsePartialFromCodedStream(io::CodedInputStream* input, + MessageLite* message) { + message->Clear(); + return message->MergePartialFromCodedStream(input); +} + +inline bool InlineParseFromArray( + const void* data, int size, MessageLite* message) { + io::CodedInputStream input(reinterpret_cast(data), size); + return InlineParseFromCodedStream(&input, message) && + input.ConsumedEntireMessage(); +} + +inline bool InlineParsePartialFromArray( + const void* data, int size, MessageLite* message) { + io::CodedInputStream input(reinterpret_cast(data), size); + return InlineParsePartialFromCodedStream(&input, message) && + input.ConsumedEntireMessage(); +} + +} // namespace + + +MessageLite* MessageLite::New(::google::protobuf::Arena* arena) const { + MessageLite* message = New(); + if (arena != NULL) { + arena->Own(message); + } + return message; +} + +bool MessageLite::MergeFromCodedStream(io::CodedInputStream* input) { + return InlineMergeFromCodedStream(input, this); +} + +bool MessageLite::ParseFromCodedStream(io::CodedInputStream* input) { + return InlineParseFromCodedStream(input, this); +} + +bool MessageLite::ParsePartialFromCodedStream(io::CodedInputStream* input) { + return InlineParsePartialFromCodedStream(input, this); +} + +bool MessageLite::ParseFromZeroCopyStream(io::ZeroCopyInputStream* input) { + io::CodedInputStream decoder(input); + return ParseFromCodedStream(&decoder) && decoder.ConsumedEntireMessage(); +} + +bool MessageLite::ParsePartialFromZeroCopyStream( + io::ZeroCopyInputStream* input) { + io::CodedInputStream decoder(input); + return ParsePartialFromCodedStream(&decoder) && + decoder.ConsumedEntireMessage(); +} + +bool MessageLite::ParseFromBoundedZeroCopyStream( + io::ZeroCopyInputStream* input, int size) { + io::CodedInputStream decoder(input); + decoder.PushLimit(size); + return ParseFromCodedStream(&decoder) && + decoder.ConsumedEntireMessage() && + decoder.BytesUntilLimit() == 0; +} + +bool MessageLite::ParsePartialFromBoundedZeroCopyStream( + io::ZeroCopyInputStream* input, int size) { + io::CodedInputStream decoder(input); + decoder.PushLimit(size); + return ParsePartialFromCodedStream(&decoder) && + decoder.ConsumedEntireMessage() && + decoder.BytesUntilLimit() == 0; +} + +bool MessageLite::ParseFromString(const string& data) { + return InlineParseFromArray(data.data(), data.size(), this); +} + +bool 
MessageLite::ParsePartialFromString(const string& data) { + return InlineParsePartialFromArray(data.data(), data.size(), this); +} + +bool MessageLite::ParseFromArray(const void* data, int size) { + return InlineParseFromArray(data, size, this); +} + +bool MessageLite::ParsePartialFromArray(const void* data, int size) { + return InlineParsePartialFromArray(data, size, this); +} + + +// =================================================================== + +uint8* MessageLite::SerializeWithCachedSizesToArray(uint8* target) const { + return InternalSerializeWithCachedSizesToArray( + io::CodedOutputStream::IsDefaultSerializationDeterministic(), target); +} + +bool MessageLite::SerializeToCodedStream(io::CodedOutputStream* output) const { + GOOGLE_DCHECK(IsInitialized()) << InitializationErrorMessage("serialize", *this); + return SerializePartialToCodedStream(output); +} + +bool MessageLite::SerializePartialToCodedStream( + io::CodedOutputStream* output) const { + const size_t size = ByteSizeLong(); // Force size to be cached. + if (size > INT_MAX) { + GOOGLE_LOG(ERROR) << "Exceeded maximum protobuf size of 2GB: " << size; + return false; + } + + uint8* buffer = output->GetDirectBufferForNBytesAndAdvance(size); + if (buffer != NULL) { + uint8* end = InternalSerializeWithCachedSizesToArray( + output->IsSerializationDeterministic(), buffer); + if (end - buffer != size) { + ByteSizeConsistencyError(size, ByteSizeLong(), end - buffer, *this); + } + return true; + } else { + int original_byte_count = output->ByteCount(); + SerializeWithCachedSizes(output); + if (output->HadError()) { + return false; + } + int final_byte_count = output->ByteCount(); + + if (final_byte_count - original_byte_count != size) { + ByteSizeConsistencyError(size, ByteSizeLong(), + final_byte_count - original_byte_count, *this); + } + + return true; + } +} + +bool MessageLite::SerializeToZeroCopyStream( + io::ZeroCopyOutputStream* output) const { + io::CodedOutputStream encoder(output); + return SerializeToCodedStream(&encoder); +} + +bool MessageLite::SerializePartialToZeroCopyStream( + io::ZeroCopyOutputStream* output) const { + io::CodedOutputStream encoder(output); + return SerializePartialToCodedStream(&encoder); +} + +bool MessageLite::AppendToString(string* output) const { + GOOGLE_DCHECK(IsInitialized()) << InitializationErrorMessage("serialize", *this); + return AppendPartialToString(output); +} + +bool MessageLite::AppendPartialToString(string* output) const { + size_t old_size = output->size(); + size_t byte_size = ByteSizeLong(); + if (byte_size > INT_MAX) { + GOOGLE_LOG(ERROR) << "Exceeded maximum protobuf size of 2GB: " << byte_size; + return false; + } + + STLStringResizeUninitialized(output, old_size + byte_size); + uint8* start = + reinterpret_cast(io::mutable_string_data(output) + old_size); + uint8* end = SerializeWithCachedSizesToArray(start); + if (end - start != byte_size) { + ByteSizeConsistencyError(byte_size, ByteSizeLong(), end - start, *this); + } + return true; +} + +bool MessageLite::SerializeToString(string* output) const { + output->clear(); + return AppendToString(output); +} + +bool MessageLite::SerializePartialToString(string* output) const { + output->clear(); + return AppendPartialToString(output); +} + +bool MessageLite::SerializeToArray(void* data, int size) const { + GOOGLE_DCHECK(IsInitialized()) << InitializationErrorMessage("serialize", *this); + return SerializePartialToArray(data, size); +} + +bool MessageLite::SerializePartialToArray(void* data, int size) const { + int byte_size = 
ByteSizeLong(); + if (size < byte_size) return false; + uint8* start = reinterpret_cast(data); + uint8* end = SerializeWithCachedSizesToArray(start); + if (end - start != byte_size) { + ByteSizeConsistencyError(byte_size, ByteSizeLong(), end - start, *this); + } + return true; +} + +string MessageLite::SerializeAsString() const { + // If the compiler implements the (Named) Return Value Optimization, + // the local variable 'output' will not actually reside on the stack + // of this function, but will be overlaid with the object that the + // caller supplied for the return value to be constructed in. + string output; + if (!AppendToString(&output)) + output.clear(); + return output; +} + +string MessageLite::SerializePartialAsString() const { + string output; + if (!AppendPartialToString(&output)) + output.clear(); + return output; +} + +void MessageLite::SerializeWithCachedSizes( + io::CodedOutputStream* output) const { + GOOGLE_DCHECK(InternalGetTable()); + internal::TableSerialize( + *this, + static_cast(InternalGetTable()), + output); +} + +// The table driven code optimizes the case that the CodedOutputStream buffer +// is large enough to serialize into it directly. +// If the proto is optimized for speed, this method will be overridden by +// generated code for maximum speed. If the proto is optimized for size or +// is lite, then we need to specialize this to avoid infinite recursion. +uint8* MessageLite::InternalSerializeWithCachedSizesToArray( + bool deterministic, uint8* target) const { + const internal::SerializationTable* table = + static_cast(InternalGetTable()); + if (table == NULL) { + // We only optimize this when using optimize_for = SPEED. In other cases + // we just use the CodedOutputStream path. + int size = GetCachedSize(); + io::ArrayOutputStream out(target, size); + io::CodedOutputStream coded_out(&out); + coded_out.SetSerializationDeterministic(deterministic); + SerializeWithCachedSizes(&coded_out); + GOOGLE_CHECK(!coded_out.HadError()); + return target + size; + } else { + return internal::TableSerializeToArray(*this, table, deterministic, target); + } +} + +namespace internal { +template<> +MessageLite* GenericTypeHandler::NewFromPrototype( + const MessageLite* prototype, google::protobuf::Arena* arena) { + return prototype->New(arena); +} +template <> +void GenericTypeHandler::Merge(const MessageLite& from, + MessageLite* to) { + to->CheckTypeAndMergeFrom(from); +} +template<> +void GenericTypeHandler::Merge(const string& from, + string* to) { + *to = from; +} + +bool proto3_preserve_unknown_ = true; + +void SetProto3PreserveUnknownsDefault(bool preserve) { + proto3_preserve_unknown_ = preserve; +} + + +} // namespace internal + +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/status.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/status.cc new file mode 100644 index 0000000000000000000000000000000000000000..2bfbe0b42b8e16a8475f87c8aee0f724104f9e81 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/status.cc @@ -0,0 +1,134 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#include + +#include +#include +#include +#include + +namespace google { +namespace protobuf { +namespace util { +namespace error { +inline string CodeEnumToString(error::Code code) { + switch (code) { + case OK: + return "OK"; + case CANCELLED: + return "CANCELLED"; + case UNKNOWN: + return "UNKNOWN"; + case INVALID_ARGUMENT: + return "INVALID_ARGUMENT"; + case DEADLINE_EXCEEDED: + return "DEADLINE_EXCEEDED"; + case NOT_FOUND: + return "NOT_FOUND"; + case ALREADY_EXISTS: + return "ALREADY_EXISTS"; + case PERMISSION_DENIED: + return "PERMISSION_DENIED"; + case UNAUTHENTICATED: + return "UNAUTHENTICATED"; + case RESOURCE_EXHAUSTED: + return "RESOURCE_EXHAUSTED"; + case FAILED_PRECONDITION: + return "FAILED_PRECONDITION"; + case ABORTED: + return "ABORTED"; + case OUT_OF_RANGE: + return "OUT_OF_RANGE"; + case UNIMPLEMENTED: + return "UNIMPLEMENTED"; + case INTERNAL: + return "INTERNAL"; + case UNAVAILABLE: + return "UNAVAILABLE"; + case DATA_LOSS: + return "DATA_LOSS"; + } + + // No default clause, clang will abort if a code is missing from + // above switch. + return "UNKNOWN"; +} +} // namespace error. 
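+// [Editor's note] Minimal illustration of the "exhaustive switch, no default"
+// idiom used by CodeEnumToString above: because every enumerator is handled
+// and there is no default clause, -Wswitch in clang/gcc flags any enumerator
+// that is added later but not handled. Names below are illustrative only.
+namespace switch_idiom_sketch {
+enum Color { RED, GREEN, BLUE };
+inline const char* ColorName(Color c) {
+  switch (c) {
+    case RED:   return "RED";
+    case GREEN: return "GREEN";
+    case BLUE:  return "BLUE";
+  }
+  return "UNKNOWN";  // unreachable fallback when c is a valid Color
+}
+}  // namespace switch_idiom_sketch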
+ +const Status Status::OK = Status(); +const Status Status::CANCELLED = Status(error::CANCELLED, ""); +const Status Status::UNKNOWN = Status(error::UNKNOWN, ""); + +Status::Status() : error_code_(error::OK) { +} + +Status::Status(error::Code error_code, StringPiece error_message) + : error_code_(error_code) { + if (error_code != error::OK) { + error_message_ = error_message.ToString(); + } +} + +Status::Status(const Status& other) + : error_code_(other.error_code_), error_message_(other.error_message_) { +} + +Status& Status::operator=(const Status& other) { + error_code_ = other.error_code_; + error_message_ = other.error_message_; + return *this; +} + +bool Status::operator==(const Status& x) const { + return error_code_ == x.error_code_ && + error_message_ == x.error_message_; +} + +string Status::ToString() const { + if (error_code_ == error::OK) { + return "OK"; + } else { + if (error_message_.empty()) { + return error::CodeEnumToString(error_code_); + } else { + return error::CodeEnumToString(error_code_) + ":" + + error_message_; + } + } +} + +std::ostream& operator<<(std::ostream& os, const Status& x) { + os << x.ToString(); + return os; +} + +} // namespace util +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/statusor.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/statusor.cc new file mode 100644 index 0000000000000000000000000000000000000000..48d1402ae0036d7b29e1415923e16d7311afb4a8 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/statusor.cc @@ -0,0 +1,46 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
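+// [Editor's note] Context for StatusOrHelper::Crash below: a StatusOr-style
+// wrapper holds either a value or an error Status, and fetching the value
+// without checking ok() is a programming error that aborts the process. A
+// minimal sketch of that contract (illustrative names, not the protobuf
+// implementation):
+//
+//   template <typename T>
+//   class MiniStatusOr {
+//    public:
+//     explicit MiniStatusOr(T value) : ok_(true), value_(value) {}
+//     bool ok() const { return ok_; }
+//     const T& value() const {
+//       if (!ok_) abort();  // corresponds to StatusOrHelper::Crash()
+//       return value_;
+//     }
+//    private:
+//     bool ok_;
+//     T value_;
+//   };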
+
+#include <google/protobuf/stubs/statusor.h>
+
+namespace google {
+namespace protobuf {
+namespace util {
+namespace internal {
+
+void StatusOrHelper::Crash(const Status& status) {
+  GOOGLE_LOG(FATAL) << "Attempting to fetch value instead of handling error "
+                    << status.ToString();
+}
+
+}  // namespace internal
+}  // namespace util
+}  // namespace protobuf
+}  // namespace google
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/stringpiece.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/stringpiece.cc
new file mode 100644
index 0000000000000000000000000000000000000000..989474b74704d1ad699a4eb79d135a12f9c44760
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/stringpiece.cc
@@ -0,0 +1,268 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <google/protobuf/stubs/stringpiece.h>
+
+#include <string.h>
+#include <algorithm>
+#include <climits>
+#include <string>
+#include <ostream>
+
+namespace google {
+namespace protobuf {
+std::ostream& operator<<(std::ostream& o, StringPiece piece) {
+  o.write(piece.data(), piece.size());
+  return o;
+}
+
+// Out-of-line error path.
+void StringPiece::LogFatalSizeTooBig(size_t size, const char* details) { + GOOGLE_LOG(FATAL) << "size too big: " << size << " details: " << details; +} + +StringPiece::StringPiece(StringPiece x, stringpiece_ssize_type pos) + : ptr_(x.ptr_ + pos), length_(x.length_ - pos) { + GOOGLE_DCHECK_LE(0, pos); + GOOGLE_DCHECK_LE(pos, x.length_); +} + +StringPiece::StringPiece(StringPiece x, + stringpiece_ssize_type pos, + stringpiece_ssize_type len) + : ptr_(x.ptr_ + pos), length_(std::min(len, x.length_ - pos)) { + GOOGLE_DCHECK_LE(0, pos); + GOOGLE_DCHECK_LE(pos, x.length_); + GOOGLE_DCHECK_GE(len, 0); +} + +void StringPiece::CopyToString(string* target) const { + target->assign(ptr_, length_); +} + +void StringPiece::AppendToString(string* target) const { + target->append(ptr_, length_); +} + +bool StringPiece::Consume(StringPiece x) { + if (starts_with(x)) { + ptr_ += x.length_; + length_ -= x.length_; + return true; + } + return false; +} + +bool StringPiece::ConsumeFromEnd(StringPiece x) { + if (ends_with(x)) { + length_ -= x.length_; + return true; + } + return false; +} + +stringpiece_ssize_type StringPiece::copy(char* buf, + size_type n, + size_type pos) const { + stringpiece_ssize_type ret = std::min(length_ - pos, n); + memcpy(buf, ptr_ + pos, ret); + return ret; +} + +bool StringPiece::contains(StringPiece s) const { + return find(s, 0) != npos; +} + +stringpiece_ssize_type StringPiece::find(StringPiece s, size_type pos) const { + if (length_ <= 0 || pos > static_cast(length_)) { + if (length_ == 0 && pos == 0 && s.length_ == 0) return 0; + return npos; + } + const char *result = std::search(ptr_ + pos, ptr_ + length_, + s.ptr_, s.ptr_ + s.length_); + return result == ptr_ + length_ ? npos : result - ptr_; +} + +stringpiece_ssize_type StringPiece::find(char c, size_type pos) const { + if (length_ <= 0 || pos >= static_cast(length_)) { + return npos; + } + const char* result = static_cast( + memchr(ptr_ + pos, c, length_ - pos)); + return result != NULL ? result - ptr_ : npos; +} + +stringpiece_ssize_type StringPiece::rfind(StringPiece s, size_type pos) const { + if (length_ < s.length_) return npos; + const size_t ulen = length_; + if (s.length_ == 0) return std::min(ulen, pos); + + const char* last = ptr_ + std::min(ulen - s.length_, pos) + s.length_; + const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_); + return result != last ? result - ptr_ : npos; +} + +// Search range is [0..pos] inclusive. If pos == npos, search everything. +stringpiece_ssize_type StringPiece::rfind(char c, size_type pos) const { + // Note: memrchr() is not available on Windows. + if (length_ <= 0) return npos; + for (stringpiece_ssize_type i = + std::min(pos, static_cast(length_ - 1)); + i >= 0; --i) { + if (ptr_[i] == c) { + return i; + } + } + return npos; +} + +// For each character in characters_wanted, sets the index corresponding +// to the ASCII code of that character to 1 in table. This is used by +// the find_.*_of methods below to tell whether or not a character is in +// the lookup table in constant time. +// The argument `table' must be an array that is large enough to hold all +// the possible values of an unsigned char. 
Thus it should be be declared +// as follows: +// bool table[UCHAR_MAX + 1] +static inline void BuildLookupTable(StringPiece characters_wanted, + bool* table) { + const stringpiece_ssize_type length = characters_wanted.length(); + const char* const data = characters_wanted.data(); + for (stringpiece_ssize_type i = 0; i < length; ++i) { + table[static_cast(data[i])] = true; + } +} + +stringpiece_ssize_type StringPiece::find_first_of(StringPiece s, + size_type pos) const { + if (length_ <= 0 || s.length_ <= 0) { + return npos; + } + // Avoid the cost of BuildLookupTable() for a single-character search. + if (s.length_ == 1) return find_first_of(s.ptr_[0], pos); + + bool lookup[UCHAR_MAX + 1] = { false }; + BuildLookupTable(s, lookup); + for (stringpiece_ssize_type i = pos; i < length_; ++i) { + if (lookup[static_cast(ptr_[i])]) { + return i; + } + } + return npos; +} + +stringpiece_ssize_type StringPiece::find_first_not_of(StringPiece s, + size_type pos) const { + if (length_ <= 0) return npos; + if (s.length_ <= 0) return 0; + // Avoid the cost of BuildLookupTable() for a single-character search. + if (s.length_ == 1) return find_first_not_of(s.ptr_[0], pos); + + bool lookup[UCHAR_MAX + 1] = { false }; + BuildLookupTable(s, lookup); + for (stringpiece_ssize_type i = pos; i < length_; ++i) { + if (!lookup[static_cast(ptr_[i])]) { + return i; + } + } + return npos; +} + +stringpiece_ssize_type StringPiece::find_first_not_of(char c, + size_type pos) const { + if (length_ <= 0) return npos; + + for (; pos < static_cast(length_); ++pos) { + if (ptr_[pos] != c) { + return pos; + } + } + return npos; +} + +stringpiece_ssize_type StringPiece::find_last_of(StringPiece s, + size_type pos) const { + if (length_ <= 0 || s.length_ <= 0) return npos; + // Avoid the cost of BuildLookupTable() for a single-character search. + if (s.length_ == 1) return find_last_of(s.ptr_[0], pos); + + bool lookup[UCHAR_MAX + 1] = { false }; + BuildLookupTable(s, lookup); + for (stringpiece_ssize_type i = + std::min(pos, static_cast(length_ - 1)); i >= 0; --i) { + if (lookup[static_cast(ptr_[i])]) { + return i; + } + } + return npos; +} + +stringpiece_ssize_type StringPiece::find_last_not_of(StringPiece s, + size_type pos) const { + if (length_ <= 0) return npos; + + stringpiece_ssize_type i = std::min(pos, static_cast(length_ - 1)); + if (s.length_ <= 0) return i; + + // Avoid the cost of BuildLookupTable() for a single-character search. 
+ if (s.length_ == 1) return find_last_not_of(s.ptr_[0], pos); + + bool lookup[UCHAR_MAX + 1] = { false }; + BuildLookupTable(s, lookup); + for (; i >= 0; --i) { + if (!lookup[static_cast(ptr_[i])]) { + return i; + } + } + return npos; +} + +stringpiece_ssize_type StringPiece::find_last_not_of(char c, + size_type pos) const { + if (length_ <= 0) return npos; + + for (stringpiece_ssize_type i = + std::min(pos, static_cast(length_ - 1)); i >= 0; --i) { + if (ptr_[i] != c) { + return i; + } + } + return npos; +} + +StringPiece StringPiece::substr(size_type pos, size_type n) const { + if (pos > length_) pos = length_; + if (n > length_ - pos) n = length_ - pos; + return StringPiece(ptr_ + pos, n); +} + +const StringPiece::size_type StringPiece::npos = size_type(-1); + +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/stringprintf.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/stringprintf.cc new file mode 100644 index 0000000000000000000000000000000000000000..d98b9b8744c508c5c413bc715398aa6a6b934bf6 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/stringprintf.cc @@ -0,0 +1,174 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2012 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// from google3/base/stringprintf.cc + +#include + +#include +#include // For va_list and related operations +#include // MSVC requires this for _vsnprintf +#include +#include + +namespace google { +namespace protobuf { + +#ifdef _MSC_VER +enum { IS_COMPILER_MSVC = 1 }; +#ifndef va_copy +// Define va_copy for MSVC. This is a hack, assuming va_list is simply a +// pointer into the stack and is safe to copy. 
+#define va_copy(dest, src) ((dest) = (src)) +#endif +#else +enum { IS_COMPILER_MSVC = 0 }; +#endif + +void StringAppendV(string* dst, const char* format, va_list ap) { + // First try with a small fixed size buffer + static const int kSpaceLength = 1024; + char space[kSpaceLength]; + + // It's possible for methods that use a va_list to invalidate + // the data in it upon use. The fix is to make a copy + // of the structure before using it and use that copy instead. + va_list backup_ap; + va_copy(backup_ap, ap); + int result = vsnprintf(space, kSpaceLength, format, backup_ap); + va_end(backup_ap); + + if (result < kSpaceLength) { + if (result >= 0) { + // Normal case -- everything fit. + dst->append(space, result); + return; + } + + if (IS_COMPILER_MSVC) { + // Error or MSVC running out of space. MSVC 8.0 and higher + // can be asked about space needed with the special idiom below: + va_copy(backup_ap, ap); + result = vsnprintf(NULL, 0, format, backup_ap); + va_end(backup_ap); + } + + if (result < 0) { + // Just an error. + return; + } + } + + // Increase the buffer size to the size requested by vsnprintf, + // plus one for the closing \0. + int length = result+1; + char* buf = new char[length]; + + // Restore the va_list before we use it again + va_copy(backup_ap, ap); + result = vsnprintf(buf, length, format, backup_ap); + va_end(backup_ap); + + if (result >= 0 && result < length) { + // It fit + dst->append(buf, result); + } + delete[] buf; +} + + +string StringPrintf(const char* format, ...) { + va_list ap; + va_start(ap, format); + string result; + StringAppendV(&result, format, ap); + va_end(ap); + return result; +} + +const string& SStringPrintf(string* dst, const char* format, ...) { + va_list ap; + va_start(ap, format); + dst->clear(); + StringAppendV(dst, format, ap); + va_end(ap); + return *dst; +} + +void StringAppendF(string* dst, const char* format, ...) { + va_list ap; + va_start(ap, format); + StringAppendV(dst, format, ap); + va_end(ap); +} + +// Max arguments supported by StringPrintVector +const int kStringPrintfVectorMaxArgs = 32; + +// An empty block of zero for filler arguments. This is const so that if +// printf tries to write to it (via %n) then the program gets a SIGSEGV +// and we can fix the problem or protect against an attack. +static const char string_printf_empty_block[256] = { '\0' }; + +string StringPrintfVector(const char* format, const std::vector& v) { + GOOGLE_CHECK_LE(v.size(), kStringPrintfVectorMaxArgs) + << "StringPrintfVector currently only supports up to " + << kStringPrintfVectorMaxArgs << " arguments. " + << "Feel free to add support for more if you need it."; + + // Add filler arguments so that bogus format+args have a harder time + // crashing the program, corrupting the program (%n), + // or displaying random chunks of memory to users. + + const char* cstr[kStringPrintfVectorMaxArgs]; + for (int i = 0; i < v.size(); ++i) { + cstr[i] = v[i].c_str(); + } + for (int i = v.size(); i < GOOGLE_ARRAYSIZE(cstr); ++i) { + cstr[i] = &string_printf_empty_block[0]; + } + + // I do not know any way to pass kStringPrintfVectorMaxArgs arguments, + // or any way to build a va_list by hand, or any API for printf + // that accepts an array of arguments. The best I can do is stick + // this COMPILE_ASSERT right next to the actual statement. 
+ + GOOGLE_COMPILE_ASSERT(kStringPrintfVectorMaxArgs == 32, arg_count_mismatch); + return StringPrintf(format, + cstr[0], cstr[1], cstr[2], cstr[3], cstr[4], + cstr[5], cstr[6], cstr[7], cstr[8], cstr[9], + cstr[10], cstr[11], cstr[12], cstr[13], cstr[14], + cstr[15], cstr[16], cstr[17], cstr[18], cstr[19], + cstr[20], cstr[21], cstr[22], cstr[23], cstr[24], + cstr[25], cstr[26], cstr[27], cstr[28], cstr[29], + cstr[30], cstr[31]); +} +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/structurally_valid.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/structurally_valid.cc new file mode 100644 index 0000000000000000000000000000000000000000..b22396829caa3ececbd1a6cee69979ab352c6793 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/structurally_valid.cc @@ -0,0 +1,617 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: jrm@google.com (Jim Meehan) + +#include + +#include + +namespace google { +namespace protobuf { +namespace internal { + +// These four-byte entries compactly encode how many bytes 0..255 to delete +// in making a string replacement, how many bytes to add 0..255, and the offset +// 0..64k-1 of the replacement string in remap_string. +struct RemapEntry { + uint8 delete_bytes; + uint8 add_bytes; + uint16 bytes_offset; +}; + +// Exit type codes for state tables. All but the first get stuffed into +// signed one-byte entries. The first is only generated by executable code. +// To distinguish from next-state entries, these must be contiguous and +// all <= kExitNone +typedef enum { + kExitDstSpaceFull = 239, + kExitIllegalStructure, // 240 + kExitOK, // 241 + kExitReject, // ... 
+ kExitReplace1, + kExitReplace2, + kExitReplace3, + kExitReplace21, + kExitReplace31, + kExitReplace32, + kExitReplaceOffset1, + kExitReplaceOffset2, + kExitReplace1S0, + kExitSpecial, + kExitDoAgain, + kExitRejectAlt, + kExitNone // 255 +} ExitReason; + + +// This struct represents one entire state table. The three initialized byte +// areas are state_table, remap_base, and remap_string. state0 and state0_size +// give the byte offset and length within state_table of the initial state -- +// table lookups are expected to start and end in this state, but for +// truncated UTF-8 strings, may end in a different state. These allow a quick +// test for that condition. entry_shift is 8 for tables subscripted by a full +// byte value and 6 for space-optimized tables subscripted by only six +// significant bits in UTF-8 continuation bytes. +typedef struct { + const uint32 state0; + const uint32 state0_size; + const uint32 total_size; + const int max_expand; + const int entry_shift; + const int bytes_per_entry; + const uint32 losub; + const uint32 hiadd; + const uint8* state_table; + const RemapEntry* remap_base; + const uint8* remap_string; + const uint8* fast_state; +} UTF8StateMachineObj; + +typedef UTF8StateMachineObj UTF8ScanObj; + +#define X__ (kExitIllegalStructure) +#define RJ_ (kExitReject) +#define S1_ (kExitReplace1) +#define S2_ (kExitReplace2) +#define S3_ (kExitReplace3) +#define S21 (kExitReplace21) +#define S31 (kExitReplace31) +#define S32 (kExitReplace32) +#define T1_ (kExitReplaceOffset1) +#define T2_ (kExitReplaceOffset2) +#define S11 (kExitReplace1S0) +#define SP_ (kExitSpecial) +#define D__ (kExitDoAgain) +#define RJA (kExitRejectAlt) + +// Entire table has 9 state blocks of 256 entries each +static const unsigned int utf8acceptnonsurrogates_STATE0 = 0; // state[0] +static const unsigned int utf8acceptnonsurrogates_STATE0_SIZE = 256; // =[1] +static const unsigned int utf8acceptnonsurrogates_TOTAL_SIZE = 2304; +static const unsigned int utf8acceptnonsurrogates_MAX_EXPAND_X4 = 0; +static const unsigned int utf8acceptnonsurrogates_SHIFT = 8; +static const unsigned int utf8acceptnonsurrogates_BYTES = 1; +static const unsigned int utf8acceptnonsurrogates_LOSUB = 0x20202020; +static const unsigned int utf8acceptnonsurrogates_HIADD = 0x00000000; + +static const uint8 utf8acceptnonsurrogates[] = { +// state[0] 0x000000 Byte 1 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 3, + 4, 5, 5, 5, 6, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[1] 0x000080 Byte 2 of 2 +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, 
X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[2] 0x000000 Byte 2 of 3 +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[3] 0x001000 Byte 2 of 3 +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[4] 0x000000 Byte 2 of 4 +X__, X__, X__, X__, X__, X__, X__, 
X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[5] 0x040000 Byte 2 of 4 +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[6] 0x100000 Byte 2 of 4 +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, 
X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[7] 0x00d000 Byte 2 of 3 +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +// state[8] 0x00d800 Byte 3 of 3 +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, + +RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, +RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, +RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, +RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, RJ_, + +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, X__, +}; + +// Remap base[0] = (del, add, string_offset) +static const RemapEntry utf8acceptnonsurrogates_remap_base[] = { +{0, 0, 0} }; + +// Remap string[0] +static const unsigned char utf8acceptnonsurrogates_remap_string[] = { +0 }; + +static const unsigned char utf8acceptnonsurrogates_fast[256] = { +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+
+static const UTF8ScanObj utf8acceptnonsurrogates_obj = {
+  utf8acceptnonsurrogates_STATE0,
+  utf8acceptnonsurrogates_STATE0_SIZE,
+  utf8acceptnonsurrogates_TOTAL_SIZE,
+  utf8acceptnonsurrogates_MAX_EXPAND_X4,
+  utf8acceptnonsurrogates_SHIFT,
+  utf8acceptnonsurrogates_BYTES,
+  utf8acceptnonsurrogates_LOSUB,
+  utf8acceptnonsurrogates_HIADD,
+  utf8acceptnonsurrogates,
+  utf8acceptnonsurrogates_remap_base,
+  utf8acceptnonsurrogates_remap_string,
+  utf8acceptnonsurrogates_fast
+};
+
+
+#undef X__
+#undef RJ_
+#undef S1_
+#undef S2_
+#undef S3_
+#undef S21
+#undef S31
+#undef S32
+#undef T1_
+#undef T2_
+#undef S11
+#undef SP_
+#undef D__
+#undef RJA
+
+// Return true if current Tbl pointer is within state0 range
+// Note that unsigned compare checks both ends of range simultaneously
+static inline bool InStateZero(const UTF8ScanObj* st, const uint8* Tbl) {
+  const uint8* Tbl0 = &st->state_table[st->state0];
+  return (static_cast<uint32>(Tbl - Tbl0) < st->state0_size);
+}
+
+// Scan a UTF-8 string based on state table.
+// Always scan complete UTF-8 characters
+// Set number of bytes scanned. Return reason for exiting
+int UTF8GenericScan(const UTF8ScanObj* st,
+                    const char * str,
+                    int str_length,
+                    int* bytes_consumed) {
+  *bytes_consumed = 0;
+  if (str_length == 0) return kExitOK;
+
+  int eshift = st->entry_shift;
+  const uint8* isrc = reinterpret_cast<const uint8*>(str);
+  const uint8* src = isrc;
+  const uint8* srclimit = isrc + str_length;
+  const uint8* srclimit8 = srclimit - 7;
+  const uint8* Tbl_0 = &st->state_table[st->state0];
+
+ DoAgain:
+  // Do state-table scan
+  int e = 0;
+  uint8 c;
+  const uint8* Tbl2 = &st->fast_state[0];
+  const uint32 losub = st->losub;
+  const uint32 hiadd = st->hiadd;
+  // Check initial few bytes one at a time until 8-byte aligned
+  //----------------------------
+  while ((((uintptr_t)src & 0x07) != 0) &&
+         (src < srclimit) &&
+         Tbl2[src[0]] == 0) {
+    src++;
+  }
+  if (((uintptr_t)src & 0x07) == 0) {
+    // Do fast for groups of 8 identity bytes.
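+    // The range check below is a SWAR (SIMD-within-a-register) test: with
+    // losub = 0x20202020 and hiadd = 0, (word - losub) sets a lane's high bit
+    // for any byte below 0x20, and (word + hiadd) keeps the high bit set for
+    // any byte at or above 0x80, so masking the OR of both with 0x80808080
+    // yields zero exactly when all eight bytes are one-byte printable ASCII
+    // in [0x20, 0x80).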
+    // This covers a lot of 7-bit ASCII ~8x faster than the 1-byte loop,
+    // including slowing slightly on cr/lf/ht
+    //----------------------------
+    while (src < srclimit8) {
+      uint32 s0123 = (reinterpret_cast<const uint32 *>(src))[0];
+      uint32 s4567 = (reinterpret_cast<const uint32 *>(src))[1];
+      src += 8;
+      // This is a fast range check for all bytes in [lowsub..0x80-hiadd)
+      uint32 temp = (s0123 - losub) | (s0123 + hiadd) |
+                    (s4567 - losub) | (s4567 + hiadd);
+      if ((temp & 0x80808080) != 0) {
+        // We typically end up here on cr/lf/ht; src was incremented
+        int e0123 = (Tbl2[src[-8]] | Tbl2[src[-7]]) |
+                    (Tbl2[src[-6]] | Tbl2[src[-5]]);
+        if (e0123 != 0) {
+          src -= 8;
+          break;
+        }    // Exit on Non-interchange
+        e0123 = (Tbl2[src[-4]] | Tbl2[src[-3]]) |
+                (Tbl2[src[-2]] | Tbl2[src[-1]]);
+        if (e0123 != 0) {
+          src -= 4;
+          break;
+        }    // Exit on Non-interchange
+        // Else OK, go around again
+      }
+    }
+  }
+  //----------------------------
+
+  // Byte-at-a-time scan
+  //----------------------------
+  const uint8* Tbl = Tbl_0;
+  while (src < srclimit) {
+    c = *src;
+    e = Tbl[c];
+    src++;
+    if (e >= kExitIllegalStructure) {break;}
+    Tbl = &Tbl_0[e << eshift];
+  }
+  //----------------------------
+
+
+  // Exit possibilities:
+  //   Some exit code, !state0, back up over last char
+  //   Some exit code, state0, back up one byte exactly
+  //   source consumed, !state0, back up over partial char
+  //   source consumed, state0, exit OK
+  // For illegal byte in state0, avoid backing up over PREVIOUS char
+  // For truncated last char, back up to beginning of it
+
+  if (e >= kExitIllegalStructure) {
+    // Back up over exactly one byte of rejected/illegal UTF-8 character
+    src--;
+    // Back up more if needed
+    if (!InStateZero(st, Tbl)) {
+      do {
+        src--;
+      } while ((src > isrc) && ((src[0] & 0xc0) == 0x80));
+    }
+  } else if (!InStateZero(st, Tbl)) {
+    // Back up over truncated UTF-8 character
+    e = kExitIllegalStructure;
+    do {
+      src--;
+    } while ((src > isrc) && ((src[0] & 0xc0) == 0x80));
+  } else {
+    // Normal termination, source fully consumed
+    e = kExitOK;
+  }
+
+  if (e == kExitDoAgain) {
+    // Loop back up to the fast scan
+    goto DoAgain;
+  }
+
+  *bytes_consumed = src - isrc;
+  return e;
+}
+
+int UTF8GenericScanFastAscii(const UTF8ScanObj* st,
+                             const char * str,
+                             int str_length,
+                             int* bytes_consumed) {
+  *bytes_consumed = 0;
+  if (str_length == 0) return kExitOK;
+
+  const uint8* isrc = reinterpret_cast<const uint8*>(str);
+  const uint8* src = isrc;
+  const uint8* srclimit = isrc + str_length;
+  const uint8* srclimit8 = srclimit - 7;
+  int n;
+  int rest_consumed;
+  int exit_reason;
+  do {
+    // Check initial few bytes one at a time until 8-byte aligned
+    while ((((uintptr_t)src & 0x07) != 0) &&
+           (src < srclimit) && (src[0] < 0x80)) {
+      src++;
+    }
+    if (((uintptr_t)src & 0x07) == 0) {
+      while ((src < srclimit8) &&
+             (((reinterpret_cast<const uint32*>(src)[0] |
+                reinterpret_cast<const uint32*>(src)[1]) & 0x80808080) == 0)) {
+        src += 8;
+      }
+    }
+    while ((src < srclimit) && (src[0] < 0x80)) {
+      src++;
+    }
+    // Run state table on the rest
+    n = src - isrc;
+    exit_reason = UTF8GenericScan(st, str + n, str_length - n, &rest_consumed);
+    src += rest_consumed;
+  } while ( exit_reason == kExitDoAgain );
+
+  *bytes_consumed = src - isrc;
+  return exit_reason;
+}
+
+// Hack:  On some compilers the static tables are initialized at startup.
+//   We can't use them until they are initialized.  However, some Protocol
+//   Buffer parsing happens at static init time and may try to validate
+//   UTF-8 strings.
Since UTF-8 validation is only used for debugging +// anyway, we simply always return success if initialization hasn't +// occurred yet. +namespace { + +bool module_initialized_ = false; + +struct InitDetector { + InitDetector() { + module_initialized_ = true; + } +}; +InitDetector init_detector; + +} // namespace + +bool IsStructurallyValidUTF8(const char* buf, int len) { + if (!module_initialized_) return true; + + int bytes_consumed = 0; + UTF8GenericScanFastAscii(&utf8acceptnonsurrogates_obj, + buf, len, &bytes_consumed); + return (bytes_consumed == len); +} + +int UTF8SpnStructurallyValid(const StringPiece& str) { + if (!module_initialized_) return str.size(); + + int bytes_consumed = 0; + UTF8GenericScanFastAscii(&utf8acceptnonsurrogates_obj, + str.data(), str.size(), &bytes_consumed); + return bytes_consumed; +} + +// Coerce UTF-8 byte string in src_str to be +// a structurally-valid equal-length string by selectively +// overwriting illegal bytes with replace_char (typically blank). +// replace_char must be legal printable 7-bit Ascii 0x20..0x7e. +// src_str is read-only. If any overwriting is needed, a modified byte string +// is created in idst, length isrclen. +// +// Returns pointer to output buffer, isrc if no changes were made, +// or idst if some bytes were changed. +// +// Fast case: all is structurally valid and no byte copying is done. +// +char* UTF8CoerceToStructurallyValid(const StringPiece& src_str, + char* idst, + const char replace_char) { + const char* isrc = src_str.data(); + const int len = src_str.length(); + int n = UTF8SpnStructurallyValid(src_str); + if (n == len) { // Normal case -- all is cool, return + return const_cast(isrc); + } else { // Unusual case -- copy w/o bad bytes + const char* src = isrc; + const char* srclimit = isrc + len; + char* dst = idst; + memmove(dst, src, n); // Copy initial good chunk + src += n; + dst += n; + while (src < srclimit) { // src points to bogus byte or is off the end + dst[0] = replace_char; // replace one bad byte + src++; + dst++; + StringPiece str2(src, srclimit - src); + n = UTF8SpnStructurallyValid(str2); // scan the remainder + memmove(dst, src, n); // copy next good chunk + src += n; + dst += n; + } + } + return idst; +} + +} // namespace internal +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/strutil.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/strutil.cc new file mode 100644 index 0000000000000000000000000000000000000000..552d416f6342e2ada1c179b521d783dd3f34e789 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/strutil.cc @@ -0,0 +1,2304 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// from google3/strings/strutil.cc + +#include +#include + +#include +#include // FLT_DIG and DBL_DIG +#include +#include +#include +#include + +#include + +#ifdef _WIN32 +// MSVC has only _snprintf, not snprintf. +// +// MinGW has both snprintf and _snprintf, but they appear to be different +// functions. The former is buggy. When invoked like so: +// char buffer[32]; +// snprintf(buffer, 32, "%.*g\n", FLT_DIG, 1.23e10f); +// it prints "1.23000e+10". This is plainly wrong: %g should never print +// trailing zeros after the decimal point. For some reason this bug only +// occurs with some input values, not all. In any case, _snprintf does the +// right thing, so we use it. +#define snprintf _snprintf +#endif + +namespace google { +namespace protobuf { + +// These are defined as macros on some platforms. #undef them so that we can +// redefine them. +#undef isxdigit +#undef isprint + +// The definitions of these in ctype.h change based on locale. Since our +// string manipulation is all in relation to the protocol buffer and C++ +// languages, we always want to use the C locale. So, we re-define these +// exactly as we want them. +inline bool isxdigit(char c) { + return ('0' <= c && c <= '9') || + ('a' <= c && c <= 'f') || + ('A' <= c && c <= 'F'); +} + +inline bool isprint(char c) { + return c >= 0x20 && c <= 0x7E; +} + +// ---------------------------------------------------------------------- +// StripString +// Replaces any occurrence of the character 'remove' (or the characters +// in 'remove') with the character 'replacewith'. +// ---------------------------------------------------------------------- +void StripString(string* s, const char* remove, char replacewith) { + const char * str_start = s->c_str(); + const char * str = str_start; + for (str = strpbrk(str, remove); + str != NULL; + str = strpbrk(str + 1, remove)) { + (*s)[str - str_start] = replacewith; + } +} + +// ---------------------------------------------------------------------- +// ReplaceCharacters +// Replaces any occurrence of the character 'remove' (or the characters +// in 'remove') with the character 'replacewith'. +// ---------------------------------------------------------------------- +void ReplaceCharacters(string *s, const char *remove, char replacewith) { + const char *str_start = s->c_str(); + const char *str = str_start; + for (str = strpbrk(str, remove); + str != NULL; + str = strpbrk(str + 1, remove)) { + (*s)[str - str_start] = replacewith; + } +} + +void StripWhitespace(string* str) { + int str_length = str->length(); + + // Strip off leading whitespace. 
+  int first = 0;
+  while (first < str_length && ascii_isspace(str->at(first))) {
+    ++first;
+  }
+  // If entire string is white space.
+  if (first == str_length) {
+    str->clear();
+    return;
+  }
+  if (first > 0) {
+    str->erase(0, first);
+    str_length -= first;
+  }
+
+  // Strip off trailing whitespace.
+  int last = str_length - 1;
+  while (last >= 0 && ascii_isspace(str->at(last))) {
+    --last;
+  }
+  if (last != (str_length - 1) && last >= 0) {
+    str->erase(last + 1, string::npos);
+  }
+}
+
+// ----------------------------------------------------------------------
+// StringReplace()
+//    Replace the "old" pattern with the "new" pattern in a string,
+//    and append the result to "res".  If replace_all is false,
+//    it only replaces the first instance of "old."
+// ----------------------------------------------------------------------
+
+void StringReplace(const string& s, const string& oldsub,
+                   const string& newsub, bool replace_all,
+                   string* res) {
+  if (oldsub.empty()) {
+    res->append(s);  // if empty, append the given string.
+    return;
+  }
+
+  string::size_type start_pos = 0;
+  string::size_type pos;
+  do {
+    pos = s.find(oldsub, start_pos);
+    if (pos == string::npos) {
+      break;
+    }
+    res->append(s, start_pos, pos - start_pos);
+    res->append(newsub);
+    start_pos = pos + oldsub.size();  // start searching again after the "old"
+  } while (replace_all);
+  res->append(s, start_pos, s.length() - start_pos);
+}
+
+// ----------------------------------------------------------------------
+// StringReplace()
+//    Give me a string and two patterns "old" and "new", and I replace
+//    the first instance of "old" in the string with "new", if it
+//    exists.  If "global" is true, call this repeatedly until it
+//    fails.  RETURN a new string, regardless of whether the replacement
+//    happened or not.
+// ----------------------------------------------------------------------
+
+string StringReplace(const string& s, const string& oldsub,
+                     const string& newsub, bool replace_all) {
+  string ret;
+  StringReplace(s, oldsub, newsub, replace_all, &ret);
+  return ret;
+}
+
+// ----------------------------------------------------------------------
+// SplitStringUsing()
+//    Split a string using a character delimiter. Append the components
+//    to 'result'.
+//
+// Note: For multi-character delimiters, this routine will split on *ANY* of
+// the characters in the string, not the entire string as a single delimiter.
+// ----------------------------------------------------------------------
+template <typename ITR>
+static inline
+void SplitStringToIteratorUsing(const string& full,
+                                const char* delim,
+                                ITR& result) {
+  // Optimize the common case where delim is a single character.
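+  // For example, splitting "a,,b" on "," emits "a" and "b": runs of the
+  // delimiter are skipped rather than producing empty components (see
+  // SplitStringToIteratorAllowEmpty below for the variant that keeps them).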
+  if (delim[0] != '\0' && delim[1] == '\0') {
+    char c = delim[0];
+    const char* p = full.data();
+    const char* end = p + full.size();
+    while (p != end) {
+      if (*p == c) {
+        ++p;
+      } else {
+        const char* start = p;
+        while (++p != end && *p != c);
+        *result++ = string(start, p - start);
+      }
+    }
+    return;
+  }
+
+  string::size_type begin_index, end_index;
+  begin_index = full.find_first_not_of(delim);
+  while (begin_index != string::npos) {
+    end_index = full.find_first_of(delim, begin_index);
+    if (end_index == string::npos) {
+      *result++ = full.substr(begin_index);
+      return;
+    }
+    *result++ = full.substr(begin_index, (end_index - begin_index));
+    begin_index = full.find_first_not_of(delim, end_index);
+  }
+}
+
+void SplitStringUsing(const string& full,
+                      const char* delim,
+                      std::vector<string>* result) {
+  std::back_insert_iterator< std::vector<string> > it(*result);
+  SplitStringToIteratorUsing(full, delim, it);
+}
+
+// Split a string using a character delimiter. Append the components
+// to 'result'.  If there are consecutive delimiters, this function
+// will return corresponding empty strings. The string is split into
+// at most the specified number of pieces greedily. This means that the
+// last piece may possibly be split further. To split into as many pieces
+// as possible, specify 0 as the number of pieces.
+//
+// If "full" is the empty string, yields an empty string as the only value.
+//
+// If "pieces" is negative for some reason, it returns the whole string
+// ----------------------------------------------------------------------
+template <typename StringType, typename ITR>
+static inline
+void SplitStringToIteratorAllowEmpty(const StringType& full,
+                                     const char* delim,
+                                     int pieces,
+                                     ITR& result) {
+  string::size_type begin_index, end_index;
+  begin_index = 0;
+
+  for (int i = 0; (i < pieces-1) || (pieces == 0); i++) {
+    end_index = full.find_first_of(delim, begin_index);
+    if (end_index == string::npos) {
+      *result++ = full.substr(begin_index);
+      return;
+    }
+    *result++ = full.substr(begin_index, (end_index - begin_index));
+    begin_index = end_index + 1;
+  }
+  *result++ = full.substr(begin_index);
+}
+
+void SplitStringAllowEmpty(const string& full, const char* delim,
+                           std::vector<string>* result) {
+  std::back_insert_iterator<std::vector<string> > it(*result);
+  SplitStringToIteratorAllowEmpty(full, delim, 0, it);
+}
+
+// ----------------------------------------------------------------------
+// JoinStrings()
+//    This merges a vector of string components with delim inserted
+//    as separators between components.
+//
+// ----------------------------------------------------------------------
+template <typename ITERATOR>
+static void JoinStringsIterator(const ITERATOR& start,
+                                const ITERATOR& end,
+                                const char* delim,
+                                string* result) {
+  GOOGLE_CHECK(result != NULL);
+  result->clear();
+  int delim_length = strlen(delim);
+
+  // Precompute resulting length so we can reserve() memory in one shot.
+  int length = 0;
+  for (ITERATOR iter = start; iter != end; ++iter) {
+    if (iter != start) {
+      length += delim_length;
+    }
+    length += iter->size();
+  }
+  result->reserve(length);
+
+  // Now combine everything.
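+  // The reserve() above guarantees this second pass appends without
+  // reallocating.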
+ for (ITERATOR iter = start; iter != end; ++iter) { + if (iter != start) { + result->append(delim, delim_length); + } + result->append(iter->data(), iter->size()); + } +} + +void JoinStrings(const std::vector& components, + const char* delim, + string * result) { + JoinStringsIterator(components.begin(), components.end(), delim, result); +} + +// ---------------------------------------------------------------------- +// UnescapeCEscapeSequences() +// This does all the unescaping that C does: \ooo, \r, \n, etc +// Returns length of resulting string. +// The implementation of \x parses any positive number of hex digits, +// but it is an error if the value requires more than 8 bits, and the +// result is truncated to 8 bits. +// +// The second call stores its errors in a supplied string vector. +// If the string vector pointer is NULL, it reports the errors with LOG(). +// ---------------------------------------------------------------------- + +#define IS_OCTAL_DIGIT(c) (((c) >= '0') && ((c) <= '7')) + +// Protocol buffers doesn't ever care about errors, but I don't want to remove +// the code. +#define LOG_STRING(LEVEL, VECTOR) GOOGLE_LOG_IF(LEVEL, false) + +int UnescapeCEscapeSequences(const char* source, char* dest) { + return UnescapeCEscapeSequences(source, dest, NULL); +} + +int UnescapeCEscapeSequences(const char* source, char* dest, + std::vector *errors) { + GOOGLE_DCHECK(errors == NULL) << "Error reporting not implemented."; + + char* d = dest; + const char* p = source; + + // Small optimization for case where source = dest and there's no escaping + while ( p == d && *p != '\0' && *p != '\\' ) + p++, d++; + + while (*p != '\0') { + if (*p != '\\') { + *d++ = *p++; + } else { + switch ( *++p ) { // skip past the '\\' + case '\0': + LOG_STRING(ERROR, errors) << "String cannot end with \\"; + *d = '\0'; + return d - dest; // we're done with p + case 'a': *d++ = '\a'; break; + case 'b': *d++ = '\b'; break; + case 'f': *d++ = '\f'; break; + case 'n': *d++ = '\n'; break; + case 'r': *d++ = '\r'; break; + case 't': *d++ = '\t'; break; + case 'v': *d++ = '\v'; break; + case '\\': *d++ = '\\'; break; + case '?': *d++ = '\?'; break; // \? Who knew? + case '\'': *d++ = '\''; break; + case '"': *d++ = '\"'; break; + case '0': case '1': case '2': case '3': // octal digit: 1 to 3 digits + case '4': case '5': case '6': case '7': { + char ch = *p - '0'; + if ( IS_OCTAL_DIGIT(p[1]) ) + ch = ch * 8 + *++p - '0'; + if ( IS_OCTAL_DIGIT(p[1]) ) // safe (and easy) to do this twice + ch = ch * 8 + *++p - '0'; // now points at last digit + *d++ = ch; + break; + } + case 'x': case 'X': { + if (!isxdigit(p[1])) { + if (p[1] == '\0') { + LOG_STRING(ERROR, errors) << "String cannot end with \\x"; + } else { + LOG_STRING(ERROR, errors) << + "\\x cannot be followed by non-hex digit: \\" << *p << p[1]; + } + break; + } + unsigned int ch = 0; + const char *hex_start = p; + while (isxdigit(p[1])) // arbitrarily many hex digits + ch = (ch << 4) + hex_digit_to_int(*++p); + if (ch > 0xFF) + LOG_STRING(ERROR, errors) << "Value of " << + "\\" << string(hex_start, p+1-hex_start) << " exceeds 8 bits"; + *d++ = ch; + break; + } +#if 0 // TODO(kenton): Support \u and \U? Requires runetochar(). + case 'u': { + // \uhhhh => convert 4 hex digits to UTF-8 + char32 rune = 0; + const char *hex_start = p; + for (int i = 0; i < 4; ++i) { + if (isxdigit(p[1])) { // Look one char ahead. + rune = (rune << 4) + hex_digit_to_int(*++p); // Advance p. 
+ } else { + LOG_STRING(ERROR, errors) + << "\\u must be followed by 4 hex digits: \\" + << string(hex_start, p+1-hex_start); + break; + } + } + d += runetochar(d, &rune); + break; + } + case 'U': { + // \Uhhhhhhhh => convert 8 hex digits to UTF-8 + char32 rune = 0; + const char *hex_start = p; + for (int i = 0; i < 8; ++i) { + if (isxdigit(p[1])) { // Look one char ahead. + // Don't change rune until we're sure this + // is within the Unicode limit, but do advance p. + char32 newrune = (rune << 4) + hex_digit_to_int(*++p); + if (newrune > 0x10FFFF) { + LOG_STRING(ERROR, errors) + << "Value of \\" + << string(hex_start, p + 1 - hex_start) + << " exceeds Unicode limit (0x10FFFF)"; + break; + } else { + rune = newrune; + } + } else { + LOG_STRING(ERROR, errors) + << "\\U must be followed by 8 hex digits: \\" + << string(hex_start, p+1-hex_start); + break; + } + } + d += runetochar(d, &rune); + break; + } +#endif + default: + LOG_STRING(ERROR, errors) << "Unknown escape sequence: \\" << *p; + } + p++; // read past letter we escaped + } + } + *d = '\0'; + return d - dest; +} + +// ---------------------------------------------------------------------- +// UnescapeCEscapeString() +// This does the same thing as UnescapeCEscapeSequences, but creates +// a new string. The caller does not need to worry about allocating +// a dest buffer. This should be used for non performance critical +// tasks such as printing debug messages. It is safe for src and dest +// to be the same. +// +// The second call stores its errors in a supplied string vector. +// If the string vector pointer is NULL, it reports the errors with LOG(). +// +// In the first and second calls, the length of dest is returned. In the +// the third call, the new string is returned. +// ---------------------------------------------------------------------- +int UnescapeCEscapeString(const string& src, string* dest) { + return UnescapeCEscapeString(src, dest, NULL); +} + +int UnescapeCEscapeString(const string& src, string* dest, + std::vector *errors) { + std::unique_ptr unescaped(new char[src.size() + 1]); + int len = UnescapeCEscapeSequences(src.c_str(), unescaped.get(), errors); + GOOGLE_CHECK(dest); + dest->assign(unescaped.get(), len); + return len; +} + +string UnescapeCEscapeString(const string& src) { + std::unique_ptr unescaped(new char[src.size() + 1]); + int len = UnescapeCEscapeSequences(src.c_str(), unescaped.get(), NULL); + return string(unescaped.get(), len); +} + +// ---------------------------------------------------------------------- +// CEscapeString() +// CHexEscapeString() +// Copies 'src' to 'dest', escaping dangerous characters using +// C-style escape sequences. This is very useful for preparing query +// flags. 'src' and 'dest' should not overlap. The 'Hex' version uses +// hexadecimal rather than octal sequences. +// Returns the number of bytes written to 'dest' (not including the \0) +// or -1 if there was insufficient space. +// +// Currently only \n, \r, \t, ", ', \ and !isprint() chars are escaped. 
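+// For example, CEscape turns a 0x01 byte into the four characters "\001",
+// while CHexEscape produces "\x01"; both render a newline as the two
+// characters backslash-n, since named escapes take precedence over the
+// numeric forms.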
+// ---------------------------------------------------------------------- +int CEscapeInternal(const char* src, int src_len, char* dest, + int dest_len, bool use_hex, bool utf8_safe) { + const char* src_end = src + src_len; + int used = 0; + bool last_hex_escape = false; // true if last output char was \xNN + + for (; src < src_end; src++) { + if (dest_len - used < 2) // Need space for two letter escape + return -1; + + bool is_hex_escape = false; + switch (*src) { + case '\n': dest[used++] = '\\'; dest[used++] = 'n'; break; + case '\r': dest[used++] = '\\'; dest[used++] = 'r'; break; + case '\t': dest[used++] = '\\'; dest[used++] = 't'; break; + case '\"': dest[used++] = '\\'; dest[used++] = '\"'; break; + case '\'': dest[used++] = '\\'; dest[used++] = '\''; break; + case '\\': dest[used++] = '\\'; dest[used++] = '\\'; break; + default: + // Note that if we emit \xNN and the src character after that is a hex + // digit then that digit must be escaped too to prevent it being + // interpreted as part of the character code by C. + if ((!utf8_safe || static_cast(*src) < 0x80) && + (!isprint(*src) || + (last_hex_escape && isxdigit(*src)))) { + if (dest_len - used < 4) // need space for 4 letter escape + return -1; + sprintf(dest + used, (use_hex ? "\\x%02x" : "\\%03o"), + static_cast(*src)); + is_hex_escape = use_hex; + used += 4; + } else { + dest[used++] = *src; break; + } + } + last_hex_escape = is_hex_escape; + } + + if (dest_len - used < 1) // make sure that there is room for \0 + return -1; + + dest[used] = '\0'; // doesn't count towards return value though + return used; +} + +// Calculates the length of the C-style escaped version of 'src'. +// Assumes that non-printable characters are escaped using octal sequences, and +// that UTF-8 bytes are not handled specially. +static inline size_t CEscapedLength(StringPiece src) { + static char c_escaped_len[256] = { + 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, 2, 4, 4, // \t, \n, \r + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // ", ' + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // '0'..'9' + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 'A'..'O' + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, // 'P'..'Z', '\' + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 'a'..'o' + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, // 'p'..'z', DEL + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + }; + + size_t escaped_len = 0; + for (int i = 0; i < src.size(); ++i) { + unsigned char c = static_cast(src[i]); + escaped_len += c_escaped_len[c]; + } + return escaped_len; +} + +// ---------------------------------------------------------------------- +// Escapes 'src' using C-style escape sequences, and appends the escaped string +// to 'dest'. This version is faster than calling CEscapeInternal as it computes +// the required space using a lookup table, and also does not do any special +// handling for Hex or UTF-8 characters. 
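+// Per the table above, each byte costs 1 (copied as-is), 2 (a named escape
+// such as \n or \\), or 4 (a three-digit octal escape) output bytes, so the
+// exact result size is known before anything is written.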
+// ---------------------------------------------------------------------- +void CEscapeAndAppend(StringPiece src, string* dest) { + size_t escaped_len = CEscapedLength(src); + if (escaped_len == src.size()) { + dest->append(src.data(), src.size()); + return; + } + + size_t cur_dest_len = dest->size(); + dest->resize(cur_dest_len + escaped_len); + char* append_ptr = &(*dest)[cur_dest_len]; + + for (int i = 0; i < src.size(); ++i) { + unsigned char c = static_cast(src[i]); + switch (c) { + case '\n': *append_ptr++ = '\\'; *append_ptr++ = 'n'; break; + case '\r': *append_ptr++ = '\\'; *append_ptr++ = 'r'; break; + case '\t': *append_ptr++ = '\\'; *append_ptr++ = 't'; break; + case '\"': *append_ptr++ = '\\'; *append_ptr++ = '\"'; break; + case '\'': *append_ptr++ = '\\'; *append_ptr++ = '\''; break; + case '\\': *append_ptr++ = '\\'; *append_ptr++ = '\\'; break; + default: + if (!isprint(c)) { + *append_ptr++ = '\\'; + *append_ptr++ = '0' + c / 64; + *append_ptr++ = '0' + (c % 64) / 8; + *append_ptr++ = '0' + c % 8; + } else { + *append_ptr++ = c; + } + break; + } + } +} + +string CEscape(const string& src) { + string dest; + CEscapeAndAppend(src, &dest); + return dest; +} + +namespace strings { + +string Utf8SafeCEscape(const string& src) { + const int dest_length = src.size() * 4 + 1; // Maximum possible expansion + std::unique_ptr dest(new char[dest_length]); + const int len = CEscapeInternal(src.data(), src.size(), + dest.get(), dest_length, false, true); + GOOGLE_DCHECK_GE(len, 0); + return string(dest.get(), len); +} + +string CHexEscape(const string& src) { + const int dest_length = src.size() * 4 + 1; // Maximum possible expansion + std::unique_ptr dest(new char[dest_length]); + const int len = CEscapeInternal(src.data(), src.size(), + dest.get(), dest_length, true, false); + GOOGLE_DCHECK_GE(len, 0); + return string(dest.get(), len); +} + +} // namespace strings + +// ---------------------------------------------------------------------- +// strto32_adaptor() +// strtou32_adaptor() +// Implementation of strto[u]l replacements that have identical +// overflow and underflow characteristics for both ILP-32 and LP-64 +// platforms, including errno preservation in error-free calls. +// ---------------------------------------------------------------------- + +int32 strto32_adaptor(const char *nptr, char **endptr, int base) { + const int saved_errno = errno; + errno = 0; + const long result = strtol(nptr, endptr, base); + if (errno == ERANGE && result == LONG_MIN) { + return kint32min; + } else if (errno == ERANGE && result == LONG_MAX) { + return kint32max; + } else if (errno == 0 && result < kint32min) { + errno = ERANGE; + return kint32min; + } else if (errno == 0 && result > kint32max) { + errno = ERANGE; + return kint32max; + } + if (errno == 0) + errno = saved_errno; + return static_cast(result); +} + +uint32 strtou32_adaptor(const char *nptr, char **endptr, int base) { + const int saved_errno = errno; + errno = 0; + const unsigned long result = strtoul(nptr, endptr, base); + if (errno == ERANGE && result == ULONG_MAX) { + return kuint32max; + } else if (errno == 0 && result > kuint32max) { + errno = ERANGE; + return kuint32max; + } + if (errno == 0) + errno = saved_errno; + return static_cast(result); +} + +inline bool safe_parse_sign(string* text /*inout*/, + bool* negative_ptr /*output*/) { + const char* start = text->data(); + const char* end = start + text->size(); + + // Consume whitespace. 
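+  // Only ASCII space characters are trimmed here; leading tabs or newlines
+  // are not accepted and cause the parse to fail in the digit loop.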
+  while (start < end && (start[0] == ' ')) {
+    ++start;
+  }
+  while (start < end && (end[-1] == ' ')) {
+    --end;
+  }
+  if (start >= end) {
+    return false;
+  }
+
+  // Consume sign.
+  *negative_ptr = (start[0] == '-');
+  if (*negative_ptr || start[0] == '+') {
+    ++start;
+    if (start >= end) {
+      return false;
+    }
+  }
+  *text = text->substr(start - text->data(), end - start);
+  return true;
+}
+
+template <typename IntType>
+bool safe_parse_positive_int(
+    string text, IntType* value_p) {
+  int base = 10;
+  IntType value = 0;
+  const IntType vmax = std::numeric_limits<IntType>::max();
+  assert(vmax > 0);
+  assert(vmax >= base);
+  const IntType vmax_over_base = vmax / base;
+  const char* start = text.data();
+  const char* end = start + text.size();
+  // loop over digits
+  for (; start < end; ++start) {
+    unsigned char c = static_cast<unsigned char>(start[0]);
+    int digit = c - '0';
+    if (digit >= base || digit < 0) {
+      *value_p = value;
+      return false;
+    }
+    if (value > vmax_over_base) {
+      *value_p = vmax;
+      return false;
+    }
+    value *= base;
+    if (value > vmax - digit) {
+      *value_p = vmax;
+      return false;
+    }
+    value += digit;
+  }
+  *value_p = value;
+  return true;
+}
+
+template <typename IntType>
+bool safe_parse_negative_int(
+    const string& text, IntType* value_p) {
+  int base = 10;
+  IntType value = 0;
+  const IntType vmin = std::numeric_limits<IntType>::min();
+  assert(vmin < 0);
+  assert(vmin <= 0 - base);
+  IntType vmin_over_base = vmin / base;
+  // 2003 c++ standard [expr.mul]
+  // "... the sign of the remainder is implementation-defined."
+  // Although (vmin/base)*base + vmin%base is always vmin.
+  // 2011 c++ standard tightens the spec but we cannot rely on it.
+  if (vmin % base > 0) {
+    vmin_over_base += 1;
+  }
+  const char* start = text.data();
+  const char* end = start + text.size();
+  // loop over digits
+  for (; start < end; ++start) {
+    unsigned char c = static_cast<unsigned char>(start[0]);
+    int digit = c - '0';
+    if (digit >= base || digit < 0) {
+      *value_p = value;
+      return false;
+    }
+    if (value < vmin_over_base) {
+      *value_p = vmin;
+      return false;
+    }
+    value *= base;
+    if (value < vmin + digit) {
+      *value_p = vmin;
+      return false;
+    }
+    value -= digit;
+  }
+  *value_p = value;
+  return true;
+}
+
+template <typename IntType>
+bool safe_int_internal(string text, IntType* value_p) {
+  *value_p = 0;
+  bool negative;
+  if (!safe_parse_sign(&text, &negative)) {
+    return false;
+  }
+  if (!negative) {
+    return safe_parse_positive_int(text, value_p);
+  } else {
+    return safe_parse_negative_int(text, value_p);
+  }
+}
+
+template <typename IntType>
+bool safe_uint_internal(string text, IntType* value_p) {
+  *value_p = 0;
+  bool negative;
+  if (!safe_parse_sign(&text, &negative) || negative) {
+    return false;
+  }
+  return safe_parse_positive_int(text, value_p);
+}
+
+// ----------------------------------------------------------------------
+// FastIntToBuffer()
+// FastInt64ToBuffer()
+// FastHexToBuffer()
+// FastHex64ToBuffer()
+// FastHex32ToBuffer()
+// ----------------------------------------------------------------------
+
+// Offset into buffer where FastInt64ToBuffer places the end of string
+// null character.  Also used by FastInt64ToBufferLeft.
+static const int kFastInt64ToBufferOffset = 21;
+
+char *FastInt64ToBuffer(int64 i, char* buffer) {
+  // We could collapse the positive and negative sections, but that
+  // would be slightly slower for positive numbers...
+  // 22 bytes is enough to store -2**64, -18446744073709551616.
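+  // Digits are emitted least-significant first while p walks backwards from
+  // the end of the buffer, so the result is returned as a pointer into the
+  // middle of 'buffer' rather than 'buffer' itself.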
+ char* p = buffer + kFastInt64ToBufferOffset; + *p-- = '\0'; + if (i >= 0) { + do { + *p-- = '0' + i % 10; + i /= 10; + } while (i > 0); + return p + 1; + } else { + // On different platforms, % and / have different behaviors for + // negative numbers, so we need to jump through hoops to make sure + // we don't divide negative numbers. + if (i > -10) { + i = -i; + *p-- = '0' + i; + *p = '-'; + return p; + } else { + // Make sure we aren't at MIN_INT, in which case we can't say i = -i + i = i + 10; + i = -i; + *p-- = '0' + i % 10; + // Undo what we did a moment ago + i = i / 10 + 1; + do { + *p-- = '0' + i % 10; + i /= 10; + } while (i > 0); + *p = '-'; + return p; + } + } +} + +// Offset into buffer where FastInt32ToBuffer places the end of string +// null character. Also used by FastInt32ToBufferLeft +static const int kFastInt32ToBufferOffset = 11; + +// Yes, this is a duplicate of FastInt64ToBuffer. But, we need this for the +// compiler to generate 32 bit arithmetic instructions. It's much faster, at +// least with 32 bit binaries. +char *FastInt32ToBuffer(int32 i, char* buffer) { + // We could collapse the positive and negative sections, but that + // would be slightly slower for positive numbers... + // 12 bytes is enough to store -2**32, -4294967296. + char* p = buffer + kFastInt32ToBufferOffset; + *p-- = '\0'; + if (i >= 0) { + do { + *p-- = '0' + i % 10; + i /= 10; + } while (i > 0); + return p + 1; + } else { + // On different platforms, % and / have different behaviors for + // negative numbers, so we need to jump through hoops to make sure + // we don't divide negative numbers. + if (i > -10) { + i = -i; + *p-- = '0' + i; + *p = '-'; + return p; + } else { + // Make sure we aren't at MIN_INT, in which case we can't say i = -i + i = i + 10; + i = -i; + *p-- = '0' + i % 10; + // Undo what we did a moment ago + i = i / 10 + 1; + do { + *p-- = '0' + i % 10; + i /= 10; + } while (i > 0); + *p = '-'; + return p; + } + } +} + +char *FastHexToBuffer(int i, char* buffer) { + GOOGLE_CHECK(i >= 0) << "FastHexToBuffer() wants non-negative integers, not " << i; + + static const char *hexdigits = "0123456789abcdef"; + char *p = buffer + 21; + *p-- = '\0'; + do { + *p-- = hexdigits[i & 15]; // mod by 16 + i >>= 4; // divide by 16 + } while (i > 0); + return p + 1; +} + +char *InternalFastHexToBuffer(uint64 value, char* buffer, int num_byte) { + static const char *hexdigits = "0123456789abcdef"; + buffer[num_byte] = '\0'; + for (int i = num_byte - 1; i >= 0; i--) { +#ifdef _M_X64 + // MSVC x64 platform has a bug optimizing the uint32(value) in the #else + // block. Given that the uint32 cast was to improve performance on 32-bit + // platforms, we use 64-bit '&' directly. + buffer[i] = hexdigits[value & 0xf]; +#else + buffer[i] = hexdigits[uint32(value) & 0xf]; +#endif + value >>= 4; + } + return buffer; +} + +char *FastHex64ToBuffer(uint64 value, char* buffer) { + return InternalFastHexToBuffer(value, buffer, 16); +} + +char *FastHex32ToBuffer(uint32 value, char* buffer) { + return InternalFastHexToBuffer(value, buffer, 8); +} + +// ---------------------------------------------------------------------- +// FastInt32ToBufferLeft() +// FastUInt32ToBufferLeft() +// FastInt64ToBufferLeft() +// FastUInt64ToBufferLeft() +// +// Like the Fast*ToBuffer() functions above, these are intended for speed. +// Unlike the Fast*ToBuffer() functions, however, these functions write +// their output to the beginning of the buffer (hence the name, as the +// output is left-aligned). 
The caller is responsible for ensuring that +// the buffer has enough space to hold the output. +// +// Returns a pointer to the end of the string (i.e. the null character +// terminating the string). +// ---------------------------------------------------------------------- + +static const char two_ASCII_digits[100][2] = { + {'0','0'}, {'0','1'}, {'0','2'}, {'0','3'}, {'0','4'}, + {'0','5'}, {'0','6'}, {'0','7'}, {'0','8'}, {'0','9'}, + {'1','0'}, {'1','1'}, {'1','2'}, {'1','3'}, {'1','4'}, + {'1','5'}, {'1','6'}, {'1','7'}, {'1','8'}, {'1','9'}, + {'2','0'}, {'2','1'}, {'2','2'}, {'2','3'}, {'2','4'}, + {'2','5'}, {'2','6'}, {'2','7'}, {'2','8'}, {'2','9'}, + {'3','0'}, {'3','1'}, {'3','2'}, {'3','3'}, {'3','4'}, + {'3','5'}, {'3','6'}, {'3','7'}, {'3','8'}, {'3','9'}, + {'4','0'}, {'4','1'}, {'4','2'}, {'4','3'}, {'4','4'}, + {'4','5'}, {'4','6'}, {'4','7'}, {'4','8'}, {'4','9'}, + {'5','0'}, {'5','1'}, {'5','2'}, {'5','3'}, {'5','4'}, + {'5','5'}, {'5','6'}, {'5','7'}, {'5','8'}, {'5','9'}, + {'6','0'}, {'6','1'}, {'6','2'}, {'6','3'}, {'6','4'}, + {'6','5'}, {'6','6'}, {'6','7'}, {'6','8'}, {'6','9'}, + {'7','0'}, {'7','1'}, {'7','2'}, {'7','3'}, {'7','4'}, + {'7','5'}, {'7','6'}, {'7','7'}, {'7','8'}, {'7','9'}, + {'8','0'}, {'8','1'}, {'8','2'}, {'8','3'}, {'8','4'}, + {'8','5'}, {'8','6'}, {'8','7'}, {'8','8'}, {'8','9'}, + {'9','0'}, {'9','1'}, {'9','2'}, {'9','3'}, {'9','4'}, + {'9','5'}, {'9','6'}, {'9','7'}, {'9','8'}, {'9','9'} +}; + +char* FastUInt32ToBufferLeft(uint32 u, char* buffer) { + uint32 digits; + const char *ASCII_digits = NULL; + // The idea of this implementation is to trim the number of divides to as few + // as possible by using multiplication and subtraction rather than mod (%), + // and by outputting two digits at a time rather than one. + // The huge-number case is first, in the hopes that the compiler will output + // that case in one branch-free block of code, and only output conditional + // branches into it from below. 
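+  // For example, u = 1234 is dispatched from the bottom of the function to
+  // the lt10_000 label below, which writes "12" and then "34" two digits at
+  // a time.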
+ if (u >= 1000000000) { // >= 1,000,000,000 + digits = u / 100000000; // 100,000,000 + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; +sublt100_000_000: + u -= digits * 100000000; // 100,000,000 +lt100_000_000: + digits = u / 1000000; // 1,000,000 + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; +sublt1_000_000: + u -= digits * 1000000; // 1,000,000 +lt1_000_000: + digits = u / 10000; // 10,000 + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; +sublt10_000: + u -= digits * 10000; // 10,000 +lt10_000: + digits = u / 100; + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; +sublt100: + u -= digits * 100; +lt100: + digits = u; + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; +done: + *buffer = 0; + return buffer; + } + + if (u < 100) { + digits = u; + if (u >= 10) goto lt100; + *buffer++ = '0' + digits; + goto done; + } + if (u < 10000) { // 10,000 + if (u >= 1000) goto lt10_000; + digits = u / 100; + *buffer++ = '0' + digits; + goto sublt100; + } + if (u < 1000000) { // 1,000,000 + if (u >= 100000) goto lt1_000_000; + digits = u / 10000; // 10,000 + *buffer++ = '0' + digits; + goto sublt10_000; + } + if (u < 100000000) { // 100,000,000 + if (u >= 10000000) goto lt100_000_000; + digits = u / 1000000; // 1,000,000 + *buffer++ = '0' + digits; + goto sublt1_000_000; + } + // we already know that u < 1,000,000,000 + digits = u / 100000000; // 100,000,000 + *buffer++ = '0' + digits; + goto sublt100_000_000; +} + +char* FastInt32ToBufferLeft(int32 i, char* buffer) { + uint32 u = i; + if (i < 0) { + *buffer++ = '-'; + u = -i; + } + return FastUInt32ToBufferLeft(u, buffer); +} + +char* FastUInt64ToBufferLeft(uint64 u64, char* buffer) { + int digits; + const char *ASCII_digits = NULL; + + uint32 u = static_cast(u64); + if (u == u64) return FastUInt32ToBufferLeft(u, buffer); + + uint64 top_11_digits = u64 / 1000000000; + buffer = FastUInt64ToBufferLeft(top_11_digits, buffer); + u = u64 - (top_11_digits * 1000000000); + + digits = u / 10000000; // 10,000,000 + GOOGLE_DCHECK_LT(digits, 100); + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; + u -= digits * 10000000; // 10,000,000 + digits = u / 100000; // 100,000 + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; + u -= digits * 100000; // 100,000 + digits = u / 1000; // 1,000 + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; + u -= digits * 1000; // 1,000 + digits = u / 10; + ASCII_digits = two_ASCII_digits[digits]; + buffer[0] = ASCII_digits[0]; + buffer[1] = ASCII_digits[1]; + buffer += 2; + u -= digits * 10; + digits = u; + *buffer++ = '0' + digits; + *buffer = 0; + return buffer; +} + +char* FastInt64ToBufferLeft(int64 i, char* buffer) { + uint64 u = i; + if (i < 0) { + *buffer++ = '-'; + u = -i; + } + return FastUInt64ToBufferLeft(u, buffer); +} + +// ---------------------------------------------------------------------- +// SimpleItoa() +// Description: converts an integer to a string. 
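+//    For example, SimpleItoa(-42) returns the string "-42".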
+// +// Return value: string +// ---------------------------------------------------------------------- + +string SimpleItoa(int i) { + char buffer[kFastToBufferSize]; + return (sizeof(i) == 4) ? + FastInt32ToBuffer(i, buffer) : + FastInt64ToBuffer(i, buffer); +} + +string SimpleItoa(unsigned int i) { + char buffer[kFastToBufferSize]; + return string(buffer, (sizeof(i) == 4) ? + FastUInt32ToBufferLeft(i, buffer) : + FastUInt64ToBufferLeft(i, buffer)); +} + +string SimpleItoa(long i) { + char buffer[kFastToBufferSize]; + return (sizeof(i) == 4) ? + FastInt32ToBuffer(i, buffer) : + FastInt64ToBuffer(i, buffer); +} + +string SimpleItoa(unsigned long i) { + char buffer[kFastToBufferSize]; + return string(buffer, (sizeof(i) == 4) ? + FastUInt32ToBufferLeft(i, buffer) : + FastUInt64ToBufferLeft(i, buffer)); +} + +string SimpleItoa(long long i) { + char buffer[kFastToBufferSize]; + return (sizeof(i) == 4) ? + FastInt32ToBuffer(i, buffer) : + FastInt64ToBuffer(i, buffer); +} + +string SimpleItoa(unsigned long long i) { + char buffer[kFastToBufferSize]; + return string(buffer, (sizeof(i) == 4) ? + FastUInt32ToBufferLeft(i, buffer) : + FastUInt64ToBufferLeft(i, buffer)); +} + +// ---------------------------------------------------------------------- +// SimpleDtoa() +// SimpleFtoa() +// DoubleToBuffer() +// FloatToBuffer() +// We want to print the value without losing precision, but we also do +// not want to print more digits than necessary. This turns out to be +// trickier than it sounds. Numbers like 0.2 cannot be represented +// exactly in binary. If we print 0.2 with a very large precision, +// e.g. "%.50g", we get "0.2000000000000000111022302462515654042363167". +// On the other hand, if we set the precision too low, we lose +// significant digits when printing numbers that actually need them. +// It turns out there is no precision value that does the right thing +// for all numbers. +// +// Our strategy is to first try printing with a precision that is never +// over-precise, then parse the result with strtod() to see if it +// matches. If not, we print again with a precision that will always +// give a precise result, but may use more digits than necessary. +// +// An arguably better strategy would be to use the algorithm described +// in "How to Print Floating-Point Numbers Accurately" by Steele & +// White, e.g. as implemented by David M. Gay's dtoa(). It turns out, +// however, that the following implementation is about as fast as +// DMG's code. Furthermore, DMG's code locks mutexes, which means it +// will not scale well on multi-core machines. DMG's code is slightly +// more accurate (in that it will never use more digits than +// necessary), but this is probably irrelevant for most users. +// +// Rob Pike and Ken Thompson also have an implementation of dtoa() in +// third_party/fmt/fltfmt.cc. Their implementation is similar to this +// one in that it makes guesses and then uses strtod() to check them. +// Their implementation is faster because they use their own code to +// generate the digits in the first place rather than use snprintf(), +// thus avoiding format string parsing overhead. However, this makes +// it considerably more complicated than the following implementation, +// and it is embedded in a larger library. If speed turns out to be +// an issue, we could re-implement this in terms of their +// implementation. 
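+// Concretely: the first snprintf below uses DBL_DIG (15) significant digits,
+// and only if strtod() fails to round-trip the result does the retry use
+// DBL_DIG+2 (17) digits, which is always enough to reproduce an IEEE-754
+// double exactly.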
+// ----------------------------------------------------------------------
+
+string SimpleDtoa(double value) {
+  char buffer[kDoubleToBufferSize];
+  return DoubleToBuffer(value, buffer);
+}
+
+string SimpleFtoa(float value) {
+  char buffer[kFloatToBufferSize];
+  return FloatToBuffer(value, buffer);
+}
+
+static inline bool IsValidFloatChar(char c) {
+  return ('0' <= c && c <= '9') ||
+         c == 'e' || c == 'E' ||
+         c == '+' || c == '-';
+}
+
+void DelocalizeRadix(char* buffer) {
+  // Fast check:  if the buffer has a normal decimal point, assume no
+  // translation is needed.
+  if (strchr(buffer, '.') != NULL) return;
+
+  // Find the first unknown character.
+  while (IsValidFloatChar(*buffer)) ++buffer;
+
+  if (*buffer == '\0') {
+    // No radix character found.
+    return;
+  }
+
+  // We are now pointing at the locale-specific radix character.  Replace it
+  // with '.'.
+  *buffer = '.';
+  ++buffer;
+
+  if (!IsValidFloatChar(*buffer) && *buffer != '\0') {
+    // It appears the radix was a multi-byte character.  We need to remove the
+    // extra bytes.
+    char* target = buffer;
+    do { ++buffer; } while (!IsValidFloatChar(*buffer) && *buffer != '\0');
+    memmove(target, buffer, strlen(buffer) + 1);
+  }
+}
+
+char* DoubleToBuffer(double value, char* buffer) {
+  // DBL_DIG is 15 for IEEE-754 doubles, which are used on almost all
+  // platforms these days.  Just in case some system exists where DBL_DIG
+  // is significantly larger -- and risks overflowing our buffer -- we have
+  // this assert.
+  GOOGLE_COMPILE_ASSERT(DBL_DIG < 20, DBL_DIG_is_too_big);
+
+  if (value == std::numeric_limits<double>::infinity()) {
+    strcpy(buffer, "inf");
+    return buffer;
+  } else if (value == -std::numeric_limits<double>::infinity()) {
+    strcpy(buffer, "-inf");
+    return buffer;
+  } else if (MathLimits<double>::IsNaN(value)) {
+    strcpy(buffer, "nan");
+    return buffer;
+  }
+
+  int snprintf_result =
+      snprintf(buffer, kDoubleToBufferSize, "%.*g", DBL_DIG, value);
+
+  // The snprintf should never overflow because the buffer is significantly
+  // larger than the precision we asked for.
+  GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
+
+  // We need to make parsed_value volatile in order to force the compiler to
+  // write it out to the stack.  Otherwise, it may keep the value in a
+  // register, and if it does that, it may keep it as a long double instead
+  // of a double.  This long double may have extra bits that make it compare
+  // unequal to "value" even though it would be exactly equal if it were
+  // truncated to a double.
+  volatile double parsed_value = strtod(buffer, NULL);
+  if (parsed_value != value) {
+    int snprintf_result =
+        snprintf(buffer, kDoubleToBufferSize, "%.*g", DBL_DIG+2, value);
+
+    // Should never overflow; see above.
+    GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
+  }
+
+  DelocalizeRadix(buffer);
+  return buffer;
+}
+
+static int memcasecmp(const char *s1, const char *s2, size_t len) {
+  const unsigned char *us1 = reinterpret_cast<const unsigned char *>(s1);
+  const unsigned char *us2 = reinterpret_cast<const unsigned char *>(s2);
+
+  for (size_t i = 0; i < len; i++) {
+    const int diff =
+        static_cast<int>(static_cast<unsigned char>(ascii_tolower(us1[i]))) -
+        static_cast<int>(static_cast<unsigned char>(ascii_tolower(us2[i])));
+    if (diff != 0) return diff;
+  }
+  return 0;
+}
+
+inline bool CaseEqual(StringPiece s1, StringPiece s2) {
+  if (s1.size() != s2.size()) return false;
+  return memcasecmp(s1.data(), s2.data(), s1.size()) == 0;
+}
+
+bool safe_strtob(StringPiece str, bool* value) {
+  GOOGLE_CHECK(value != NULL) << "NULL output boolean given.";
+  if (CaseEqual(str, "true") || CaseEqual(str, "t") ||
+      CaseEqual(str, "yes") || CaseEqual(str, "y") ||
+      CaseEqual(str, "1")) {
+    *value = true;
+    return true;
+  }
+  if (CaseEqual(str, "false") || CaseEqual(str, "f") ||
+      CaseEqual(str, "no") || CaseEqual(str, "n") ||
+      CaseEqual(str, "0")) {
+    *value = false;
+    return true;
+  }
+  return false;
+}
+
+bool safe_strtof(const char* str, float* value) {
+  char* endptr;
+  errno = 0;  // errno only gets set on errors
+#if defined(_WIN32) || defined (__hpux)  // has no strtof()
+  *value = static_cast<float>(strtod(str, &endptr));
+#else
+  *value = strtof(str, &endptr);
+#endif
+  return *str != 0 && *endptr == 0 && errno == 0;
+}
+
+bool safe_strtod(const char* str, double* value) {
+  char* endptr;
+  *value = strtod(str, &endptr);
+  if (endptr != str) {
+    while (ascii_isspace(*endptr)) ++endptr;
+  }
+  // Ignore range errors from strtod. The values it
+  // returns on underflow and overflow are the right
+  // fallback in a robust setting.
+  return *str != '\0' && *endptr == '\0';
+}
+
+bool safe_strto32(const string& str, int32* value) {
+  return safe_int_internal(str, value);
+}
+
+bool safe_strtou32(const string& str, uint32* value) {
+  return safe_uint_internal(str, value);
+}
+
+bool safe_strto64(const string& str, int64* value) {
+  return safe_int_internal(str, value);
+}
+
+bool safe_strtou64(const string& str, uint64* value) {
+  return safe_uint_internal(str, value);
+}
+
+char* FloatToBuffer(float value, char* buffer) {
+  // FLT_DIG is 6 for IEEE-754 floats, which are used on almost all
+  // platforms these days. Just in case some system exists where FLT_DIG
+  // is significantly larger -- and risks overflowing our buffer -- we have
+  // this assert.
+  GOOGLE_COMPILE_ASSERT(FLT_DIG < 10, FLT_DIG_is_too_big);
+
+  if (value == std::numeric_limits<float>::infinity()) {
+    strcpy(buffer, "inf");
+    return buffer;
+  } else if (value == -std::numeric_limits<float>::infinity()) {
+    strcpy(buffer, "-inf");
+    return buffer;
+  } else if (MathLimits<float>::IsNaN(value)) {
+    strcpy(buffer, "nan");
+    return buffer;
+  }
+
+  int snprintf_result =
+      snprintf(buffer, kFloatToBufferSize, "%.*g", FLT_DIG, value);
+
+  // The snprintf should never overflow because the buffer is significantly
+  // larger than the precision we asked for.
+  GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
+
+  float parsed_value;
+  if (!safe_strtof(buffer, &parsed_value) || parsed_value != value) {
+    int snprintf_result =
+        snprintf(buffer, kFloatToBufferSize, "%.*g", FLT_DIG+3, value);
+
+    // Should never overflow; see above.
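+    // (FLT_DIG+3 == 9 significant digits, enough to uniquely identify
+    // every finite IEEE-754 float.)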
+    GOOGLE_DCHECK(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
+  }
+
+  DelocalizeRadix(buffer);
+  return buffer;
+}
+
+namespace strings {
+
+AlphaNum::AlphaNum(strings::Hex hex) {
+  char *const end = &digits[kFastToBufferSize];
+  char *writer = end;
+  uint64 value = hex.value;
+  uint64 width = hex.spec;
+  // We accomplish minimum width by OR'ing in 0x10000 to the user's value,
+  // where 0x10000 is the smallest hex number that is as wide as the user
+  // asked for.
+  uint64 mask = ((static_cast<uint64>(1) << (width - 1) * 4)) | value;
+  static const char hexdigits[] = "0123456789abcdef";
+  do {
+    *--writer = hexdigits[value & 0xF];
+    value >>= 4;
+    mask >>= 4;
+  } while (mask != 0);
+  piece_data_ = writer;
+  piece_size_ = end - writer;
+}
+
+}  // namespace strings
+
+// ----------------------------------------------------------------------
+// StrCat()
+//    This merges the given strings or integers, with no delimiter. This
+//    is designed to be the fastest possible way to construct a string out
+//    of a mix of raw C strings, C++ strings, and integer values.
+// ----------------------------------------------------------------------
+
+// Append is merely a version of memcpy that returns the address of the byte
+// after the area just overwritten. It comes in multiple flavors to minimize
+// call overhead.
+static char *Append1(char *out, const AlphaNum &x) {
+  memcpy(out, x.data(), x.size());
+  return out + x.size();
+}
+
+static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) {
+  memcpy(out, x1.data(), x1.size());
+  out += x1.size();
+
+  memcpy(out, x2.data(), x2.size());
+  return out + x2.size();
+}
+
+static char *Append4(char *out,
+                     const AlphaNum &x1, const AlphaNum &x2,
+                     const AlphaNum &x3, const AlphaNum &x4) {
+  memcpy(out, x1.data(), x1.size());
+  out += x1.size();
+
+  memcpy(out, x2.data(), x2.size());
+  out += x2.size();
+
+  memcpy(out, x3.data(), x3.size());
+  out += x3.size();
+
+  memcpy(out, x4.data(), x4.size());
+  return out + x4.size();
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b) {
+  string result;
+  result.resize(a.size() + b.size());
+  char *const begin = &*result.begin();
+  char *out = Append2(begin, a, b);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) {
+  string result;
+  result.resize(a.size() + b.size() + c.size());
+  char *const begin = &*result.begin();
+  char *out = Append2(begin, a, b);
+  out = Append1(out, c);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d, const AlphaNum &e) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size() + e.size());
+  char *const begin = &*result.begin();
+  char *out = Append4(begin, a, b, c, d);
+  out = Append1(out, e);
+  GOOGLE_DCHECK_EQ(out, begin + result.size());
+  return result;
+}
+
+string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
+              const AlphaNum &d, const AlphaNum &e, const AlphaNum &f) {
+  string result;
+  result.resize(a.size() + b.size() + c.size() + d.size() + e.size() +
+                f.size());
+  char *const begin =
&*result.begin(); + char *out = Append4(begin, a, b, c, d); + out = Append2(out, e, f); + GOOGLE_DCHECK_EQ(out, begin + result.size()); + return result; +} + +string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c, + const AlphaNum &d, const AlphaNum &e, const AlphaNum &f, + const AlphaNum &g) { + string result; + result.resize(a.size() + b.size() + c.size() + d.size() + e.size() + + f.size() + g.size()); + char *const begin = &*result.begin(); + char *out = Append4(begin, a, b, c, d); + out = Append2(out, e, f); + out = Append1(out, g); + GOOGLE_DCHECK_EQ(out, begin + result.size()); + return result; +} + +string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c, + const AlphaNum &d, const AlphaNum &e, const AlphaNum &f, + const AlphaNum &g, const AlphaNum &h) { + string result; + result.resize(a.size() + b.size() + c.size() + d.size() + e.size() + + f.size() + g.size() + h.size()); + char *const begin = &*result.begin(); + char *out = Append4(begin, a, b, c, d); + out = Append4(out, e, f, g, h); + GOOGLE_DCHECK_EQ(out, begin + result.size()); + return result; +} + +string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c, + const AlphaNum &d, const AlphaNum &e, const AlphaNum &f, + const AlphaNum &g, const AlphaNum &h, const AlphaNum &i) { + string result; + result.resize(a.size() + b.size() + c.size() + d.size() + e.size() + + f.size() + g.size() + h.size() + i.size()); + char *const begin = &*result.begin(); + char *out = Append4(begin, a, b, c, d); + out = Append4(out, e, f, g, h); + out = Append1(out, i); + GOOGLE_DCHECK_EQ(out, begin + result.size()); + return result; +} + +// It's possible to call StrAppend with a char * pointer that is partway into +// the string we're appending to. However the results of this are random. +// Therefore, check for this in debug mode. Use unsigned math so we only have +// to do one comparison. 
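+// If src points into dest, the pointer difference is in [0, dest.size()],
+// which fails the unsigned ">" check below; a src that starts before dest
+// yields a negative difference that wraps to a huge unsigned value and
+// passes.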
+#define GOOGLE_DCHECK_NO_OVERLAP(dest, src) \ + GOOGLE_DCHECK_GT(uintptr_t((src).data() - (dest).data()), \ + uintptr_t((dest).size())) + +void StrAppend(string *result, const AlphaNum &a) { + GOOGLE_DCHECK_NO_OVERLAP(*result, a); + result->append(a.data(), a.size()); +} + +void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b) { + GOOGLE_DCHECK_NO_OVERLAP(*result, a); + GOOGLE_DCHECK_NO_OVERLAP(*result, b); + string::size_type old_size = result->size(); + result->resize(old_size + a.size() + b.size()); + char *const begin = &*result->begin(); + char *out = Append2(begin + old_size, a, b); + GOOGLE_DCHECK_EQ(out, begin + result->size()); +} + +void StrAppend(string *result, + const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) { + GOOGLE_DCHECK_NO_OVERLAP(*result, a); + GOOGLE_DCHECK_NO_OVERLAP(*result, b); + GOOGLE_DCHECK_NO_OVERLAP(*result, c); + string::size_type old_size = result->size(); + result->resize(old_size + a.size() + b.size() + c.size()); + char *const begin = &*result->begin(); + char *out = Append2(begin + old_size, a, b); + out = Append1(out, c); + GOOGLE_DCHECK_EQ(out, begin + result->size()); +} + +void StrAppend(string *result, + const AlphaNum &a, const AlphaNum &b, + const AlphaNum &c, const AlphaNum &d) { + GOOGLE_DCHECK_NO_OVERLAP(*result, a); + GOOGLE_DCHECK_NO_OVERLAP(*result, b); + GOOGLE_DCHECK_NO_OVERLAP(*result, c); + GOOGLE_DCHECK_NO_OVERLAP(*result, d); + string::size_type old_size = result->size(); + result->resize(old_size + a.size() + b.size() + c.size() + d.size()); + char *const begin = &*result->begin(); + char *out = Append4(begin + old_size, a, b, c, d); + GOOGLE_DCHECK_EQ(out, begin + result->size()); +} + +int GlobalReplaceSubstring(const string& substring, + const string& replacement, + string* s) { + GOOGLE_CHECK(s != NULL); + if (s->empty() || substring.empty()) + return 0; + string tmp; + int num_replacements = 0; + int pos = 0; + for (int match_pos = s->find(substring.data(), pos, substring.length()); + match_pos != string::npos; + pos = match_pos + substring.length(), + match_pos = s->find(substring.data(), pos, substring.length())) { + ++num_replacements; + // Append the original content before the match. + tmp.append(*s, pos, match_pos - pos); + // Append the replacement for the match. + tmp.append(replacement.begin(), replacement.end()); + } + // Append the content after the last match. If no replacements were made, the + // original string is left untouched. + if (num_replacements > 0) { + tmp.append(*s, pos, s->length() - pos); + s->swap(tmp); + } + return num_replacements; +} + +int CalculateBase64EscapedLen(int input_len, bool do_padding) { + // Base64 encodes three bytes of input at a time. If the input is not + // divisible by three, we pad as appropriate. + // + // (from http://tools.ietf.org/html/rfc3548) + // Special processing is performed if fewer than 24 bits are available + // at the end of the data being encoded. A full encoding quantum is + // always completed at the end of a quantity. When fewer than 24 input + // bits are available in an input group, zero bits are added (on the + // right) to form an integral number of 6-bit groups. Padding at the + // end of the data is performed using the '=' character. Since all base + // 64 input is an integral number of octets, only the following cases + // can arise: + + + // Base64 encodes each three bytes of input into four bytes of output. 
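+  // Worked example: input_len == 5 gives 4 characters for the first three
+  // bytes, then 3 characters for the 2 leftover bytes, plus 1 '=' when
+  // do_padding is true -- 8 characters in total.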
+  int len = (input_len / 3) * 4;
+
+  if (input_len % 3 == 0) {
+    // (from http://tools.ietf.org/html/rfc3548)
+    // (1) the final quantum of encoding input is an integral multiple of 24
+    // bits; here, the final unit of encoded output will be an integral
+    // multiple of 4 characters with no "=" padding,
+  } else if (input_len % 3 == 1) {
+    // (from http://tools.ietf.org/html/rfc3548)
+    // (2) the final quantum of encoding input is exactly 8 bits; here, the
+    // final unit of encoded output will be two characters followed by two
+    // "=" padding characters, or
+    len += 2;
+    if (do_padding) {
+      len += 2;
+    }
+  } else {  // (input_len % 3 == 2)
+    // (from http://tools.ietf.org/html/rfc3548)
+    // (3) the final quantum of encoding input is exactly 16 bits; here, the
+    // final unit of encoded output will be three characters followed by one
+    // "=" padding character.
+    len += 3;
+    if (do_padding) {
+      len += 1;
+    }
+  }
+
+  assert(len >= input_len);  // make sure we didn't overflow
+  return len;
+}
+
+// Base64Escape does padding, so this calculation includes padding.
+int CalculateBase64EscapedLen(int input_len) {
+  return CalculateBase64EscapedLen(input_len, true);
+}
+
+// ----------------------------------------------------------------------
+// int Base64Unescape() - base64 decoder
+// int Base64Escape() - base64 encoder
+// int WebSafeBase64Unescape() - Google's variation of base64 decoder
+// int WebSafeBase64Escape() - Google's variation of base64 encoder
+//
+// Check out
+// http://tools.ietf.org/html/rfc2045 for formal description, but what we
+// care about is that...
+//   Take the encoded stuff in groups of 4 characters and turn each
+//   character into a code 0 to 63 thus:
+//           A-Z map to 0 to 25
+//           a-z map to 26 to 51
+//           0-9 map to 52 to 61
+//           +(- for WebSafe) maps to 62
+//           /(_ for WebSafe) maps to 63
+//   There will be four numbers, all less than 64 which can be represented
+//   by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
+//   Arrange the 6 digit binary numbers into three bytes as such:
+//   aaaaaabb bbbbcccc ccdddddd
+//   Equals signs (one or two) are used at the end of the encoded block to
+//   indicate that the text was not an integer multiple of three bytes long.
+// ----------------------------------------------------------------------
+
+int Base64UnescapeInternal(const char *src_param, int szsrc,
+                           char *dest, int szdest,
+                           const signed char* unbase64) {
+  static const char kPad64Equals = '=';
+  static const char kPad64Dot = '.';
+
+  int decode = 0;
+  int destidx = 0;
+  int state = 0;
+  unsigned int ch = 0;
+  unsigned int temp = 0;
+
+  // If "char" is signed by default, using *src as an array index results in
+  // accessing negative array elements. Treat the input as a pointer to
+  // unsigned char to avoid this.
+  const unsigned char *src = reinterpret_cast<const unsigned char*>(src_param);
+
+  // The GET_INPUT macro gets the next input character, skipping
+  // over any whitespace, and stopping when we reach the end of the
+  // string or when we read any non-data character. The arguments are
+  // an arbitrary identifier (used as a label for goto) and the number
+  // of data bytes that must remain in the input to avoid aborting the
+  // loop.
+#define GET_INPUT(label, remain)                 \
+  label:                                         \
+    --szsrc;                                     \
+    ch = *src++;                                 \
+    decode = unbase64[ch];                       \
+    if (decode < 0) {                            \
+      if (ascii_isspace(ch) && szsrc >= remain)  \
+        goto label;                              \
+      state = 4 - remain;                        \
+      break;                                     \
+    }
+
+  // if dest is null, we're just checking to see if it's legal input
+  // rather than producing output.
(I suspect this could just be done + // with a regexp...). We duplicate the loop so this test can be + // outside it instead of in every iteration. + + if (dest) { + // This loop consumes 4 input bytes and produces 3 output bytes + // per iteration. We can't know at the start that there is enough + // data left in the string for a full iteration, so the loop may + // break out in the middle; if so 'state' will be set to the + // number of input bytes read. + + while (szsrc >= 4) { + // We'll start by optimistically assuming that the next four + // bytes of the string (src[0..3]) are four good data bytes + // (that is, no nulls, whitespace, padding chars, or illegal + // chars). We need to test src[0..2] for nulls individually + // before constructing temp to preserve the property that we + // never read past a null in the string (no matter how long + // szsrc claims the string is). + + if (!src[0] || !src[1] || !src[2] || + (temp = ((unsigned(unbase64[src[0]]) << 18) | + (unsigned(unbase64[src[1]]) << 12) | + (unsigned(unbase64[src[2]]) << 6) | + (unsigned(unbase64[src[3]])))) & 0x80000000) { + // Iff any of those four characters was bad (null, illegal, + // whitespace, padding), then temp's high bit will be set + // (because unbase64[] is -1 for all bad characters). + // + // We'll back up and resort to the slower decoder, which knows + // how to handle those cases. + + GET_INPUT(first, 4); + temp = decode; + GET_INPUT(second, 3); + temp = (temp << 6) | decode; + GET_INPUT(third, 2); + temp = (temp << 6) | decode; + GET_INPUT(fourth, 1); + temp = (temp << 6) | decode; + } else { + // We really did have four good data bytes, so advance four + // characters in the string. + + szsrc -= 4; + src += 4; + decode = -1; + ch = '\0'; + } + + // temp has 24 bits of input, so write that out as three bytes. + + if (destidx+3 > szdest) return -1; + dest[destidx+2] = temp; + temp >>= 8; + dest[destidx+1] = temp; + temp >>= 8; + dest[destidx] = temp; + destidx += 3; + } + } else { + while (szsrc >= 4) { + if (!src[0] || !src[1] || !src[2] || + (temp = ((unsigned(unbase64[src[0]]) << 18) | + (unsigned(unbase64[src[1]]) << 12) | + (unsigned(unbase64[src[2]]) << 6) | + (unsigned(unbase64[src[3]])))) & 0x80000000) { + GET_INPUT(first_no_dest, 4); + GET_INPUT(second_no_dest, 3); + GET_INPUT(third_no_dest, 2); + GET_INPUT(fourth_no_dest, 1); + } else { + szsrc -= 4; + src += 4; + decode = -1; + ch = '\0'; + } + destidx += 3; + } + } + +#undef GET_INPUT + + // if the loop terminated because we read a bad character, return + // now. + if (decode < 0 && ch != '\0' && + ch != kPad64Equals && ch != kPad64Dot && !ascii_isspace(ch)) + return -1; + + if (ch == kPad64Equals || ch == kPad64Dot) { + // if we stopped by hitting an '=' or '.', un-read that character -- we'll + // look at it again when we count to check for the proper number of + // equals signs at the end. + ++szsrc; + --src; + } else { + // This loop consumes 1 input byte per iteration. It's used to + // clean up the 0-3 input bytes remaining when the first, faster + // loop finishes. 'temp' contains the data from 'state' input + // characters read by the first loop. + while (szsrc > 0) { + --szsrc; + ch = *src++; + decode = unbase64[ch]; + if (decode < 0) { + if (ascii_isspace(ch)) { + continue; + } else if (ch == '\0') { + break; + } else if (ch == kPad64Equals || ch == kPad64Dot) { + // back up one character; we'll read it again when we check + // for the correct number of pad characters at the end. 
+ ++szsrc; + --src; + break; + } else { + return -1; + } + } + + // Each input character gives us six bits of output. + temp = (temp << 6) | decode; + ++state; + if (state == 4) { + // If we've accumulated 24 bits of output, write that out as + // three bytes. + if (dest) { + if (destidx+3 > szdest) return -1; + dest[destidx+2] = temp; + temp >>= 8; + dest[destidx+1] = temp; + temp >>= 8; + dest[destidx] = temp; + } + destidx += 3; + state = 0; + temp = 0; + } + } + } + + // Process the leftover data contained in 'temp' at the end of the input. + int expected_equals = 0; + switch (state) { + case 0: + // Nothing left over; output is a multiple of 3 bytes. + break; + + case 1: + // Bad input; we have 6 bits left over. + return -1; + + case 2: + // Produce one more output byte from the 12 input bits we have left. + if (dest) { + if (destidx+1 > szdest) return -1; + temp >>= 4; + dest[destidx] = temp; + } + ++destidx; + expected_equals = 2; + break; + + case 3: + // Produce two more output bytes from the 18 input bits we have left. + if (dest) { + if (destidx+2 > szdest) return -1; + temp >>= 2; + dest[destidx+1] = temp; + temp >>= 8; + dest[destidx] = temp; + } + destidx += 2; + expected_equals = 1; + break; + + default: + // state should have no other values at this point. + GOOGLE_LOG(FATAL) << "This can't happen; base64 decoder state = " << state; + } + + // The remainder of the string should be all whitespace, mixed with + // exactly 0 equals signs, or exactly 'expected_equals' equals + // signs. (Always accepting 0 equals signs is a google extension + // not covered in the RFC, as is accepting dot as the pad character.) + + int equals = 0; + while (szsrc > 0 && *src) { + if (*src == kPad64Equals || *src == kPad64Dot) + ++equals; + else if (!ascii_isspace(*src)) + return -1; + --szsrc; + ++src; + } + + return (equals == 0 || equals == expected_equals) ? destidx : -1; +} + +// The arrays below were generated by the following code +// #include +// #include +// #include +// main() +// { +// static const char Base64[] = +// "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +// char *pos; +// int idx, i, j; +// printf(" "); +// for (i = 0; i < 255; i += 8) { +// for (j = i; j < i + 8; j++) { +// pos = strchr(Base64, j); +// if ((pos == NULL) || (j == 0)) +// idx = -1; +// else +// idx = pos - Base64; +// if (idx == -1) +// printf(" %2d, ", idx); +// else +// printf(" %2d/*%c*/,", idx, j); +// } +// printf("\n "); +// } +// } +// +// where the value of "Base64[]" was replaced by one of the base-64 conversion +// tables from the functions below. 
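+// For example, once the tables below are in place, the StringPiece overload
+// defined later in this file decodes standard base64 in one call:
+//
+//   string decoded;
+//   if (Base64Unescape("aGVsbG8=", &decoded)) {
+//     // decoded == "hello"
+//   }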
+static const signed char kUnBase64[] = { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 62/*+*/, -1, -1, -1, 63/*/ */, + 52/*0*/, 53/*1*/, 54/*2*/, 55/*3*/, 56/*4*/, 57/*5*/, 58/*6*/, 59/*7*/, + 60/*8*/, 61/*9*/, -1, -1, -1, -1, -1, -1, + -1, 0/*A*/, 1/*B*/, 2/*C*/, 3/*D*/, 4/*E*/, 5/*F*/, 6/*G*/, + 07/*H*/, 8/*I*/, 9/*J*/, 10/*K*/, 11/*L*/, 12/*M*/, 13/*N*/, 14/*O*/, + 15/*P*/, 16/*Q*/, 17/*R*/, 18/*S*/, 19/*T*/, 20/*U*/, 21/*V*/, 22/*W*/, + 23/*X*/, 24/*Y*/, 25/*Z*/, -1, -1, -1, -1, -1, + -1, 26/*a*/, 27/*b*/, 28/*c*/, 29/*d*/, 30/*e*/, 31/*f*/, 32/*g*/, + 33/*h*/, 34/*i*/, 35/*j*/, 36/*k*/, 37/*l*/, 38/*m*/, 39/*n*/, 40/*o*/, + 41/*p*/, 42/*q*/, 43/*r*/, 44/*s*/, 45/*t*/, 46/*u*/, 47/*v*/, 48/*w*/, + 49/*x*/, 50/*y*/, 51/*z*/, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1 +}; +static const signed char kUnWebSafeBase64[] = { + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 62/*-*/, -1, -1, + 52/*0*/, 53/*1*/, 54/*2*/, 55/*3*/, 56/*4*/, 57/*5*/, 58/*6*/, 59/*7*/, + 60/*8*/, 61/*9*/, -1, -1, -1, -1, -1, -1, + -1, 0/*A*/, 1/*B*/, 2/*C*/, 3/*D*/, 4/*E*/, 5/*F*/, 6/*G*/, + 07/*H*/, 8/*I*/, 9/*J*/, 10/*K*/, 11/*L*/, 12/*M*/, 13/*N*/, 14/*O*/, + 15/*P*/, 16/*Q*/, 17/*R*/, 18/*S*/, 19/*T*/, 20/*U*/, 21/*V*/, 22/*W*/, + 23/*X*/, 24/*Y*/, 25/*Z*/, -1, -1, -1, -1, 63/*_*/, + -1, 26/*a*/, 27/*b*/, 28/*c*/, 29/*d*/, 30/*e*/, 31/*f*/, 32/*g*/, + 33/*h*/, 34/*i*/, 35/*j*/, 36/*k*/, 37/*l*/, 38/*m*/, 39/*n*/, 40/*o*/, + 41/*p*/, 42/*q*/, 43/*r*/, 44/*s*/, 45/*t*/, 46/*u*/, 47/*v*/, 48/*w*/, + 49/*x*/, 50/*y*/, 51/*z*/, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1 +}; + +int WebSafeBase64Unescape(const char *src, int szsrc, char *dest, int szdest) { + return Base64UnescapeInternal(src, szsrc, dest, szdest, kUnWebSafeBase64); +} + +static bool Base64UnescapeInternal(const char* src, int slen, string* dest, + const signed char* unbase64) { + // Determine the size of the output string. Base64 encodes every 3 bytes into + // 4 characters. any leftover chars are added directly for good measure. 
+ // This is documented in the base64 RFC: http://tools.ietf.org/html/rfc3548 + const int dest_len = 3 * (slen / 4) + (slen % 4); + + dest->resize(dest_len); + + // We are getting the destination buffer by getting the beginning of the + // string and converting it into a char *. + const int len = Base64UnescapeInternal(src, slen, string_as_array(dest), + dest_len, unbase64); + if (len < 0) { + dest->clear(); + return false; + } + + // could be shorter if there was padding + GOOGLE_DCHECK_LE(len, dest_len); + dest->erase(len); + + return true; +} + +bool Base64Unescape(StringPiece src, string* dest) { + return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64); +} + +bool WebSafeBase64Unescape(StringPiece src, string* dest) { + return Base64UnescapeInternal(src.data(), src.size(), dest, kUnWebSafeBase64); +} + +int Base64EscapeInternal(const unsigned char *src, int szsrc, + char *dest, int szdest, const char *base64, + bool do_padding) { + static const char kPad64 = '='; + + if (szsrc <= 0) return 0; + + if (szsrc * 4 > szdest * 3) return 0; + + char *cur_dest = dest; + const unsigned char *cur_src = src; + + char *limit_dest = dest + szdest; + const unsigned char *limit_src = src + szsrc; + + // Three bytes of data encodes to four characters of cyphertext. + // So we can pump through three-byte chunks atomically. + while (cur_src < limit_src - 3) { // keep going as long as we have >= 32 bits + uint32 in = BigEndian::Load32(cur_src) >> 8; + + cur_dest[0] = base64[in >> 18]; + in &= 0x3FFFF; + cur_dest[1] = base64[in >> 12]; + in &= 0xFFF; + cur_dest[2] = base64[in >> 6]; + in &= 0x3F; + cur_dest[3] = base64[in]; + + cur_dest += 4; + cur_src += 3; + } + // To save time, we didn't update szdest or szsrc in the loop. So do it now. + szdest = limit_dest - cur_dest; + szsrc = limit_src - cur_src; + + /* now deal with the tail (<=3 bytes) */ + switch (szsrc) { + case 0: + // Nothing left; nothing more to do. + break; + case 1: { + // One byte left: this encodes to two characters, and (optionally) + // two pad characters to round out the four-character cypherblock. + if ((szdest -= 2) < 0) return 0; + uint32 in = cur_src[0]; + cur_dest[0] = base64[in >> 2]; + in &= 0x3; + cur_dest[1] = base64[in << 4]; + cur_dest += 2; + if (do_padding) { + if ((szdest -= 2) < 0) return 0; + cur_dest[0] = kPad64; + cur_dest[1] = kPad64; + cur_dest += 2; + } + break; + } + case 2: { + // Two bytes left: this encodes to three characters, and (optionally) + // one pad character to round out the four-character cypherblock. + if ((szdest -= 3) < 0) return 0; + uint32 in = BigEndian::Load16(cur_src); + cur_dest[0] = base64[in >> 10]; + in &= 0x3FF; + cur_dest[1] = base64[in >> 4]; + in &= 0x00F; + cur_dest[2] = base64[in << 2]; + cur_dest += 3; + if (do_padding) { + if ((szdest -= 1) < 0) return 0; + cur_dest[0] = kPad64; + cur_dest += 1; + } + break; + } + case 3: { + // Three bytes left: same as in the big loop above. We can't do this in + // the loop because the loop above always reads 4 bytes, and the fourth + // byte is past the end of the input. + if ((szdest -= 4) < 0) return 0; + uint32 in = (cur_src[0] << 16) + BigEndian::Load16(cur_src + 1); + cur_dest[0] = base64[in >> 18]; + in &= 0x3FFFF; + cur_dest[1] = base64[in >> 12]; + in &= 0xFFF; + cur_dest[2] = base64[in >> 6]; + in &= 0x3F; + cur_dest[3] = base64[in]; + cur_dest += 4; + break; + } + default: + // Should not be reached: blocks of 4 bytes are handled + // in the while loop before this switch statement. 
+      GOOGLE_LOG(FATAL) << "Logic problem? szsrc = " << szsrc;
+      break;
+  }
+  return (cur_dest - dest);
+}
+
+static const char kBase64Chars[] =
+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+static const char kWebSafeBase64Chars[] =
+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+int Base64Escape(const unsigned char *src, int szsrc, char *dest, int szdest) {
+  return Base64EscapeInternal(src, szsrc, dest, szdest, kBase64Chars, true);
+}
+int WebSafeBase64Escape(const unsigned char *src, int szsrc, char *dest,
+                        int szdest, bool do_padding) {
+  return Base64EscapeInternal(src, szsrc, dest, szdest,
+                              kWebSafeBase64Chars, do_padding);
+}
+
+void Base64EscapeInternal(const unsigned char* src, int szsrc,
+                          string* dest, bool do_padding,
+                          const char* base64_chars) {
+  const int calc_escaped_size =
+      CalculateBase64EscapedLen(szsrc, do_padding);
+  dest->resize(calc_escaped_size);
+  const int escaped_len = Base64EscapeInternal(src, szsrc,
+                                               string_as_array(dest),
+                                               dest->size(),
+                                               base64_chars,
+                                               do_padding);
+  GOOGLE_DCHECK_EQ(calc_escaped_size, escaped_len);
+  dest->erase(escaped_len);
+}
+
+void Base64Escape(const unsigned char *src, int szsrc,
+                  string* dest, bool do_padding) {
+  Base64EscapeInternal(src, szsrc, dest, do_padding, kBase64Chars);
+}
+
+void WebSafeBase64Escape(const unsigned char *src, int szsrc,
+                         string *dest, bool do_padding) {
+  Base64EscapeInternal(src, szsrc, dest, do_padding, kWebSafeBase64Chars);
+}
+
+void Base64Escape(StringPiece src, string* dest) {
+  Base64Escape(reinterpret_cast<const unsigned char*>(src.data()),
+               src.size(), dest, true);
+}
+
+void WebSafeBase64Escape(StringPiece src, string* dest) {
+  WebSafeBase64Escape(reinterpret_cast<const unsigned char*>(src.data()),
+                      src.size(), dest, false);
+}
+
+void WebSafeBase64EscapeWithPadding(StringPiece src, string* dest) {
+  WebSafeBase64Escape(reinterpret_cast<const unsigned char*>(src.data()),
+                      src.size(), dest, true);
+}
+
+// Helper to append a Unicode code point to a string as UTF8, without bringing
+// in any external dependencies.
+int EncodeAsUTF8Char(uint32 code_point, char* output) {
+  uint32 tmp = 0;
+  int len = 0;
+  if (code_point <= 0x7f) {
+    tmp = code_point;
+    len = 1;
+  } else if (code_point <= 0x07ff) {
+    tmp = 0x0000c080 |
+        ((code_point & 0x07c0) << 2) |
+        (code_point & 0x003f);
+    len = 2;
+  } else if (code_point <= 0xffff) {
+    tmp = 0x00e08080 |
+        ((code_point & 0xf000) << 4) |
+        ((code_point & 0x0fc0) << 2) |
+        (code_point & 0x003f);
+    len = 3;
+  } else {
+    // UTF-16 is only defined for code points up to 0x10FFFF, and UTF-8 is
+    // normally only defined up to there as well.
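+    // The 4-byte form packs the 21 code point bits as
+    // 11110uuu 10uuuuuu 10uuuuuu 10uuuuuu, which is what the masks and
+    // shifts below assemble into "tmp".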
+    tmp = 0xf0808080 |
+        ((code_point & 0x1c0000) << 6) |
+        ((code_point & 0x03f000) << 4) |
+        ((code_point & 0x000fc0) << 2) |
+        (code_point & 0x003f);
+    len = 4;
+  }
+  tmp = ghtonl(tmp);
+  memcpy(output, reinterpret_cast<const char*>(&tmp) + sizeof(tmp) - len, len);
+  return len;
+}
+
+// Table of UTF-8 character lengths, based on first byte
+static const unsigned char kUTF8LenTbl[256] = {
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,
+  2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,
+  3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3, 4,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4
+};
+
+// Return length of a single UTF-8 source character
+int UTF8FirstLetterNumBytes(const char* src, int len) {
+  if (len == 0) {
+    return 0;
+  }
+  return kUTF8LenTbl[*reinterpret_cast<const uint8*>(src)];
+}
+
+}  // namespace protobuf
+}  // namespace google
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/time.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/time.cc
new file mode 100644
index 0000000000000000000000000000000000000000..6def637ee00fc7d78411712ef055650d56601174
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/time.cc
@@ -0,0 +1,365 @@
+#include <google/protobuf/stubs/time.h>
+
+#include <ctime>
+
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/strutil.h>
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+namespace {
+static const int64 kSecondsPerMinute = 60;
+static const int64 kSecondsPerHour = 3600;
+static const int64 kSecondsPerDay = kSecondsPerHour * 24;
+static const int64 kSecondsPer400Years =
+    kSecondsPerDay * (400 * 365 + 400 / 4 - 3);
+// Seconds from 0001-01-01T00:00:00 to 1970-01-01T00:00:00
+static const int64 kSecondsFromEraToEpoch = 62135596800LL;
+// The range of timestamp values we support.
+static const int64 kMinTime = -62135596800LL;  // 0001-01-01T00:00:00
+static const int64 kMaxTime = 253402300799LL;  // 9999-12-31T23:59:59
+
+static const int kNanosPerMillisecond = 1000000;
+static const int kNanosPerMicrosecond = 1000;
+
+// Count the seconds from the given year (start at Jan 1, 00:00) to 100 years
+// after.
+int64 SecondsPer100Years(int year) {
+  if (year % 400 == 0 || year % 400 > 300) {
+    return kSecondsPerDay * (100 * 365 + 100 / 4);
+  } else {
+    return kSecondsPerDay * (100 * 365 + 100 / 4 - 1);
+  }
+}
+
+// Count the seconds from the given year (start at Jan 1, 00:00) to 4 years
+// after.
+int64 SecondsPer4Years(int year) {
+  if ((year % 100 == 0 || year % 100 > 96) &&
+      !(year % 400 == 0 || year % 400 > 396)) {
+    // No leap years.
+    return kSecondsPerDay * (4 * 365);
+  } else {
+    // One leap year.
+    return kSecondsPerDay * (4 * 365 + 1);
+  }
+}
+
+bool IsLeapYear(int year) {
+  return year % 400 == 0 || (year % 4 == 0 && year % 100 != 0);
+}
+
+int64 SecondsPerYear(int year) {
+  return kSecondsPerDay * (IsLeapYear(year) ?
366 : 365); +} + +static const int kDaysInMonth[13] = { + 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 +}; + +int64 SecondsPerMonth(int month, bool leap) { + if (month == 2 && leap) { + return kSecondsPerDay * (kDaysInMonth[month] + 1); + } + return kSecondsPerDay * kDaysInMonth[month]; +} + +static const int kDaysSinceJan[13] = { + 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, +}; + +bool ValidateDateTime(const DateTime& time) { + if (time.year < 1 || time.year > 9999 || + time.month < 1 || time.month > 12 || + time.day < 1 || time.day > 31 || + time.hour < 0 || time.hour > 23 || + time.minute < 0 || time.minute > 59 || + time.second < 0 || time.second > 59) { + return false; + } + if (time.month == 2 && IsLeapYear(time.year)) { + return time.day <= kDaysInMonth[time.month] + 1; + } else { + return time.day <= kDaysInMonth[time.month]; + } +} + +// Count the number of seconds elapsed from 0001-01-01T00:00:00 to the given +// time. +int64 SecondsSinceCommonEra(const DateTime& time) { + int64 result = 0; + // Years should be between 1 and 9999. + assert(time.year >= 1 && time.year <= 9999); + int year = 1; + if ((time.year - year) >= 400) { + int count_400years = (time.year - year) / 400; + result += kSecondsPer400Years * count_400years; + year += count_400years * 400; + } + while ((time.year - year) >= 100) { + result += SecondsPer100Years(year); + year += 100; + } + while ((time.year - year) >= 4) { + result += SecondsPer4Years(year); + year += 4; + } + while (time.year > year) { + result += SecondsPerYear(year); + ++year; + } + // Months should be between 1 and 12. + assert(time.month >= 1 && time.month <= 12); + int month = time.month; + result += kSecondsPerDay * kDaysSinceJan[month]; + if (month > 2 && IsLeapYear(year)) { + result += kSecondsPerDay; + } + assert(time.day >= 1 && + time.day <= (month == 2 && IsLeapYear(year) + ? kDaysInMonth[month] + 1 + : kDaysInMonth[month])); + result += kSecondsPerDay * (time.day - 1); + result += kSecondsPerHour * time.hour + + kSecondsPerMinute * time.minute + + time.second; + return result; +} + +// Format nanoseconds with either 3, 6, or 9 digits depending on the required +// precision to represent the exact value. +string FormatNanos(int32 nanos) { + if (nanos % kNanosPerMillisecond == 0) { + return StringPrintf("%03d", nanos / kNanosPerMillisecond); + } else if (nanos % kNanosPerMicrosecond == 0) { + return StringPrintf("%06d", nanos / kNanosPerMicrosecond); + } else { + return StringPrintf("%09d", nanos); + } +} + +// Parses an integer from a null-terminated char sequence. The method +// consumes at most "width" chars. Returns a pointer after the consumed +// integer, or NULL if the data does not start with an integer or the +// integer value does not fall in the range of [min_value, max_value]. +const char* ParseInt(const char* data, int width, int min_value, + int max_value, int* result) { + if (!ascii_isdigit(*data)) { + return NULL; + } + int value = 0; + for (int i = 0; i < width; ++i, ++data) { + if (ascii_isdigit(*data)) { + value = value * 10 + (*data - '0'); + } else { + break; + } + } + if (value >= min_value && value <= max_value) { + *result = value; + return data; + } else { + return NULL; + } +} + +// Consumes the fractional parts of a second into nanos. For example, +// "010" will be parsed to 10000000 nanos. 
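+// Digits past the ninth are consumed but ignored, and shorter runs are
+// right-padded with zeros, so "5" parses to 500000000 nanos.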
+const char* ParseNanos(const char* data, int32* nanos) { + if (!ascii_isdigit(*data)) { + return NULL; + } + int value = 0; + int len = 0; + // Consume as many digits as there are but only take the first 9 into + // account. + while (ascii_isdigit(*data)) { + if (len < 9) { + value = value * 10 + *data - '0'; + } + ++len; + ++data; + } + while (len < 9) { + value = value * 10; + ++len; + } + *nanos = value; + return data; +} + +const char* ParseTimezoneOffset(const char* data, int64* offset) { + // Accept format "HH:MM". E.g., "08:00" + int hour; + if ((data = ParseInt(data, 2, 0, 23, &hour)) == NULL) { + return NULL; + } + if (*data++ != ':') { + return NULL; + } + int minute; + if ((data = ParseInt(data, 2, 0, 59, &minute)) == NULL) { + return NULL; + } + *offset = (hour * 60 + minute) * 60; + return data; +} +} // namespace + +bool SecondsToDateTime(int64 seconds, DateTime* time) { + if (seconds < kMinTime || seconds > kMaxTime) { + return false; + } + // It's easier to calcuate the DateTime starting from 0001-01-01T00:00:00 + seconds = seconds + kSecondsFromEraToEpoch; + int year = 1; + if (seconds >= kSecondsPer400Years) { + int count_400years = seconds / kSecondsPer400Years; + year += 400 * count_400years; + seconds %= kSecondsPer400Years; + } + while (seconds >= SecondsPer100Years(year)) { + seconds -= SecondsPer100Years(year); + year += 100; + } + while (seconds >= SecondsPer4Years(year)) { + seconds -= SecondsPer4Years(year); + year += 4; + } + while (seconds >= SecondsPerYear(year)) { + seconds -= SecondsPerYear(year); + year += 1; + } + bool leap = IsLeapYear(year); + int month = 1; + while (seconds >= SecondsPerMonth(month, leap)) { + seconds -= SecondsPerMonth(month, leap); + ++month; + } + int day = 1 + seconds / kSecondsPerDay; + seconds %= kSecondsPerDay; + int hour = seconds / kSecondsPerHour; + seconds %= kSecondsPerHour; + int minute = seconds / kSecondsPerMinute; + seconds %= kSecondsPerMinute; + time->year = year; + time->month = month; + time->day = day; + time->hour = hour; + time->minute = minute; + time->second = static_cast(seconds); + return true; +} + +bool DateTimeToSeconds(const DateTime& time, int64* seconds) { + if (!ValidateDateTime(time)) { + return false; + } + *seconds = SecondsSinceCommonEra(time) - kSecondsFromEraToEpoch; + return true; +} + +void GetCurrentTime(int64* seconds, int32* nanos) { + // TODO(xiaofeng): Improve the accuracy of this implementation (or just + // remove this method from protobuf). + *seconds = time(NULL); + *nanos = 0; +} + +string FormatTime(int64 seconds, int32 nanos) { + DateTime time; + if (nanos < 0 || nanos > 999999999 || !SecondsToDateTime(seconds, &time)) { + return "InvalidTime"; + } + string result = StringPrintf("%04d-%02d-%02dT%02d:%02d:%02d", + time.year, time.month, time.day, + time.hour, time.minute, time.second); + if (nanos != 0) { + result += "." 
+ FormatNanos(nanos); + } + return result + "Z"; +} + +bool ParseTime(const string& value, int64* seconds, int32* nanos) { + DateTime time; + const char* data = value.c_str(); + // We only accept: + // Z-normalized: 2015-05-20T13:29:35.120Z + // With UTC offset: 2015-05-20T13:29:35.120-08:00 + + // Parse year + if ((data = ParseInt(data, 4, 1, 9999, &time.year)) == NULL) { + return false; + } + // Expect '-' + if (*data++ != '-') return false; + // Parse month + if ((data = ParseInt(data, 2, 1, 12, &time.month)) == NULL) { + return false; + } + // Expect '-' + if (*data++ != '-') return false; + // Parse day + if ((data = ParseInt(data, 2, 1, 31, &time.day)) == NULL) { + return false; + } + // Expect 'T' + if (*data++ != 'T') return false; + // Parse hour + if ((data = ParseInt(data, 2, 0, 23, &time.hour)) == NULL) { + return false; + } + // Expect ':' + if (*data++ != ':') return false; + // Parse minute + if ((data = ParseInt(data, 2, 0, 59, &time.minute)) == NULL) { + return false; + } + // Expect ':' + if (*data++ != ':') return false; + // Parse second + if ((data = ParseInt(data, 2, 0, 59, &time.second)) == NULL) { + return false; + } + if (!DateTimeToSeconds(time, seconds)) { + return false; + } + // Parse nanoseconds. + if (*data == '.') { + ++data; + // Parse nanoseconds. + if ((data = ParseNanos(data, nanos)) == NULL) { + return false; + } + } else { + *nanos = 0; + } + // Parse UTC offsets. + if (*data == 'Z') { + ++data; + } else if (*data == '+') { + ++data; + int64 offset; + if ((data = ParseTimezoneOffset(data, &offset)) == NULL) { + return false; + } + *seconds -= offset; + } else if (*data == '-') { + ++data; + int64 offset; + if ((data = ParseTimezoneOffset(data, &offset)) == NULL) { + return false; + } + *seconds += offset; + } else { + return false; + } + // Done with parsing. + return *data == 0; +} + +} // namespace internal +} // namespace protobuf +} // namespace google diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/wire_format_lite.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/wire_format_lite.cc new file mode 100644 index 0000000000000000000000000000000000000000..1d8cda5a4ab5b91f665a4205d86f5675f1fd5d96 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/wire_format_lite.cc @@ -0,0 +1,815 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+
+#include <google/protobuf/wire_format_lite_inl.h>
+
+#include <stack>
+#include <string>
+#include <vector>
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/stringprintf.h>
+#include <google/protobuf/io/coded_stream_inl.h>
+#include <google/protobuf/io/zero_copy_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
+
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+// Old version of MSVC doesn't like definitions of inline constants, GCC
+// requires them.
+const int WireFormatLite::kMessageSetItemStartTag;
+const int WireFormatLite::kMessageSetItemEndTag;
+const int WireFormatLite::kMessageSetTypeIdTag;
+const int WireFormatLite::kMessageSetMessageTag;
+
+#endif
+
+// IBM xlC requires prefixing constants with WireFormatLite::
+const size_t WireFormatLite::kMessageSetItemTagsSize =
+    io::CodedOutputStream::StaticVarintSize32<
+        WireFormatLite::kMessageSetItemStartTag>::value +
+    io::CodedOutputStream::StaticVarintSize32<
+        WireFormatLite::kMessageSetItemEndTag>::value +
+    io::CodedOutputStream::StaticVarintSize32<
+        WireFormatLite::kMessageSetTypeIdTag>::value +
+    io::CodedOutputStream::StaticVarintSize32<
+        WireFormatLite::kMessageSetMessageTag>::value;
+
+const WireFormatLite::CppType
+WireFormatLite::kFieldTypeToCppTypeMap[MAX_FIELD_TYPE + 1] = {
+  static_cast<WireFormatLite::CppType>(0),  // 0 is reserved for errors
+
+  CPPTYPE_DOUBLE,   // TYPE_DOUBLE
+  CPPTYPE_FLOAT,    // TYPE_FLOAT
+  CPPTYPE_INT64,    // TYPE_INT64
+  CPPTYPE_UINT64,   // TYPE_UINT64
+  CPPTYPE_INT32,    // TYPE_INT32
+  CPPTYPE_UINT64,   // TYPE_FIXED64
+  CPPTYPE_UINT32,   // TYPE_FIXED32
+  CPPTYPE_BOOL,     // TYPE_BOOL
+  CPPTYPE_STRING,   // TYPE_STRING
+  CPPTYPE_MESSAGE,  // TYPE_GROUP
+  CPPTYPE_MESSAGE,  // TYPE_MESSAGE
+  CPPTYPE_STRING,   // TYPE_BYTES
+  CPPTYPE_UINT32,   // TYPE_UINT32
+  CPPTYPE_ENUM,     // TYPE_ENUM
+  CPPTYPE_INT32,    // TYPE_SFIXED32
+  CPPTYPE_INT64,    // TYPE_SFIXED64
+  CPPTYPE_INT32,    // TYPE_SINT32
+  CPPTYPE_INT64,    // TYPE_SINT64
+};
+
+const WireFormatLite::WireType
+WireFormatLite::kWireTypeForFieldType[MAX_FIELD_TYPE + 1] = {
+  static_cast<WireFormatLite::WireType>(-1),  // invalid
+  WireFormatLite::WIRETYPE_FIXED64,           // TYPE_DOUBLE
+  WireFormatLite::WIRETYPE_FIXED32,           // TYPE_FLOAT
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_INT64
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_UINT64
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_INT32
+  WireFormatLite::WIRETYPE_FIXED64,           // TYPE_FIXED64
+  WireFormatLite::WIRETYPE_FIXED32,           // TYPE_FIXED32
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_BOOL
+  WireFormatLite::WIRETYPE_LENGTH_DELIMITED,  // TYPE_STRING
+  WireFormatLite::WIRETYPE_START_GROUP,       // TYPE_GROUP
+  WireFormatLite::WIRETYPE_LENGTH_DELIMITED,  // TYPE_MESSAGE
+  WireFormatLite::WIRETYPE_LENGTH_DELIMITED,  // TYPE_BYTES
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_UINT32
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_ENUM
+  WireFormatLite::WIRETYPE_FIXED32,           // TYPE_SFIXED32
+  WireFormatLite::WIRETYPE_FIXED64,           // TYPE_SFIXED64
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_SINT32
+  WireFormatLite::WIRETYPE_VARINT,            // TYPE_SINT64
+};
+
+bool WireFormatLite::SkipField(
io::CodedInputStream* input, uint32 tag) { + // Field number 0 is illegal. + if (WireFormatLite::GetTagFieldNumber(tag) == 0) return false; + switch (WireFormatLite::GetTagWireType(tag)) { + case WireFormatLite::WIRETYPE_VARINT: { + uint64 value; + if (!input->ReadVarint64(&value)) return false; + return true; + } + case WireFormatLite::WIRETYPE_FIXED64: { + uint64 value; + if (!input->ReadLittleEndian64(&value)) return false; + return true; + } + case WireFormatLite::WIRETYPE_LENGTH_DELIMITED: { + uint32 length; + if (!input->ReadVarint32(&length)) return false; + if (!input->Skip(length)) return false; + return true; + } + case WireFormatLite::WIRETYPE_START_GROUP: { + if (!input->IncrementRecursionDepth()) return false; + if (!SkipMessage(input)) return false; + input->DecrementRecursionDepth(); + // Check that the ending tag matched the starting tag. + if (!input->LastTagWas(WireFormatLite::MakeTag( + WireFormatLite::GetTagFieldNumber(tag), + WireFormatLite::WIRETYPE_END_GROUP))) { + return false; + } + return true; + } + case WireFormatLite::WIRETYPE_END_GROUP: { + return false; + } + case WireFormatLite::WIRETYPE_FIXED32: { + uint32 value; + if (!input->ReadLittleEndian32(&value)) return false; + return true; + } + default: { + return false; + } + } +} + +bool WireFormatLite::SkipField( + io::CodedInputStream* input, uint32 tag, io::CodedOutputStream* output) { + // Field number 0 is illegal. + if (WireFormatLite::GetTagFieldNumber(tag) == 0) return false; + switch (WireFormatLite::GetTagWireType(tag)) { + case WireFormatLite::WIRETYPE_VARINT: { + uint64 value; + if (!input->ReadVarint64(&value)) return false; + output->WriteVarint32(tag); + output->WriteVarint64(value); + return true; + } + case WireFormatLite::WIRETYPE_FIXED64: { + uint64 value; + if (!input->ReadLittleEndian64(&value)) return false; + output->WriteVarint32(tag); + output->WriteLittleEndian64(value); + return true; + } + case WireFormatLite::WIRETYPE_LENGTH_DELIMITED: { + uint32 length; + if (!input->ReadVarint32(&length)) return false; + output->WriteVarint32(tag); + output->WriteVarint32(length); + // TODO(mkilavuz): Provide API to prevent extra string copying. + string temp; + if (!input->ReadString(&temp, length)) return false; + output->WriteString(temp); + return true; + } + case WireFormatLite::WIRETYPE_START_GROUP: { + output->WriteVarint32(tag); + if (!input->IncrementRecursionDepth()) return false; + if (!SkipMessage(input, output)) return false; + input->DecrementRecursionDepth(); + // Check that the ending tag matched the starting tag. + if (!input->LastTagWas(WireFormatLite::MakeTag( + WireFormatLite::GetTagFieldNumber(tag), + WireFormatLite::WIRETYPE_END_GROUP))) { + return false; + } + return true; + } + case WireFormatLite::WIRETYPE_END_GROUP: { + return false; + } + case WireFormatLite::WIRETYPE_FIXED32: { + uint32 value; + if (!input->ReadLittleEndian32(&value)) return false; + output->WriteVarint32(tag); + output->WriteLittleEndian32(value); + return true; + } + default: { + return false; + } + } +} + +bool WireFormatLite::SkipMessage(io::CodedInputStream* input) { + while (true) { + uint32 tag = input->ReadTag(); + if (tag == 0) { + // End of input. This is a valid place to end, so return true. + return true; + } + + WireFormatLite::WireType wire_type = WireFormatLite::GetTagWireType(tag); + + if (wire_type == WireFormatLite::WIRETYPE_END_GROUP) { + // Must be the end of the message. 
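+      // (Whether this END_GROUP tag actually matches the enclosing
+      // START_GROUP is verified by the caller via LastTagWas(); see
+      // SkipField() above.)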
+      return true;
+    }
+
+    if (!SkipField(input, tag)) return false;
+  }
+}
+
+bool WireFormatLite::SkipMessage(io::CodedInputStream* input,
+                                 io::CodedOutputStream* output) {
+  while (true) {
+    uint32 tag = input->ReadTag();
+    if (tag == 0) {
+      // End of input. This is a valid place to end, so return true.
+      return true;
+    }
+
+    WireFormatLite::WireType wire_type = WireFormatLite::GetTagWireType(tag);
+
+    if (wire_type == WireFormatLite::WIRETYPE_END_GROUP) {
+      output->WriteVarint32(tag);
+      // Must be the end of the message.
+      return true;
+    }
+
+    if (!SkipField(input, tag, output)) return false;
+  }
+}
+
+bool FieldSkipper::SkipField(
+    io::CodedInputStream* input, uint32 tag) {
+  return WireFormatLite::SkipField(input, tag);
+}
+
+bool FieldSkipper::SkipMessage(io::CodedInputStream* input) {
+  return WireFormatLite::SkipMessage(input);
+}
+
+void FieldSkipper::SkipUnknownEnum(
+    int /* field_number */, int /* value */) {
+  // Nothing.
+}
+
+bool CodedOutputStreamFieldSkipper::SkipField(
+    io::CodedInputStream* input, uint32 tag) {
+  return WireFormatLite::SkipField(input, tag, unknown_fields_);
+}
+
+bool CodedOutputStreamFieldSkipper::SkipMessage(io::CodedInputStream* input) {
+  return WireFormatLite::SkipMessage(input, unknown_fields_);
+}
+
+void CodedOutputStreamFieldSkipper::SkipUnknownEnum(
+    int field_number, int value) {
+  unknown_fields_->WriteVarint32(field_number);
+  unknown_fields_->WriteVarint64(value);
+}
+
+bool WireFormatLite::ReadPackedEnumNoInline(io::CodedInputStream* input,
+                                            bool (*is_valid)(int),
+                                            RepeatedField<int>* values) {
+  uint32 length;
+  if (!input->ReadVarint32(&length)) return false;
+  io::CodedInputStream::Limit limit = input->PushLimit(length);
+  while (input->BytesUntilLimit() > 0) {
+    int value;
+    if (!google::protobuf::internal::WireFormatLite::ReadPrimitive<
+        int, WireFormatLite::TYPE_ENUM>(input, &value)) {
+      return false;
+    }
+    if (is_valid == NULL || is_valid(value)) {
+      values->Add(value);
+    }
+  }
+  input->PopLimit(limit);
+  return true;
+}
+
+bool WireFormatLite::ReadPackedEnumPreserveUnknowns(
+    io::CodedInputStream* input,
+    int field_number,
+    bool (*is_valid)(int),
+    io::CodedOutputStream* unknown_fields_stream,
+    RepeatedField<int>* values) {
+  uint32 length;
+  if (!input->ReadVarint32(&length)) return false;
+  io::CodedInputStream::Limit limit = input->PushLimit(length);
+  while (input->BytesUntilLimit() > 0) {
+    int value;
+    if (!google::protobuf::internal::WireFormatLite::ReadPrimitive<
+        int, WireFormatLite::TYPE_ENUM>(input, &value)) {
+      return false;
+    }
+    if (is_valid == NULL || is_valid(value)) {
+      values->Add(value);
+    } else {
+      uint32 tag = WireFormatLite::MakeTag(field_number,
+                                           WireFormatLite::WIRETYPE_VARINT);
+      unknown_fields_stream->WriteVarint32(tag);
+      unknown_fields_stream->WriteVarint32(value);
+    }
+  }
+  input->PopLimit(limit);
+  return true;
+}
+
+#if !defined(PROTOBUF_LITTLE_ENDIAN)
+
+namespace {
+void EncodeFixedSizeValue(float v, uint8* dest) {
+  WireFormatLite::WriteFloatNoTagToArray(v, dest);
+}
+
+void EncodeFixedSizeValue(double v, uint8* dest) {
+  WireFormatLite::WriteDoubleNoTagToArray(v, dest);
+}
+
+void EncodeFixedSizeValue(uint32 v, uint8* dest) {
+  WireFormatLite::WriteFixed32NoTagToArray(v, dest);
+}
+
+void EncodeFixedSizeValue(uint64 v, uint8* dest) {
+  WireFormatLite::WriteFixed64NoTagToArray(v, dest);
+}
+
+void EncodeFixedSizeValue(int32 v, uint8* dest) {
+  WireFormatLite::WriteSFixed32NoTagToArray(v, dest);
+}
+
+void EncodeFixedSizeValue(int64 v, uint8* dest) {
+  WireFormatLite::WriteSFixed64NoTagToArray(v, dest);
+}
+
+void EncodeFixedSizeValue(bool v, uint8* dest) {
+  WireFormatLite::WriteBoolNoTagToArray(v, dest);
+}
+}  // anonymous namespace
+
+#endif  // !defined(PROTOBUF_LITTLE_ENDIAN)
+
+template <typename CType>
+static void WriteArray(const CType* a, int n, io::CodedOutputStream* output) {
+#if defined(PROTOBUF_LITTLE_ENDIAN)
+  output->WriteRaw(reinterpret_cast<const char*>(a), n * sizeof(a[0]));
+#else
+  const int kAtATime = 128;
+  uint8 buf[sizeof(CType) * kAtATime];
+  for (int i = 0; i < n; i += kAtATime) {
+    int to_do = std::min(kAtATime, n - i);
+    uint8* ptr = buf;
+    for (int j = 0; j < to_do; j++) {
+      EncodeFixedSizeValue(a[i+j], ptr);
+      ptr += sizeof(a[0]);
+    }
+    output->WriteRaw(buf, to_do * sizeof(a[0]));
+  }
+#endif
+}
+
+void WireFormatLite::WriteFloatArray(const float* a, int n,
+                                     io::CodedOutputStream* output) {
+  WriteArray<float>(a, n, output);
+}
+
+void WireFormatLite::WriteDoubleArray(const double* a, int n,
+                                      io::CodedOutputStream* output) {
+  WriteArray<double>(a, n, output);
+}
+
+void WireFormatLite::WriteFixed32Array(const uint32* a, int n,
+                                       io::CodedOutputStream* output) {
+  WriteArray<uint32>(a, n, output);
+}
+
+void WireFormatLite::WriteFixed64Array(const uint64* a, int n,
+                                       io::CodedOutputStream* output) {
+  WriteArray<uint64>(a, n, output);
+}
+
+void WireFormatLite::WriteSFixed32Array(const int32* a, int n,
+                                        io::CodedOutputStream* output) {
+  WriteArray<int32>(a, n, output);
+}
+
+void WireFormatLite::WriteSFixed64Array(const int64* a, int n,
+                                        io::CodedOutputStream* output) {
+  WriteArray<int64>(a, n, output);
+}
+
+void WireFormatLite::WriteBoolArray(const bool* a, int n,
+                                    io::CodedOutputStream* output) {
+  WriteArray<bool>(a, n, output);
+}
+
+void WireFormatLite::WriteInt32(int field_number, int32 value,
+                                io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteInt32NoTag(value, output);
+}
+void WireFormatLite::WriteInt64(int field_number, int64 value,
+                                io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteInt64NoTag(value, output);
+}
+void WireFormatLite::WriteUInt32(int field_number, uint32 value,
+                                 io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteUInt32NoTag(value, output);
+}
+void WireFormatLite::WriteUInt64(int field_number, uint64 value,
+                                 io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteUInt64NoTag(value, output);
+}
+void WireFormatLite::WriteSInt32(int field_number, int32 value,
+                                 io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteSInt32NoTag(value, output);
+}
+void WireFormatLite::WriteSInt64(int field_number, int64 value,
+                                 io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteSInt64NoTag(value, output);
+}
+void WireFormatLite::WriteFixed32(int field_number, uint32 value,
+                                  io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_FIXED32, output);
+  WriteFixed32NoTag(value, output);
+}
+void WireFormatLite::WriteFixed64(int field_number, uint64 value,
+                                  io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_FIXED64, output);
+  WriteFixed64NoTag(value, output);
+}
+void WireFormatLite::WriteSFixed32(int field_number, int32 value,
+                                   io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_FIXED32, output);
+  WriteSFixed32NoTag(value, output);
+}
+void WireFormatLite::WriteSFixed64(int field_number, int64 value,
+                                   io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_FIXED64, output);
+  WriteSFixed64NoTag(value, output);
+}
+void
+void WireFormatLite::WriteFloat(int field_number, float value,
+                                io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_FIXED32, output);
+  WriteFloatNoTag(value, output);
+}
+void WireFormatLite::WriteDouble(int field_number, double value,
+                                 io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_FIXED64, output);
+  WriteDoubleNoTag(value, output);
+}
+void WireFormatLite::WriteBool(int field_number, bool value,
+                               io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteBoolNoTag(value, output);
+}
+void WireFormatLite::WriteEnum(int field_number, int value,
+                               io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_VARINT, output);
+  WriteEnumNoTag(value, output);
+}
+
+void WireFormatLite::WriteString(int field_number, const string& value,
+                                 io::CodedOutputStream* output) {
+  // String is for UTF-8 text only
+  WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output);
+  GOOGLE_CHECK_LE(value.size(), kint32max);
+  output->WriteVarint32(value.size());
+  output->WriteString(value);
+}
+void WireFormatLite::WriteStringMaybeAliased(
+    int field_number, const string& value,
+    io::CodedOutputStream* output) {
+  // String is for UTF-8 text only
+  WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output);
+  GOOGLE_CHECK_LE(value.size(), kint32max);
+  output->WriteVarint32(value.size());
+  output->WriteRawMaybeAliased(value.data(), value.size());
+}
+void WireFormatLite::WriteBytes(int field_number, const string& value,
+                                io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output);
+  GOOGLE_CHECK_LE(value.size(), kint32max);
+  output->WriteVarint32(value.size());
+  output->WriteString(value);
+}
+void WireFormatLite::WriteBytesMaybeAliased(
+    int field_number, const string& value,
+    io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output);
+  GOOGLE_CHECK_LE(value.size(), kint32max);
+  output->WriteVarint32(value.size());
+  output->WriteRawMaybeAliased(value.data(), value.size());
+}
+
+
+void WireFormatLite::WriteGroup(int field_number,
+                                const MessageLite& value,
+                                io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_START_GROUP, output);
+  value.SerializeWithCachedSizes(output);
+  WriteTag(field_number, WIRETYPE_END_GROUP, output);
+}
+
+void WireFormatLite::WriteMessage(int field_number,
+                                  const MessageLite& value,
+                                  io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output);
+  const int size = value.GetCachedSize();
+  output->WriteVarint32(size);
+  value.SerializeWithCachedSizes(output);
+}
+
+void WireFormatLite::WriteGroupMaybeToArray(int field_number,
+                                            const MessageLite& value,
+                                            io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_START_GROUP, output);
+  const int size = value.GetCachedSize();
+  uint8* target = output->GetDirectBufferForNBytesAndAdvance(size);
+  if (target != NULL) {
+    uint8* end = value.InternalSerializeWithCachedSizesToArray(
+        output->IsSerializationDeterministic(), target);
+    GOOGLE_DCHECK_EQ(end - target, size);
+  } else {
+    value.SerializeWithCachedSizes(output);
+  }
+  WriteTag(field_number, WIRETYPE_END_GROUP, output);
+}
+
+void WireFormatLite::WriteMessageMaybeToArray(int field_number,
+                                              const MessageLite& value,
+                                              io::CodedOutputStream* output) {
+  WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output);
+  const int size = value.GetCachedSize();
+  output->WriteVarint32(size);
+  uint8* target = output->GetDirectBufferForNBytesAndAdvance(size);
+  if (target != NULL) {
+    uint8* end = value.InternalSerializeWithCachedSizesToArray(
+        output->IsSerializationDeterministic(), target);
+    GOOGLE_DCHECK_EQ(end - target, size);
+  } else {
+    value.SerializeWithCachedSizes(output);
+  }
+}
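+// Illustrative sketch (editor's addition, compiled out): the two framings for
+// sub-messages used above. A group is delimited by START/END group tags; a
+// message field is length-prefixed, which is why WriteMessage relies on a
+// byte size that was already cached by a prior ByteSize() pass. The field
+// numbers are hypothetical.
+#if 0
+static void FramingSketch(const MessageLite& msg,
+                          io::CodedOutputStream* out) {
+  // Assumes msg.ByteSize() (or ByteSizeLong()) was called so the cached
+  // size is up to date.
+  WireFormatLite::WriteGroup(1, msg, out);    // tag(1,SGROUP) payload tag(1,EGROUP)
+  WireFormatLite::WriteMessage(2, msg, out);  // tag(2,LEN) varint(size) payload
+}
+#endif  // end illustrative sketch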
+
+GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static bool ReadBytesToString(
+    io::CodedInputStream* input, string* value);
+inline static bool ReadBytesToString(io::CodedInputStream* input,
+                                     string* value) {
+  uint32 length;
+  return input->ReadVarint32(&length) &&
+      input->InternalReadStringInline(value, length);
+}
+
+bool WireFormatLite::ReadBytes(io::CodedInputStream* input, string* value) {
+  return ReadBytesToString(input, value);
+}
+
+bool WireFormatLite::ReadBytes(io::CodedInputStream* input, string** p) {
+  if (*p == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+    *p = new ::std::string();
+  }
+  return ReadBytesToString(input, *p);
+}
+
+bool WireFormatLite::VerifyUtf8String(const char* data,
+                                      int size,
+                                      Operation op,
+                                      const char* field_name) {
+  if (!IsStructurallyValidUTF8(data, size)) {
+    const char* operation_str = NULL;
+    switch (op) {
+      case PARSE:
+        operation_str = "parsing";
+        break;
+      case SERIALIZE:
+        operation_str = "serializing";
+        break;
+      // no default case: have the compiler warn if a case is not covered.
+    }
+    string quoted_field_name = "";
+    if (field_name != NULL) {
+      quoted_field_name = StringPrintf(" '%s'", field_name);
+    }
+    // no space below to avoid double space when the field name is missing.
+    GOOGLE_LOG(ERROR) << "String field" << quoted_field_name << " contains invalid "
+                      << "UTF-8 data when " << operation_str << " a protocol "
+                      << "buffer. Use the 'bytes' type if you intend to send raw "
+                      << "bytes. ";
+    return false;
+  }
+  return true;
+}
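+// Illustrative sketch (editor's addition, compiled out): what a failed UTF-8
+// check looks like. "\xC0\x80" is an overlong encoding of NUL, which the
+// structural validator is expected to reject, so this logs an error and
+// returns false. The field name is hypothetical.
+#if 0
+static void Utf8CheckSketch() {
+  const char bad[] = "\xC0\x80";
+  bool ok = WireFormatLite::VerifyUtf8String(
+      bad, 2, WireFormatLite::SERIALIZE, "example_field");
+  GOOGLE_DCHECK(!ok);
+}
+#endif  // end illustrative sketch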
+// this code is deliberately written such that clang makes it into really
+// efficient SSE code.
+template <bool ZigZag, bool SignExtended, typename T>
+static size_t VarintSize(const T* data, const int n) {
+#if __cplusplus >= 201103L
+  static_assert(sizeof(T) == 4, "This routine only works for 32 bit integers");
+  // is_unsigned => !ZigZag
+  static_assert((std::is_unsigned<T>::value ^ ZigZag) ||
+                std::is_signed<T>::value,
+                "Cannot ZigZag encode unsigned types");
+  // is_unsigned => !SignExtended
+  static_assert((std::is_unsigned<T>::value ^ SignExtended) ||
+                std::is_signed<T>::value,
+                "Cannot SignExtended unsigned types");
+  static_assert(!(SignExtended && ZigZag),
+                "Cannot SignExtended and ZigZag on the same type");
+#endif
+  uint32 sum = n;
+  uint32 msb_sum = 0;
+  for (int i = 0; i < n; i++) {
+    uint32 x = data[i];
+    if (ZigZag) {
+      x = WireFormatLite::ZigZagEncode32(x);
+    } else if (SignExtended) {
+      msb_sum += x >> 31;
+    }
+    // clang is so smart that it produces optimal SSE sequence unrolling
+    // the loop 8 ints at a time. With a sequence of 4
+    //   cmpres = cmpgt x, sizeclass  ( -1 or 0)
+    //   sum = sum - cmpres
+    if (x > 0x7F) sum++;
+    if (x > 0x3FFF) sum++;
+    if (x > 0x1FFFFF) sum++;
+    if (x > 0xFFFFFFF) sum++;
+  }
+  if (SignExtended) sum += msb_sum * 5;
+  return sum;
+}
+
+template <bool ZigZag, typename T>
+static size_t VarintSize64(const T* data, const int n) {
+#if __cplusplus >= 201103L
+  static_assert(sizeof(T) == 8, "This routine only works for 64 bit integers");
+  // is_unsigned => !ZigZag
+  static_assert(!ZigZag || !std::is_unsigned<T>::value,
+                "Cannot ZigZag encode unsigned types");
+#endif
+  uint64 sum = n;
+  for (int i = 0; i < n; i++) {
+    uint64 x = data[i];
+    if (ZigZag) {
+      x = WireFormatLite::ZigZagEncode64(x);
+    }
+    // First step is a binary search, we can't branch in sse so we use the
+    // result of the compare to adjust sum and x appropriately. This code is
+    // written to make clang recognize the vectorization.
+    uint64 tmp = x >= (static_cast<uint64>(1) << 35) ? -1 : 0;
+    sum += 5 & tmp;
+    x >>= 35 & tmp;
+    if (x > 0x7F) sum++;
+    if (x > 0x3FFF) sum++;
+    if (x > 0x1FFFFF) sum++;
+    if (x > 0xFFFFFFF) sum++;
+  }
+  return sum;
+}
+
+// GCC does not recognize the vectorization opportunity
+// and other platforms are untested, in those cases using the optimized
+// varint size routine for each element is faster.
+// Hence we enable it only for clang
+#if defined(__SSE__) && defined(__clang__)
+size_t WireFormatLite::Int32Size(const RepeatedField<int32>& value) {
+  return VarintSize<false, true>(value.data(), value.size());
+}
+
+size_t WireFormatLite::UInt32Size(const RepeatedField<uint32>& value) {
+  return VarintSize<false, false>(value.data(), value.size());
+}
+
+size_t WireFormatLite::SInt32Size(const RepeatedField<int32>& value) {
+  return VarintSize<true, false>(value.data(), value.size());
+}
+
+size_t WireFormatLite::EnumSize(const RepeatedField<int>& value) {
+  // On ILP64, sizeof(int) == 8, which would require a different template.
+  return VarintSize<false, true>(value.data(), value.size());
+}
+
+#else  // !(defined(__SSE__) && defined(__clang__))
+
+size_t WireFormatLite::Int32Size(const RepeatedField<int32>& value) {
+  size_t out = 0;
+  const int n = value.size();
+  for (int i = 0; i < n; i++) {
+    out += Int32Size(value.Get(i));
+  }
+  return out;
+}
+
+size_t WireFormatLite::UInt32Size(const RepeatedField<uint32>& value) {
+  size_t out = 0;
+  const int n = value.size();
+  for (int i = 0; i < n; i++) {
+    out += UInt32Size(value.Get(i));
+  }
+  return out;
+}
+
+size_t WireFormatLite::SInt32Size(const RepeatedField<int32>& value) {
+  size_t out = 0;
+  const int n = value.size();
+  for (int i = 0; i < n; i++) {
+    out += SInt32Size(value.Get(i));
+  }
+  return out;
+}
+
+size_t WireFormatLite::EnumSize(const RepeatedField<int>& value) {
+  size_t out = 0;
+  const int n = value.size();
+  for (int i = 0; i < n; i++) {
+    out += EnumSize(value.Get(i));
+  }
+  return out;
+}
+
+#endif
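+// Worked example (editor's addition): a varint takes one byte per 7 payload
+// bits, so the branchless counters above seed sum with n (one byte per
+// element) and add 1 for each threshold crossed. For x = 300: 300 > 0x7F but
+// 300 <= 0x3FFF, so exactly one increment fires and the size is 2 bytes. A
+// negative int32 sign-extends to 10 bytes on the wire; the four comparisons
+// only account for the first 5 bytes, which is why the SignExtended path
+// adds msb_sum * 5 for each element whose MSB is set.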
+// Micro benchmarks show that the SSE improved loop only starts beating
+// the normal loop on Haswell platforms and then only for >32 ints. We
+// disable this for now. Some specialized users might find it worthwhile to
+// enable this.
+#define USE_SSE_FOR_64_BIT_INTEGER_ARRAYS 0
+#if USE_SSE_FOR_64_BIT_INTEGER_ARRAYS
+size_t WireFormatLite::Int64Size (const RepeatedField< int64>& value) {
+  return VarintSize64<false>(value.data(), value.size());
+}
+
+size_t WireFormatLite::UInt64Size(const RepeatedField<uint64>& value) {
+  return VarintSize64<false>(value.data(), value.size());
+}
+
+size_t WireFormatLite::SInt64Size(const RepeatedField< int64>& value) {
+  return VarintSize64<true>(value.data(), value.size());
+}
+
+#else
+
+size_t WireFormatLite::Int64Size (const RepeatedField< int64>& value) {
+  size_t out = 0;
+  const int n = value.size();
+  for (int i = 0; i < n; i++) {
+    out += Int64Size(value.Get(i));
+  }
+  return out;
+}
+
+size_t WireFormatLite::UInt64Size(const RepeatedField<uint64>& value) {
+  size_t out = 0;
+  const int n = value.size();
+  for (int i = 0; i < n; i++) {
+    out += UInt64Size(value.Get(i));
+  }
+  return out;
+}
+
+size_t WireFormatLite::SInt64Size(const RepeatedField< int64>& value) {
+  size_t out = 0;
+  const int n = value.size();
+  for (int i = 0; i < n; i++) {
+    out += SInt64Size(value.Get(i));
+  }
+  return out;
+}
+
+#endif
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
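+// Illustrative sketch (editor's addition, compiled out): writing a few
+// scalar fields by hand with the WireFormatLite primitives defined in this
+// file. The field numbers are made up; scalar writers need no cached sizes.
+#if 0
+static void HandSerializeSketch(google::protobuf::io::CodedOutputStream* out) {
+  using google::protobuf::internal::WireFormatLite;
+  WireFormatLite::WriteInt32(1, 150, out);           // field 1: varint 150
+  WireFormatLite::WriteFixed32(2, 0xDEADBEEF, out);  // field 2: 4-byte fixed
+  WireFormatLite::WriteString(3, "abc", out);        // field 3: len-delimited
+}
+#endif  // end illustrative sketch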
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/zero_copy_stream.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/zero_copy_stream.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f81555e5549554b78f29f77e9359e6c2b30d4d16
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/zero_copy_stream.cc
@@ -0,0 +1,55 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+
+#include <google/protobuf/io/zero_copy_stream.h>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/logging.h>
+
+namespace google {
+namespace protobuf {
+namespace io {
+
+
+bool ZeroCopyOutputStream::WriteAliasedRaw(const void* /* data */,
+                                           int /* size */) {
+  GOOGLE_LOG(FATAL) << "This ZeroCopyOutputStream doesn't support aliasing. "
+                       "Reaching here usually means a ZeroCopyOutputStream "
+                       "implementation bug.";
+  return false;
+}
+
+}  // namespace io
+}  // namespace protobuf
+}  // namespace google
diff --git a/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/zero_copy_stream_impl_lite.cc b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/zero_copy_stream_impl_lite.cc
new file mode 100644
index 0000000000000000000000000000000000000000..66ad49bcc050f4e04e5bc0192374c19f8a11522e
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/zero_copy_stream_impl_lite.cc
@@ -0,0 +1,401 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+
+#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
+
+#include <algorithm>
+#include <limits>
+
+#include <google/protobuf/stubs/casts.h>
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/logging.h>
+#include <google/protobuf/stubs/stl_util.h>
+
+namespace google {
+namespace protobuf {
+namespace io {
+
+namespace {
+
+// Default block size for Copying{In,Out}putStreamAdaptor.
+static const int kDefaultBlockSize = 8192;
+
+}  // namespace
+
+// ===================================================================
+
+ArrayInputStream::ArrayInputStream(const void* data, int size,
+                                   int block_size)
+    : data_(reinterpret_cast<const uint8*>(data)),
+      size_(size),
+      block_size_(block_size > 0 ? block_size : size),
+      position_(0),
+      last_returned_size_(0) {
+}
+
+bool ArrayInputStream::Next(const void** data, int* size) {
+  if (position_ < size_) {
+    last_returned_size_ = std::min(block_size_, size_ - position_);
+    *data = data_ + position_;
+    *size = last_returned_size_;
+    position_ += last_returned_size_;
+    return true;
+  } else {
+    // We're at the end of the array.
+    last_returned_size_ = 0;  // Don't let caller back up.
+    return false;
+  }
+}
+
+void ArrayInputStream::BackUp(int count) {
+  GOOGLE_CHECK_GT(last_returned_size_, 0)
+      << "BackUp() can only be called after a successful Next().";
+  GOOGLE_CHECK_LE(count, last_returned_size_);
+  GOOGLE_CHECK_GE(count, 0);
+  position_ -= count;
+  last_returned_size_ = 0;  // Don't let caller back up further.
+}
+
+bool ArrayInputStream::Skip(int count) {
+  GOOGLE_CHECK_GE(count, 0);
+  last_returned_size_ = 0;  // Don't let caller back up.
+  if (count > size_ - position_) {
+    position_ = size_;
+    return false;
+  } else {
+    position_ += count;
+    return true;
+  }
+}
+
+int64 ArrayInputStream::ByteCount() const {
+  return position_;
+}
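+// Illustrative sketch (editor's addition, compiled out): Next()/BackUp()
+// semantics of ArrayInputStream. With block_size 3 over 5 bytes, the first
+// Next() yields 3 bytes and the second yields 2; BackUp(1) returns the last
+// byte to the stream, so ByteCount() drops from 5 to 4.
+#if 0
+static void ArrayInputStreamSketch() {
+  const char data[] = "hello";
+  ArrayInputStream stream(data, 5, /*block_size=*/3);
+  const void* chunk;
+  int size;
+  stream.Next(&chunk, &size);  // chunk -> "hel", size == 3
+  stream.Next(&chunk, &size);  // chunk -> "lo",  size == 2
+  stream.BackUp(1);            // un-read the trailing 'o'
+  // stream.ByteCount() == 4
+}
+#endif  // end illustrative sketch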
+
+// ===================================================================
+
+ArrayOutputStream::ArrayOutputStream(void* data, int size, int block_size)
+    : data_(reinterpret_cast<uint8*>(data)),
+      size_(size),
+      block_size_(block_size > 0 ? block_size : size),
+      position_(0),
+      last_returned_size_(0) {
+}
+
+bool ArrayOutputStream::Next(void** data, int* size) {
+  if (position_ < size_) {
+    last_returned_size_ = std::min(block_size_, size_ - position_);
+    *data = data_ + position_;
+    *size = last_returned_size_;
+    position_ += last_returned_size_;
+    return true;
+  } else {
+    // We're at the end of the array.
+    last_returned_size_ = 0;  // Don't let caller back up.
+    return false;
+  }
+}
+
+void ArrayOutputStream::BackUp(int count) {
+  GOOGLE_CHECK_GT(last_returned_size_, 0)
+      << "BackUp() can only be called after a successful Next().";
+  GOOGLE_CHECK_LE(count, last_returned_size_);
+  GOOGLE_CHECK_GE(count, 0);
+  position_ -= count;
+  last_returned_size_ = 0;  // Don't let caller back up further.
+}
+
+int64 ArrayOutputStream::ByteCount() const {
+  return position_;
+}
+
+// ===================================================================
+
+StringOutputStream::StringOutputStream(string* target)
+    : target_(target) {
+}
+
+bool StringOutputStream::Next(void** data, int* size) {
+  GOOGLE_CHECK(target_ != NULL);
+  int old_size = target_->size();
+
+  // Grow the string.
+  if (old_size < target_->capacity()) {
+    // Resize the string to match its capacity, since we can get away
+    // without a memory allocation this way.
+    STLStringResizeUninitialized(target_, target_->capacity());
+  } else {
+    // Size has reached capacity, try to double the size.
+    if (old_size > std::numeric_limits<int>::max() / 2) {
+      // Cannot double the size: the expression below, old_size * 2, would
+      // overflow a signed int.
+      GOOGLE_LOG(ERROR) << "Cannot allocate buffer larger than kint32max for "
+                        << "StringOutputStream.";
+      return false;
+    }
+    // Double the size, also make sure that the new size is at least
+    // kMinimumSize.
+    STLStringResizeUninitialized(
+        target_,
+        std::max(old_size * 2,
+                 kMinimumSize + 0));  // "+ 0" works around GCC4 weirdness.
+  }
+
+  *data = mutable_string_data(target_) + old_size;
+  *size = target_->size() - old_size;
+  return true;
+}
+
+void StringOutputStream::BackUp(int count) {
+  GOOGLE_CHECK_GE(count, 0);
+  GOOGLE_CHECK(target_ != NULL);
+  GOOGLE_CHECK_LE(count, target_->size());
+  target_->resize(target_->size() - count);
+}
+
+int64 StringOutputStream::ByteCount() const {
+  GOOGLE_CHECK(target_ != NULL);
+  return target_->size();
+}
+
+void StringOutputStream::SetString(string* target) {
+  target_ = target;
+}
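+// Worked example (editor's addition): the doubling policy above gives
+// amortized O(1) growth. Starting from an empty string, successive Next()
+// calls expose a buffer of at least kMinimumSize bytes, then double the
+// string each time capacity is exhausted. A caller that writes fewer bytes
+// than the final buffer holds must BackUp() the unused remainder so the
+// target string ends at exactly the written length.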
+
+// ===================================================================
+
+int CopyingInputStream::Skip(int count) {
+  char junk[4096];
+  int skipped = 0;
+  while (skipped < count) {
+    int bytes =
+        Read(junk, std::min(count - skipped, implicit_cast<int>(sizeof(junk))));
+    if (bytes <= 0) {
+      // EOF or read error.
+      return skipped;
+    }
+    skipped += bytes;
+  }
+  return skipped;
+}
+
+CopyingInputStreamAdaptor::CopyingInputStreamAdaptor(
+    CopyingInputStream* copying_stream, int block_size)
+    : copying_stream_(copying_stream),
+      owns_copying_stream_(false),
+      failed_(false),
+      position_(0),
+      buffer_size_(block_size > 0 ? block_size : kDefaultBlockSize),
+      buffer_used_(0),
+      backup_bytes_(0) {
+}
+
+CopyingInputStreamAdaptor::~CopyingInputStreamAdaptor() {
+  if (owns_copying_stream_) {
+    delete copying_stream_;
+  }
+}
+
+bool CopyingInputStreamAdaptor::Next(const void** data, int* size) {
+  if (failed_) {
+    // Already failed on a previous read.
+    return false;
+  }
+
+  AllocateBufferIfNeeded();
+
+  if (backup_bytes_ > 0) {
+    // We have data left over from a previous BackUp(), so just return that.
+    *data = buffer_.get() + buffer_used_ - backup_bytes_;
+    *size = backup_bytes_;
+    backup_bytes_ = 0;
+    return true;
+  }
+
+  // Read new data into the buffer.
+  buffer_used_ = copying_stream_->Read(buffer_.get(), buffer_size_);
+  if (buffer_used_ <= 0) {
+    // EOF or read error.  We don't need the buffer anymore.
+    if (buffer_used_ < 0) {
+      // Read error (not EOF).
+      failed_ = true;
+    }
+    FreeBuffer();
+    return false;
+  }
+  position_ += buffer_used_;
+
+  *size = buffer_used_;
+  *data = buffer_.get();
+  return true;
+}
+
+void CopyingInputStreamAdaptor::BackUp(int count) {
+  GOOGLE_CHECK(backup_bytes_ == 0 && buffer_.get() != NULL)
+      << " BackUp() can only be called after Next().";
+  GOOGLE_CHECK_LE(count, buffer_used_)
+      << " Can't back up over more bytes than were returned by the last call"
+         " to Next().";
+  GOOGLE_CHECK_GE(count, 0)
+      << " Parameter to BackUp() can't be negative.";
+
+  backup_bytes_ = count;
+}
+
+bool CopyingInputStreamAdaptor::Skip(int count) {
+  GOOGLE_CHECK_GE(count, 0);
+
+  if (failed_) {
+    // Already failed on a previous read.
+    return false;
+  }
+
+  // First skip any bytes left over from a previous BackUp().
+  if (backup_bytes_ >= count) {
+    // We have more data left over than we're trying to skip.  Just chop it.
+ backup_bytes_ -= count; + return true; + } + + count -= backup_bytes_; + backup_bytes_ = 0; + + int skipped = copying_stream_->Skip(count); + position_ += skipped; + return skipped == count; +} + +int64 CopyingInputStreamAdaptor::ByteCount() const { + return position_ - backup_bytes_; +} + +void CopyingInputStreamAdaptor::AllocateBufferIfNeeded() { + if (buffer_.get() == NULL) { + buffer_.reset(new uint8[buffer_size_]); + } +} + +void CopyingInputStreamAdaptor::FreeBuffer() { + GOOGLE_CHECK_EQ(backup_bytes_, 0); + buffer_used_ = 0; + buffer_.reset(); +} + +// =================================================================== + +CopyingOutputStreamAdaptor::CopyingOutputStreamAdaptor( + CopyingOutputStream* copying_stream, int block_size) + : copying_stream_(copying_stream), + owns_copying_stream_(false), + failed_(false), + position_(0), + buffer_size_(block_size > 0 ? block_size : kDefaultBlockSize), + buffer_used_(0) { +} + +CopyingOutputStreamAdaptor::~CopyingOutputStreamAdaptor() { + WriteBuffer(); + if (owns_copying_stream_) { + delete copying_stream_; + } +} + +bool CopyingOutputStreamAdaptor::Flush() { + return WriteBuffer(); +} + +bool CopyingOutputStreamAdaptor::Next(void** data, int* size) { + if (buffer_used_ == buffer_size_) { + if (!WriteBuffer()) return false; + } + + AllocateBufferIfNeeded(); + + *data = buffer_.get() + buffer_used_; + *size = buffer_size_ - buffer_used_; + buffer_used_ = buffer_size_; + return true; +} + +void CopyingOutputStreamAdaptor::BackUp(int count) { + GOOGLE_CHECK_GE(count, 0); + GOOGLE_CHECK_EQ(buffer_used_, buffer_size_) + << " BackUp() can only be called after Next()."; + GOOGLE_CHECK_LE(count, buffer_used_) + << " Can't back up over more bytes than were returned by the last call" + " to Next()."; + + buffer_used_ -= count; +} + +int64 CopyingOutputStreamAdaptor::ByteCount() const { + return position_ + buffer_used_; +} + +bool CopyingOutputStreamAdaptor::WriteBuffer() { + if (failed_) { + // Already failed on a previous write. + return false; + } + + if (buffer_used_ == 0) return true; + + if (copying_stream_->Write(buffer_.get(), buffer_used_)) { + position_ += buffer_used_; + buffer_used_ = 0; + return true; + } else { + failed_ = true; + FreeBuffer(); + return false; + } +} + +void CopyingOutputStreamAdaptor::AllocateBufferIfNeeded() { + if (buffer_ == NULL) { + buffer_.reset(new uint8[buffer_size_]); + } +} + +void CopyingOutputStreamAdaptor::FreeBuffer() { + buffer_used_ = 0; + buffer_.reset(); +} + +// =================================================================== + +} // namespace io +} // namespace protobuf +} // namespace google
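+// Illustrative sketch (editor's addition, compiled out): the adaptors above
+// turn a plain read/write interface into a zero-copy one. A minimal
+// CopyingInputStream over a FILE* might look like this; the class name is
+// hypothetical (the real file-backed streams live in zero_copy_stream_impl.h,
+// which is not part of the -lite target).
+#if 0
+#include <cstdio>
+
+class CFileCopyingInputStream
+    : public google::protobuf::io::CopyingInputStream {
+ public:
+  explicit CFileCopyingInputStream(FILE* file) : file_(file) {}
+
+  // Read up to |size| bytes; return bytes read, 0 on EOF, -1 on error.
+  int Read(void* buffer, int size) override {
+    size_t n = fread(buffer, 1, size, file_);
+    if (n == 0 && ferror(file_)) return -1;
+    return static_cast<int>(n);
+  }
+
+ private:
+  FILE* file_;
+};
+// Usage: wrap it in a CopyingInputStreamAdaptor, which supplies the
+// buffering and BackUp() bookkeeping, then hand that to a CodedInputStream.
+#endif  // end illustrative sketch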