diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/mac.yml b/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/mac.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d55a6a3a34e2f35a0ba663e55807c47f7e5a3505
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/mac.yml
@@ -0,0 +1,30 @@
+name: Mac
+
+on:
+  push:
+    branches: master
+  pull_request:
+    branches: master
+
+jobs:
+  build:
+    runs-on: macOS-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: Install Boost
+      run: |
+        brew install boost
+        brew install libomp
+        brew install eigen
+    - name: cmake
+      run: |
+        cmake -E make_directory build
+        cd build
+        cmake ..
+    - name: Compile
+      working-directory: build
+      run: cmake --build . -j2
+    - name: Test
+      working-directory: build
+      run: ctest -j2
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/ubuntu.yml b/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/ubuntu.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9129c8907f6ad9077bdd21eedd4cdbb8f88e78c0
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/ubuntu.yml
@@ -0,0 +1,27 @@
+name: Ubuntu
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: dependencies
+      run: sudo apt-get install -y build-essential libboost-all-dev cmake zlib1g-dev libbz2-dev liblzma-dev
+    - name: cmake
+      run: |
+        cmake -E make_directory build
+        cd build
+        cmake -DCOMPILE_TESTS=ON ..
+    - name: Compile
+      working-directory: build
+      run: cmake --build . -j2
+    - name: Test
+      working-directory: build
+      run: ctest -j2
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/windows.yml b/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/windows.yml
new file mode 100644
index 0000000000000000000000000000000000000000..35804e7526b43f8a1408ee29962c6e5ed9fa25f7
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/windows.yml
@@ -0,0 +1,25 @@
+name: Windows
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+jobs:
+  build:
+    runs-on: windows-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: cmake
+      run: |
+        cmake -E make_directory build
+        cd build
+        cmake -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" ..
+    - name: Compile
+      working-directory: build
+      run: cmake --build . -j2
+    - name: Test
+      working-directory: build
+      run: ctest -j2
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/bhiksha.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/bhiksha.hh
new file mode 100644
index 0000000000000000000000000000000000000000..134beb2f839bb0bd5fc22baaa77f83ba96f84a97
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/bhiksha.hh
@@ -0,0 +1,123 @@
+/* Simple implementation of
+ * @inproceedings{bhikshacompression,
+ * author={Bhiksha Raj and Ed Whittaker},
+ * year={2003},
+ * title={Lossless Compression of Language Model Structure and Word Identifiers},
+ * booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing},
+ * pages={388--391},
+ * }
+ *
+ * Currently only used for next pointers.
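+ *
+ * Sketch of the encoding (hypothetical numbers): with 2 inline bits, a next
+ * pointer of 13 (binary 1101) stores 01 inline; the high bits 11 are
+ * recovered from an offset array that records, for each high-bits value,
+ * the first index that uses it.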
+ */
+
+#ifndef LM_BHIKSHA_H
+#define LM_BHIKSHA_H
+
+#include "lm/model_type.hh"
+#include "lm/trie.hh"
+#include "util/bit_packing.hh"
+#include "util/sorted_uniform.hh"
+
+#include <algorithm>
+
+#include <stdint.h>
+#include <assert.h>
+
+namespace lm {
+namespace ngram {
+struct Config;
+class BinaryFormat;
+
+namespace trie {
+
+class DontBhiksha {
+  public:
+    static const ModelType kModelTypeAdd = static_cast<ModelType>(0);
+
+    static void UpdateConfigFromBinary(const BinaryFormat &, uint64_t, Config &/*config*/) {}
+
+    static uint64_t Size(uint64_t /*max_offset*/, uint64_t /*max_next*/, const Config &/*config*/) { return 0; }
+
+    static uint8_t InlineBits(uint64_t /*max_offset*/, uint64_t max_next, const Config &/*config*/) {
+      return util::RequiredBits(max_next);
+    }
+
+    DontBhiksha(const void *base, uint64_t max_offset, uint64_t max_next, const Config &config);
+
+    void ReadNext(const void *base, uint64_t bit_offset, uint64_t /*index*/, uint8_t total_bits, NodeRange &out) const {
+      out.begin = util::ReadInt57(base, bit_offset, next_.bits, next_.mask);
+      out.end = util::ReadInt57(base, bit_offset + total_bits, next_.bits, next_.mask);
+      //assert(out.end >= out.begin);
+    }
+
+    void WriteNext(void *base, uint64_t bit_offset, uint64_t /*index*/, uint64_t value) {
+      util::WriteInt57(base, bit_offset, next_.bits, value);
+    }
+
+    void FinishedLoading(const Config &/*config*/) {}
+
+    uint8_t InlineBits() const { return next_.bits; }
+
+  private:
+    util::BitsMask next_;
+};
+
+class ArrayBhiksha {
+  public:
+    static const ModelType kModelTypeAdd = kArrayAdd;
+
+    static void UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config);
+
+    static uint64_t Size(uint64_t max_offset, uint64_t max_next, const Config &config);
+
+    static uint8_t InlineBits(uint64_t max_offset, uint64_t max_next, const Config &config);
+
+    ArrayBhiksha(void *base, uint64_t max_offset, uint64_t max_value, const Config &config);
+
+    void ReadNext(const void *base, uint64_t bit_offset, uint64_t index, uint8_t total_bits, NodeRange &out) const {
+      // Some assertions are commented out because they are expensive.
+      // assert(*offset_begin_ == 0);
+      // std::upper_bound returns the first element that is greater.  Want the
+      // last element that is <= to the index.
+      const uint64_t *begin_it = std::upper_bound(offset_begin_, offset_end_, index) - 1;
+      // Since *offset_begin_ == 0, the position should be in range.
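+      // Worked example (hypothetical data): with offsets {0, 4, 9} and
+      // index 5, upper_bound points at 9, so begin_it lands on the entry 4,
+      // i.e. the bucket whose high bits are 1.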
+      // assert(begin_it >= offset_begin_);
+      const uint64_t *end_it;
+      for (end_it = begin_it + 1; (end_it < offset_end_) && (*end_it <= index + 1); ++end_it) {}
+      // assert(end_it == std::upper_bound(offset_begin_, offset_end_, index + 1));
+      --end_it;
+      // assert(end_it >= begin_it);
+      out.begin = ((begin_it - offset_begin_) << next_inline_.bits) |
+                  util::ReadInt57(base, bit_offset, next_inline_.bits, next_inline_.mask);
+      out.end = ((end_it - offset_begin_) << next_inline_.bits) |
+                util::ReadInt57(base, bit_offset + total_bits, next_inline_.bits, next_inline_.mask);
+      // If this fails, consider rebuilding your model using KenLM after 1e333d786b748555e8f368d2bbba29a016c98052
+      assert(out.end >= out.begin);
+    }
+
+    void WriteNext(void *base, uint64_t bit_offset, uint64_t index, uint64_t value) {
+      uint64_t encode = value >> next_inline_.bits;
+      for (; write_to_ <= offset_begin_ + encode; ++write_to_) *write_to_ = index;
+      util::WriteInt57(base, bit_offset, next_inline_.bits, value & next_inline_.mask);
+    }
+
+    void FinishedLoading(const Config &config);
+
+    uint8_t InlineBits() const { return next_inline_.bits; }
+
+  private:
+    const util::BitsMask next_inline_;
+
+    const uint64_t *const offset_begin_;
+    const uint64_t *const offset_end_;
+
+    uint64_t *write_to_;
+
+    void *original_base_;
+};
+
+} // namespace trie
+} // namespace ngram
+} // namespace lm
+
+#endif // LM_BHIKSHA_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/binary_format.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/binary_format.hh
new file mode 100644
index 0000000000000000000000000000000000000000..136d6b1aa0154a6fbad97d4ff9291bd2cc8912f7
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/binary_format.hh
@@ -0,0 +1,106 @@
+#ifndef LM_BINARY_FORMAT_H
+#define LM_BINARY_FORMAT_H
+
+#include "lm/config.hh"
+#include "lm/model_type.hh"
+#include "lm/read_arpa.hh"
+
+#include "util/file_piece.hh"
+#include "util/mmap.hh"
+#include "util/scoped.hh"
+
+#include <cstddef>
+#include <vector>
+
+#include <stdint.h>
+
+namespace lm {
+namespace ngram {
+
+extern const char *kModelNames[6];
+
+/* Inspect a file to determine if it is a binary lm.  If not, return false.
+ * If so, return true and set recognized to the type.  This is the only API in
+ * this header designed for use by decoder authors.
+ */
+bool RecognizeBinary(const char *file, ModelType &recognized);
+
+struct FixedWidthParameters {
+  unsigned char order;
+  float probing_multiplier;
+  // What type of model is this?
+  ModelType model_type;
+  // Does the end of the file have the actual strings in the vocabulary?
+  bool has_vocabulary;
+  unsigned int search_version;
+};
+
+// This is a macro instead of an inline function so constants can be assigned using it.
+#define ALIGN8(a) ((std::ptrdiff_t(((a)-1)/8)+1)*8)
+
+// Parameters stored in the header of a binary file.
+struct Parameters {
+  FixedWidthParameters fixed;
+  std::vector<uint64_t> counts;
+};
+
+class BinaryFormat {
+  public:
+    explicit BinaryFormat(const Config &config);
+
+    // Reading a binary file:
+    // Takes ownership of fd
+    void InitializeBinary(int fd, ModelType model_type, unsigned int search_version, Parameters &params);
+    // Used to read parts of the file to update the config object before figuring out full size.
+    void ReadForConfig(void *to, std::size_t amount, uint64_t offset_excluding_header) const;
+    // Actually load the binary file and return a pointer to the beginning of the search area.
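+    // A plausible call sequence when reading (sketch only; fixed_size_header
+    // and total_size are hypothetical names):
+    //   Parameters params;
+    //   format.InitializeBinary(fd, model_type, search_version, params);
+    //   format.ReadForConfig(&fixed_size_header, sizeof(fixed_size_header), 0);
+    //   void *search_area = format.LoadBinary(total_size);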
+    void *LoadBinary(std::size_t size);
+
+    uint64_t VocabStringReadingOffset() const {
+      assert(vocab_string_offset_ != kInvalidOffset);
+      return vocab_string_offset_;
+    }
+
+    // Writing a binary file or initializing in RAM from ARPA:
+    // Size for vocabulary.
+    void *SetupJustVocab(std::size_t memory_size, uint8_t order);
+    // Warning: can change the vocabulary base pointer.
+    void *GrowForSearch(std::size_t memory_size, std::size_t vocab_pad, void *&vocab_base);
+    // Warning: can change vocabulary and search base addresses.
+    void WriteVocabWords(const std::string &buffer, void *&vocab_base, void *&search_base);
+    // Write the header at the beginning of the file.
+    void FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts);
+
+  private:
+    void MapFile(void *&vocab_base, void *&search_base);
+
+    // Copied from configuration.
+    const Config::WriteMethod write_method_;
+    const char *write_mmap_;
+    util::LoadMethod load_method_;
+
+    // File behind memory, if any.
+    util::scoped_fd file_;
+
+    // If there is a file involved, a single mapping.
+    util::scoped_memory mapping_;
+
+    // If the data is only in memory, separately allocate each because the trie
+    // knows vocab's size before it knows search's size (because SRILM might
+    // have pruned).
+    util::scoped_memory memory_vocab_, memory_search_;
+
+    // Memory ranges.  Note that these may not be contiguous and may not all
+    // exist.
+    std::size_t header_size_, vocab_size_, vocab_pad_;
+    // aka end of search.
+    uint64_t vocab_string_offset_;
+
+    static const uint64_t kInvalidOffset = (uint64_t)-1;
+};
+
+bool IsBinaryFormat(int fd);
+
+} // namespace ngram
+} // namespace lm
+#endif // LM_BINARY_FORMAT_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/blank.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/blank.hh
new file mode 100644
index 0000000000000000000000000000000000000000..94a71ad283c91071bfccf506929acbfb57e2441e
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/blank.hh
@@ -0,0 +1,43 @@
+#ifndef LM_BLANK_H
+#define LM_BLANK_H
+
+#include <limits>
+
+#include <stdint.h>
+#include <math.h>
+
+namespace lm {
+namespace ngram {
+
+/* Suppose "foo bar" appears with zero backoff but there is no trigram
+ * beginning with these words.  Then, when scoring "foo bar", the model could
+ * return out_state containing "bar" or even null context if "bar" also has no
+ * backoff and is never followed by another word.  Then the backoff is set to
+ * kNoExtensionBackoff.  If the n-gram might be extended, then out_state must
+ * contain the full n-gram, in which case kExtensionBackoff is set.  In any
+ * case, if an n-gram has non-zero backoff, the full state is returned so
+ * backoff can be properly charged.
+ * These differ only in sign bit because the backoff is in fact zero in either
+ * case.
+ */
+const float kNoExtensionBackoff = -0.0;
+const float kExtensionBackoff = 0.0;
+const uint64_t kNoExtensionQuant = 0;
+const uint64_t kExtensionQuant = 1;
+
+inline void SetExtension(float &backoff) {
+  if (backoff == kNoExtensionBackoff) backoff = kExtensionBackoff;
+}
+
+// This compiles down nicely.
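+// IEEE 754 defines -0.0f == 0.0f as true, so an ordinary float comparison
+// cannot tell the two flags above apart; comparing the bit patterns through
+// the union below can.  For example, HasExtension(-0.0f) is false while
+// HasExtension(0.0f) is true.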
+inline bool HasExtension(const float &backoff) {
+  typedef union { float f; uint32_t i; } UnionValue;
+  UnionValue compare, interpret;
+  compare.f = kNoExtensionBackoff;
+  interpret.f = backoff;
+  return compare.i != interpret.i;
+}
+
+} // namespace ngram
+} // namespace lm
+#endif // LM_BLANK_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/adjust_counts.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/adjust_counts.hh
new file mode 100644
index 0000000000000000000000000000000000000000..b169950e96e4ea0cb7302f8b248e28d8debd26f8
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/adjust_counts.hh
@@ -0,0 +1,72 @@
+#ifndef LM_BUILDER_ADJUST_COUNTS_H
+#define LM_BUILDER_ADJUST_COUNTS_H
+
+#include "lm/builder/discount.hh"
+#include "lm/lm_exception.hh"
+#include "util/exception.hh"
+
+#include <vector>
+
+#include <stdint.h>
+
+namespace util { namespace stream { class ChainPositions; } }
+
+namespace lm {
+namespace builder {
+
+class BadDiscountException : public util::Exception {
+  public:
+    BadDiscountException() throw();
+    ~BadDiscountException() throw();
+};
+
+struct DiscountConfig {
+  // Overrides discounts for orders [1,overwrite.size()].
+  std::vector<Discount> overwrite;
+  // If discounting fails for an order, copy them from here.
+  Discount fallback;
+  // What to do when discounts are out of range or would trigger division by
+  // zero.  If it does something other than THROW_UP, use fallback_discount.
+  WarningAction bad_action;
+};
+
+/* Compute adjusted counts.
+ * Input: unique suffix sorted N-grams (and just the N-grams) with raw counts.
+ * Output: [1,N]-grams with adjusted counts.
+ * [1,N)-grams are in suffix order
+ * N-grams are in undefined order (they're going to be sorted anyway).
+ */
+class AdjustCounts {
+  public:
+    // counts: output
+    // counts_pruned: output
+    // discounts: mostly output.  If the input already has entries, they will be kept.
+    // prune_thresholds: input.  n-grams with normal (not adjusted) count below this will be pruned.
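+    //   e.g. (hypothetical thresholds) prune_thresholds = {0, 0, 2} keeps all
+    //   unigrams and bigrams but prunes trigrams whose raw count falls below 2.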
+    AdjustCounts(
+        const std::vector<uint64_t> &prune_thresholds,
+        std::vector<uint64_t> &counts,
+        std::vector<uint64_t> &counts_pruned,
+        const std::vector<bool> &prune_words,
+        const DiscountConfig &discount_config,
+        std::vector<Discount> &discounts)
+      : prune_thresholds_(prune_thresholds), counts_(counts), counts_pruned_(counts_pruned),
+        prune_words_(prune_words), discount_config_(discount_config), discounts_(discounts)
+    {}
+
+    void Run(const util::stream::ChainPositions &positions);
+
+  private:
+    const std::vector<uint64_t> &prune_thresholds_;
+    std::vector<uint64_t> &counts_;
+    std::vector<uint64_t> &counts_pruned_;
+    const std::vector<bool> &prune_words_;
+
+    DiscountConfig discount_config_;
+    std::vector<Discount> &discounts_;
+};
+
+} // namespace builder
+} // namespace lm
+
+#endif // LM_BUILDER_ADJUST_COUNTS_H
+
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/discount.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/discount.hh
new file mode 100644
index 0000000000000000000000000000000000000000..e2f4084604ca767254818daa15c726eaa5303d4a
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/discount.hh
@@ -0,0 +1,26 @@
+#ifndef LM_BUILDER_DISCOUNT_H
+#define LM_BUILDER_DISCOUNT_H
+
+#include <algorithm>
+
+#include <stdint.h>
+
+namespace lm {
+namespace builder {
+
+struct Discount {
+  float amount[4];
+
+  float Get(uint64_t count) const {
+    return amount[std::min<uint64_t>(count, 3)];
+  }
+
+  float Apply(uint64_t count) const {
+    return static_cast<float>(count) - Get(count);
+  }
+};
+
+} // namespace builder
+} // namespace lm
+
+#endif // LM_BUILDER_DISCOUNT_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/hash_gamma.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/hash_gamma.hh
new file mode 100644
index 0000000000000000000000000000000000000000..4bef47e819f62be4f311a945fa80521f4c61d980
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/hash_gamma.hh
@@ -0,0 +1,19 @@
+#ifndef LM_BUILDER_HASH_GAMMA__
+#define LM_BUILDER_HASH_GAMMA__
+
+#include <stdint.h>
+
+namespace lm { namespace builder {
+
+#pragma pack(push)
+#pragma pack(4)
+
+struct HashGamma {
+  uint64_t hash_value;
+  float gamma;
+};
+
+#pragma pack(pop)
+
+}} // namespaces
+#endif // LM_BUILDER_HASH_GAMMA__
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/header_info.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/header_info.hh
new file mode 100644
index 0000000000000000000000000000000000000000..14619523343db6ea7efe52025bd4a455bdf0a9c9
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/header_info.hh
@@ -0,0 +1,24 @@
+#ifndef LM_BUILDER_HEADER_INFO_H
+#define LM_BUILDER_HEADER_INFO_H
+
+#include <string>
+#include <vector>
+#include <stdint.h>
+
+// Some configuration info that is used to add
+// comments to the beginning of an ARPA file
+struct HeaderInfo {
+  std::string input_file;
+  uint64_t token_count;
+  std::vector<uint64_t> counts_pruned;
+
+  HeaderInfo() {}
+
+  HeaderInfo(const std::string& input_file_in, uint64_t token_count_in, const std::vector<uint64_t> &counts_pruned_in)
+    : input_file(input_file_in), token_count(token_count_in), counts_pruned(counts_pruned_in) {}
+
+  // TODO: Add smoothing type
+  // TODO: More info if multiple models were interpolated
+};
+
+#endif
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/initial_probabilities.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/initial_probabilities.hh
new file mode 100644
index 0000000000000000000000000000000000000000..57e09cd51676090e31c091761e80dada4f6100bf
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/initial_probabilities.hh
@@ -0,0 +1,42 @@
+#ifndef LM_BUILDER_INITIAL_PROBABILITIES_H
+#define LM_BUILDER_INITIAL_PROBABILITIES_H
+
+#include "lm/builder/discount.hh"
+#include "util/stream/config.hh"
+
+#include <vector>
+
+namespace util { namespace stream { class Chains; } }
+
+namespace lm {
+namespace builder {
+
+struct InitialProbabilitiesConfig {
+  // These should be small buffers to keep the adder from getting too far ahead
+  util::stream::ChainConfig adder_in;
+  util::stream::ChainConfig adder_out;
+  // SRILM doesn't normally interpolate unigrams.
+  bool interpolate_unigrams;
+};
+
+/* Compute initial (uninterpolated) probabilities
+ * primary: the normal chain of n-grams.  Incoming is context sorted adjusted
+ *   counts.  Outgoing has uninterpolated probabilities for use by Interpolate.
+ * second_in: a second copy of the primary input.  Discard the output.
+ * gamma_out: Computed gamma values are output on these chains in suffix order.
+ *   The values are bare floats and should be buffered for interpolation to
+ *   use.
+ */
+void InitialProbabilities(
+    const InitialProbabilitiesConfig &config,
+    const std::vector<Discount> &discounts,
+    util::stream::Chains &primary,
+    util::stream::Chains &second_in,
+    util::stream::Chains &gamma_out,
+    const std::vector<uint64_t> &prune_thresholds,
+    bool prune_vocab);
+
+} // namespace builder
+} // namespace lm
+
+#endif // LM_BUILDER_INITIAL_PROBABILITIES_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/interpolate.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/interpolate.hh
new file mode 100644
index 0000000000000000000000000000000000000000..adfd9198faaa2ef1ed5010754156d433f714d984
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/interpolate.hh
@@ -0,0 +1,34 @@
+#ifndef LM_BUILDER_INTERPOLATE_H
+#define LM_BUILDER_INTERPOLATE_H
+
+#include "util/stream/multi_stream.hh"
+
+#include <vector>
+
+#include <stdint.h>
+
+namespace lm { namespace builder {
+
+/* Interpolate step.
+ * Input: suffix sorted n-grams with (p_uninterpolated, gamma) from
+ *   InitialProbabilities.
+ * Output: suffix sorted n-grams with complete probability
+ */
+class Interpolate {
+  public:
+    // Normally vocab_size is the unigram count-1 (since p(<s>) = 0) but might
+    // be larger when the user specifies a consistent vocabulary size.
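+    // For example (hypothetical size), vocab_size = 9999 makes the uniform
+    // backstop probability 1/9999, which grounds the recursion
+    //   p(w | context) = p_uninterpolated(w | context) + gamma(context) * p(w | backoff context).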
+    explicit Interpolate(uint64_t vocab_size, const util::stream::ChainPositions &backoffs, const std::vector<uint64_t> &prune_thresholds, bool prune_vocab, bool output_q_);
+
+    void Run(const util::stream::ChainPositions &positions);
+
+  private:
+    float uniform_prob_;
+    util::stream::ChainPositions backoffs_;
+    const std::vector<uint64_t> prune_thresholds_;
+    bool prune_vocab_;
+    bool output_q_;
+};
+
+}} // namespaces
+#endif // LM_BUILDER_INTERPOLATE_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/ngram_stream.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/ngram_stream.hh
new file mode 100644
index 0000000000000000000000000000000000000000..ab42734c43e5972e1cc30b0796df6de335a0a786
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/ngram_stream.hh
@@ -0,0 +1,58 @@
+#ifndef LM_BUILDER_NGRAM_STREAM_H
+#define LM_BUILDER_NGRAM_STREAM_H
+
+#include "lm/builder/ngram.hh"
+#include "util/stream/chain.hh"
+#include "util/stream/multi_stream.hh"
+#include "util/stream/stream.hh"
+
+#include <cstddef>
+
+namespace lm { namespace builder {
+
+class NGramStream {
+  public:
+    NGramStream() : gram_(NULL, 0) {}
+
+    NGramStream(const util::stream::ChainPosition &position) : gram_(NULL, 0) {
+      Init(position);
+    }
+
+    void Init(const util::stream::ChainPosition &position) {
+      stream_.Init(position);
+      gram_ = NGram(stream_.Get(), NGram::OrderFromSize(position.GetChain().EntrySize()));
+    }
+
+    NGram &operator*() { return gram_; }
+    const NGram &operator*() const { return gram_; }
+
+    NGram *operator->() { return &gram_; }
+    const NGram *operator->() const { return &gram_; }
+
+    void *Get() { return stream_.Get(); }
+    const void *Get() const { return stream_.Get(); }
+
+    operator bool() const { return stream_; }
+    bool operator!() const { return !stream_; }
+    void Poison() { stream_.Poison(); }
+
+    NGramStream &operator++() {
+      ++stream_;
+      gram_.ReBase(stream_.Get());
+      return *this;
+    }
+
+  private:
+    NGram gram_;
+    util::stream::Stream stream_;
+};
+
+inline util::stream::Chain &operator>>(util::stream::Chain &chain, NGramStream &str) {
+  str.Init(chain.Add());
+  return chain;
+}
+
+typedef util::stream::GenericStreams<NGramStream> NGramStreams;
+
+}} // namespaces
+#endif // LM_BUILDER_NGRAM_STREAM_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/output.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/output.hh
new file mode 100644
index 0000000000000000000000000000000000000000..0ef769ae29d155ddca5306337490a195def1ef29
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/output.hh
@@ -0,0 +1,89 @@
+#ifndef LM_BUILDER_OUTPUT_H
+#define LM_BUILDER_OUTPUT_H
+
+#include "lm/builder/header_info.hh"
+#include "util/file.hh"
+
+#include <boost/ptr_container/ptr_vector.hpp>
+#include <boost/utility.hpp>
+
+#include <map>
+
+namespace util { namespace stream { class Chains; class ChainPositions; } }
+
+/* Outputs from lmplz: ARPA, sharded files, etc. */
+namespace lm { namespace builder {
+
+// These are different types of hooks.  Values should be consecutive to enable a vector lookup.
+enum HookType {
+  COUNT_HOOK, // Raw N-gram counts, highest order only.
+  PROB_PARALLEL_HOOK, // Probability and backoff (or just q).  Output must process the orders in parallel or there will be a deadlock.
+  PROB_SEQUENTIAL_HOOK, // Probability and backoff (or just q).  Output can process orders any way it likes.  This requires writing the data to disk then reading.  Useful for ARPA files, which put unigrams first etc.
+  NUMBER_OF_HOOKS // Keep this last so we know how many values there are.
+};
+
+class Output;
+
+class OutputHook {
+  public:
+    explicit OutputHook(HookType hook_type) : type_(hook_type), master_(NULL) {}
+
+    virtual ~OutputHook();
+
+    virtual void Apply(util::stream::Chains &chains);
+
+    virtual void Run(const util::stream::ChainPositions &positions) = 0;
+
+  protected:
+    const HeaderInfo &GetHeader() const;
+    int GetVocabFD() const;
+
+  private:
+    friend class Output;
+    const HookType type_;
+    const Output *master_;
+};
+
+class Output : boost::noncopyable {
+  public:
+    Output() {}
+
+    // Takes ownership.
+    void Add(OutputHook *hook) {
+      hook->master_ = this;
+      outputs_[hook->type_].push_back(hook);
+    }
+
+    bool Have(HookType hook_type) const {
+      return !outputs_[hook_type].empty();
+    }
+
+    void SetVocabFD(int to) { vocab_fd_ = to; }
+    int GetVocabFD() const { return vocab_fd_; }
+
+    void SetHeader(const HeaderInfo &header) { header_ = header; }
+    const HeaderInfo &GetHeader() const { return header_; }
+
+    void Apply(HookType hook_type, util::stream::Chains &chains) {
+      for (boost::ptr_vector<OutputHook>::iterator entry = outputs_[hook_type].begin(); entry != outputs_[hook_type].end(); ++entry) {
+        entry->Apply(chains);
+      }
+    }
+
+  private:
+    boost::ptr_vector<OutputHook> outputs_[NUMBER_OF_HOOKS];
+    int vocab_fd_;
+    HeaderInfo header_;
+};
+
+inline const HeaderInfo &OutputHook::GetHeader() const {
+  return master_->GetHeader();
+}
+
+inline int OutputHook::GetVocabFD() const {
+  return master_->GetVocabFD();
+}
+
+}} // namespaces
+
+#endif // LM_BUILDER_OUTPUT_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/pipeline.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/pipeline.hh
new file mode 100644
index 0000000000000000000000000000000000000000..8f4d82103db37cdb7b60e15f0927beb2aadc89bd
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/pipeline.hh
@@ -0,0 +1,74 @@
+#ifndef LM_BUILDER_PIPELINE_H
+#define LM_BUILDER_PIPELINE_H
+
+#include "lm/builder/adjust_counts.hh"
+#include "lm/builder/initial_probabilities.hh"
+#include "lm/builder/header_info.hh"
+#include "lm/lm_exception.hh"
+#include "lm/word_index.hh"
+#include "util/stream/config.hh"
+#include "util/file_piece.hh"
+
+#include <string>
+#include <cstddef>
+
+namespace lm { namespace builder {
+
+class Output;
+
+struct PipelineConfig {
+  std::size_t order;
+  std::string vocab_file;
+  util::stream::SortConfig sort;
+  InitialProbabilitiesConfig initial_probs;
+  util::stream::ChainConfig read_backoffs;
+
+  // Estimated vocabulary size.  Used for sizing CorpusCount memory and
+  // initial probing hash table sizing, also in CorpusCount.
+  lm::WordIndex vocab_estimate;
+
+  // Minimum block size to tolerate.
+  std::size_t minimum_block;
+
+  // Number of blocks to use.  This will be overridden to 1 if everything fits.
+  std::size_t block_count;
+
+  // n-gram count thresholds for pruning.  A value of 0 means no pruning for
+  // the corresponding n-gram order.
+  std::vector<uint64_t> prune_thresholds; //mjd
+  bool prune_vocab;
+  std::string prune_vocab_file;
+
+  // What to do with discount failures.
+  DiscountConfig discount;
+
+  // Compute collapsed q values instead of probability and backoff
+  bool output_q;
+
+  /* Computing the perplexity of LMs with different vocabularies is hard.  For
+   * example, the lowest perplexity is attained by a unigram model that
+   * predicts p(<unk>) = 1 and has no other vocabulary.  Also, linearly
+   * interpolated models will sum to more than 1 because <unk> is duplicated
+   * (SRI just pretends p(<unk>) = 0 for these purposes, which makes it sum to
+   * 1 but comes with its own problems).  This option will make the vocabulary
+   * a particular size by replicating <unk> multiple times for purposes of
+   * computing vocabulary size.  It has no effect if the actual vocabulary is
+   * larger.  This parameter serves the same purpose as IRSTLM's "dub".
+   */
+  uint64_t vocab_size_for_unk;
+
+  /* What to do the first time <s>, </s>, or <unk> appears in the input.  If
+   * this is anything but THROW_UP, then the symbol will always be treated as
+   * whitespace.
+   */
+  WarningAction disallowed_symbol_action;
+
+  const std::string &TempPrefix() const { return sort.temp_prefix; }
+  std::size_t TotalMemory() const { return sort.total_memory; }
+};
+
+// Takes ownership of text_file and out_arpa.
+void Pipeline(PipelineConfig &config, int text_file, Output &output);
+
+}} // namespaces
+#endif // LM_BUILDER_PIPELINE_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/sort.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/sort.hh
new file mode 100644
index 0000000000000000000000000000000000000000..712bb8e3537d37ea1272c1ede238337fc59f32e4
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/sort.hh
@@ -0,0 +1,244 @@
+#ifndef LM_BUILDER_SORT_H
+#define LM_BUILDER_SORT_H
+
+#include "lm/builder/ngram_stream.hh"
+#include "lm/builder/ngram.hh"
+#include "lm/word_index.hh"
+#include "util/stream/sort.hh"
+
+#include "util/stream/timer.hh"
+
+#include <functional>
+#include <string>
+
+namespace lm {
+namespace builder {
+
+/**
+ * Abstract parent class for defining custom n-gram comparators.
+ */
+template <class Derived> class Comparator : public std::binary_function<const void *, const void *, bool> {
+  public:
+
+    /**
+     * Constructs a comparator capable of comparing two n-grams.
+     *
+     * @param order Number of words in each n-gram
+     */
+    explicit Comparator(std::size_t order) : order_(order) {}
+
+    /**
+     * Applies the comparator using the Compare method that must be defined in any class that inherits from this class.
+     *
+     * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+     * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+     *
+     * @see ContextOrder::Compare
+     * @see PrefixOrder::Compare
+     * @see SuffixOrder::Compare
+     */
+    inline bool operator()(const void *lhs, const void *rhs) const {
+      return static_cast<const Derived*>(this)->Compare(static_cast<const WordIndex*>(lhs), static_cast<const WordIndex*>(rhs));
+    }
+
+    /** Gets the n-gram order defined for this comparator. */
+    std::size_t Order() const { return order_; }
+
+  protected:
+    std::size_t order_;
+};
+
+/**
+ * N-gram comparator that compares n-grams according to their reverse (suffix) order.
+ *
+ * This comparator compares n-grams lexicographically, one word at a time,
+ * beginning with the last word of each n-gram and ending with the first word of each n-gram.
+ *
+ * Some examples of n-gram comparisons as defined by this comparator:
+ * - a b c == a b c
+ * - a b c < a b d
+ * - a b c > a d b
+ * - a b c > a b b
+ * - a b c > x a c
+ * - a b c < x y z
+ */
+class SuffixOrder : public Comparator<SuffixOrder> {
+  public:
+
+    /**
+     * Constructs a comparator capable of comparing two n-grams.
+     *
+     * @param order Number of words in each n-gram
+     */
+    explicit SuffixOrder(std::size_t order) : Comparator<SuffixOrder>(order) {}
+
+    /**
+     * Compares two n-grams lexicographically, one word at a time,
+     * beginning with the last word of each n-gram and ending with the first word of each n-gram.
+     *
+     * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+     * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+     */
+    inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
+      for (std::size_t i = order_ - 1; i != 0; --i) {
+        if (lhs[i] != rhs[i])
+          return lhs[i] < rhs[i];
+      }
+      return lhs[0] < rhs[0];
+    }
+
+    static const unsigned kMatchOffset = 1;
+};
+
+
+/**
+ * N-gram comparator that compares n-grams according to the reverse (suffix) order of the n-gram context.
+ *
+ * This comparator compares n-grams lexicographically, one word at a time,
+ * beginning with the penultimate word of each n-gram and ending with the first word of each n-gram;
+ * finally, this comparator compares the last word of each n-gram.
+ *
+ * Some examples of n-gram comparisons as defined by this comparator:
+ * - a b c == a b c
+ * - a b c < a b d
+ * - a b c < a d b
+ * - a b c > a b b
+ * - a b c > x a c
+ * - a b c < x y z
+ */
+class ContextOrder : public Comparator<ContextOrder> {
+  public:
+
+    /**
+     * Constructs a comparator capable of comparing two n-grams.
+     *
+     * @param order Number of words in each n-gram
+     */
+    explicit ContextOrder(std::size_t order) : Comparator<ContextOrder>(order) {}
+
+    /**
+     * Compares two n-grams lexicographically, one word at a time,
+     * beginning with the penultimate word of each n-gram and ending with the first word of each n-gram;
+     * finally, this comparator compares the last word of each n-gram.
+     *
+     * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+     * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+     */
+    inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
+      for (int i = order_ - 2; i >= 0; --i) {
+        if (lhs[i] != rhs[i])
+          return lhs[i] < rhs[i];
+      }
+      return lhs[order_ - 1] < rhs[order_ - 1];
+    }
+};
+
+/**
+ * N-gram comparator that compares n-grams according to their natural (prefix) order.
+ *
+ * This comparator compares n-grams lexicographically, one word at a time,
+ * beginning with the first word of each n-gram and ending with the last word of each n-gram.
+ *
+ * Some examples of n-gram comparisons as defined by this comparator:
+ * - a b c == a b c
+ * - a b c < a b d
+ * - a b c < a d b
+ * - a b c > a b b
+ * - a b c < x a c
+ * - a b c < x y z
+ */
+class PrefixOrder : public Comparator<PrefixOrder> {
+  public:
+
+    /**
+     * Constructs a comparator capable of comparing two n-grams.
+     *
+     * @param order Number of words in each n-gram
+     */
+    explicit PrefixOrder(std::size_t order) : Comparator<PrefixOrder>(order) {}
+
+    /**
+     * Compares two n-grams lexicographically, one word at a time,
+     * beginning with the first word of each n-gram and ending with the last word of each n-gram.
+     *
+     * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+     * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+     */
+    inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
+      for (std::size_t i = 0; i < order_; ++i) {
+        if (lhs[i] != rhs[i])
+          return lhs[i] < rhs[i];
+      }
+      return false;
+    }
+
+    static const unsigned kMatchOffset = 0;
+};
+
+// Sum counts for the same n-gram.
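+// For example (hypothetical counts), if suffix sorting places two "a b c"
+// entries with counts 3 and 5 next to each other, the combiner below merges
+// them into one entry with count 8; when the words differ it returns false
+// and both entries are kept.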
+struct AddCombiner {
+  bool operator()(void *first_void, const void *second_void, const SuffixOrder &compare) const {
+    NGram first(first_void, compare.Order());
+    // There isn't a const version of NGram.
+    NGram second(const_cast<void*>(second_void), compare.Order());
+    if (memcmp(first.begin(), second.begin(), sizeof(WordIndex) * compare.Order())) return false;
+    first.Count() += second.Count();
+    return true;
+  }
+};
+
+// The combiner is only used on a single chain, so I didn't bother to allow
+// that template.
+/**
+ * Represents an @ref util::FixedArray "array" capable of storing @ref util::stream::Sort "Sort" objects.
+ *
+ * In the anticipated use case, an instance of this class will maintain one @ref util::stream::Sort "Sort" object
+ * for each n-gram order (ranging from 1 up to the maximum n-gram order being processed).
+ * Used in this manner, the n-grams of each order can be sorted in parallel.
+ *
+ * @tparam Compare An @ref Comparator "ngram comparator" to use during sorting.
+ */
+template <class Compare> class Sorts : public util::FixedArray<util::stream::Sort<Compare> > {
+  private:
+    typedef util::stream::Sort<Compare> S;
+    typedef util::FixedArray<S> P;
+
+  public:
+
+    /**
+     * Constructs, but does not initialize.
+     *
+     * @ref util::FixedArray::Init() "Init" must be called before use.
+     *
+     * @see util::FixedArray::Init()
+     */
+    Sorts() {}
+
+    /**
+     * Constructs an @ref util::FixedArray "array" capable of storing a fixed number of @ref util::stream::Sort "Sort" objects.
+     *
+     * @param number The maximum number of @ref util::stream::Sort "sorters" that can be held by this @ref util::FixedArray "array"
+     * @see util::FixedArray::FixedArray()
+     */
+    explicit Sorts(std::size_t number) : util::FixedArray<util::stream::Sort<Compare> >(number) {}
+
+    /**
+     * Constructs a new @ref util::stream::Sort "Sort" object which is stored in this @ref util::FixedArray "array".
+     *
+     * The new @ref util::stream::Sort "Sort" object is constructed using the provided @ref util::stream::SortConfig "SortConfig" and @ref Comparator "ngram comparator";
+     * once constructed, a new worker @ref util::stream::Thread "thread" (owned by the @ref util::stream::Chain "chain") will sort the n-gram data stored
+     * in the @ref util::stream::Block "blocks" of the provided @ref util::stream::Chain "chain".
+     *
+     * @see util::stream::Sort::Sort()
+     * @see util::stream::Chain::operator>>()
+     */
+    void push_back(util::stream::Chain &chain, const util::stream::SortConfig &config, const Compare &compare) {
+      new (P::end()) S(chain, config, compare); // use "placement new" syntax to initialize S in an already-allocated memory location
+      P::Constructed();
+    }
+};
+
+} // namespace builder
+} // namespace lm
+
+#endif // LM_BUILDER_SORT_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/config.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/config.hh
new file mode 100644
index 0000000000000000000000000000000000000000..21b9e7eeb76343802a6531c660c3c5b25cded8c3
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/config.hh
@@ -0,0 +1,124 @@
+#ifndef LM_CONFIG_H
+#define LM_CONFIG_H
+
+#include "lm/lm_exception.hh"
+#include "util/mmap.hh"
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+/* Configuration for ngram model.  Separate header to reduce pollution. */
+
+namespace lm {
+
+class EnumerateVocab;
+
+namespace ngram {
+
+struct Config {
+  // EFFECTIVE FOR BOTH ARPA AND BINARY READS
+
+  // (default true) print progress bar to messages
+  bool show_progress;
+
+  // Where to log messages including the progress bar.  Set to NULL for
+  // silence.
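+  // e.g. config.messages = &std::cerr (the usual default) or NULL to mute.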
+  std::ostream *messages;
+
+  std::ostream *ProgressMessages() const {
+    return show_progress ? messages : 0;
+  }
+
+  // This will be called with every string in the vocabulary by the
+  // constructor; it need only exist for the lifetime of the constructor.
+  // See enumerate_vocab.hh for more detail.  Config does not take ownership;
+  // just delete/let it go out of scope after the constructor exits.
+  EnumerateVocab *enumerate_vocab;
+
+
+  // ONLY EFFECTIVE WHEN READING ARPA
+
+  // What to do when <unk> isn't in the provided model.
+  WarningAction unknown_missing;
+  // What to do when <s> or </s> is missing from the model.
+  // If THROW_UP, the exception will be of type util::SpecialWordMissingException.
+  WarningAction sentence_marker_missing;
+
+  // What to do with a positive log probability.  For COMPLAIN and SILENT, map
+  // to 0.
+  WarningAction positive_log_probability;
+
+  // The probability to substitute for <unk> if it's missing from the model.
+  // No effect if the model has <unk> or unknown_missing == THROW_UP.
+  float unknown_missing_logprob;
+
+  // Size multiplier for probing hash table.  Must be > 1.  Space is linear in
+  // this.  Time is probing_multiplier / (probing_multiplier - 1).  No effect
+  // for sorted variant.
+  // If you find yourself setting this to a low number, consider using the
+  // TrieModel which has lower memory consumption.
+  float probing_multiplier;
+
+  // Amount of memory to use for building.  The actual memory usage will be
+  // higher since this just sets sort buffer size.  Only applies to trie
+  // models.
+  std::size_t building_memory;
+
+  // Template for temporary directory appropriate for passing to mkdtemp.
+  // The characters XXXXXX are appended before passing to mkdtemp.  Only
+  // applies to trie.  If empty, defaults to write_mmap.  If that's NULL,
+  // defaults to input file name.
+  std::string temporary_directory_prefix;
+
+  // Level of complaining to do when loading from ARPA instead of binary format.
+  enum ARPALoadComplain {ALL, EXPENSIVE, NONE};
+  ARPALoadComplain arpa_complain;
+
+  // While loading an ARPA file, also write out this binary format file.  Set
+  // to NULL to disable.
+  const char *write_mmap;
+
+  enum WriteMethod {
+    WRITE_MMAP, // Map the file directly.
+    WRITE_AFTER // Write after we're done.
+  };
+  WriteMethod write_method;
+
+  // Include the vocab in the binary file?  Only effective if write_mmap != NULL.
+  bool include_vocab;
+
+
+  // Left rest options.  Only used when the model includes rest costs.
+  enum RestFunction {
+    REST_MAX,   // Maximum of any score to the left
+    REST_LOWER, // Use lower-order files given below.
+  };
+  RestFunction rest_function;
+  // Only used for REST_LOWER.
+  std::vector<std::string> rest_lower_files;
+
+
+  // Quantization options.  Only effective for QuantTrieModel.  One value is
+  // reserved for each of prob and backoff, so 2^bits - 1 buckets will be used
+  // to quantize (and one of the remaining backoffs will be 0).
+  uint8_t prob_bits, backoff_bits;
+
+  // Bhiksha compression (simple form).  Only works with trie.
+  uint8_t pointer_bhiksha_bits;
+
+
+  // ONLY EFFECTIVE WHEN READING BINARY
+
+  // How to get the giant array into memory: lazy mmap, populate, read etc.
+  // See util/mmap.hh for details of MapMethod.
+  util::LoadMethod load_method;
+
+
+  // Set defaults.
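+  // Typical use (sketch): rely on the defaults and override selectively, e.g.
+  //   lm::ngram::Config config;
+  //   config.load_method = util::LAZY;  // lazily memory-map the binary file
+  //   config.messages = NULL;           // silence progress output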
+  Config();
+};
+
+} /* namespace ngram */ } /* namespace lm */
+
+#endif // LM_CONFIG_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/enumerate_vocab.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/enumerate_vocab.hh
new file mode 100644
index 0000000000000000000000000000000000000000..f5ce78985db4bbdaf993e4f90189a7a2f2797c50
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/enumerate_vocab.hh
@@ -0,0 +1,28 @@
+#ifndef LM_ENUMERATE_VOCAB_H
+#define LM_ENUMERATE_VOCAB_H
+
+#include "lm/word_index.hh"
+#include "util/string_piece.hh"
+
+namespace lm {
+
+/* If you need the actual strings in the vocabulary, inherit from this class
+ * and implement Add.  Then put a pointer in Config.enumerate_vocab; it does
+ * not take ownership.  Add is called once per vocab word.  index starts at 0
+ * and increases by 1 each time.  This is only used by the Model constructor;
+ * the pointer is not retained by the class.
+ */
+class EnumerateVocab {
+  public:
+    virtual ~EnumerateVocab() {}
+
+    virtual void Add(WordIndex index, const StringPiece &str) = 0;
+
+  protected:
+    EnumerateVocab() {}
+};
+
+} // namespace lm
+
+#endif // LM_ENUMERATE_VOCAB_H
+
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/facade.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/facade.hh
new file mode 100644
index 0000000000000000000000000000000000000000..8e12b62ee199e9c72a56a771e73ecf3dfce85eb9
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/facade.hh
@@ -0,0 +1,73 @@
+#ifndef LM_FACADE_H
+#define LM_FACADE_H
+
+#include "lm/virtual_interface.hh"
+#include "util/string_piece.hh"
+
+#include <string>
+
+namespace lm {
+namespace base {
+
+// Common model interface that depends on knowing the specific classes.
+// Curiously recurring template pattern.
+template <class Child, class StateT, class VocabularyT> class ModelFacade : public Model {
+  public:
+    typedef StateT State;
+    typedef VocabularyT Vocabulary;
+
+    /* Translate from void* to State */
+    FullScoreReturn BaseFullScore(const void *in_state, const WordIndex new_word, void *out_state) const {
+      return static_cast<const Child*>(this)->FullScore(
+          *reinterpret_cast<const State*>(in_state),
+          new_word,
+          *reinterpret_cast<State*>(out_state));
+    }
+
+    FullScoreReturn BaseFullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, void *out_state) const {
+      return static_cast<const Child*>(this)->FullScoreForgotState(
+          context_rbegin,
+          context_rend,
+          new_word,
+          *reinterpret_cast<State*>(out_state));
+    }
+
+    // Default Score function calls FullScore.  Model can override this.
+    float Score(const State &in_state, const WordIndex new_word, State &out_state) const {
+      return static_cast<const Child*>(this)->FullScore(in_state, new_word, out_state).prob;
+    }
+
+    float BaseScore(const void *in_state, const WordIndex new_word, void *out_state) const {
+      return static_cast<const Child*>(this)->Score(
+          *reinterpret_cast<const State*>(in_state),
+          new_word,
+          *reinterpret_cast<State*>(out_state));
+    }
+
+    const State &BeginSentenceState() const { return begin_sentence_; }
+    const State &NullContextState() const { return null_context_; }
+    const Vocabulary &GetVocabulary() const { return *static_cast<const Vocabulary*>(&BaseVocabulary()); }
+
+  protected:
+    ModelFacade() : Model(sizeof(State)) {}
+
+    virtual ~ModelFacade() {}
+
+    // begin_sentence and null_context can disappear after.  vocab should stay.
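+    // Sketch of intended use from a derived model's constructor (MyModel,
+    // MyState, MyVocab, and vocab_ are hypothetical names):
+    //   MyModel::MyModel(const char *file) {  // extends ModelFacade<MyModel, MyState, MyVocab>
+    //     MyState begin, null;
+    //     /* ... populate begin, null, and vocab_ from the file ... */
+    //     Init(begin, null, vocab_, order);
+    //   }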
+    void Init(const State &begin_sentence, const State &null_context, const Vocabulary &vocab, unsigned char order) {
+      begin_sentence_ = begin_sentence;
+      null_context_ = null_context;
+      begin_sentence_memory_ = &begin_sentence_;
+      null_context_memory_ = &null_context_;
+      base_vocab_ = &vocab;
+      order_ = order;
+    }
+
+  private:
+    State begin_sentence_, null_context_;
+};
+
+} // namespace base
+} // namespace lm
+
+#endif // LM_FACADE_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/arpa_io.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/arpa_io.hh
new file mode 100644
index 0000000000000000000000000000000000000000..99c97b11d1a7bff9f367bbb3429716dbd2c67253
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/arpa_io.hh
@@ -0,0 +1,114 @@
+#ifndef LM_FILTER_ARPA_IO_H
+#define LM_FILTER_ARPA_IO_H
+/* Input and output for ARPA format language model files.
+ */
+#include "lm/read_arpa.hh"
+#include "util/exception.hh"
+#include "util/string_piece.hh"
+#include "util/tokenize_piece.hh"
+
+#include <boost/noncopyable.hpp>
+#include <boost/scoped_array.hpp>
+
+#include <fstream>
+#include <string>
+#include <vector>
+
+#include <string.h>
+#include <stdint.h>
+
+namespace util { class FilePiece; }
+
+namespace lm {
+
+class ARPAInputException : public util::Exception {
+  public:
+    explicit ARPAInputException(const StringPiece &message) throw();
+    explicit ARPAInputException(const StringPiece &message, const StringPiece &line) throw();
+    virtual ~ARPAInputException() throw();
+};
+
+class ARPAOutputException : public util::ErrnoException {
+  public:
+    ARPAOutputException(const char *prefix, const std::string &file_name) throw();
+    virtual ~ARPAOutputException() throw();
+
+    const std::string &File() const throw() { return file_name_; }
+
+  private:
+    const std::string file_name_;
+};
+
+// Handling for the counts of n-grams at the beginning of ARPA files.
+size_t SizeNeededForCounts(const std::vector<uint64_t> &number);
+
+/* Writes an ARPA file.  This has to be seekable so the counts can be written
+ * at the end.  Hence, I just have it own a std::fstream instead of accepting
+ * a separately held std::ostream.  TODO: use the fast one from estimation.
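+ *
+ * For reference, the counts section whose size is reserved up front looks
+ * like this (hypothetical counts):
+ *   \data\
+ *   ngram 1=4698
+ *   ngram 2=10519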
+ */
+class ARPAOutput : boost::noncopyable {
+  public:
+    explicit ARPAOutput(const char *name, size_t buffer_size = 65536);
+
+    void ReserveForCounts(std::streampos reserve);
+
+    void BeginLength(unsigned int length);
+
+    void AddNGram(const StringPiece &line) {
+      try {
+        file_ << line << '\n';
+      } catch (const std::ios_base::failure &f) {
+        throw ARPAOutputException("Writing an n-gram", file_name_);
+      }
+      ++fast_counter_;
+    }
+
+    void AddNGram(const StringPiece &ngram, const StringPiece &line) {
+      AddNGram(line);
+    }
+
+    template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+      AddNGram(line);
+    }
+
+    void EndLength(unsigned int length);
+
+    void Finish();
+
+  private:
+    const std::string file_name_;
+    boost::scoped_array<char> buffer_;
+    std::fstream file_;
+    size_t fast_counter_;
+    std::vector<uint64_t> counts_;
+};
+
+
+template <class Output> void ReadNGrams(util::FilePiece &in, unsigned int length, uint64_t number, Output &out) {
+  ReadNGramHeader(in, length);
+  out.BeginLength(length);
+  for (uint64_t i = 0; i < number; ++i) {
+    StringPiece line = in.ReadLine();
+    util::TokenIter<util::SingleCharacter> tabber(line, '\t');
+    if (!tabber) throw ARPAInputException("blank line", line);
+    if (!++tabber) throw ARPAInputException("no tab", line);
+
+    out.AddNGram(*tabber, line);
+  }
+  out.EndLength(length);
+}
+
+template <class Output> void ReadARPA(util::FilePiece &in_lm, Output &out) {
+  std::vector<uint64_t> number;
+  ReadARPACounts(in_lm, number);
+  out.ReserveForCounts(SizeNeededForCounts(number));
+  for (unsigned int i = 0; i < number.size(); ++i) {
+    ReadNGrams(in_lm, i + 1, number[i], out);
+  }
+  ReadEnd(in_lm);
+  out.Finish();
+}
+
+} // namespace lm
+
+#endif // LM_FILTER_ARPA_IO_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/count_io.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/count_io.hh
new file mode 100644
index 0000000000000000000000000000000000000000..de894baf80ce2ebba6b935f6428dfff867e3f547
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/count_io.hh
@@ -0,0 +1,89 @@
+#ifndef LM_FILTER_COUNT_IO_H
+#define LM_FILTER_COUNT_IO_H
+
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include "util/fake_ofstream.hh"
+#include "util/file.hh"
+#include "util/file_piece.hh"
+
+namespace lm {
+
+class CountOutput : boost::noncopyable {
+  public:
+    explicit CountOutput(const char *name) : file_(util::CreateOrThrow(name)) {}
+
+    void AddNGram(const StringPiece &line) {
+      file_ << line << '\n';
+    }
+
+    template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+      AddNGram(line);
+    }
+
+    void AddNGram(const StringPiece &ngram, const StringPiece &line) {
+      AddNGram(line);
+    }
+
+  private:
+    util::FakeOFStream file_;
+};
+
+class CountBatch {
+  public:
+    explicit CountBatch(std::streamsize initial_read)
+      : initial_read_(initial_read) {
+      buffer_.reserve(initial_read);
+    }
+
+    void Read(std::istream &in) {
+      buffer_.resize(initial_read_);
+      in.read(&*buffer_.begin(), initial_read_);
+      buffer_.resize(in.gcount());
+      char got;
+      while (in.get(got) && got != '\n')
+        buffer_.push_back(got);
+    }
+
+    template <class Output> void Send(Output &out) {
+      for (util::TokenIter<util::SingleCharacter> line(StringPiece(&*buffer_.begin(), buffer_.size()), '\n'); line; ++line) {
+        util::TokenIter<util::SingleCharacter> tabber(*line, '\t');
+        if (!tabber) {
+          std::cerr << "Warning: empty n-gram count line being removed\n";
+          continue;
+        }
+        util::TokenIter<util::SingleCharacter, true> words(*tabber, ' ');
+        if (!words) {
+          std::cerr << "Line has a tab but no words.\n";
+          continue;
+        }
+        out.AddNGram(words, util::TokenIter<util::SingleCharacter, true>::end(), *line);
+      }
+    }
+
+  private:
+    std::streamsize initial_read_;
+
+    // This could have been a std::string but that's less happy with raw writes.
+    std::vector<char> buffer_;
+};
+
+template <class Output> void ReadCount(util::FilePiece &in_file, Output &out) {
+  try {
+    while (true) {
+      StringPiece line = in_file.ReadLine();
+      util::TokenIter<util::SingleCharacter> tabber(line, '\t');
+      if (!tabber) {
+        std::cerr << "Warning: empty n-gram count line being removed\n";
+        continue;
+      }
+      out.AddNGram(*tabber, line);
+    }
+  } catch (const util::EndOfFileException &e) {}
+}
+
+} // namespace lm
+
+#endif // LM_FILTER_COUNT_IO_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/format.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/format.hh
new file mode 100644
index 0000000000000000000000000000000000000000..5a2e2db3c65ecff03f6dcb09cb105baea6dedeae
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/format.hh
@@ -0,0 +1,250 @@
+#ifndef LM_FILTER_FORMAT_H
+#define LM_FILTER_FORMAT_H
+
+#include "lm/filter/arpa_io.hh"
+#include "lm/filter/count_io.hh"
+
+#include <boost/lexical_cast.hpp>
+#include <boost/ptr_container/ptr_vector.hpp>
+
+#include <iosfwd>
+
+namespace lm {
+
+template <class Single> class MultipleOutput {
+  private:
+    typedef boost::ptr_vector<Single> Singles;
+    typedef typename Singles::iterator SinglesIterator;
+
+  public:
+    MultipleOutput(const char *prefix, size_t number) {
+      files_.reserve(number);
+      std::string tmp;
+      for (unsigned int i = 0; i < number; ++i) {
+        tmp = prefix;
+        tmp += boost::lexical_cast<std::string>(i);
+        files_.push_back(new Single(tmp.c_str()));
+      }
+    }
+
+    void AddNGram(const StringPiece &line) {
+      for (SinglesIterator i = files_.begin(); i != files_.end(); ++i)
+        i->AddNGram(line);
+    }
+
+    template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+      for (SinglesIterator i = files_.begin(); i != files_.end(); ++i)
+        i->AddNGram(begin, end, line);
+    }
+
+    void SingleAddNGram(size_t offset, const StringPiece &line) {
+      files_[offset].AddNGram(line);
+    }
+
+    template <class Iterator> void SingleAddNGram(size_t offset, const Iterator &begin, const Iterator &end, const StringPiece &line) {
+      files_[offset].AddNGram(begin, end, line);
+    }
+
+  protected:
+    Singles files_;
+};
+
+class MultipleARPAOutput : public MultipleOutput<ARPAOutput> {
+  public:
+    MultipleARPAOutput(const char *prefix, size_t number) : MultipleOutput<ARPAOutput>(prefix, number) {}
+
+    void ReserveForCounts(std::streampos reserve) {
+      for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+        i->ReserveForCounts(reserve);
+    }
+
+    void BeginLength(unsigned int length) {
+      for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+        i->BeginLength(length);
+    }
+
+    void EndLength(unsigned int length) {
+      for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+        i->EndLength(length);
+    }
+
+    void Finish() {
+      for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+        i->Finish();
+    }
+};
+
+template <class Filter, class Output> class DispatchInput {
+  public:
+    DispatchInput(Filter &filter, Output &output) : filter_(filter), output_(output) {}
+
+/*    template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+      filter_.AddNGram(begin, end, line, output_);
+    }*/
+
+    void AddNGram(const StringPiece &ngram, const StringPiece &line) {
+      filter_.AddNGram(ngram, line, output_);
+    }
+
+  protected:
+    Filter &filter_;
+    Output &output_;
+};
+
+template <class Filter, class Output> class DispatchARPAInput : public DispatchInput<Filter, Output> {
+  private:
+    typedef DispatchInput<Filter, Output> B;
+
+  public:
+    DispatchARPAInput(Filter &filter, Output &output) : B(filter, output) {}
+
+    void ReserveForCounts(std::streampos reserve) { B::output_.ReserveForCounts(reserve); }
+    void BeginLength(unsigned int length) { B::output_.BeginLength(length); }
+
+    void EndLength(unsigned int length) {
+      B::filter_.Flush();
+      B::output_.EndLength(length);
+    }
+    void Finish() { B::output_.Finish(); }
+};
+
+struct ARPAFormat {
+  typedef ARPAOutput Output;
+  typedef MultipleARPAOutput Multiple;
+  static void Copy(util::FilePiece &in, Output &out) {
+    ReadARPA(in, out);
+  }
+  template <class Filter, class Out> static void RunFilter(util::FilePiece &in, Filter &filter, Out &output) {
+    DispatchARPAInput<Filter, Out> dispatcher(filter, output);
+    ReadARPA(in, dispatcher);
+  }
+};
+
+struct CountFormat {
+  typedef CountOutput Output;
+  typedef MultipleOutput<CountOutput> Multiple;
+  static void Copy(util::FilePiece &in, Output &out) {
+    ReadCount(in, out);
+  }
+  template <class Filter, class Out> static void RunFilter(util::FilePiece &in, Filter &filter, Out &output) {
+    DispatchInput<Filter, Out> dispatcher(filter, output);
+    ReadCount(in, dispatcher);
+  }
+};
+
+/* For multithreading, the buffer classes hold batches of filter inputs and
+ * outputs in memory.  The strings get reused a lot, so keep them around
+ * instead of clearing each time.
+ */
+class InputBuffer {
+  public:
+    InputBuffer() : actual_(0) {}
+
+    void Reserve(size_t size) { lines_.reserve(size); }
+
+    template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) {
+      if (lines_.size() == actual_) lines_.resize(lines_.size() + 1);
+      // TODO avoid this copy.
+      std::string &copied = lines_[actual_].line;
+      copied.assign(line.data(), line.size());
+      lines_[actual_].ngram.set(copied.data() + (ngram.data() - line.data()), ngram.size());
+      ++actual_;
+    }
+
+    template <class Filter, class Output> void CallFilter(Filter &filter, Output &output) const {
+      for (std::vector<Line>::const_iterator i = lines_.begin(); i != lines_.begin() + actual_; ++i) {
+        filter.AddNGram(i->ngram, i->line, output);
+      }
+    }
+
+    void Clear() { actual_ = 0; }
+    bool Empty() { return actual_ == 0; }
+    size_t Size() { return actual_; }
+
+  private:
+    struct Line {
+      std::string line;
+      StringPiece ngram;
+    };
+
+    size_t actual_;
+
+    std::vector<Line> lines_;
+};
+
+class BinaryOutputBuffer {
+  public:
+    BinaryOutputBuffer() {}
+
+    void Reserve(size_t size) {
+      lines_.reserve(size);
+    }
+
+    void AddNGram(const StringPiece &line) {
+      lines_.push_back(line);
+    }
+
+    template <class Output> void Flush(Output &output) {
+      for (std::vector<StringPiece>::const_iterator i = lines_.begin(); i != lines_.end(); ++i) {
+        output.AddNGram(*i);
+      }
+      lines_.clear();
+    }
+
+  private:
+    std::vector<StringPiece> lines_;
+};
+
+class MultipleOutputBuffer {
+  public:
+    MultipleOutputBuffer() : last_(NULL) {}
+
+    void Reserve(size_t size) {
+      annotated_.reserve(size);
+    }
+
+    void AddNGram(const StringPiece &line) {
+      annotated_.resize(annotated_.size() + 1);
+      annotated_.back().line = line;
+    }
+
+    void SingleAddNGram(size_t offset, const StringPiece &line) {
+      if ((line.data() == last_.data()) && (line.length() == last_.length())) {
+        annotated_.back().systems.push_back(offset);
+      } else {
+        annotated_.resize(annotated_.size() + 1);
+        annotated_.back().systems.push_back(offset);
+        annotated_.back().line = line;
+        last_ = line;
+      }
+    }
+
+    template <class Output> void Flush(Output &output) {
+      for (std::vector<Annotated>::const_iterator i = annotated_.begin(); i != annotated_.end(); ++i) {
+        if (i->systems.empty()) {
+          output.AddNGram(i->line);
+        } else {
+          for (std::vector<size_t>::const_iterator j = i->systems.begin(); j != i->systems.end(); ++j) {
+            output.SingleAddNGram(*j, i->line);
+          }
+        }
+      }
annotated_.clear(); + } + + private: + struct Annotated { + // If this is empty, send to all systems. + // A filter should never send to all systems and send to a single one. + std::vector systems; + StringPiece line; + }; + + StringPiece last_; + + std::vector annotated_; +}; + +} // namespace lm + +#endif // LM_FILTER_FORMAT_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/phrase.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/phrase.hh new file mode 100644 index 0000000000000000000000000000000000000000..e5898c9ae37ae02ed78a0b15d249fa5c90662bed --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/phrase.hh @@ -0,0 +1,168 @@ +#ifndef LM_FILTER_PHRASE_H +#define LM_FILTER_PHRASE_H + +#include "util/murmur_hash.hh" +#include "util/string_piece.hh" +#include "util/tokenize_piece.hh" + +#include + +#include +#include + +#define LM_FILTER_PHRASE_METHOD(caps, lower) \ +bool Find##caps(Hash key, const std::vector *&out) const {\ + Table::const_iterator i(table_.find(key));\ + if (i==table_.end()) return false; \ + out = &i->second.lower; \ + return true; \ +} + +namespace lm { +namespace phrase { + +typedef uint64_t Hash; + +class Substrings { + private: + /* This is the value in a hash table where the key is a string. It indicates + * four sets of sentences: + * substring is sentences with a phrase containing the key as a substring. + * left is sentencess with a phrase that begins with the key (left aligned). + * right is sentences with a phrase that ends with the key (right aligned). + * phrase is sentences where the key is a phrase. + * Each set is encoded as a vector of sentence ids in increasing order. + */ + struct SentenceRelation { + std::vector substring, left, right, phrase; + }; + /* Most of the CPU is hash table lookups, so let's not complicate it with + * vector equality comparisons. If a collision happens, the SentenceRelation + * structure will contain the union of sentence ids over the colliding strings. + * In that case, the filter will be slightly more permissive. + * The key here is the same as boost's hash of std::vector. + */ + typedef boost::unordered_map Table; + + public: + Substrings() {} + + /* If the string isn't a substring of any phrase, return NULL. Otherwise, + * return a pointer to std::vector listing sentences with + * matching phrases. This set may be empty for Left, Right, or Phrase. + * Example: const std::vector *FindSubstring(Hash key) + */ + LM_FILTER_PHRASE_METHOD(Substring, substring) + LM_FILTER_PHRASE_METHOD(Left, left) + LM_FILTER_PHRASE_METHOD(Right, right) + LM_FILTER_PHRASE_METHOD(Phrase, phrase) + +#pragma GCC diagnostic ignored "-Wuninitialized" // end != finish so there's always an initialization + // sentence_id must be non-decreasing. Iterators are over words in the phrase. + template void AddPhrase(unsigned int sentence_id, const Iterator &begin, const Iterator &end) { + // Iterate over all substrings. + for (Iterator start = begin; start != end; ++start) { + Hash hash = 0; + SentenceRelation *relation; + for (Iterator finish = start; finish != end; ++finish) { + hash = util::MurmurHashNative(&hash, sizeof(uint64_t), *finish); + // Now hash is of [start, finish]. 
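+        // Chaining the previous hash into MurmurHash gives a rolling hash:
+        // extending [start, finish] by one word costs a single hash call
+        // instead of rehashing the whole substring.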
+ relation = &table_[hash]; + AppendSentence(relation->substring, sentence_id); + if (start == begin) AppendSentence(relation->left, sentence_id); + } + AppendSentence(relation->right, sentence_id); + if (start == begin) AppendSentence(relation->phrase, sentence_id); + } + } + + private: + void AppendSentence(std::vector &vec, unsigned int sentence_id) { + if (vec.empty() || vec.back() != sentence_id) vec.push_back(sentence_id); + } + + Table table_; +}; + +// Read a file with one sentence per line containing tab-delimited phrases of +// space-separated words. +unsigned int ReadMultiple(std::istream &in, Substrings &out); + +namespace detail { +extern const StringPiece kEndSentence; + +template void MakeHashes(Iterator i, const Iterator &end, std::vector &hashes) { + hashes.clear(); + if (i == end) return; + // TODO: check strict phrase boundaries after and before . For now, just skip tags. + if ((i->data()[0] == '<') && (i->data()[i->size() - 1] == '>')) { + ++i; + } + for (; i != end && (*i != kEndSentence); ++i) { + hashes.push_back(util::MurmurHashNative(i->data(), i->size())); + } +} + +class Vertex; +class Arc; + +class ConditionCommon { + protected: + ConditionCommon(const Substrings &substrings); + ConditionCommon(const ConditionCommon &from); + + ~ConditionCommon(); + + detail::Vertex &MakeGraph(); + + // Temporaries in PassNGram and Evaluate to avoid reallocation. + std::vector hashes_; + + private: + std::vector vertices_; + std::vector arcs_; + + const Substrings &substrings_; +}; + +} // namespace detail + +class Union : public detail::ConditionCommon { + public: + explicit Union(const Substrings &substrings) : detail::ConditionCommon(substrings) {} + + template bool PassNGram(const Iterator &begin, const Iterator &end) { + detail::MakeHashes(begin, end, hashes_); + return hashes_.empty() || Evaluate(); + } + + private: + bool Evaluate(); +}; + +class Multiple : public detail::ConditionCommon { + public: + explicit Multiple(const Substrings &substrings) : detail::ConditionCommon(substrings) {} + + template void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line, Output &output) { + detail::MakeHashes(begin, end, hashes_); + if (hashes_.empty()) { + output.AddNGram(line); + } else { + Evaluate(line, output); + } + } + + template void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) { + AddNGram(util::TokenIter(ngram, ' '), util::TokenIter::end(), line, output); + } + + void Flush() const {} + + private: + template void Evaluate(const StringPiece &line, Output &output); +}; + +} // namespace phrase +} // namespace lm +#endif // LM_FILTER_PHRASE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/thread.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/thread.hh new file mode 100644 index 0000000000000000000000000000000000000000..6a6523f90abad656f26d8c46d7c335233bcb1bf6 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/thread.hh @@ -0,0 +1,167 @@ +#ifndef LM_FILTER_THREAD_H +#define LM_FILTER_THREAD_H + +#include "util/thread_pool.hh" + +#include + +#include +#include + +namespace lm { + +template class ThreadBatch { + public: + ThreadBatch() {} + + void Reserve(size_t size) { + input_.Reserve(size); + output_.Reserve(size); + } + + // File reading thread. + InputBuffer &Fill(uint64_t sequence) { + sequence_ = sequence; + // Why wait until now to clear instead of after output? free in the same + // thread as allocated. 
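+    // The reading thread both clears and refills the buffers, so the string
+    // memory stays owned by one thread, which is kinder to thread-caching
+    // allocators.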
+ input_.Clear(); + return input_; + } + + // Filter worker thread. + template void CallFilter(Filter &filter) { + input_.CallFilter(filter, output_); + } + + uint64_t Sequence() const { return sequence_; } + + // File writing thread. + template void Flush(RealOutput &output) { + output_.Flush(output); + } + + private: + InputBuffer input_; + OutputBuffer output_; + + uint64_t sequence_; +}; + +template class FilterWorker { + public: + typedef Batch *Request; + + FilterWorker(const Filter &filter, util::PCQueue &done) : filter_(filter), done_(done) {} + + void operator()(Request request) { + request->CallFilter(filter_); + done_.Produce(request); + } + + private: + Filter filter_; + + util::PCQueue &done_; +}; + +// There should only be one OutputWorker. +template class OutputWorker { + public: + typedef Batch *Request; + + OutputWorker(Output &output, util::PCQueue &done) : output_(output), done_(done), base_sequence_(0) {} + + void operator()(Request request) { + assert(request->Sequence() >= base_sequence_); + // Assemble the output in order. + uint64_t pos = request->Sequence() - base_sequence_; + if (pos >= ordering_.size()) { + ordering_.resize(pos + 1, NULL); + } + ordering_[pos] = request; + while (!ordering_.empty() && ordering_.front()) { + ordering_.front()->Flush(output_); + done_.Produce(ordering_.front()); + ordering_.pop_front(); + ++base_sequence_; + } + } + + private: + Output &output_; + + util::PCQueue &done_; + + std::deque ordering_; + + uint64_t base_sequence_; +}; + +template class Controller : boost::noncopyable { + private: + typedef ThreadBatch Batch; + + public: + Controller(size_t batch_size, size_t queue, size_t workers, const Filter &filter, RealOutput &output) + : batch_size_(batch_size), queue_size_(queue), + batches_(queue), + to_read_(queue), + output_(queue, 1, boost::in_place(boost::ref(output), boost::ref(to_read_)), NULL), + filter_(queue, workers, boost::in_place(boost::ref(filter), boost::ref(output_.In())), NULL), + sequence_(0) { + for (size_t i = 0; i < queue; ++i) { + batches_[i].Reserve(batch_size); + local_read_.push(&batches_[i]); + } + NewInput(); + } + + void AddNGram(const StringPiece &ngram, const StringPiece &line, RealOutput &output) { + input_->AddNGram(ngram, line, output); + if (input_->Size() == batch_size_) { + FlushInput(); + NewInput(); + } + } + + void Flush() { + FlushInput(); + while (local_read_.size() < queue_size_) { + MoveRead(); + } + NewInput(); + } + + private: + void FlushInput() { + if (input_->Empty()) return; + filter_.Produce(local_read_.top()); + local_read_.pop(); + if (local_read_.empty()) MoveRead(); + } + + void NewInput() { + input_ = &local_read_.top()->Fill(sequence_++); + } + + void MoveRead() { + local_read_.push(to_read_.Consume()); + } + + const size_t batch_size_; + const size_t queue_size_; + + std::vector batches_; + + util::PCQueue to_read_; + std::stack local_read_; + util::ThreadPool > output_; + util::ThreadPool > filter_; + + uint64_t sequence_; + InputBuffer *input_; +}; + +} // namespace lm + +#endif // LM_FILTER_THREAD_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/vocab.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/vocab.hh new file mode 100644 index 0000000000000000000000000000000000000000..2ee6e1f8aafb2cf77664582edacd8cde276912d3 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/vocab.hh @@ -0,0 +1,133 @@ +#ifndef LM_FILTER_VOCAB_H +#define LM_FILTER_VOCAB_H + +// Vocabulary-based filters for language 
models. + +#include "util/multi_intersection.hh" +#include "util/string_piece.hh" +#include "util/string_piece_hash.hh" +#include "util/tokenize_piece.hh" + +#include +#include +#include +#include + +#include +#include + +namespace lm { +namespace vocab { + +void ReadSingle(std::istream &in, boost::unordered_set &out); + +// Read one sentence vocabulary per line. Return the number of sentences. +unsigned int ReadMultiple(std::istream &in, boost::unordered_map > &out); + +/* Is this a special tag like or ? This actually includes anything + * surrounded with < and >, which most tokenizers separate for real words, so + * this should not catch real words as it looks at a single token. + */ +inline bool IsTag(const StringPiece &value) { + // The parser should never give an empty string. + assert(!value.empty()); + return (value.data()[0] == '<' && value.data()[value.size() - 1] == '>'); +} + +class Single { + public: + typedef boost::unordered_set Words; + + explicit Single(const Words &vocab) : vocab_(vocab) {} + + template bool PassNGram(const Iterator &begin, const Iterator &end) { + for (Iterator i = begin; i != end; ++i) { + if (IsTag(*i)) continue; + if (FindStringPiece(vocab_, *i) == vocab_.end()) return false; + } + return true; + } + + private: + const Words &vocab_; +}; + +class Union { + public: + typedef boost::unordered_map > Words; + + explicit Union(const Words &vocabs) : vocabs_(vocabs) {} + + template bool PassNGram(const Iterator &begin, const Iterator &end) { + sets_.clear(); + + for (Iterator i(begin); i != end; ++i) { + if (IsTag(*i)) continue; + Words::const_iterator found(FindStringPiece(vocabs_, *i)); + if (vocabs_.end() == found) return false; + sets_.push_back(boost::iterator_range(&*found->second.begin(), &*found->second.end())); + } + return (sets_.empty() || util::FirstIntersection(sets_)); + } + + private: + const Words &vocabs_; + + std::vector > sets_; +}; + +class Multiple { + public: + typedef boost::unordered_map > Words; + + Multiple(const Words &vocabs) : vocabs_(vocabs) {} + + private: + // Callback from AllIntersection that does AddNGram. 
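+  // AllIntersection enumerates every sentence id whose vocabulary contains
+  // all words of the n-gram and invokes operator() once per id.  Example:
+  // with sentence vocabularies {a, b} and {b, c}, the n-gram "a b"
+  // intersects only sentence 0, so SingleAddNGram(0, line) fires once.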
+ template class Callback { + public: + Callback(Output &out, const StringPiece &line) : out_(out), line_(line) {} + + void operator()(unsigned int index) { + out_.SingleAddNGram(index, line_); + } + + private: + Output &out_; + const StringPiece &line_; + }; + + public: + template void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line, Output &output) { + sets_.clear(); + for (Iterator i(begin); i != end; ++i) { + if (IsTag(*i)) continue; + Words::const_iterator found(FindStringPiece(vocabs_, *i)); + if (vocabs_.end() == found) return; + sets_.push_back(boost::iterator_range(&*found->second.begin(), &*found->second.end())); + } + if (sets_.empty()) { + output.AddNGram(line); + return; + } + + Callback cb(output, line); + util::AllIntersection(sets_, cb); + } + + template void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) { + AddNGram(util::TokenIter(ngram, ' '), util::TokenIter::end(), line, output); + } + + void Flush() const {} + + private: + const Words &vocabs_; + + std::vector > sets_; +}; + +} // namespace vocab +} // namespace lm + +#endif // LM_FILTER_VOCAB_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/wrapper.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/wrapper.hh new file mode 100644 index 0000000000000000000000000000000000000000..822c5c27df4030023a0fddce4cbd56a46a62796f --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/wrapper.hh @@ -0,0 +1,56 @@ +#ifndef LM_FILTER_WRAPPER_H +#define LM_FILTER_WRAPPER_H + +#include "util/string_piece.hh" + +#include +#include +#include + +namespace lm { + +// Provide a single-output filter with the same interface as a +// multiple-output filter so clients code against one interface. +template class BinaryFilter { + public: + // Binary modes are just references (and a set) and it makes the API cleaner to copy them. + explicit BinaryFilter(Binary binary) : binary_(binary) {} + + template void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line, Output &output) { + if (binary_.PassNGram(begin, end)) + output.AddNGram(line); + } + + template void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) { + AddNGram(util::TokenIter(ngram, ' '), util::TokenIter::end(), line, output); + } + + void Flush() const {} + + private: + Binary binary_; +}; + +// Wrap another filter to pay attention only to context words +template class ContextFilter { + public: + typedef FilterT Filter; + + explicit ContextFilter(Filter &backend) : backend_(backend) {} + + template void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) { + // Find beginning of string or last space. 
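+      // The context is the n-gram minus its rightmost word; only that prefix
+      // is handed to the wrapped filter, so an n-gram passes exactly when its
+      // context passes.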
+ const char *last_space; + for (last_space = ngram.data() + ngram.size() - 1; last_space > ngram.data() && *last_space != ' '; --last_space) {} + backend_.AddNGram(StringPiece(ngram.data(), last_space - ngram.data()), line, output); + } + + void Flush() const {} + + private: + Filter backend_; +}; + +} // namespace lm + +#endif // LM_FILTER_WRAPPER_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/interpolate/arpa_to_stream.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/interpolate/arpa_to_stream.hh new file mode 100644 index 0000000000000000000000000000000000000000..4613998d41a80fb77e9d8aa13b228d89dbfe3a21 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/interpolate/arpa_to_stream.hh @@ -0,0 +1,38 @@ +#include "lm/read_arpa.hh" +#include "util/file_piece.hh" + +#include + +#include + +namespace util { namespace stream { class ChainPositions; } } + +namespace lm { + +namespace ngram { +template class GrowableVocab; +class WriteUniqueWords; +} // namespace ngram + +namespace interpolate { + +class ARPAToStream { + public: + // Takes ownership of fd. + explicit ARPAToStream(int fd, ngram::GrowableVocab &vocab); + + std::size_t Order() const { return counts_.size(); } + + const std::vector &Counts() const { return counts_; } + + void Run(const util::stream::ChainPositions &positions); + + private: + util::FilePiece in_; + + std::vector counts_; + + ngram::GrowableVocab &vocab_; +}; + +}} // namespaces diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/left.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/left.hh new file mode 100644 index 0000000000000000000000000000000000000000..36d613697097e52fe143d3da4bf0bfc9ec15c0ea --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/left.hh @@ -0,0 +1,216 @@ +/* Efficient left and right language model state for sentence fragments. + * Intended usage: + * Store ChartState with every chart entry. + * To do a rule application: + * 1. Make a ChartState object for your new entry. + * 2. Construct RuleScore. + * 3. Going from left to right, call Terminal or NonTerminal. + * For terminals, just pass the vocab id. + * For non-terminals, pass that non-terminal's ChartState. + * If your decoder expects scores inclusive of subtree scores (i.e. you + * label entries with the highest-scoring path), pass the non-terminal's + * score as prob. + * If your decoder expects relative scores and will walk the chart later, + * pass prob = 0.0. + * In other words, the only effect of prob is that it gets added to the + * returned log probability. + * 4. Call Finish. It returns the log probability. + * + * There's a couple more details: + * Do not pass to Terminal as it is formally not a word in the sentence, + * only context. Instead, call BeginSentence. If called, it should be the + * first call after RuleScore is constructed (since is always the + * leftmost). + * + * If the leftmost RHS is a non-terminal, it's faster to call BeginNonTerminal. + * + * Hashing and sorting comparison operators are provided. All state objects + * are POD. If you intend to use memcmp on raw state objects, you must call + * ZeroRemaining first, as the value of array entries beyond length is + * otherwise undefined. + * + * Usage is of course not limited to chart decoding. Anything that generates + * sentence fragments missing left context could benefit. 
For example, a + * phrase-based decoder could pre-score phrases, storing ChartState with each + * phrase, even if hypotheses are generated left-to-right. + */ + +#ifndef LM_LEFT_H +#define LM_LEFT_H + +#include "lm/max_order.hh" +#include "lm/state.hh" +#include "lm/return.hh" + +#include "util/murmur_hash.hh" + +#include + +namespace lm { +namespace ngram { + +template class RuleScore { + public: + explicit RuleScore(const M &model, ChartState &out) : model_(model), out_(&out), left_done_(false), prob_(0.0) { + out.left.length = 0; + out.right.length = 0; + } + + void BeginSentence() { + out_->right = model_.BeginSentenceState(); + // out_->left is empty. + left_done_ = true; + } + + void Terminal(WordIndex word) { + State copy(out_->right); + FullScoreReturn ret(model_.FullScore(copy, word, out_->right)); + if (left_done_) { prob_ += ret.prob; return; } + if (ret.independent_left) { + prob_ += ret.prob; + left_done_ = true; + return; + } + out_->left.pointers[out_->left.length++] = ret.extend_left; + prob_ += ret.rest; + if (out_->right.length != copy.length + 1) + left_done_ = true; + } + + // Faster version of NonTerminal for the case where the rule begins with a non-terminal. + void BeginNonTerminal(const ChartState &in, float prob = 0.0) { + prob_ = prob; + *out_ = in; + left_done_ = in.left.full; + } + + void NonTerminal(const ChartState &in, float prob = 0.0) { + prob_ += prob; + + if (!in.left.length) { + if (in.left.full) { + for (const float *i = out_->right.backoff; i < out_->right.backoff + out_->right.length; ++i) prob_ += *i; + left_done_ = true; + out_->right = in.right; + } + return; + } + + if (!out_->right.length) { + out_->right = in.right; + if (left_done_) { + prob_ += model_.UnRest(in.left.pointers, in.left.pointers + in.left.length, 1); + return; + } + if (out_->left.length) { + left_done_ = true; + } else { + out_->left = in.left; + left_done_ = in.left.full; + } + return; + } + + float backoffs[KENLM_MAX_ORDER - 1], backoffs2[KENLM_MAX_ORDER - 1]; + float *back = backoffs, *back2 = backoffs2; + unsigned char next_use = out_->right.length; + + // First word + if (ExtendLeft(in, next_use, 1, out_->right.backoff, back)) return; + + // Words after the first, so extending a bigram to begin with + for (unsigned char extend_length = 2; extend_length <= in.left.length; ++extend_length) { + if (ExtendLeft(in, next_use, extend_length, back, back2)) return; + std::swap(back, back2); + } + + if (in.left.full) { + for (const float *i = back; i != back + next_use; ++i) prob_ += *i; + left_done_ = true; + out_->right = in.right; + return; + } + + // Right state was minimized, so it's already independent of the new words to the left. + if (in.right.length < in.left.length) { + out_->right = in.right; + return; + } + + // Shift exisiting words down. + for (WordIndex *i = out_->right.words + next_use - 1; i >= out_->right.words; --i) { + *(i + in.right.length) = *i; + } + // Add words from in.right. + std::copy(in.right.words, in.right.words + in.right.length, out_->right.words); + // Assemble backoff composed on the existing state's backoff followed by the new state's backoff. + std::copy(in.right.backoff, in.right.backoff + in.right.length, out_->right.backoff); + std::copy(back, back + next_use, out_->right.backoff + in.right.length); + out_->right.length = in.right.length + next_use; + } + + float Finish() { + // A N-1-gram might extend left and right but we should still set full to true because it's an N-1-gram. 
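+      // left_done_ means no additional left context can change the score;
+      // length == Order() - 1 means the left state already spans the longest
+      // possible extension.  Either way the state is final.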
+ out_->left.full = left_done_ || (out_->left.length == model_.Order() - 1); + return prob_; + } + + void Reset() { + prob_ = 0.0; + left_done_ = false; + out_->left.length = 0; + out_->right.length = 0; + } + void Reset(ChartState &replacement) { + out_ = &replacement; + Reset(); + } + + private: + bool ExtendLeft(const ChartState &in, unsigned char &next_use, unsigned char extend_length, const float *back_in, float *back_out) { + ProcessRet(model_.ExtendLeft( + out_->right.words, out_->right.words + next_use, // Words to extend into + back_in, // Backoffs to use + in.left.pointers[extend_length - 1], extend_length, // Words to be extended + back_out, // Backoffs for the next score + next_use)); // Length of n-gram to use in next scoring. + if (next_use != out_->right.length) { + left_done_ = true; + if (!next_use) { + // Early exit. + out_->right = in.right; + prob_ += model_.UnRest(in.left.pointers + extend_length, in.left.pointers + in.left.length, extend_length + 1); + return true; + } + } + // Continue scoring. + return false; + } + + void ProcessRet(const FullScoreReturn &ret) { + if (left_done_) { + prob_ += ret.prob; + return; + } + if (ret.independent_left) { + prob_ += ret.prob; + left_done_ = true; + return; + } + out_->left.pointers[out_->left.length++] = ret.extend_left; + prob_ += ret.rest; + } + + const M &model_; + + ChartState *out_; + + bool left_done_; + + float prob_; +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_LEFT_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/lm_exception.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/lm_exception.hh new file mode 100644 index 0000000000000000000000000000000000000000..8bb6108120fe002e01caf8f49d346cb9455562ee --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/lm_exception.hh @@ -0,0 +1,50 @@ +#ifndef LM_LM_EXCEPTION_H +#define LM_LM_EXCEPTION_H + +// Named to avoid conflict with util/exception.hh. + +#include "util/exception.hh" +#include "util/string_piece.hh" + +#include +#include + +namespace lm { + +typedef enum {THROW_UP, COMPLAIN, SILENT} WarningAction; + +class ConfigException : public util::Exception { + public: + ConfigException() throw(); + ~ConfigException() throw(); +}; + +class LoadException : public util::Exception { + public: + virtual ~LoadException() throw(); + + protected: + LoadException() throw(); +}; + +class FormatLoadException : public LoadException { + public: + FormatLoadException() throw(); + ~FormatLoadException() throw(); +}; + +class VocabLoadException : public LoadException { + public: + virtual ~VocabLoadException() throw(); + VocabLoadException() throw(); +}; + +class SpecialWordMissingException : public VocabLoadException { + public: + explicit SpecialWordMissingException() throw(); + ~SpecialWordMissingException() throw(); +}; + +} // namespace lm + +#endif // LM_LM_EXCEPTION diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/max_order.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/max_order.hh new file mode 100644 index 0000000000000000000000000000000000000000..5f181f3fc7514cc33312e3a72c8e77ee3d79dfb2 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/max_order.hh @@ -0,0 +1,13 @@ +#ifndef LM_MAX_ORDER_H +#define LM_MAX_ORDER_H +/* IF YOUR BUILD SYSTEM PASSES -DKENLM_MAX_ORDER, THEN CHANGE THE BUILD SYSTEM. + * If not, this is the default maximum order. 
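+ * (The stock kenlm build systems define KENLM_MAX_ORDER themselves,
+ * typically defaulting to 6.)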
+ * Having this limit means that State can be + * (kMaxOrder - 1) * sizeof(float) bytes instead of + * sizeof(float*) + (kMaxOrder - 1) * sizeof(float) + malloc overhead + */ +#ifndef KENLM_ORDER_MESSAGE +#define KENLM_ORDER_MESSAGE "If your build system supports changing KENLM_MAX_ORDER, change it there and recompile. In the KenLM tarball or Moses, use e.g. `bjam --max-kenlm-order=6 -a'. Otherwise, edit lm/max_order.hh." +#endif + +#endif // LM_MAX_ORDER_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model.hh new file mode 100644 index 0000000000000000000000000000000000000000..6925a56d0c270f42e8bad18d79e0ba73f12636da --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model.hh @@ -0,0 +1,156 @@ +#ifndef LM_MODEL_H +#define LM_MODEL_H + +#include "lm/bhiksha.hh" +#include "lm/binary_format.hh" +#include "lm/config.hh" +#include "lm/facade.hh" +#include "lm/quantize.hh" +#include "lm/search_hashed.hh" +#include "lm/search_trie.hh" +#include "lm/state.hh" +#include "lm/value.hh" +#include "lm/vocab.hh" +#include "lm/weights.hh" + +#include "util/murmur_hash.hh" + +#include +#include + +#include + +namespace util { class FilePiece; } + +namespace lm { +namespace ngram { +namespace detail { + +// Should return the same results as SRI. +// ModelFacade typedefs Vocabulary so we use VocabularyT to avoid naming conflicts. +template class GenericModel : public base::ModelFacade, State, VocabularyT> { + private: + typedef base::ModelFacade, State, VocabularyT> P; + public: + // This is the model type returned by RecognizeBinary. + static const ModelType kModelType; + + static const unsigned int kVersion = Search::kVersion; + + /* Get the size of memory that will be mapped given ngram counts. This + * does not include small non-mapped control structures, such as this class + * itself. + */ + static uint64_t Size(const std::vector &counts, const Config &config = Config()); + + /* Load the model from a file. It may be an ARPA or binary file. Binary + * files must have the format expected by this class or you'll get an + * exception. So TrieModel can only load ARPA or binary created by + * TrieModel. To classify binary files, call RecognizeBinary in + * lm/binary_format.hh. + */ + explicit GenericModel(const char *file, const Config &config = Config()); + + /* Score p(new_word | in_state) and incorporate new_word into out_state. + * Note that in_state and out_state must be different references: + * &in_state != &out_state. + */ + FullScoreReturn FullScore(const State &in_state, const WordIndex new_word, State &out_state) const; + + /* Slower call without in_state. Try to remember state, but sometimes it + * would cost too much memory or your decoder isn't setup properly. + * To use this function, make an array of WordIndex containing the context + * vocabulary ids in reverse order. Then, pass the bounds of the array: + * [context_rbegin, context_rend). The new_word is not part of the context + * array unless you intend to repeat words. + */ + FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const; + + /* Get the state for a context. Don't use this if you can avoid it. Use + * BeginSentenceState or NullContextState and extend from those. If + * you're only going to use this state to call FullScore once, use + * FullScoreForgotState. 
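+   * GetState yields the state you would reach after scoring the context
+   * words in order, without computing a probability.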
+ * To use this function, make an array of WordIndex containing the context + * vocabulary ids in reverse order. Then, pass the bounds of the array: + * [context_rbegin, context_rend). + */ + void GetState(const WordIndex *context_rbegin, const WordIndex *context_rend, State &out_state) const; + + /* More efficient version of FullScore where a partial n-gram has already + * been scored. + * NOTE: THE RETURNED .rest AND .prob ARE RELATIVE TO THE .rest RETURNED BEFORE. + */ + FullScoreReturn ExtendLeft( + // Additional context in reverse order. This will update add_rend to + const WordIndex *add_rbegin, const WordIndex *add_rend, + // Backoff weights to use. + const float *backoff_in, + // extend_left returned by a previous query. + uint64_t extend_pointer, + // Length of n-gram that the pointer corresponds to. + unsigned char extend_length, + // Where to write additional backoffs for [extend_length + 1, min(Order() - 1, return.ngram_length)] + float *backoff_out, + // Amount of additional content that should be considered by the next call. + unsigned char &next_use) const; + + /* Return probabilities minus rest costs for an array of pointers. The + * first length should be the length of the n-gram to which pointers_begin + * points. + */ + float UnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const { + // Compiler should optimize this if away. + return Search::kDifferentRest ? InternalUnRest(pointers_begin, pointers_end, first_length) : 0.0; + } + + private: + FullScoreReturn ScoreExceptBackoff(const WordIndex *const context_rbegin, const WordIndex *const context_rend, const WordIndex new_word, State &out_state) const; + + // Score bigrams and above. Do not include backoff. + void ResumeScore(const WordIndex *context_rbegin, const WordIndex *const context_rend, unsigned char starting_order_minus_2, typename Search::Node &node, float *backoff_out, unsigned char &next_use, FullScoreReturn &ret) const; + + // Appears after Size in the cc file. + void SetupMemory(void *start, const std::vector &counts, const Config &config); + + void InitializeFromARPA(int fd, const char *file, const Config &config); + + float InternalUnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const; + + BinaryFormat backing_; + + VocabularyT vocab_; + + Search search_; +}; + +} // namespace detail + +// Instead of typedef, inherit. This allows the Model etc to be forward declared. +// Oh the joys of C and C++. +#define LM_COMMA() , +#define LM_NAME_MODEL(name, from)\ +class name : public from {\ + public:\ + name(const char *file, const Config &config = Config()) : from(file, config) {}\ +}; + +LM_NAME_MODEL(ProbingModel, detail::GenericModel LM_COMMA() ProbingVocabulary>); +LM_NAME_MODEL(RestProbingModel, detail::GenericModel LM_COMMA() ProbingVocabulary>); +LM_NAME_MODEL(TrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); +LM_NAME_MODEL(ArrayTrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); +LM_NAME_MODEL(QuantTrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); +LM_NAME_MODEL(QuantArrayTrieModel, detail::GenericModel LM_COMMA() SortedVocabulary>); + +// Default implementation. No real reason for it to be the default. +typedef ::lm::ngram::ProbingVocabulary Vocabulary; +typedef ProbingModel Model; + +/* Autorecognize the file type, load, and return the virtual base class. Don't + * use the virtual base class if you can avoid it. 
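+ * Virtual dispatch costs an indirect call per query and prevents inlining.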
Instead, use the above + * classes as template arguments to your own virtual feature function.*/ +base::Model *LoadVirtual(const char *file_name, const Config &config = Config(), ModelType if_arpa = PROBING); + +} // namespace ngram +} // namespace lm + +#endif // LM_MODEL_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model_type.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model_type.hh new file mode 100644 index 0000000000000000000000000000000000000000..fbe1117a515b15b4a5fb656c01fed5db3e0ff7f7 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model_type.hh @@ -0,0 +1,23 @@ +#ifndef LM_MODEL_TYPE_H +#define LM_MODEL_TYPE_H + +namespace lm { +namespace ngram { + +/* Not the best numbering system, but it grew this way for historical reasons + * and I want to preserve existing binary files. */ +typedef enum {PROBING=0, REST_PROBING=1, TRIE=2, QUANT_TRIE=3, ARRAY_TRIE=4, QUANT_ARRAY_TRIE=5} ModelType; + +// Historical names. +const ModelType HASH_PROBING = PROBING; +const ModelType TRIE_SORTED = TRIE; +const ModelType QUANT_TRIE_SORTED = QUANT_TRIE; +const ModelType ARRAY_TRIE_SORTED = ARRAY_TRIE; +const ModelType QUANT_ARRAY_TRIE_SORTED = QUANT_ARRAY_TRIE; + +const static ModelType kQuantAdd = static_cast(QUANT_TRIE - TRIE); +const static ModelType kArrayAdd = static_cast(ARRAY_TRIE - TRIE); + +} // namespace ngram +} // namespace lm +#endif // LM_MODEL_TYPE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/neural/wordvecs.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/neural/wordvecs.hh new file mode 100644 index 0000000000000000000000000000000000000000..921a2b22cfcc9c174daee807948984230cbdd4b6 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/neural/wordvecs.hh @@ -0,0 +1,38 @@ +#ifndef LM_NEURAL_WORDVECS_H +#define LM_NEURAL_WORDVECS_H + +#include "util/scoped.hh" +#include "lm/vocab.hh" + +#include + +namespace util { class FilePiece; } + +namespace lm { +namespace neural { + +class WordVecs { + public: + // Columns of the matrix are word vectors. The column index is the word. + typedef Eigen::Matrix Storage; + + /* The file should begin with a line stating the number of word vectors and + * the length of the vectors. Then it's followed by lines containing a + * word followed by floating-point values. 
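+   * For example, a file with two 3-dimensional vectors:
+   *   2 3
+   *   the 0.12 -0.30 0.44
+   *   of -0.05 0.21 0.09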
+ */ + explicit WordVecs(util::FilePiece &in); + + const Storage &Vectors() const { return vecs_; } + + WordIndex Index(StringPiece str) const { return vocab_.Index(str); } + + private: + util::scoped_malloc vocab_backing_; + ngram::ProbingVocabulary vocab_; + + Storage vecs_; +}; + +}} // namespaces + +#endif // LM_NEURAL_WORDVECS_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/ngram_query.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/ngram_query.hh new file mode 100644 index 0000000000000000000000000000000000000000..5f330c5cc14f25639f203fc91b05a8c7f0c94f31 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/ngram_query.hh @@ -0,0 +1,110 @@ +#ifndef LM_NGRAM_QUERY_H +#define LM_NGRAM_QUERY_H + +#include "lm/enumerate_vocab.hh" +#include "lm/model.hh" +#include "util/file_piece.hh" +#include "util/usage.hh" + +#include +#include +#include +#include +#include + +#include + +namespace lm { +namespace ngram { + +struct BasicPrint { + void Word(StringPiece, WordIndex, const FullScoreReturn &) const {} + void Line(uint64_t oov, float total) const { + std::cout << "Total: " << total << " OOV: " << oov << '\n'; + } + void Summary(double, double, uint64_t, uint64_t) {} + +}; + +struct FullPrint : public BasicPrint { + void Word(StringPiece surface, WordIndex vocab, const FullScoreReturn &ret) const { + std::cout << surface << '=' << vocab << ' ' << static_cast(ret.ngram_length) << ' ' << ret.prob << '\t'; + } + + void Summary(double ppl_including_oov, double ppl_excluding_oov, uint64_t corpus_oov, uint64_t corpus_tokens) { + std::cout << + "Perplexity including OOVs:\t" << ppl_including_oov << "\n" + "Perplexity excluding OOVs:\t" << ppl_excluding_oov << "\n" + "OOVs:\t" << corpus_oov << "\n" + "Tokens:\t" << corpus_tokens << '\n' + ; + } +}; + +template void Query(const Model &model, bool sentence_context) { + Printer printer; + typename Model::State state, out; + lm::FullScoreReturn ret; + StringPiece word; + + util::FilePiece in(0); + + double corpus_total = 0.0; + double corpus_total_oov_only = 0.0; + uint64_t corpus_oov = 0; + uint64_t corpus_tokens = 0; + + while (true) { + state = sentence_context ? model.BeginSentenceState() : model.NullContextState(); + float total = 0.0; + uint64_t oov = 0; + + while (in.ReadWordSameLine(word)) { + lm::WordIndex vocab = model.GetVocabulary().Index(word); + ret = model.FullScore(state, vocab, out); + if (vocab == model.GetVocabulary().NotFound()) { + ++oov; + corpus_total_oov_only += ret.prob; + } + total += ret.prob; + printer.Word(word, vocab, ret); + ++corpus_tokens; + state = out; + } + // If people don't have a newline after their last query, this won't add a . + // Sue me. 
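+    // ReadWordSameLine stopped at a non-word: consume the newline here, or
+    // hit end of file, which terminates the query loop.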
+ try { + UTIL_THROW_IF('\n' != in.get(), util::Exception, "FilePiece is confused."); + } catch (const util::EndOfFileException &e) { break; } + if (sentence_context) { + ret = model.FullScore(state, model.GetVocabulary().EndSentence(), out); + total += ret.prob; + ++corpus_tokens; + printer.Word("", model.GetVocabulary().EndSentence(), ret); + } + printer.Line(oov, total); + corpus_total += total; + corpus_oov += oov; + } + printer.Summary( + pow(10.0, -(corpus_total / static_cast(corpus_tokens))), // PPL including OOVs + pow(10.0, -((corpus_total - corpus_total_oov_only) / static_cast(corpus_tokens - corpus_oov))), // PPL excluding OOVs + corpus_oov, + corpus_tokens); +} + +template void Query(const char *file, const Config &config, bool sentence_context, bool show_words) { + Model model(file, config); + if (show_words) { + Query(model, sentence_context); + } else { + Query(model, sentence_context); + } +} + +} // namespace ngram +} // namespace lm + +#endif // LM_NGRAM_QUERY_H + + diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/partial.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/partial.hh new file mode 100644 index 0000000000000000000000000000000000000000..d8adc69651062cffe8febff9b04ea992f38ab94d --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/partial.hh @@ -0,0 +1,167 @@ +#ifndef LM_PARTIAL_H +#define LM_PARTIAL_H + +#include "lm/return.hh" +#include "lm/state.hh" + +#include + +#include + +namespace lm { +namespace ngram { + +struct ExtendReturn { + float adjust; + bool make_full; + unsigned char next_use; +}; + +template ExtendReturn ExtendLoop( + const Model &model, + unsigned char seen, const WordIndex *add_rbegin, const WordIndex *add_rend, const float *backoff_start, + const uint64_t *pointers, const uint64_t *pointers_end, + uint64_t *&pointers_write, + float *backoff_write) { + unsigned char add_length = add_rend - add_rbegin; + + float backoff_buf[2][KENLM_MAX_ORDER - 1]; + float *backoff_in = backoff_buf[0], *backoff_out = backoff_buf[1]; + std::copy(backoff_start, backoff_start + add_length, backoff_in); + + ExtendReturn value; + value.make_full = false; + value.adjust = 0.0; + value.next_use = add_length; + + unsigned char i = 0; + unsigned char length = pointers_end - pointers; + // pointers_write is NULL means that the existing left state is full, so we should use completed probabilities. + if (pointers_write) { + // Using full context, writing to new left state. + for (; i < length; ++i) { + FullScoreReturn ret(model.ExtendLeft( + add_rbegin, add_rbegin + value.next_use, + backoff_in, + pointers[i], i + seen + 1, + backoff_out, + value.next_use)); + std::swap(backoff_in, backoff_out); + if (ret.independent_left) { + value.adjust += ret.prob; + value.make_full = true; + ++i; + break; + } + value.adjust += ret.rest; + *pointers_write++ = ret.extend_left; + if (value.next_use != add_length) { + value.make_full = true; + ++i; + break; + } + } + } + // Using some of the new context. + for (; i < length && value.next_use; ++i) { + FullScoreReturn ret(model.ExtendLeft( + add_rbegin, add_rbegin + value.next_use, + backoff_in, + pointers[i], i + seen + 1, + backoff_out, + value.next_use)); + std::swap(backoff_in, backoff_out); + value.adjust += ret.prob; + } + float unrest = model.UnRest(pointers + i, pointers_end, i + seen + 1); + // Using none of the new context. 
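+  // Pointers that could not be extended at all have their rest costs
+  // upgraded to completed probabilities (UnRest returns prob minus rest).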
+ value.adjust += unrest; + + std::copy(backoff_in, backoff_in + value.next_use, backoff_write); + return value; +} + +template float RevealBefore(const Model &model, const Right &reveal, const unsigned char seen, bool reveal_full, Left &left, Right &right) { + assert(seen < reveal.length || reveal_full); + uint64_t *pointers_write = reveal_full ? NULL : left.pointers; + float backoff_buffer[KENLM_MAX_ORDER - 1]; + ExtendReturn value(ExtendLoop( + model, + seen, reveal.words + seen, reveal.words + reveal.length, reveal.backoff + seen, + left.pointers, left.pointers + left.length, + pointers_write, + left.full ? backoff_buffer : (right.backoff + right.length))); + if (reveal_full) { + left.length = 0; + value.make_full = true; + } else { + left.length = pointers_write - left.pointers; + value.make_full |= (left.length == model.Order() - 1); + } + if (left.full) { + for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i]; + } else { + // If left wasn't full when it came in, put words into right state. + std::copy(reveal.words + seen, reveal.words + seen + value.next_use, right.words + right.length); + right.length += value.next_use; + left.full = value.make_full || (right.length == model.Order() - 1); + } + return value.adjust; +} + +template float RevealAfter(const Model &model, Left &left, Right &right, const Left &reveal, unsigned char seen) { + assert(seen < reveal.length || reveal.full); + uint64_t *pointers_write = left.full ? NULL : (left.pointers + left.length); + ExtendReturn value(ExtendLoop( + model, + seen, right.words, right.words + right.length, right.backoff, + reveal.pointers + seen, reveal.pointers + reveal.length, + pointers_write, + right.backoff)); + if (reveal.full) { + for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += right.backoff[i]; + right.length = 0; + value.make_full = true; + } else { + right.length = value.next_use; + value.make_full |= (right.length == model.Order() - 1); + } + if (!left.full) { + left.length = pointers_write - left.pointers; + left.full = value.make_full || (left.length == model.Order() - 1); + } + return value.adjust; +} + +template float Subsume(const Model &model, Left &first_left, const Right &first_right, const Left &second_left, Right &second_right, const unsigned int between_length) { + assert(first_right.length < KENLM_MAX_ORDER); + assert(second_left.length < KENLM_MAX_ORDER); + assert(between_length < KENLM_MAX_ORDER - 1); + uint64_t *pointers_write = first_left.full ? NULL : (first_left.pointers + first_left.length); + float backoff_buffer[KENLM_MAX_ORDER - 1]; + ExtendReturn value(ExtendLoop( + model, + between_length, first_right.words, first_right.words + first_right.length, first_right.backoff, + second_left.pointers, second_left.pointers + second_left.length, + pointers_write, + second_left.full ? 
backoff_buffer : (second_right.backoff + second_right.length))); + if (second_left.full) { + for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i]; + } else { + std::copy(first_right.words, first_right.words + value.next_use, second_right.words + second_right.length); + second_right.length += value.next_use; + value.make_full |= (second_right.length == model.Order() - 1); + } + if (!first_left.full) { + first_left.length = pointers_write - first_left.pointers; + first_left.full = value.make_full || second_left.full || (first_left.length == model.Order() - 1); + } + assert(first_left.length < KENLM_MAX_ORDER); + assert(second_right.length < KENLM_MAX_ORDER); + return value.adjust; +} + +} // namespace ngram +} // namespace lm + +#endif // LM_PARTIAL_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/quantize.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/quantize.hh new file mode 100644 index 0000000000000000000000000000000000000000..84a30872e5a8290a173dbbd6032952e8c466ad4b --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/quantize.hh @@ -0,0 +1,233 @@ +#ifndef LM_QUANTIZE_H +#define LM_QUANTIZE_H + +#include "lm/blank.hh" +#include "lm/config.hh" +#include "lm/max_order.hh" +#include "lm/model_type.hh" +#include "util/bit_packing.hh" + +#include +#include + +#include + +#include + +namespace lm { +namespace ngram { + +struct Config; +class BinaryFormat; + +/* Store values directly and don't quantize. */ +class DontQuantize { + public: + static const ModelType kModelTypeAdd = static_cast(0); + static void UpdateConfigFromBinary(const BinaryFormat &, uint64_t, Config &) {} + static uint64_t Size(uint8_t /*order*/, const Config &/*config*/) { return 0; } + static uint8_t MiddleBits(const Config &/*config*/) { return 63; } + static uint8_t LongestBits(const Config &/*config*/) { return 31; } + + class MiddlePointer { + public: + MiddlePointer(const DontQuantize & /*quant*/, unsigned char /*order_minus_2*/, util::BitAddress address) : address_(address) {} + + MiddlePointer() : address_(NULL, 0) {} + + bool Found() const { + return address_.base != NULL; + } + + float Prob() const { + return util::ReadNonPositiveFloat31(address_.base, address_.offset); + } + + float Backoff() const { + return util::ReadFloat32(address_.base, address_.offset + 31); + } + + float Rest() const { return Prob(); } + + void Write(float prob, float backoff) { + util::WriteNonPositiveFloat31(address_.base, address_.offset, prob); + util::WriteFloat32(address_.base, address_.offset + 31, backoff); + } + + private: + util::BitAddress address_; + }; + + class LongestPointer { + public: + explicit LongestPointer(const DontQuantize &/*quant*/, util::BitAddress address) : address_(address) {} + + LongestPointer() : address_(NULL, 0) {} + + bool Found() const { + return address_.base != NULL; + } + + float Prob() const { + return util::ReadNonPositiveFloat31(address_.base, address_.offset); + } + + void Write(float prob) { + util::WriteNonPositiveFloat31(address_.base, address_.offset, prob); + } + + private: + util::BitAddress address_; + }; + + DontQuantize() {} + + void SetupMemory(void * /*start*/, unsigned char /*order*/, const Config & /*config*/) {} + + static const bool kTrain = false; + // These should never be called because kTrain is false. 
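+  // Empty bodies keep DontQuantize source-compatible with the quantizer
+  // interface that SeparatelyQuantize implements below.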
+ void Train(uint8_t /*order*/, std::vector &/*prob*/, std::vector &/*backoff*/) {} + void TrainProb(uint8_t, std::vector &/*prob*/) {} + + void FinishedLoading(const Config &) {} +}; + +class SeparatelyQuantize { + private: + class Bins { + public: + // Sigh C++ default constructor + Bins() {} + + Bins(uint8_t bits, float *begin) : begin_(begin), end_(begin_ + (1ULL << bits)), bits_(bits), mask_((1ULL << bits) - 1) {} + + float *Populate() { return begin_; } + + uint64_t EncodeProb(float value) const { + return Encode(value, 0); + } + + uint64_t EncodeBackoff(float value) const { + if (value == 0.0) { + return HasExtension(value) ? kExtensionQuant : kNoExtensionQuant; + } + return Encode(value, 2); + } + + float Decode(std::size_t off) const { return begin_[off]; } + + uint8_t Bits() const { return bits_; } + + uint64_t Mask() const { return mask_; } + + private: + uint64_t Encode(float value, size_t reserved) const { + const float *above = std::lower_bound(static_cast(begin_) + reserved, end_, value); + if (above == begin_ + reserved) return reserved; + if (above == end_) return end_ - begin_ - 1; + return above - begin_ - (value - *(above - 1) < *above - value); + } + + float *begin_; + const float *end_; + uint8_t bits_; + uint64_t mask_; + }; + + public: + static const ModelType kModelTypeAdd = kQuantAdd; + + static void UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config); + + static uint64_t Size(uint8_t order, const Config &config) { + uint64_t longest_table = (static_cast(1) << static_cast(config.prob_bits)) * sizeof(float); + uint64_t middle_table = (static_cast(1) << static_cast(config.backoff_bits)) * sizeof(float) + longest_table; + // unigrams are currently not quantized so no need for a table. + return (order - 2) * middle_table + longest_table + /* for the bit counts and alignment padding) */ 8; + } + + static uint8_t MiddleBits(const Config &config) { return config.prob_bits + config.backoff_bits; } + static uint8_t LongestBits(const Config &config) { return config.prob_bits; } + + class MiddlePointer { + public: + MiddlePointer(const SeparatelyQuantize &quant, unsigned char order_minus_2, const util::BitAddress &address) : bins_(quant.GetTables(order_minus_2)), address_(address) {} + + MiddlePointer() : address_(NULL, 0) {} + + bool Found() const { return address_.base != NULL; } + + float Prob() const { + return ProbBins().Decode(util::ReadInt25(address_.base, address_.offset + BackoffBins().Bits(), ProbBins().Bits(), ProbBins().Mask())); + } + + float Backoff() const { + return BackoffBins().Decode(util::ReadInt25(address_.base, address_.offset, BackoffBins().Bits(), BackoffBins().Mask())); + } + + float Rest() const { return Prob(); } + + void Write(float prob, float backoff) const { + util::WriteInt57(address_.base, address_.offset, ProbBins().Bits() + BackoffBins().Bits(), + (ProbBins().EncodeProb(prob) << BackoffBins().Bits()) | BackoffBins().EncodeBackoff(backoff)); + } + + private: + const Bins &ProbBins() const { return bins_[0]; } + const Bins &BackoffBins() const { return bins_[1]; } + const Bins *bins_; + + util::BitAddress address_; + }; + + class LongestPointer { + public: + LongestPointer(const SeparatelyQuantize &quant, const util::BitAddress &address) : table_(&quant.LongestTable()), address_(address) {} + + LongestPointer() : address_(NULL, 0) {} + + bool Found() const { return address_.base != NULL; } + + void Write(float prob) const { + util::WriteInt25(address_.base, address_.offset, table_->Bits(), 
table_->EncodeProb(prob)); + } + + float Prob() const { + return table_->Decode(util::ReadInt25(address_.base, address_.offset, table_->Bits(), table_->Mask())); + } + + private: + const Bins *table_; + util::BitAddress address_; + }; + + SeparatelyQuantize() {} + + void SetupMemory(void *start, unsigned char order, const Config &config); + + static const bool kTrain = true; + // Assumes 0.0 is removed from backoff. + void Train(uint8_t order, std::vector &prob, std::vector &backoff); + // Train just probabilities (for longest order). + void TrainProb(uint8_t order, std::vector &prob); + + void FinishedLoading(const Config &config); + + const Bins *GetTables(unsigned char order_minus_2) const { return tables_[order_minus_2]; } + + const Bins &LongestTable() const { return longest_; } + + private: + Bins tables_[KENLM_MAX_ORDER - 1][2]; + + Bins longest_; + + uint8_t *actual_base_; + + uint8_t prob_bits_, backoff_bits_; +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_QUANTIZE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/read_arpa.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/read_arpa.hh new file mode 100644 index 0000000000000000000000000000000000000000..64eeef306d3f9f82163e3f8d954dc5976beee7eb --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/read_arpa.hh @@ -0,0 +1,95 @@ +#ifndef LM_READ_ARPA_H +#define LM_READ_ARPA_H + +#include "lm/lm_exception.hh" +#include "lm/word_index.hh" +#include "lm/weights.hh" +#include "util/file_piece.hh" + +#include +#include +#include + +namespace lm { + +void ReadARPACounts(util::FilePiece &in, std::vector &number); +void ReadNGramHeader(util::FilePiece &in, unsigned int length); + +void ReadBackoff(util::FilePiece &in, Prob &weights); +void ReadBackoff(util::FilePiece &in, float &backoff); +inline void ReadBackoff(util::FilePiece &in, ProbBackoff &weights) { + ReadBackoff(in, weights.backoff); +} +inline void ReadBackoff(util::FilePiece &in, RestWeights &weights) { + ReadBackoff(in, weights.backoff); +} + +void ReadEnd(util::FilePiece &in); + +extern const bool kARPASpaces[256]; + +// Positive log probability warning. +class PositiveProbWarn { + public: + PositiveProbWarn() : action_(THROW_UP) {} + + explicit PositiveProbWarn(WarningAction action) : action_(action) {} + + void Warn(float prob); + + private: + WarningAction action_; +}; + +template void Read1Gram(util::FilePiece &f, Voc &vocab, Weights *unigrams, PositiveProbWarn &warn) { + try { + float prob = f.ReadFloat(); + if (prob > 0.0) { + warn.Warn(prob); + prob = 0.0; + } + UTIL_THROW_IF(f.get() != '\t', FormatLoadException, "Expected tab after probability"); + WordIndex word = vocab.Insert(f.ReadDelimited(kARPASpaces)); + Weights &w = unigrams[word]; + w.prob = prob; + ReadBackoff(f, w); + } catch(util::Exception &e) { + e << " in the 1-gram at byte " << f.Offset(); + throw; + } +} + +template void Read1Grams(util::FilePiece &f, std::size_t count, Voc &vocab, Weights *unigrams, PositiveProbWarn &warn) { + ReadNGramHeader(f, 1); + for (std::size_t i = 0; i < count; ++i) { + Read1Gram(f, vocab, unigrams, warn); + } + vocab.FinishedLoading(unigrams); +} + +// Read ngram, write vocab ids to indices_out. 
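+// An ARPA body line has the form: log10(prob) '\t' w1 ' ' ... ' ' wn with an
+// optional trailing '\t' log10(backoff), which ReadBackoff handles.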
+template void ReadNGram(util::FilePiece &f, const unsigned char n, const Voc &vocab, Iterator indices_out, Weights &weights, PositiveProbWarn &warn) { + try { + weights.prob = f.ReadFloat(); + if (weights.prob > 0.0) { + warn.Warn(weights.prob); + weights.prob = 0.0; + } + for (unsigned char i = 0; i < n; ++i, ++indices_out) { + StringPiece word(f.ReadDelimited(kARPASpaces)); + WordIndex index = vocab.Index(word); + *indices_out = index; + // Check for words mapped to that are not the string . + UTIL_THROW_IF(index == 0 /* mapped to */ && (word != StringPiece("", 5)) && (word != StringPiece("", 5)), + FormatLoadException, "Word " << word << " was not seen in the unigrams (which are supposed to list the entire vocabulary) but appears"); + } + ReadBackoff(f, weights); + } catch(util::Exception &e) { + e << " in the " << static_cast(n) << "-gram at byte " << f.Offset(); + throw; + } +} + +} // namespace lm + +#endif // LM_READ_ARPA_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/return.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/return.hh new file mode 100644 index 0000000000000000000000000000000000000000..982ffd66aef8d0c5d07092edf38b63dfc02a5a84 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/return.hh @@ -0,0 +1,42 @@ +#ifndef LM_RETURN_H +#define LM_RETURN_H + +#include + +namespace lm { +/* Structure returned by scoring routines. */ +struct FullScoreReturn { + // log10 probability + float prob; + + /* The length of n-gram matched. Do not use this for recombination. + * Consider a model containing only the following n-grams: + * -1 foo + * -3.14 bar + * -2.718 baz -5 + * -6 foo bar + * + * If you score ``bar'' then ngram_length is 1 and recombination state is the + * empty string because bar has zero backoff and does not extend to the + * right. + * If you score ``foo'' then ngram_length is 1 and recombination state is + * ``foo''. + * + * Ideally, keep output states around and compare them. Failing that, + * get out_state.ValidLength() and use that length for recombination. + */ + unsigned char ngram_length; + + /* Left extension information. If independent_left is set, then prob is + * independent of words to the left (up to additional backoff). Otherwise, + * extend_left indicates how to efficiently extend further to the left. + */ + bool independent_left; + uint64_t extend_left; // Defined only if independent_left + + // Rest cost for extension to the left. 
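+  // For models built with rest costs, this is a lower-order estimate used
+  // while the left context is still unknown; for other models it equals prob.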
+ float rest; +}; + +} // namespace lm +#endif // LM_RETURN_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_hashed.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_hashed.hh new file mode 100644 index 0000000000000000000000000000000000000000..9dc84454c9bd6c7e875594e6cd92ce38132cddcf --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_hashed.hh @@ -0,0 +1,192 @@ +#ifndef LM_SEARCH_HASHED_H +#define LM_SEARCH_HASHED_H + +#include "lm/model_type.hh" +#include "lm/config.hh" +#include "lm/read_arpa.hh" +#include "lm/return.hh" +#include "lm/weights.hh" + +#include "util/bit_packing.hh" +#include "util/probing_hash_table.hh" + +#include +#include +#include + +namespace util { class FilePiece; } + +namespace lm { +namespace ngram { +class BinaryFormat; +class ProbingVocabulary; +namespace detail { + +inline uint64_t CombineWordHash(uint64_t current, const WordIndex next) { + uint64_t ret = (current * 8978948897894561157ULL) ^ (static_cast(1 + next) * 17894857484156487943ULL); + return ret; +} + +#pragma pack(push) +#pragma pack(4) +struct ProbEntry { + uint64_t key; + Prob value; + typedef uint64_t Key; + typedef Prob Value; + uint64_t GetKey() const { + return key; + } +}; + +#pragma pack(pop) + +class LongestPointer { + public: + explicit LongestPointer(const float &to) : to_(&to) {} + + LongestPointer() : to_(NULL) {} + + bool Found() const { + return to_ != NULL; + } + + float Prob() const { + return *to_; + } + + private: + const float *to_; +}; + +template class HashedSearch { + public: + typedef uint64_t Node; + + typedef typename Value::ProbingProxy UnigramPointer; + typedef typename Value::ProbingProxy MiddlePointer; + typedef ::lm::ngram::detail::LongestPointer LongestPointer; + + static const ModelType kModelType = Value::kProbingModelType; + static const bool kDifferentRest = Value::kDifferentRest; + static const unsigned int kVersion = 0; + + // TODO: move probing_multiplier here with next binary file format update. 
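+  // Nothing beyond the counts appears to be recoverable from the probing
+  // binary format yet, hence the empty body and the TODO above.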
+ static void UpdateConfigFromBinary(const BinaryFormat &, const std::vector &, uint64_t, Config &) {} + + static uint64_t Size(const std::vector &counts, const Config &config) { + uint64_t ret = Unigram::Size(counts[0]); + for (unsigned char n = 1; n < counts.size() - 1; ++n) { + ret += Middle::Size(counts[n], config.probing_multiplier); + } + return ret + Longest::Size(counts.back(), config.probing_multiplier); + } + + uint8_t *SetupMemory(uint8_t *start, const std::vector &counts, const Config &config); + + void InitializeFromARPA(const char *file, util::FilePiece &f, const std::vector &counts, const Config &config, ProbingVocabulary &vocab, BinaryFormat &backing); + + unsigned char Order() const { + return middle_.size() + 2; + } + + typename Value::Weights &UnknownUnigram() { return unigram_.Unknown(); } + + UnigramPointer LookupUnigram(WordIndex word, Node &next, bool &independent_left, uint64_t &extend_left) const { + extend_left = static_cast(word); + next = extend_left; + UnigramPointer ret(unigram_.Lookup(word)); + independent_left = ret.IndependentLeft(); + return ret; + } + + MiddlePointer Unpack(uint64_t extend_pointer, unsigned char extend_length, Node &node) const { + node = extend_pointer; + return MiddlePointer(middle_[extend_length - 2].MustFind(extend_pointer)->value); + } + + MiddlePointer LookupMiddle(unsigned char order_minus_2, WordIndex word, Node &node, bool &independent_left, uint64_t &extend_pointer) const { + node = CombineWordHash(node, word); + typename Middle::ConstIterator found; + if (!middle_[order_minus_2].Find(node, found)) { + independent_left = true; + return MiddlePointer(); + } + extend_pointer = node; + MiddlePointer ret(found->value); + independent_left = ret.IndependentLeft(); + return ret; + } + + LongestPointer LookupLongest(WordIndex word, const Node &node) const { + // Sign bit is always on because longest n-grams do not extend left. + typename Longest::ConstIterator found; + if (!longest_.Find(CombineWordHash(node, word), found)) return LongestPointer(); + return LongestPointer(found->value.prob); + } + + // Generate a node without necessarily checking that it actually exists. + // Optionally return false if it's know to not exist. + bool FastMakeNode(const WordIndex *begin, const WordIndex *end, Node &node) const { + assert(begin != end); + node = static_cast(*begin); + for (const WordIndex *i = begin + 1; i < end; ++i) { + node = CombineWordHash(node, *i); + } + return true; + } + + private: + // Interpret config's rest cost build policy and pass the right template argument to ApplyBuild. + void DispatchBuild(util::FilePiece &f, const std::vector &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn); + + template void ApplyBuild(util::FilePiece &f, const std::vector &counts, const ProbingVocabulary &vocab, PositiveProbWarn &warn, const Build &build); + + class Unigram { + public: + Unigram() {} + + Unigram(void *start, uint64_t count) : + unigram_(static_cast(start)) +#ifdef DEBUG + , count_(count) +#endif + {} + + static uint64_t Size(uint64_t count) { + return (count + 1) * sizeof(typename Value::Weights); // +1 for hallucinate + } + + const typename Value::Weights &Lookup(WordIndex index) const { +#ifdef DEBUG + assert(index < count_); +#endif + return unigram_[index]; + } + + typename Value::Weights &Unknown() { return unigram_[0]; } + + // For building. 
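+      // (Editorial note: presumably used during building so InitializeFromARPA
+      // can fill the unigram weights in place.)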
+ typename Value::Weights *Raw() { return unigram_; } + + private: + typename Value::Weights *unigram_; +#ifdef DEBUG + uint64_t count_; +#endif + }; + + Unigram unigram_; + + typedef util::ProbingHashTable Middle; + std::vector middle_; + + typedef util::ProbingHashTable Longest; + Longest longest_; +}; + +} // namespace detail +} // namespace ngram +} // namespace lm + +#endif // LM_SEARCH_HASHED_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_trie.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_trie.hh new file mode 100644 index 0000000000000000000000000000000000000000..d8838d2bafdaf3edc13d260c70e34c2e196de6c8 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_trie.hh @@ -0,0 +1,130 @@ +#ifndef LM_SEARCH_TRIE_H +#define LM_SEARCH_TRIE_H + +#include "lm/config.hh" +#include "lm/model_type.hh" +#include "lm/return.hh" +#include "lm/trie.hh" +#include "lm/weights.hh" + +#include "util/file.hh" +#include "util/file_piece.hh" + +#include +#include + +#include + +namespace lm { +namespace ngram { +class BinaryFormat; +class SortedVocabulary; +namespace trie { + +template class TrieSearch; +class SortedFiles; +template void BuildTrie(SortedFiles &files, std::vector &counts, const Config &config, TrieSearch &out, Quant &quant, SortedVocabulary &vocab, BinaryFormat &backing); + +template class TrieSearch { + public: + typedef NodeRange Node; + + typedef ::lm::ngram::trie::UnigramPointer UnigramPointer; + typedef typename Quant::MiddlePointer MiddlePointer; + typedef typename Quant::LongestPointer LongestPointer; + + static const bool kDifferentRest = false; + + static const ModelType kModelType = static_cast(TRIE_SORTED + Quant::kModelTypeAdd + Bhiksha::kModelTypeAdd); + + static const unsigned int kVersion = 1; + + static void UpdateConfigFromBinary(const BinaryFormat &file, const std::vector &counts, uint64_t offset, Config &config) { + Quant::UpdateConfigFromBinary(file, offset, config); + // Currently the unigram pointers are not compresssed, so there will only be a header for order > 2. 
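+    // Editorial sketch of the binary layout this implies, e.g. for a 3-gram
+    // model:
+    //   [quant tables: Quant::Size(3, config)][unigrams: Unigram::Size(counts[0])]
+    //   [Bhiksha header + higher-order arrays ...]
+    // so the Bhiksha header starts at
+    // offset + Quant::Size(counts.size(), config) + Unigram::Size(counts[0]).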
+ if (counts.size() > 2) + Bhiksha::UpdateConfigFromBinary(file, offset + Quant::Size(counts.size(), config) + Unigram::Size(counts[0]), config); + } + + static uint64_t Size(const std::vector &counts, const Config &config) { + uint64_t ret = Quant::Size(counts.size(), config) + Unigram::Size(counts[0]); + for (unsigned char i = 1; i < counts.size() - 1; ++i) { + ret += Middle::Size(Quant::MiddleBits(config), counts[i], counts[0], counts[i+1], config); + } + return ret + Longest::Size(Quant::LongestBits(config), counts.back(), counts[0]); + } + + TrieSearch() : middle_begin_(NULL), middle_end_(NULL) {} + + ~TrieSearch() { FreeMiddles(); } + + uint8_t *SetupMemory(uint8_t *start, const std::vector &counts, const Config &config); + + void InitializeFromARPA(const char *file, util::FilePiece &f, std::vector &counts, const Config &config, SortedVocabulary &vocab, BinaryFormat &backing); + + unsigned char Order() const { + return middle_end_ - middle_begin_ + 2; + } + + ProbBackoff &UnknownUnigram() { return unigram_.Unknown(); } + + UnigramPointer LookupUnigram(WordIndex word, Node &next, bool &independent_left, uint64_t &extend_left) const { + extend_left = static_cast(word); + UnigramPointer ret(unigram_.Find(word, next)); + independent_left = (next.begin == next.end); + return ret; + } + + MiddlePointer Unpack(uint64_t extend_pointer, unsigned char extend_length, Node &node) const { + return MiddlePointer(quant_, extend_length - 2, middle_begin_[extend_length - 2].ReadEntry(extend_pointer, node)); + } + + MiddlePointer LookupMiddle(unsigned char order_minus_2, WordIndex word, Node &node, bool &independent_left, uint64_t &extend_left) const { + util::BitAddress address(middle_begin_[order_minus_2].Find(word, node, extend_left)); + independent_left = (address.base == NULL) || (node.begin == node.end); + return MiddlePointer(quant_, order_minus_2, address); + } + + LongestPointer LookupLongest(WordIndex word, const Node &node) const { + return LongestPointer(quant_, longest_.Find(word, node)); + } + + bool FastMakeNode(const WordIndex *begin, const WordIndex *end, Node &node) const { + assert(begin != end); + bool independent_left; + uint64_t ignored; + LookupUnigram(*begin, node, independent_left, ignored); + for (const WordIndex *i = begin + 1; i < end; ++i) { + if (independent_left || !LookupMiddle(i - begin - 1, *i, node, independent_left, ignored).Found()) return false; + } + return true; + } + + private: + friend void BuildTrie(SortedFiles &files, std::vector &counts, const Config &config, TrieSearch &out, Quant &quant, SortedVocabulary &vocab, BinaryFormat &backing); + + // Middles are managed manually so we can delay construction and they don't have to be copyable. 
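+    // Editorial sketch (hedged): SetupMemory presumably malloc()s the array
+    // and placement-news each Middle, which is why FreeMiddles() below calls
+    // the destructors by hand and then std::free()s the storage, e.g.:
+    //   middle_begin_ = static_cast<Middle*>(std::malloc(sizeof(Middle) * n));
+    //   new (middle_begin_ + i) Middle(/*...*/);  // for each middle order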
+ void FreeMiddles() { + for (const Middle *i = middle_begin_; i != middle_end_; ++i) { + i->~Middle(); + } + std::free(middle_begin_); + } + + typedef trie::BitPackedMiddle Middle; + + typedef trie::BitPackedLongest Longest; + Longest longest_; + + Middle *middle_begin_, *middle_end_; + Quant quant_; + + typedef ::lm::ngram::trie::Unigram Unigram; + Unigram unigram_; +}; + +} // namespace trie +} // namespace ngram +} // namespace lm + +#endif // LM_SEARCH_TRIE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/sizes.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/sizes.hh new file mode 100644 index 0000000000000000000000000000000000000000..eb7e99de9fd2188e096aa0ff0cb9acccb981139b --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/sizes.hh @@ -0,0 +1,17 @@ +#ifndef LM_SIZES_H +#define LM_SIZES_H + +#include + +#include + +namespace lm { namespace ngram { + +struct Config; + +void ShowSizes(const std::vector &counts, const lm::ngram::Config &config); +void ShowSizes(const std::vector &counts); +void ShowSizes(const char *file, const lm::ngram::Config &config); + +}} // namespaces +#endif // LM_SIZES_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/state.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/state.hh new file mode 100644 index 0000000000000000000000000000000000000000..f6c51d6f1baa58319f111962ed3a989c76b59d49 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/state.hh @@ -0,0 +1,125 @@ +#ifndef LM_STATE_H +#define LM_STATE_H + +#include "lm/max_order.hh" +#include "lm/word_index.hh" +#include "util/murmur_hash.hh" + +#include + +namespace lm { +namespace ngram { + +// This is a POD but if you want memcmp to return the same as operator==, call +// ZeroRemaining first. +class State { + public: + bool operator==(const State &other) const { + if (length != other.length) return false; + return !memcmp(words, other.words, length * sizeof(WordIndex)); + } + + // Three way comparison function. + int Compare(const State &other) const { + if (length != other.length) return length < other.length ? -1 : 1; + return memcmp(words, other.words, length * sizeof(WordIndex)); + } + + bool operator<(const State &other) const { + if (length != other.length) return length < other.length; + return memcmp(words, other.words, length * sizeof(WordIndex)) < 0; + } + + // Call this before using raw memcmp. + void ZeroRemaining() { + for (unsigned char i = length; i < KENLM_MAX_ORDER - 1; ++i) { + words[i] = 0; + backoff[i] = 0.0; + } + } + + unsigned char Length() const { return length; } + + // You shouldn't need to touch anything below this line, but the members are public so FullState will qualify as a POD. + // This order minimizes total size of the struct if WordIndex is 64 bit, float is 32 bit, and alignment of 64 bit integers is 64 bit. + WordIndex words[KENLM_MAX_ORDER - 1]; + float backoff[KENLM_MAX_ORDER - 1]; + unsigned char length; +}; + +typedef State Right; + +inline uint64_t hash_value(const State &state, uint64_t seed = 0) { + return util::MurmurHashNative(state.words, sizeof(WordIndex) * state.length, seed); +} + +struct Left { + bool operator==(const Left &other) const { + return + length == other.length && + (!length || (pointers[length - 1] == other.pointers[length - 1] && full == other.full)); + } + + int Compare(const Left &other) const { + if (length < other.length) return -1; + if (length > other.length) return 1; + if (length == 0) return 0; // Must be full. 
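+    // Editorial note: comparing only the last pointer suffices because a trie
+    // pointer identifies the n-gram, and with it the entire left context path.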
+ if (pointers[length - 1] > other.pointers[length - 1]) return 1; + if (pointers[length - 1] < other.pointers[length - 1]) return -1; + return (int)full - (int)other.full; + } + + bool operator<(const Left &other) const { + return Compare(other) == -1; + } + + void ZeroRemaining() { + for (uint64_t * i = pointers + length; i < pointers + KENLM_MAX_ORDER - 1; ++i) + *i = 0; + } + + uint64_t pointers[KENLM_MAX_ORDER - 1]; + unsigned char length; + bool full; +}; + +inline uint64_t hash_value(const Left &left) { + unsigned char add[2]; + add[0] = left.length; + add[1] = left.full; + return util::MurmurHashNative(add, 2, left.length ? left.pointers[left.length - 1] : 0); +} + +struct ChartState { + bool operator==(const ChartState &other) const { + return (right == other.right) && (left == other.left); + } + + int Compare(const ChartState &other) const { + int lres = left.Compare(other.left); + if (lres) return lres; + return right.Compare(other.right); + } + + bool operator<(const ChartState &other) const { + return Compare(other) < 0; + } + + void ZeroRemaining() { + left.ZeroRemaining(); + right.ZeroRemaining(); + } + + Left left; + State right; +}; + +inline uint64_t hash_value(const ChartState &state) { + return hash_value(state.right, hash_value(state.left)); +} + + +} // namespace ngram +} // namespace lm + +#endif // LM_STATE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie.hh new file mode 100644 index 0000000000000000000000000000000000000000..cd39298b53976682d17e2c4dbd11dbb1a15c3d32 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie.hh @@ -0,0 +1,146 @@ +#ifndef LM_TRIE_H +#define LM_TRIE_H + +#include "lm/weights.hh" +#include "lm/word_index.hh" +#include "util/bit_packing.hh" + +#include + +#include + +namespace lm { +namespace ngram { +struct Config; +namespace trie { + +struct NodeRange { + uint64_t begin, end; +}; + +// TODO: if the number of unigrams is a concern, also bit pack these records. +struct UnigramValue { + ProbBackoff weights; + uint64_t next; + uint64_t Next() const { return next; } +}; + +class UnigramPointer { + public: + explicit UnigramPointer(const ProbBackoff &to) : to_(&to) {} + + UnigramPointer() : to_(NULL) {} + + bool Found() const { return to_ != NULL; } + + float Prob() const { return to_->prob; } + float Backoff() const { return to_->backoff; } + float Rest() const { return Prob(); } + + private: + const ProbBackoff *to_; +}; + +class Unigram { + public: + Unigram() {} + + void Init(void *start) { + unigram_ = static_cast(start); + } + + static uint64_t Size(uint64_t count) { + // +1 in case unknown doesn't appear. +1 for the final next. 
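+      // Editorial example: for count unigrams, one extra slot covers a missing
+      // <unk> and one more holds the terminal next pointer, so Find() below
+      // can read (val+1)->next even for the last word.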
+ return (count + 2) * sizeof(UnigramValue); + } + + const ProbBackoff &Lookup(WordIndex index) const { return unigram_[index].weights; } + + ProbBackoff &Unknown() { return unigram_[0].weights; } + + UnigramValue *Raw() { + return unigram_; + } + + UnigramPointer Find(WordIndex word, NodeRange &next) const { + UnigramValue *val = unigram_ + word; + next.begin = val->next; + next.end = (val+1)->next; + return UnigramPointer(val->weights); + } + + private: + UnigramValue *unigram_; +}; + +class BitPacked { + public: + BitPacked() {} + + uint64_t InsertIndex() const { + return insert_index_; + } + + protected: + static uint64_t BaseSize(uint64_t entries, uint64_t max_vocab, uint8_t remaining_bits); + + void BaseInit(void *base, uint64_t max_vocab, uint8_t remaining_bits); + + uint8_t word_bits_; + uint8_t total_bits_; + uint64_t word_mask_; + + uint8_t *base_; + + uint64_t insert_index_, max_vocab_; +}; + +template class BitPackedMiddle : public BitPacked { + public: + static uint64_t Size(uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_next, const Config &config); + + // next_source need not be initialized. + BitPackedMiddle(void *base, uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_next, const BitPacked &next_source, const Config &config); + + util::BitAddress Insert(WordIndex word); + + void FinishedLoading(uint64_t next_end, const Config &config); + + util::BitAddress Find(WordIndex word, NodeRange &range, uint64_t &pointer) const; + + util::BitAddress ReadEntry(uint64_t pointer, NodeRange &range) { + uint64_t addr = pointer * total_bits_; + addr += word_bits_; + bhiksha_.ReadNext(base_, addr + quant_bits_, pointer, total_bits_, range); + return util::BitAddress(base_, addr); + } + + private: + uint8_t quant_bits_; + Bhiksha bhiksha_; + + const BitPacked *next_source_; +}; + +class BitPackedLongest : public BitPacked { + public: + static uint64_t Size(uint8_t quant_bits, uint64_t entries, uint64_t max_vocab) { + return BaseSize(entries, max_vocab, quant_bits); + } + + BitPackedLongest() {} + + void Init(void *base, uint8_t quant_bits, uint64_t max_vocab) { + BaseInit(base, max_vocab, quant_bits); + } + + util::BitAddress Insert(WordIndex word); + + util::BitAddress Find(WordIndex word, const NodeRange &node) const; +}; + +} // namespace trie +} // namespace ngram +} // namespace lm + +#endif // LM_TRIE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie_sort.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie_sort.hh new file mode 100644 index 0000000000000000000000000000000000000000..e5406d9b6a2a5f086cb60e7318a2525d7d8cce75 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie_sort.hh @@ -0,0 +1,114 @@ +// Step of trie builder: create sorted files. 
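+// Editorial overview (hedged): each order's n-grams are written as
+// fixed-width records of `order` WordIndex ids plus their weights, sorted
+// with EntryCompare, and later streamed back through RecordReader below.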
+ +#ifndef LM_TRIE_SORT_H +#define LM_TRIE_SORT_H + +#include "lm/max_order.hh" +#include "lm/word_index.hh" + +#include "util/file.hh" +#include "util/scoped.hh" + +#include +#include +#include +#include + +#include + +namespace util { +class FilePiece; +} // namespace util + +namespace lm { +class PositiveProbWarn; +namespace ngram { +class SortedVocabulary; +struct Config; + +namespace trie { + +class EntryCompare : public std::binary_function { + public: + explicit EntryCompare(unsigned char order) : order_(order) {} + + bool operator()(const void *first_void, const void *second_void) const { + const WordIndex *first = static_cast(first_void); + const WordIndex *second = static_cast(second_void); + const WordIndex *end = first + order_; + for (; first != end; ++first, ++second) { + if (*first < *second) return true; + if (*first > *second) return false; + } + return false; + } + private: + unsigned char order_; +}; + +class RecordReader { + public: + RecordReader() : remains_(true) {} + + void Init(FILE *file, std::size_t entry_size); + + void *Data() { return data_.get(); } + const void *Data() const { return data_.get(); } + + RecordReader &operator++() { + std::size_t ret = fread(data_.get(), entry_size_, 1, file_); + if (!ret) { + UTIL_THROW_IF(!feof(file_), util::ErrnoException, "Error reading temporary file"); + remains_ = false; + } + return *this; + } + + operator bool() const { return remains_; } + + void Rewind(); + + std::size_t EntrySize() const { return entry_size_; } + + void Overwrite(const void *start, std::size_t amount); + + private: + FILE *file_; + + util::scoped_malloc data_; + + bool remains_; + + std::size_t entry_size_; +}; + +class SortedFiles { + public: + // Build from ARPA + SortedFiles(const Config &config, util::FilePiece &f, std::vector &counts, std::size_t buffer, const std::string &file_prefix, SortedVocabulary &vocab); + + int StealUnigram() { + return unigram_.release(); + } + + FILE *Full(unsigned char order) { + return full_[order - 2].get(); + } + + FILE *Context(unsigned char of_order) { + return context_[of_order - 2].get(); + } + + private: + void ConvertToSorted(util::FilePiece &f, const SortedVocabulary &vocab, const std::vector &counts, const std::string &prefix, unsigned char order, PositiveProbWarn &warn, void *mem, std::size_t mem_size); + + util::scoped_fd unigram_; + + util::scoped_FILE full_[KENLM_MAX_ORDER - 1], context_[KENLM_MAX_ORDER - 1]; +}; + +} // namespace trie +} // namespace ngram +} // namespace lm + +#endif // LM_TRIE_SORT_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value.hh new file mode 100644 index 0000000000000000000000000000000000000000..36e87084814d826612a9a4c8d282b8940c741b30 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value.hh @@ -0,0 +1,157 @@ +#ifndef LM_VALUE_H +#define LM_VALUE_H + +#include "lm/model_type.hh" +#include "lm/value_build.hh" +#include "lm/weights.hh" +#include "util/bit_packing.hh" + +#include + +namespace lm { +namespace ngram { + +// Template proxy for probing unigrams and middle. 
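+// Editorial note on the sign-bit trick used below: stored probabilities are
+// log10 values, hence <= 0, so the float sign bit is redundant and the build
+// step borrows it as the "independent left" flag.  Prob() forces the sign
+// back on before returning:
+//   util::FloatEnc enc; enc.f = to_->prob; enc.i |= util::kSignBit;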
+template class GenericProbingProxy { + public: + explicit GenericProbingProxy(const Weights &to) : to_(&to) {} + + GenericProbingProxy() : to_(0) {} + + bool Found() const { return to_ != 0; } + + float Prob() const { + util::FloatEnc enc; + enc.f = to_->prob; + enc.i |= util::kSignBit; + return enc.f; + } + + float Backoff() const { return to_->backoff; } + + bool IndependentLeft() const { + util::FloatEnc enc; + enc.f = to_->prob; + return enc.i & util::kSignBit; + } + + protected: + const Weights *to_; +}; + +// Basic proxy for trie unigrams. +template class GenericTrieUnigramProxy { + public: + explicit GenericTrieUnigramProxy(const Weights &to) : to_(&to) {} + + GenericTrieUnigramProxy() : to_(0) {} + + bool Found() const { return to_ != 0; } + float Prob() const { return to_->prob; } + float Backoff() const { return to_->backoff; } + float Rest() const { return Prob(); } + + protected: + const Weights *to_; +}; + +struct BackoffValue { + typedef ProbBackoff Weights; + static const ModelType kProbingModelType = PROBING; + + class ProbingProxy : public GenericProbingProxy { + public: + explicit ProbingProxy(const Weights &to) : GenericProbingProxy(to) {} + ProbingProxy() {} + float Rest() const { return Prob(); } + }; + + class TrieUnigramProxy : public GenericTrieUnigramProxy { + public: + explicit TrieUnigramProxy(const Weights &to) : GenericTrieUnigramProxy(to) {} + TrieUnigramProxy() {} + float Rest() const { return Prob(); } + }; + + struct ProbingEntry { + typedef uint64_t Key; + typedef Weights Value; + uint64_t key; + ProbBackoff value; + uint64_t GetKey() const { return key; } + }; + + struct TrieUnigramValue { + Weights weights; + uint64_t next; + uint64_t Next() const { return next; } + }; + + const static bool kDifferentRest = false; + + template void Callback(const Config &, unsigned int, typename Model::Vocabulary &, C &callback) { + NoRestBuild build; + callback(build); + } +}; + +struct RestValue { + typedef RestWeights Weights; + static const ModelType kProbingModelType = REST_PROBING; + + class ProbingProxy : public GenericProbingProxy { + public: + explicit ProbingProxy(const Weights &to) : GenericProbingProxy(to) {} + ProbingProxy() {} + float Rest() const { return to_->rest; } + }; + + class TrieUnigramProxy : public GenericTrieUnigramProxy { + public: + explicit TrieUnigramProxy(const Weights &to) : GenericTrieUnigramProxy(to) {} + TrieUnigramProxy() {} + float Rest() const { return to_->rest; } + }; + +// gcc 4.1 doesn't properly back dependent types :-(. 
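+// (Editorial note: pack(4) below drops the 8-byte alignment of the uint64_t
+// key, so a RestValue::ProbingEntry occupies 8 + 12 = 20 bytes instead of 24.)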
+#pragma pack(push) +#pragma pack(4) + struct ProbingEntry { + typedef uint64_t Key; + typedef Weights Value; + Key key; + Value value; + Key GetKey() const { return key; } + }; + + struct TrieUnigramValue { + Weights weights; + uint64_t next; + uint64_t Next() const { return next; } + }; +#pragma pack(pop) + + const static bool kDifferentRest = true; + + template void Callback(const Config &config, unsigned int order, typename Model::Vocabulary &vocab, C &callback) { + switch (config.rest_function) { + case Config::REST_MAX: + { + MaxRestBuild build; + callback(build); + } + break; + case Config::REST_LOWER: + { + LowerRestBuild build(config, order, vocab); + callback(build); + } + break; + } + } +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_VALUE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value_build.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value_build.hh new file mode 100644 index 0000000000000000000000000000000000000000..6fd26ef8f99617ab34a25f89f9f0b5ed8518b8da --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value_build.hh @@ -0,0 +1,97 @@ +#ifndef LM_VALUE_BUILD_H +#define LM_VALUE_BUILD_H + +#include "lm/weights.hh" +#include "lm/word_index.hh" +#include "util/bit_packing.hh" + +#include + +namespace lm { +namespace ngram { + +struct Config; +struct BackoffValue; +struct RestValue; + +class NoRestBuild { + public: + typedef BackoffValue Value; + + NoRestBuild() {} + + void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {} + void SetRest(const WordIndex *, unsigned int, const ProbBackoff &) const {} + + template bool MarkExtends(ProbBackoff &weights, const Second &) const { + util::UnsetSign(weights.prob); + return false; + } + + // Probing doesn't need to go back to unigram. + const static bool kMarkEvenLower = false; +}; + +class MaxRestBuild { + public: + typedef RestValue Value; + + MaxRestBuild() {} + + void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {} + void SetRest(const WordIndex *, unsigned int, RestWeights &weights) const { + weights.rest = weights.prob; + util::SetSign(weights.rest); + } + + bool MarkExtends(RestWeights &weights, const RestWeights &to) const { + util::UnsetSign(weights.prob); + if (weights.rest >= to.rest) return false; + weights.rest = to.rest; + return true; + } + bool MarkExtends(RestWeights &weights, const Prob &to) const { + util::UnsetSign(weights.prob); + if (weights.rest >= to.prob) return false; + weights.rest = to.prob; + return true; + } + + // Probing does need to go back to unigram. 
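+  // Editorial example: MarkExtends keeps the maximum rest over all observed
+  // extensions; if "bar" has rest -3.14 and extension "foo bar" has rest
+  // -2.0, rest is raised to -2.0 and true is returned so the caller keeps
+  // propagating the update to lower orders.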
+ const static bool kMarkEvenLower = true; +}; + +template class LowerRestBuild { + public: + typedef RestValue Value; + + LowerRestBuild(const Config &config, unsigned int order, const typename Model::Vocabulary &vocab); + + ~LowerRestBuild(); + + void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {} + void SetRest(const WordIndex *vocab_ids, unsigned int n, RestWeights &weights) const { + typename Model::State ignored; + if (n == 1) { + weights.rest = unigrams_[*vocab_ids]; + } else { + weights.rest = models_[n-2]->FullScoreForgotState(vocab_ids + 1, vocab_ids + n, *vocab_ids, ignored).prob; + } + } + + template bool MarkExtends(RestWeights &weights, const Second &) const { + util::UnsetSign(weights.prob); + return false; + } + + const static bool kMarkEvenLower = false; + + std::vector unigrams_; + + std::vector models_; +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_VALUE_BUILD_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/virtual_interface.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/virtual_interface.hh new file mode 100644 index 0000000000000000000000000000000000000000..2a2690e140de3fa5fae47688243e3c7c39ebb532 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/virtual_interface.hh @@ -0,0 +1,160 @@ +#ifndef LM_VIRTUAL_INTERFACE_H +#define LM_VIRTUAL_INTERFACE_H + +#include "lm/return.hh" +#include "lm/word_index.hh" +#include "util/string_piece.hh" + +#include +#include + +namespace lm { +namespace base { + +template class ModelFacade; + +/* Vocabulary interface. Call Index(string) and get a word index for use in + * calling Model. It provides faster convenience functions for , , and + * although you can also find these using Index. + * + * Some models do not load the mapping from index to string. If you need this, + * check if the model Vocabulary class implements such a function and access it + * directly. + * + * The Vocabulary object is always owned by the Model and can be retrieved from + * the Model using BaseVocabulary() for this abstract interface or + * GetVocabulary() for the actual implementation (in which case you'll need the + * actual implementation of the Model too). + */ +class Vocabulary { + public: + virtual ~Vocabulary(); + + WordIndex BeginSentence() const { return begin_sentence_; } + WordIndex EndSentence() const { return end_sentence_; } + WordIndex NotFound() const { return not_found_; } + + /* Most implementations allow StringPiece lookups and need only override + * Index(StringPiece). SRI requires null termination and overrides all + * three methods. + */ + virtual WordIndex Index(const StringPiece &str) const = 0; + virtual WordIndex Index(const std::string &str) const { + return Index(StringPiece(str)); + } + virtual WordIndex Index(const char *str) const { + return Index(StringPiece(str)); + } + + protected: + // Call SetSpecial afterward. + Vocabulary() {} + + Vocabulary(WordIndex begin_sentence, WordIndex end_sentence, WordIndex not_found) { + SetSpecial(begin_sentence, end_sentence, not_found); + } + + void SetSpecial(WordIndex begin_sentence, WordIndex end_sentence, WordIndex not_found); + + WordIndex begin_sentence_, end_sentence_, not_found_; + + private: + // Disable copy constructors. They're private and undefined. + // Ersatz boost::noncopyable. + Vocabulary(const Vocabulary &); + Vocabulary &operator=(const Vocabulary &); +}; + +/* There are two ways to access a Model. + * + * + * OPTION 1: Access the Model directly (e.g. 
lm::ngram::Model in model.hh). + * + * Every Model implements the scoring function: + * float Score( + * const Model::State &in_state, + * const WordIndex new_word, + * Model::State &out_state) const; + * + * It can also return the length of n-gram matched by the model: + * FullScoreReturn FullScore( + * const Model::State &in_state, + * const WordIndex new_word, + * Model::State &out_state) const; + * + * + * There are also accessor functions: + * const State &BeginSentenceState() const; + * const State &NullContextState() const; + * const Vocabulary &GetVocabulary() const; + * unsigned int Order() const; + * + * NB: In case you're wondering why the model implementation looks like it's + * missing these methods, see facade.hh. + * + * This is the fastest way to use a model and presents a normal State class to + * be included in a hypothesis state structure. + * + * + * OPTION 2: Use the virtual interface below. + * + * The virtual interface allow you to decide which Model to use at runtime + * without templatizing everything on the Model type. However, each Model has + * its own State class, so a single State cannot be efficiently provided (it + * would require using the maximum memory of any Model's State or memory + * allocation with each lookup). This means you become responsible for + * allocating memory with size StateSize() and passing it to the Score or + * FullScore functions provided here. + * + * For example, cdec has a std::string containing the entire state of a + * hypothesis. It can reserve StateSize bytes in this string for the model + * state. + * + * All the State objects are POD, so it's ok to use raw memory for storing + * State. + * in_state and out_state must not have the same address. + */ +class Model { + public: + virtual ~Model(); + + size_t StateSize() const { return state_size_; } + const void *BeginSentenceMemory() const { return begin_sentence_memory_; } + void BeginSentenceWrite(void *to) const { memcpy(to, begin_sentence_memory_, StateSize()); } + const void *NullContextMemory() const { return null_context_memory_; } + void NullContextWrite(void *to) const { memcpy(to, null_context_memory_, StateSize()); } + + // Requires in_state != out_state + virtual float BaseScore(const void *in_state, const WordIndex new_word, void *out_state) const = 0; + + // Requires in_state != out_state + virtual FullScoreReturn BaseFullScore(const void *in_state, const WordIndex new_word, void *out_state) const = 0; + + // Prefer to use FullScore. The context words should be provided in reverse order. + virtual FullScoreReturn BaseFullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, void *out_state) const = 0; + + unsigned char Order() const { return order_; } + + const Vocabulary &BaseVocabulary() const { return *base_vocab_; } + + private: + template friend class ModelFacade; + explicit Model(size_t state_size) : state_size_(state_size) {} + + const size_t state_size_; + const void *begin_sentence_memory_, *null_context_memory_; + + const Vocabulary *base_vocab_; + + unsigned char order_; + + // Disable copy constructors. They're private and undefined. + // Ersatz boost::noncopyable. 
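+  // Editorial usage sketch for the virtual interface above (hypothetical
+  // caller, not part of this header):
+  //   lm::base::Model *m = /* loaded elsewhere */;
+  //   std::vector<char> in(m->StateSize()), out(m->StateSize());
+  //   m->BeginSentenceWrite(&in[0]);
+  //   float total = 0.0;
+  //   for (/* each WordIndex w */)
+  //     { total += m->BaseScore(&in[0], w, &out[0]); in.swap(out); }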
+ Model(const Model &); + Model &operator=(const Model &); +}; + +} // mamespace base +} // namespace lm + +#endif // LM_VIRTUAL_INTERFACE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/vocab.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/vocab.hh new file mode 100644 index 0000000000000000000000000000000000000000..d6ae07b834898e206811b530a6fc9092bdf8fdda --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/vocab.hh @@ -0,0 +1,249 @@ +#ifndef LM_VOCAB_H +#define LM_VOCAB_H + +#include "lm/enumerate_vocab.hh" +#include "lm/lm_exception.hh" +#include "lm/virtual_interface.hh" +#include "util/fake_ofstream.hh" +#include "util/murmur_hash.hh" +#include "util/pool.hh" +#include "util/probing_hash_table.hh" +#include "util/sorted_uniform.hh" +#include "util/string_piece.hh" + +#include +#include +#include + +namespace lm { +struct ProbBackoff; +class EnumerateVocab; + +namespace ngram { +struct Config; + +namespace detail { +uint64_t HashForVocab(const char *str, std::size_t len); +inline uint64_t HashForVocab(const StringPiece &str) { + return HashForVocab(str.data(), str.length()); +} +struct ProbingVocabularyHeader; +} // namespace detail + +class WriteWordsWrapper : public EnumerateVocab { + public: + WriteWordsWrapper(EnumerateVocab *inner); + + ~WriteWordsWrapper(); + + void Add(WordIndex index, const StringPiece &str); + + const std::string &Buffer() const { return buffer_; } + + private: + EnumerateVocab *inner_; + + std::string buffer_; +}; + +// Vocabulary based on sorted uniform find storing only uint64_t values and using their offsets as indices. +class SortedVocabulary : public base::Vocabulary { + public: + SortedVocabulary(); + + WordIndex Index(const StringPiece &str) const { + const uint64_t *found; + if (util::BoundedSortedUniformFind, util::Pivot64>( + util::IdentityAccessor(), + begin_ - 1, 0, + end_, std::numeric_limits::max(), + detail::HashForVocab(str), found)) { + return found - begin_ + 1; // +1 because is 0 and does not appear in the lookup table. + } else { + return 0; + } + } + + // Size for purposes of file writing + static uint64_t Size(uint64_t entries, const Config &config); + + // Vocab words are [0, Bound()) Only valid after FinishedLoading/LoadedBinary. + WordIndex Bound() const { return bound_; } + + // Everything else is for populating. I'm too lazy to hide and friend these, but you'll only get a const reference anyway. + void SetupMemory(void *start, std::size_t allocated, std::size_t entries, const Config &config); + + void Relocate(void *new_start); + + void ConfigureEnumerate(EnumerateVocab *to, std::size_t max_entries); + + WordIndex Insert(const StringPiece &str); + + // Reorders reorder_vocab so that the IDs are sorted. + void FinishedLoading(ProbBackoff *reorder_vocab); + + // Trie stores the correct counts including in the header. If this was previously sized based on a count exluding , padding with 8 bytes will make it the correct size based on a count including . + std::size_t UnkCountChangePadding() const { return SawUnk() ? 0 : sizeof(uint64_t); } + + bool SawUnk() const { return saw_unk_; } + + void LoadedBinary(bool have_words, int fd, EnumerateVocab *to, uint64_t offset); + + private: + uint64_t *begin_, *end_; + + WordIndex bound_; + + bool saw_unk_; + + EnumerateVocab *enumerate_; + + // Actual strings. 
Used only when loading from ARPA and enumerate_ != NULL + util::Pool string_backing_; + + std::vector strings_to_enumerate_; +}; + +#pragma pack(push) +#pragma pack(4) +struct ProbingVocabularyEntry { + uint64_t key; + WordIndex value; + + typedef uint64_t Key; + uint64_t GetKey() const { return key; } + void SetKey(uint64_t to) { key = to; } + + static ProbingVocabularyEntry Make(uint64_t key, WordIndex value) { + ProbingVocabularyEntry ret; + ret.key = key; + ret.value = value; + return ret; + } +}; +#pragma pack(pop) + +// Vocabulary storing a map from uint64_t to WordIndex. +class ProbingVocabulary : public base::Vocabulary { + public: + ProbingVocabulary(); + + WordIndex Index(const StringPiece &str) const { + Lookup::ConstIterator i; + return lookup_.Find(detail::HashForVocab(str), i) ? i->value : 0; + } + + static uint64_t Size(uint64_t entries, float probing_multiplier); + // This just unwraps Config to get the probing_multiplier. + static uint64_t Size(uint64_t entries, const Config &config); + + // Vocab words are [0, Bound()). + WordIndex Bound() const { return bound_; } + + // Everything else is for populating. I'm too lazy to hide and friend these, but you'll only get a const reference anyway. + void SetupMemory(void *start, std::size_t allocated); + void SetupMemory(void *start, std::size_t allocated, std::size_t /*entries*/, const Config &/*config*/) { + SetupMemory(start, allocated); + } + + void Relocate(void *new_start); + + void ConfigureEnumerate(EnumerateVocab *to, std::size_t max_entries); + + WordIndex Insert(const StringPiece &str); + + template void FinishedLoading(Weights * /*reorder_vocab*/) { + FinishedLoading(); + } + void FinishedLoading(); + + std::size_t UnkCountChangePadding() const { return 0; } + + bool SawUnk() const { return saw_unk_; } + + void LoadedBinary(bool have_words, int fd, EnumerateVocab *to, uint64_t offset); + + private: + typedef util::ProbingHashTable Lookup; + + Lookup lookup_; + + WordIndex bound_; + + bool saw_unk_; + + EnumerateVocab *enumerate_; + + detail::ProbingVocabularyHeader *header_; +}; + +void MissingUnknown(const Config &config) throw(SpecialWordMissingException); +void MissingSentenceMarker(const Config &config, const char *str) throw(SpecialWordMissingException); + +template void CheckSpecials(const Config &config, const Vocab &vocab) throw(SpecialWordMissingException) { + if (!vocab.SawUnk()) MissingUnknown(config); + if (vocab.BeginSentence() == vocab.NotFound()) MissingSentenceMarker(config, ""); + if (vocab.EndSentence() == vocab.NotFound()) MissingSentenceMarker(config, ""); +} + +class WriteUniqueWords { + public: + explicit WriteUniqueWords(int fd) : word_list_(fd) {} + + void operator()(const StringPiece &word) { + word_list_ << word << '\0'; + } + + private: + util::FakeOFStream word_list_; +}; + +class NoOpUniqueWords { + public: + NoOpUniqueWords() {} + void operator()(const StringPiece &word) {} +}; + +template class GrowableVocab { + public: + static std::size_t MemUsage(WordIndex content) { + return Lookup::MemUsage(content > 2 ? content : 2); + } + + // Does not take ownership of write_wordi + template GrowableVocab(WordIndex initial_size, const NewWordConstruct &new_word_construct = NewWordAction()) + : lookup_(initial_size), new_word_(new_word_construct) { + FindOrInsert(""); // Force 0 + FindOrInsert(""); // Force 1 + FindOrInsert(""); // Force 2 + } + + WordIndex Index(const StringPiece &str) const { + Lookup::ConstIterator i; + return lookup_.Find(detail::HashForVocab(str), i) ? 
i->value : 0; + } + + WordIndex FindOrInsert(const StringPiece &word) { + ProbingVocabularyEntry entry = ProbingVocabularyEntry::Make(util::MurmurHashNative(word.data(), word.size()), Size()); + Lookup::MutableIterator it; + if (!lookup_.FindOrInsert(entry, it)) { + new_word_(word); + UTIL_THROW_IF(Size() >= std::numeric_limits::max(), VocabLoadException, "Too many vocabulary words. Change WordIndex to uint64_t in lm/word_index.hh"); + } + return it->value; + } + + WordIndex Size() const { return lookup_.Size(); } + + private: + typedef util::AutoProbing Lookup; + + Lookup lookup_; + + NewWordAction new_word_; +}; + +} // namespace ngram +} // namespace lm + +#endif // LM_VOCAB_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/weights.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/weights.hh new file mode 100644 index 0000000000000000000000000000000000000000..da1963d8346b748db248bd0aae527d290e42b973 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/weights.hh @@ -0,0 +1,22 @@ +#ifndef LM_WEIGHTS_H +#define LM_WEIGHTS_H + +// Weights for n-grams. Probability and possibly a backoff. + +namespace lm { +struct Prob { + float prob; +}; +// No inheritance so this will be a POD. +struct ProbBackoff { + float prob; + float backoff; +}; +struct RestWeights { + float prob; + float backoff; + float rest; +}; + +} // namespace lm +#endif // LM_WEIGHTS_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/word_index.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/word_index.hh new file mode 100644 index 0000000000000000000000000000000000000000..a5a0fda81d634a8434562a29d5ac5af562db3ab5 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/word_index.hh @@ -0,0 +1,14 @@ +// Separate header because this is used often. +#ifndef LM_WORD_INDEX_H +#define LM_WORD_INDEX_H + +#include + +namespace lm { +typedef unsigned int WordIndex; +const WordIndex kMaxWordIndex = UINT_MAX; +} // namespace lm + +typedef lm::WordIndex LMWordIndex; + +#endif diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/wrappers/nplm.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/wrappers/nplm.hh new file mode 100644 index 0000000000000000000000000000000000000000..b7dd4a21e9949d5fa6f09502513d6bcf8a62e7d3 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/lm/wrappers/nplm.hh @@ -0,0 +1,83 @@ +#ifndef LM_WRAPPERS_NPLM_H +#define LM_WRAPPERS_NPLM_H + +#include "lm/facade.hh" +#include "lm/max_order.hh" +#include "util/string_piece.hh" + +#include +#include + +/* Wrapper to NPLM "by Ashish Vaswani, with contributions from David Chiang + * and Victoria Fossum." + * http://nlg.isi.edu/software/nplm/ + */ + +namespace nplm { +class vocabulary; +class neuralLM; +} // namespace nplm + +namespace lm { +namespace np { + +class Vocabulary : public base::Vocabulary { + public: + Vocabulary(const nplm::vocabulary &vocab); + + ~Vocabulary(); + + WordIndex Index(const std::string &str) const; + + // TODO: lobby them to support StringPiece + WordIndex Index(const StringPiece &str) const { + return Index(std::string(str.data(), str.size())); + } + + lm::WordIndex NullWord() const { return null_word_; } + + private: + const nplm::vocabulary &vocab_; + + const lm::WordIndex null_word_; +}; + +// Sorry for imposing my limitations on your code. 
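+// (Editorial note: with NPLM_MAX_ORDER 7, the State below stores the previous
+// 6 word ids, i.e. the left context of a 7-gram neural model.)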
+#define NPLM_MAX_ORDER 7 + +struct State { + WordIndex words[NPLM_MAX_ORDER - 1]; +}; + +class Model : public lm::base::ModelFacade { + private: + typedef lm::base::ModelFacade P; + + public: + // Does this look like an NPLM? + static bool Recognize(const std::string &file); + + explicit Model(const std::string &file, std::size_t cache_size = 1 << 20); + + ~Model(); + + FullScoreReturn FullScore(const State &from, const WordIndex new_word, State &out_state) const; + + FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const; + + private: + boost::scoped_ptr base_instance_; + + mutable boost::thread_specific_ptr backend_; + + Vocabulary vocab_; + + lm::WordIndex null_word_; + + const std::size_t cache_size_; +}; + +} // namespace np +} // namespace lm + +#endif // LM_WRAPPERS_NPLM_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/bit_packing.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/bit_packing.hh new file mode 100644 index 0000000000000000000000000000000000000000..1e34d9ab1d167e62fb07f6fcc47639ca1581bd8e --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/bit_packing.hh @@ -0,0 +1,186 @@ +#ifndef UTIL_BIT_PACKING_H +#define UTIL_BIT_PACKING_H + +/* Bit-level packing routines + * + * WARNING WARNING WARNING: + * The write functions assume that memory is zero initially. This makes them + * faster and is the appropriate case for mmapped language model construction. + * These routines assume that unaligned access to uint64_t is fast. This is + * the case on x86_64. I'm not sure how fast unaligned 64-bit access is on + * x86 but my target audience is large language models for which 64-bit is + * necessary. + * + * Call the BitPackingSanity function to sanity check. Calling once suffices, + * but it may be called multiple times when that's inconvenient. + * + * ARM and MinGW ports contributed by Hideo Okuma and Tomoyuki Yoshimura at + * NICT. + */ + +#include +#ifdef __APPLE__ +#include +#elif __linux__ +#include +#elif !defined(_WIN32) && !defined(_WIN64) +#include +#endif + +#include + +#include + +namespace util { + +// Fun fact: __BYTE_ORDER is wrong on Solaris Sparc, but the version without __ is correct. +#if BYTE_ORDER == LITTLE_ENDIAN +inline uint8_t BitPackShift(uint8_t bit, uint8_t /*length*/) { + return bit; +} +#elif BYTE_ORDER == BIG_ENDIAN +inline uint8_t BitPackShift(uint8_t bit, uint8_t length) { + return 64 - length - bit; +} +#else +#error "Bit packing code isn't written for your byte order." +#endif + +inline uint64_t ReadOff(const void *base, uint64_t bit_off) { +#if defined(__arm) || defined(__arm__) + const uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint64_t value64; + memcpy(&value64, base_off, sizeof(value64)); + return value64; +#else + return *reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)); +#endif +} + +/* Pack integers up to 57 bits using their least significant digits. + * The length is specified using mask: + * Assumes mask == (1 << length) - 1 where length <= 57. + */ +inline uint64_t ReadInt57(const void *base, uint64_t bit_off, uint8_t length, uint64_t mask) { + return (ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, length)) & mask; +} +/* Assumes value < (1 << length) and length <= 57. + * Assumes the memory is zero initially. 
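+ *
+ * Editorial example (little-endian): WriteInt57(base, 10, 3, 5) ORs
+ * 5 << BitPackShift(10 & 7, 3) == 5 << 2 into the uint64_t read at byte
+ * base + (10 >> 3) == base + 1; ReadInt57(base, 10, 3, 7) shifts back down
+ * and masks with (1 << 3) - 1 == 7 to recover 5.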
+ */ +inline void WriteInt57(void *base, uint64_t bit_off, uint8_t length, uint64_t value) { +#if defined(__arm) || defined(__arm__) + uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint64_t value64; + memcpy(&value64, base_off, sizeof(value64)); + value64 |= (value << BitPackShift(bit_off & 7, length)); + memcpy(base_off, &value64, sizeof(value64)); +#else + *reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)) |= + (value << BitPackShift(bit_off & 7, length)); +#endif +} + +/* Same caveats as above, but for a 25 bit limit. */ +inline uint32_t ReadInt25(const void *base, uint64_t bit_off, uint8_t length, uint32_t mask) { +#if defined(__arm) || defined(__arm__) + const uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint32_t value32; + memcpy(&value32, base_off, sizeof(value32)); + return (value32 >> BitPackShift(bit_off & 7, length)) & mask; +#else + return (*reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)) >> BitPackShift(bit_off & 7, length)) & mask; +#endif +} + +inline void WriteInt25(void *base, uint64_t bit_off, uint8_t length, uint32_t value) { +#if defined(__arm) || defined(__arm__) + uint8_t *base_off = reinterpret_cast(base) + (bit_off >> 3); + uint32_t value32; + memcpy(&value32, base_off, sizeof(value32)); + value32 |= (value << BitPackShift(bit_off & 7, length)); + memcpy(base_off, &value32, sizeof(value32)); +#else + *reinterpret_cast(reinterpret_cast(base) + (bit_off >> 3)) |= + (value << BitPackShift(bit_off & 7, length)); +#endif +} + +typedef union { float f; uint32_t i; } FloatEnc; + +inline float ReadFloat32(const void *base, uint64_t bit_off) { + FloatEnc encoded; + encoded.i = ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, 32); + return encoded.f; +} +inline void WriteFloat32(void *base, uint64_t bit_off, float value) { + FloatEnc encoded; + encoded.f = value; + WriteInt57(base, bit_off, 32, encoded.i); +} + +const uint32_t kSignBit = 0x80000000; + +inline void SetSign(float &to) { + FloatEnc enc; + enc.f = to; + enc.i |= kSignBit; + to = enc.f; +} + +inline void UnsetSign(float &to) { + FloatEnc enc; + enc.f = to; + enc.i &= ~kSignBit; + to = enc.f; +} + +inline float ReadNonPositiveFloat31(const void *base, uint64_t bit_off) { + FloatEnc encoded; + encoded.i = ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, 31); + // Sign bit set means negative. + encoded.i |= kSignBit; + return encoded.f; +} +inline void WriteNonPositiveFloat31(void *base, uint64_t bit_off, float value) { + FloatEnc encoded; + encoded.f = value; + encoded.i &= ~kSignBit; + WriteInt57(base, bit_off, 31, encoded.i); +} + +void BitPackingSanity(); + +// Return bits required to store integers upto max_value. Not the most +// efficient implementation, but this is only called a few times to size tries. 
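+// Editorial example: RequiredBits(5) == 3, so BitsMask::ByMax(5) yields
+// bits == 3 and mask == 0x7, the (length, mask) pair ReadInt57 above expects.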
+uint8_t RequiredBits(uint64_t max_value); + +struct BitsMask { + static BitsMask ByMax(uint64_t max_value) { + BitsMask ret; + ret.FromMax(max_value); + return ret; + } + static BitsMask ByBits(uint8_t bits) { + BitsMask ret; + ret.bits = bits; + ret.mask = (1ULL << bits) - 1; + return ret; + } + void FromMax(uint64_t max_value) { + bits = RequiredBits(max_value); + mask = (1ULL << bits) - 1; + } + uint8_t bits; + uint64_t mask; +}; + +struct BitAddress { + BitAddress(void *in_base, uint64_t in_offset) : base(in_base), offset(in_offset) {} + + void *base; + uint64_t offset; +}; + +} // namespace util + +#endif // UTIL_BIT_PACKING_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/exception.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/exception.hh new file mode 100644 index 0000000000000000000000000000000000000000..4e50a6f3a0f31b9d0617d7c5ad56bb129b3a9037 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/exception.hh @@ -0,0 +1,149 @@ +#ifndef UTIL_EXCEPTION_H +#define UTIL_EXCEPTION_H + +#include +#include +#include +#include + +#include + +namespace util { + +template typename Except::template ExceptionTag::Identity operator<<(Except &e, const Data &data); + +class Exception : public std::exception { + public: + Exception() throw(); + virtual ~Exception() throw(); + + Exception(const Exception &from); + Exception &operator=(const Exception &from); + + // Not threadsafe, but probably doesn't matter. FWIW, Boost's exception guidance implies that what() isn't threadsafe. + const char *what() const throw(); + + // For use by the UTIL_THROW macros. + void SetLocation( + const char *file, + unsigned int line, + const char *func, + const char *child_name, + const char *condition); + + private: + template friend typename Except::template ExceptionTag::Identity operator<<(Except &e, const Data &data); + + // This helps restrict operator<< defined below. + template struct ExceptionTag { + typedef T Identity; + }; + + std::stringstream stream_; + mutable std::string text_; +}; + +/* This implements the normal operator<< for Exception and all its children. + * SFINAE means it only applies to Exception. Think of this as an ersatz + * boost::enable_if. + */ +template typename Except::template ExceptionTag::Identity operator<<(Except &e, const Data &data) { + e.stream_ << data; + return e; +} + +#ifdef __GNUC__ +#define UTIL_FUNC_NAME __PRETTY_FUNCTION__ +#else +#ifdef _WIN32 +#define UTIL_FUNC_NAME __FUNCTION__ +#else +#define UTIL_FUNC_NAME NULL +#endif +#endif + +/* Create an instance of Exception, add the message Modify, and throw it. + * Modify is appended to the what() message and can contain << for ostream + * operations. + * + * do .. while kludge to swallow trailing ; character + * http://gcc.gnu.org/onlinedocs/cpp/Swallowing-the-Semicolon.html . + * Arg can be a constructor argument to the exception. 
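+ *
+ * Editorial usage sketch:
+ *   UTIL_THROW(util::Exception, "bad value " << v);
+ *   UTIL_THROW_IF(fd < 0, util::ErrnoException, "open failed on " << name);
+ *   UTIL_THROW_ARG(ParseNumberException, (str), "while parsing " << str);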
+ */ +#define UTIL_THROW_BACKEND(Condition, Exception, Arg, Modify) do { \ + Exception UTIL_e Arg; \ + UTIL_e.SetLocation(__FILE__, __LINE__, UTIL_FUNC_NAME, #Exception, Condition); \ + UTIL_e << Modify; \ + throw UTIL_e; \ +} while (0) + +#define UTIL_THROW_ARG(Exception, Arg, Modify) \ + UTIL_THROW_BACKEND(NULL, Exception, Arg, Modify) + +#define UTIL_THROW(Exception, Modify) \ + UTIL_THROW_BACKEND(NULL, Exception, , Modify); + +#define UTIL_THROW2(Modify) \ + UTIL_THROW_BACKEND(NULL, util::Exception, , Modify); + +#if __GNUC__ >= 3 +#define UTIL_UNLIKELY(x) __builtin_expect (!!(x), 0) +#else +#define UTIL_UNLIKELY(x) (x) +#endif + +#define UTIL_THROW_IF_ARG(Condition, Exception, Arg, Modify) do { \ + if (UTIL_UNLIKELY(Condition)) { \ + UTIL_THROW_BACKEND(#Condition, Exception, Arg, Modify); \ + } \ +} while (0) + +#define UTIL_THROW_IF(Condition, Exception, Modify) \ + UTIL_THROW_IF_ARG(Condition, Exception, , Modify) + +#define UTIL_THROW_IF2(Condition, Modify) \ + UTIL_THROW_IF_ARG(Condition, util::Exception, , Modify) + +// Exception that records errno and adds it to the message. +class ErrnoException : public Exception { + public: + ErrnoException() throw(); + + virtual ~ErrnoException() throw(); + + int Error() const throw() { return errno_; } + + private: + int errno_; +}; + +// file wasn't there, or couldn't be open for some reason +class FileOpenException : public Exception { + public: + FileOpenException() throw() {} + ~FileOpenException() throw() {} +}; + +// Utilities for overflow checking. +class OverflowException : public Exception { + public: + OverflowException() throw(); + ~OverflowException() throw(); +}; + +template inline std::size_t CheckOverflowInternal(uint64_t value) { + UTIL_THROW_IF(value > static_cast(std::numeric_limits::max()), OverflowException, "Integer overflow detected. This model is too big for 32-bit code."); + return value; +} + +template <> inline std::size_t CheckOverflowInternal<8>(uint64_t value) { + return value; +} + +inline std::size_t CheckOverflow(uint64_t value) { + return CheckOverflowInternal(value); +} + +} // namespace util + +#endif // UTIL_EXCEPTION_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/fake_ofstream.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/fake_ofstream.hh new file mode 100644 index 0000000000000000000000000000000000000000..987fa80151ab18f926949d694d09dd160b5c03ab --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/fake_ofstream.hh @@ -0,0 +1,105 @@ +/* Like std::ofstream but without being incredibly slow. Backed by a raw fd. + * Does not support many data types. Currently, it's targeted at writing ARPA + * files quickly. + */ +#ifndef UTIL_FAKE_OFSTREAM_H +#define UTIL_FAKE_OFSTREAM_H + +#include "util/double-conversion/double-conversion.h" +#include "util/double-conversion/utils.h" +#include "util/file.hh" +#include "util/scoped.hh" +#include "util/string_piece.hh" + +#define BOOST_LEXICAL_CAST_ASSUME_C_LOCALE +#include + +namespace util { +class FakeOFStream { + public: + // Does not take ownership of out. + // Allows default constructor, but must call SetFD. + explicit FakeOFStream(int out = -1, std::size_t buffer_size = 1048576) + : buf_(util::MallocOrThrow(buffer_size)), + builder_(static_cast(buf_.get()), buffer_size), + // Mostly the default but with inf instead. And no flags. 
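+        // (Editorial gloss of the arguments, hedged: flags, infinity symbol,
+        // NaN symbol, exponent character, then decimal_in_shortest low/high
+        // -6/21 and padding limits 6/0.)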
+ convert_(double_conversion::DoubleToStringConverter::NO_FLAGS, "inf", "NaN", 'e', -6, 21, 6, 0), + fd_(out), + buffer_size_(buffer_size) {} + + ~FakeOFStream() { + if (buf_.get()) Flush(); + } + + void SetFD(int to) { + if (builder_.position()) Flush(); + fd_ = to; + } + + FakeOFStream &operator<<(float value) { + // Odd, but this is the largest number found in the comments. + EnsureRemaining(double_conversion::DoubleToStringConverter::kMaxPrecisionDigits + 8); + convert_.ToShortestSingle(value, &builder_); + return *this; + } + + FakeOFStream &operator<<(double value) { + EnsureRemaining(double_conversion::DoubleToStringConverter::kMaxPrecisionDigits + 8); + convert_.ToShortest(value, &builder_); + return *this; + } + + FakeOFStream &operator<<(StringPiece str) { + if (str.size() > buffer_size_) { + Flush(); + util::WriteOrThrow(fd_, str.data(), str.size()); + } else { + EnsureRemaining(str.size()); + builder_.AddSubstring(str.data(), str.size()); + } + return *this; + } + + // Inefficient! TODO: more efficient implementation + FakeOFStream &operator<<(unsigned value) { + return *this << boost::lexical_cast(value); + } + + FakeOFStream &operator<<(char c) { + EnsureRemaining(1); + builder_.AddCharacter(c); + return *this; + } + + // Note this does not sync. + void Flush() { + util::WriteOrThrow(fd_, buf_.get(), builder_.position()); + builder_.Reset(); + } + + // Not necessary, but does assure the data is cleared. + void Finish() { + Flush(); + // It will segfault trying to null terminate otherwise. + builder_.Finalize(); + buf_.reset(); + util::FSyncOrThrow(fd_); + } + + private: + void EnsureRemaining(std::size_t amount) { + if (static_cast(builder_.size() - builder_.position()) <= amount) { + Flush(); + } + } + + util::scoped_malloc buf_; + double_conversion::StringBuilder builder_; + double_conversion::DoubleToStringConverter convert_; + int fd_; + const std::size_t buffer_size_; +}; + +} // namespace + +#endif diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/file_piece.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/file_piece.hh new file mode 100644 index 0000000000000000000000000000000000000000..5495ddccceea10cad3cb57922f440e0140202b74 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/file_piece.hh @@ -0,0 +1,158 @@ +#ifndef UTIL_FILE_PIECE_H +#define UTIL_FILE_PIECE_H + +#include "util/ersatz_progress.hh" +#include "util/exception.hh" +#include "util/file.hh" +#include "util/mmap.hh" +#include "util/read_compressed.hh" +#include "util/string_piece.hh" + +#include +#include +#include + +#include +#include + +namespace util { + +class ParseNumberException : public Exception { + public: + explicit ParseNumberException(StringPiece value) throw(); + ~ParseNumberException() throw() {} +}; + +extern const bool kSpaces[256]; + +// Memory backing the returned StringPiece may vanish on the next call. +class FilePiece { + public: + // 1 MB default. + explicit FilePiece(const char *file, std::ostream *show_progress = NULL, std::size_t min_buffer = 1048576); + // Takes ownership of fd. name is used for messages. + explicit FilePiece(int fd, const char *name = NULL, std::ostream *show_progress = NULL, std::size_t min_buffer = 1048576); + + /* Read from an istream. Don't use this if you can avoid it. Raw fd IO is + * much faster. But sometimes you just have an istream like Boost's HTTP + * server and want to parse it the same way. + * name is just used for messages and FileName(). 
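+ *
+ * Editorial usage sketch for FilePiece in general (hypothetical file name):
+ *   util::FilePiece f("corpus.txt");
+ *   try { while (true) { StringPiece line = f.ReadLine(); /* ... */ } }
+ *   catch (const util::EndOfFileException &e) {}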
+ */ + explicit FilePiece(std::istream &stream, const char *name = NULL, std::size_t min_buffer = 1048576); + + ~FilePiece(); + + char get() { + if (position_ == position_end_) { + Shift(); + if (at_end_) throw EndOfFileException(); + } + return *(position_++); + } + + // Leaves the delimiter, if any, to be returned by get(). Delimiters defined by isspace(). + StringPiece ReadDelimited(const bool *delim = kSpaces) { + SkipSpaces(delim); + return Consume(FindDelimiterOrEOF(delim)); + } + + // Read word until the line or file ends. + bool ReadWordSameLine(StringPiece &to, const bool *delim = kSpaces) { + assert(delim[static_cast('\n')]); + // Skip non-enter spaces. + for (; ; ++position_) { + if (position_ == position_end_) { + try { + Shift(); + } catch (const util::EndOfFileException &e) { return false; } + // And break out at end of file. + if (position_ == position_end_) return false; + } + if (!delim[static_cast(*position_)]) break; + if (*position_ == '\n') return false; + } + // We can't be at the end of file because there's at least one character open. + to = Consume(FindDelimiterOrEOF(delim)); + return true; + } + + // Unlike ReadDelimited, this includes leading spaces and consumes the delimiter. + // It is similar to getline in that way. + StringPiece ReadLine(char delim = '\n'); + + // Doesn't throw EndOfFileException, just returns false. + bool ReadLineOrEOF(StringPiece &to, char delim = '\n'); + + float ReadFloat(); + double ReadDouble(); + long int ReadLong(); + unsigned long int ReadULong(); + + // Skip spaces defined by isspace. + void SkipSpaces(const bool *delim = kSpaces) { + assert(position_ <= position_end_); + for (; ; ++position_) { + if (position_ == position_end_) { + Shift(); + // And break out at end of file. + if (position_ == position_end_) return; + } + assert(position_ < position_end_); + if (!delim[static_cast(*position_)]) return; + } + } + + uint64_t Offset() const { + return position_ - data_.begin() + mapped_offset_; + } + + const std::string &FileName() const { return file_name_; } + + private: + void InitializeNoRead(const char *name, std::size_t min_buffer); + // Calls InitializeNoRead, so don't call both. + void Initialize(const char *name, std::ostream *show_progress, std::size_t min_buffer); + + template T ReadNumber(); + + StringPiece Consume(const char *to) { + assert(to >= position_); + StringPiece ret(position_, to - position_); + position_ = to; + return ret; + } + + const char *FindDelimiterOrEOF(const bool *delim = kSpaces); + + void Shift(); + // Backends to Shift(). + void MMapShift(uint64_t desired_begin); + + void TransitionToRead(); + void ReadShift(); + + const char *position_, *last_space_, *position_end_; + + scoped_fd file_; + const uint64_t total_size_; + const uint64_t page_; + + std::size_t default_map_size_; + uint64_t mapped_offset_; + + // Order matters: file_ should always be destroyed after this. + scoped_memory data_; + + bool at_end_; + bool fallback_to_read_; + + ErsatzProgress progress_; + + std::string file_name_; + + ReadCompressed fell_back_; +}; + +} // namespace util + +#endif // UTIL_FILE_PIECE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/have.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/have.hh new file mode 100644 index 0000000000000000000000000000000000000000..dc3f63303ca7f061617c1299a2e2885f1f70c281 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/have.hh @@ -0,0 +1,13 @@ +/* Optional packages. 
You might want to integrate this with your build system e.g. config.h from ./configure. */ +#ifndef UTIL_HAVE_H +#define UTIL_HAVE_H + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#ifndef HAVE_ICU +//#define HAVE_ICU +#endif + +#endif // UTIL_HAVE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/parallel_read.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/parallel_read.hh new file mode 100644 index 0000000000000000000000000000000000000000..1e96e79035a93a4a669a9d7d7bd14b146e0cb96a --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/parallel_read.hh @@ -0,0 +1,16 @@ +#ifndef UTIL_PARALLEL_READ__ +#define UTIL_PARALLEL_READ__ + +/* Read pieces of a file in parallel. This has a very specific use case: + * reading files from Lustre is CPU bound so multiple threads actually + * increases throughput. Speed matters when an LM takes a terabyte. + */ + +#include +#include + +namespace util { +void ParallelRead(int fd, void *to, std::size_t amount, uint64_t offset); +} // namespace util + +#endif // UTIL_PARALLEL_READ__ diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/pcqueue.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/pcqueue.hh new file mode 100644 index 0000000000000000000000000000000000000000..d2ffee7775e5565af03bf61ecbb7822d0dfdcf1a --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/pcqueue.hh @@ -0,0 +1,156 @@ +#ifndef UTIL_PCQUEUE_H +#define UTIL_PCQUEUE_H + +#include "util/exception.hh" + +#include +#include +#include +#include + +#include + +#ifdef __APPLE__ +#include +#include +#include +#include +#endif // __APPLE__ + +namespace util { + +/* OS X Maverick and Boost interprocess were doing "Function not implemented." + * So this is my own wrapper around the mach kernel APIs. + */ +#ifdef __APPLE__ + +#define MACH_CALL(call) UTIL_THROW_IF(KERN_SUCCESS != (call), Exception, "Mach call failure") + +class Semaphore { + public: + explicit Semaphore(int value) : task_(mach_task_self()) { + MACH_CALL(semaphore_create(task_, &back_, SYNC_POLICY_FIFO, value)); + } + + ~Semaphore() { + MACH_CALL(semaphore_destroy(task_, back_)); + } + + void wait() { + MACH_CALL(semaphore_wait(back_)); + } + + void post() { + MACH_CALL(semaphore_signal(back_)); + } + + private: + semaphore_t back_; + task_t task_; +}; + +inline void WaitSemaphore(Semaphore &semaphore) { + semaphore.wait(); +} + +#else +typedef boost::interprocess::interprocess_semaphore Semaphore; + +inline void WaitSemaphore (Semaphore &on) { + while (1) { + try { + on.wait(); + break; + } + catch (boost::interprocess::interprocess_exception &e) { + if (e.get_native_error() != EINTR) { + throw; + } + } + } +} + +#endif // __APPLE__ + +/** + * Producer consumer queue safe for multiple producers and multiple consumers. + * T must be default constructable and have operator=. + * The value is copied twice for Consume(T &out) or three times for Consume(), + * so larger objects should be passed via pointer. + * Strong exception guarantee if operator= throws. Undefined if semaphores throw. + */ +template class PCQueue : boost::noncopyable { + public: + explicit PCQueue(size_t size) + : empty_(size), used_(0), + storage_(new T[size]), + end_(storage_.get() + size), + produce_at_(storage_.get()), + consume_at_(storage_.get()) {} + + // Add a value to the queue. 
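+  // Illustrative sketch (not from the original source): move ints between a
+  // producer thread and a consumer thread.
+  //
+  //   util::PCQueue<int> queue(16);
+  //   // in producer thread(s): queue.Produce(i);
+  //   // in consumer thread(s):
+  //   int got;
+  //   queue.Consume(got);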
+ void Produce(const T &val) { + WaitSemaphore(empty_); + { + boost::unique_lock produce_lock(produce_at_mutex_); + try { + *produce_at_ = val; + } + catch (...) { + empty_.post(); + throw; + } + if (++produce_at_ == end_) produce_at_ = storage_.get(); + } + used_.post(); + } + + // Consume a value, assigning it to out. + T& Consume(T &out) { + WaitSemaphore(used_); + { + boost::unique_lock consume_lock(consume_at_mutex_); + try { + out = *consume_at_; + } + catch (...) { + used_.post(); + throw; + } + if (++consume_at_ == end_) consume_at_ = storage_.get(); + } + empty_.post(); + return out; + } + + // Convenience version of Consume that copies the value to return. + // The other version is faster. + T Consume() { + T ret; + Consume(ret); + return ret; + } + + private: + // Number of empty spaces in storage_. + Semaphore empty_; + // Number of occupied spaces in storage_. + Semaphore used_; + + boost::scoped_array storage_; + + T *const end_; + + // Index for next write in storage_. + T *produce_at_; + boost::mutex produce_at_mutex_; + + // Index for next read from storage_. + T *consume_at_; + boost::mutex consume_at_mutex_; + +}; + +} // namespace util + +#endif // UTIL_PCQUEUE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/pool.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/pool.hh new file mode 100644 index 0000000000000000000000000000000000000000..89e793d7e1efe523f481de034c128f331f95f6b2 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/pool.hh @@ -0,0 +1,45 @@ +// Very simple pool. It can only allocate memory. And all of the memory it +// allocates must be freed at the same time. + +#ifndef UTIL_POOL_H +#define UTIL_POOL_H + +#include + +#include + +namespace util { + +class Pool { + public: + Pool(); + + ~Pool(); + + void *Allocate(std::size_t size) { + void *ret = current_; + current_ += size; + if (current_ < current_end_) { + return ret; + } else { + return More(size); + } + } + + void FreeAll(); + + private: + void *More(std::size_t size); + + std::vector free_list_; + + uint8_t *current_, *current_end_; + + // no copying + Pool(const Pool &); + Pool &operator=(const Pool &); +}; + +} // namespace util + +#endif // UTIL_POOL_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/probing_hash_table.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/probing_hash_table.hh new file mode 100644 index 0000000000000000000000000000000000000000..ea228dd9ae4a3f10ec2c7ec17341943f612755b5 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/probing_hash_table.hh @@ -0,0 +1,331 @@ +#ifndef UTIL_PROBING_HASH_TABLE_H +#define UTIL_PROBING_HASH_TABLE_H + +#include "util/exception.hh" +#include "util/scoped.hh" + +#include +#include +#include +#include + +#include +#include + +namespace util { + +/* Thrown when table grows too large */ +class ProbingSizeException : public Exception { + public: + ProbingSizeException() throw() {} + ~ProbingSizeException() throw() {} +}; + +// std::identity is an SGI extension :-( +struct IdentityHash { + template T operator()(T arg) const { return arg; } +}; + +template class AutoProbing; + +/* Non-standard hash table + * Buckets must be set at the beginning and must be greater than maximum number + * of elements, else it throws ProbingSizeException. + * Memory management and initialization is externalized to make it easier to + * serialize these to disk and load them quickly. + * Uses linear probing to find value. 
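+ * For example (illustration added here): with 4 buckets, invalid key 0, and
+ * hash(k) = k, inserting 3 then 7 puts 3 at bucket 3; 7's ideal bucket 3 is
+ * taken, so it probes linearly and wraps around to bucket 0.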
+ * Only insert and lookup operations. + */ +template > class ProbingHashTable { + public: + typedef EntryT Entry; + typedef typename Entry::Key Key; + typedef const Entry *ConstIterator; + typedef Entry *MutableIterator; + typedef HashT Hash; + typedef EqualT Equal; + + static uint64_t Size(uint64_t entries, float multiplier) { + uint64_t buckets = std::max(entries + 1, static_cast(multiplier * static_cast(entries))); + return buckets * sizeof(Entry); + } + + // Must be assigned to later. + ProbingHashTable() : entries_(0) +#ifdef DEBUG + , initialized_(false) +#endif + {} + + ProbingHashTable(void *start, std::size_t allocated, const Key &invalid = Key(), const Hash &hash_func = Hash(), const Equal &equal_func = Equal()) + : begin_(reinterpret_cast(start)), + buckets_(allocated / sizeof(Entry)), + end_(begin_ + buckets_), + invalid_(invalid), + hash_(hash_func), + equal_(equal_func), + entries_(0) +#ifdef DEBUG + , initialized_(true) +#endif + {} + + void Relocate(void *new_base) { + begin_ = reinterpret_cast(new_base); + end_ = begin_ + buckets_; + } + + template MutableIterator Insert(const T &t) { +#ifdef DEBUG + assert(initialized_); +#endif + UTIL_THROW_IF(++entries_ >= buckets_, ProbingSizeException, "Hash table with " << buckets_ << " buckets is full."); + return UncheckedInsert(t); + } + + // Return true if the value was found (and not inserted). This is consistent with Find but the opposite if hash_map! + template bool FindOrInsert(const T &t, MutableIterator &out) { +#ifdef DEBUG + assert(initialized_); +#endif + for (MutableIterator i = Ideal(t);;) { + Key got(i->GetKey()); + if (equal_(got, t.GetKey())) { out = i; return true; } + if (equal_(got, invalid_)) { + UTIL_THROW_IF(++entries_ >= buckets_, ProbingSizeException, "Hash table with " << buckets_ << " buckets is full."); + *i = t; + out = i; + return false; + } + if (++i == end_) i = begin_; + } + } + + void FinishedInserting() {} + + // Don't change anything related to GetKey, + template bool UnsafeMutableFind(const Key key, MutableIterator &out) { +#ifdef DEBUG + assert(initialized_); +#endif + for (MutableIterator i(begin_ + (hash_(key) % buckets_));;) { + Key got(i->GetKey()); + if (equal_(got, key)) { out = i; return true; } + if (equal_(got, invalid_)) return false; + if (++i == end_) i = begin_; + } + } + + // Like UnsafeMutableFind, but the key must be there. + template MutableIterator UnsafeMutableMustFind(const Key key) { + for (MutableIterator i(begin_ + (hash_(key) % buckets_));;) { + Key got(i->GetKey()); + if (equal_(got, key)) { return i; } + assert(!equal_(got, invalid_)); + if (++i == end_) i = begin_; + } + } + + + template bool Find(const Key key, ConstIterator &out) const { +#ifdef DEBUG + assert(initialized_); +#endif + for (ConstIterator i(begin_ + (hash_(key) % buckets_));;) { + Key got(i->GetKey()); + if (equal_(got, key)) { out = i; return true; } + if (equal_(got, invalid_)) return false; + if (++i == end_) i = begin_; + } + } + + // Like Find but we're sure it must be there. + template ConstIterator MustFind(const Key key) const { + for (ConstIterator i(begin_ + (hash_(key) % buckets_));;) { + Key got(i->GetKey()); + if (equal_(got, key)) { return i; } + assert(!equal_(got, invalid_)); + if (++i == end_) i = begin_; + } + } + + void Clear() { + Entry invalid; + invalid.SetKey(invalid_); + std::fill(begin_, end_, invalid); + entries_ = 0; + } + + // Return number of entries assuming no serialization went on. 
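+  // Illustrative setup sketch (the Entry type and sizes are hypothetical,
+  // not from the original source):
+  //
+  //   struct Entry {
+  //     typedef uint64_t Key;
+  //     uint64_t key;
+  //     Key GetKey() const { return key; }
+  //     void SetKey(Key to) { key = to; }
+  //   };
+  //   typedef util::ProbingHashTable<Entry, util::IdentityHash> Table;
+  //   std::size_t bytes = Table::Size(1000, 1.5);
+  //   util::scoped_malloc mem(util::MallocOrThrow(bytes));
+  //   Table table(mem.get(), bytes);  // Key() == 0 marks empty buckets.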
+ std::size_t SizeNoSerialization() const { + return entries_; + } + + // Return memory size expected by Double. + std::size_t DoubleTo() const { + return buckets_ * 2 * sizeof(Entry); + } + + // Inform the table that it has double the amount of memory. + // Pass clear_new = false if you are sure the new memory is initialized + // properly (to invalid_) i.e. by mremap. + void Double(void *new_base, bool clear_new = true) { + begin_ = static_cast(new_base); + MutableIterator old_end = begin_ + buckets_; + buckets_ *= 2; + end_ = begin_ + buckets_; + if (clear_new) { + Entry invalid; + invalid.SetKey(invalid_); + std::fill(old_end, end_, invalid); + } + std::vector rolled_over; + // Move roll-over entries to a buffer because they might not roll over anymore. This should be small. + for (MutableIterator i = begin_; i != old_end && !equal_(i->GetKey(), invalid_); ++i) { + rolled_over.push_back(*i); + i->SetKey(invalid_); + } + /* Re-insert everything. Entries might go backwards to take over a + * recently opened gap, stay, move to new territory, or wrap around. If + * an entry wraps around, it might go to a pointer greater than i (which + * can happen at the beginning) and it will be revisited to possibly fill + * in a gap created later. + */ + Entry temp; + for (MutableIterator i = begin_; i != old_end; ++i) { + if (!equal_(i->GetKey(), invalid_)) { + temp = *i; + i->SetKey(invalid_); + UncheckedInsert(temp); + } + } + // Put the roll-over entries back in. + for (typename std::vector::const_iterator i(rolled_over.begin()); i != rolled_over.end(); ++i) { + UncheckedInsert(*i); + } + } + + // Mostly for tests, check consistency of every entry. + void CheckConsistency() { + MutableIterator last; + for (last = end_ - 1; last >= begin_ && !equal_(last->GetKey(), invalid_); --last) {} + UTIL_THROW_IF(last == begin_, ProbingSizeException, "Completely full"); + MutableIterator i; + // Beginning can be wrap-arounds. + for (i = begin_; !equal_(i->GetKey(), invalid_); ++i) { + MutableIterator ideal = Ideal(*i); + UTIL_THROW_IF(ideal > i && ideal <= last, Exception, "Inconsistency at position " << (i - begin_) << " should be at " << (ideal - begin_)); + } + MutableIterator pre_gap = i; + for (; i != end_; ++i) { + if (equal_(i->GetKey(), invalid_)) { + pre_gap = i; + continue; + } + MutableIterator ideal = Ideal(*i); + UTIL_THROW_IF(ideal > i || ideal <= pre_gap, Exception, "Inconsistency at position " << (i - begin_) << " with ideal " << (ideal - begin_)); + } + } + + private: + friend class AutoProbing; + + template MutableIterator Ideal(const T &t) { + return begin_ + (hash_(t.GetKey()) % buckets_); + } + + template MutableIterator UncheckedInsert(const T &t) { + for (MutableIterator i(Ideal(t));;) { + if (equal_(i->GetKey(), invalid_)) { *i = t; return i; } + if (++i == end_) { i = begin_; } + } + } + + MutableIterator begin_; + std::size_t buckets_; + MutableIterator end_; + Key invalid_; + Hash hash_; + Equal equal_; + std::size_t entries_; +#ifdef DEBUG + bool initialized_; +#endif +}; + +// Resizable linear probing hash table. This owns the memory. 
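+// Illustrative usage sketch (Entry as in the comment above; not from the
+// original source):
+//
+//   util::AutoProbing<Entry, util::IdentityHash> table;
+//   Entry e; e.SetKey(42);
+//   table.Insert(e);  // grows via an internal realloc-and-Double as it fills
+//   util::AutoProbing<Entry, util::IdentityHash>::ConstIterator it;
+//   bool found = table.Find(42, it);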
+template > class AutoProbing { + private: + typedef ProbingHashTable Backend; + public: + static std::size_t MemUsage(std::size_t size, float multiplier = 1.5) { + return Backend::Size(size, multiplier); + } + + typedef EntryT Entry; + typedef typename Entry::Key Key; + typedef const Entry *ConstIterator; + typedef Entry *MutableIterator; + typedef HashT Hash; + typedef EqualT Equal; + + AutoProbing(std::size_t initial_size = 10, const Key &invalid = Key(), const Hash &hash_func = Hash(), const Equal &equal_func = Equal()) : + allocated_(Backend::Size(initial_size, 1.5)), mem_(util::MallocOrThrow(allocated_)), backend_(mem_.get(), allocated_, invalid, hash_func, equal_func) { + threshold_ = initial_size * 1.2; + Clear(); + } + + // Assumes that the key is unique. Multiple insertions won't cause a failure, just inconsistent lookup. + template MutableIterator Insert(const T &t) { + DoubleIfNeeded(); + return backend_.UncheckedInsert(t); + } + + template bool FindOrInsert(const T &t, MutableIterator &out) { + DoubleIfNeeded(); + return backend_.FindOrInsert(t, out); + } + + template bool UnsafeMutableFind(const Key key, MutableIterator &out) { + return backend_.UnsafeMutableFind(key, out); + } + + template MutableIterator UnsafeMutableMustFind(const Key key) { + return backend_.UnsafeMutableMustFind(key); + } + + template bool Find(const Key key, ConstIterator &out) const { + return backend_.Find(key, out); + } + + template ConstIterator MustFind(const Key key) const { + return backend_.MustFind(key); + } + + std::size_t Size() const { + return backend_.SizeNoSerialization(); + } + + void Clear() { + backend_.Clear(); + } + + private: + void DoubleIfNeeded() { + if (Size() < threshold_) + return; + mem_.call_realloc(backend_.DoubleTo()); + allocated_ = backend_.DoubleTo(); + backend_.Double(mem_.get()); + threshold_ *= 2; + } + + std::size_t allocated_; + util::scoped_malloc mem_; + Backend backend_; + std::size_t threshold_; +}; + +} // namespace util + +#endif // UTIL_PROBING_HASH_TABLE_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/proxy_iterator.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/proxy_iterator.hh new file mode 100644 index 0000000000000000000000000000000000000000..8aa697bf145ebb068cf9f67e5c5e948bc1268f5b --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/proxy_iterator.hh @@ -0,0 +1,101 @@ +#ifndef UTIL_PROXY_ITERATOR_H +#define UTIL_PROXY_ITERATOR_H + +#include +#include + +/* This is a RandomAccessIterator that uses a proxy to access the underlying + * data. Useful for packing data at bit offsets but still using STL + * algorithms. + * + * Normally I would use boost::iterator_facade but some people are too lazy to + * install boost and still want to use my language model. It's amazing how + * many operators an iterator has. + * + * The Proxy needs to provide: + * class InnerIterator; + * InnerIterator &Inner(); + * const InnerIterator &Inner() const; + * + * InnerIterator has to implement: + * operator==(InnerIterator) + * operator<(InnerIterator) + * operator+=(std::ptrdiff_t) + * operator-(InnerIterator) + * and of course whatever Proxy needs to dereference it. + * + * It's also a good idea to specialize std::swap for Proxy. + */ + +namespace util { +template class ProxyIterator { + private: + // Self. 
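+  // (For a concrete Proxy, see SizedProxy in util/sized_iterator.hh below,
+  // which presents fixed-width byte records to std::sort.)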
+ typedef ProxyIterator S; + typedef typename Proxy::InnerIterator InnerIterator; + + public: + typedef std::random_access_iterator_tag iterator_category; + typedef typename Proxy::value_type value_type; + typedef std::ptrdiff_t difference_type; + typedef Proxy reference; + typedef ProxyIterator * pointer; + + ProxyIterator() {} + + // For cast from non const to const. + template ProxyIterator(const ProxyIterator &in) : p_(*in) {} + explicit ProxyIterator(const Proxy &p) : p_(p) {} + +/* // p_'s swap does value swapping, but here we want iterator swapping + friend inline void swap(ProxyIterator &first, ProxyIterator &second) { + swap(first.I(), second.I()); + }*/ + + // p_'s operator= does value copying, but here we want iterator copying. + S &operator=(const S &other) { + I() = other.I(); + return *this; + } + + bool operator==(const S &other) const { return I() == other.I(); } + bool operator!=(const S &other) const { return !(*this == other); } + bool operator<(const S &other) const { return I() < other.I(); } + bool operator>(const S &other) const { return other < *this; } + bool operator<=(const S &other) const { return !(*this > other); } + bool operator>=(const S &other) const { return !(*this < other); } + + S &operator++() { return *this += 1; } + S operator++(int) { S ret(*this); ++*this; return ret; } + S &operator+=(std::ptrdiff_t amount) { I() += amount; return *this; } + S operator+(std::ptrdiff_t amount) const { S ret(*this); ret += amount; return ret; } + + S &operator--() { return *this -= 1; } + S operator--(int) { S ret(*this); --*this; return ret; } + S &operator-=(std::ptrdiff_t amount) { I() += (-amount); return *this; } + S operator-(std::ptrdiff_t amount) const { S ret(*this); ret -= amount; return ret; } + + std::ptrdiff_t operator-(const S &other) const { return I() - other.I(); } + + Proxy operator*() { return p_; } + const Proxy operator*() const { return p_; } + Proxy *operator->() { return &p_; } + const Proxy *operator->() const { return &p_; } + Proxy operator[](std::ptrdiff_t amount) const { return *(*this + amount); } + + const InnerIterator &Inner() { return p_.Inner(); } + + private: + InnerIterator &I() { return p_.Inner(); } + const InnerIterator &I() const { return p_.Inner(); } + + Proxy p_; +}; + +template ProxyIterator operator+(std::ptrdiff_t amount, const ProxyIterator &it) { + return it + amount; +} + +} // namespace util + +#endif // UTIL_PROXY_ITERATOR_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/scoped.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/scoped.hh new file mode 100644 index 0000000000000000000000000000000000000000..60c36c36a95a41e821ecbbbfc37aa77c5aa795fc --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/scoped.hh @@ -0,0 +1,109 @@ +#ifndef UTIL_SCOPED_H +#define UTIL_SCOPED_H +/* Other scoped objects in the style of scoped_ptr. */ + +#include "util/exception.hh" +#include +#include + +namespace util { + +class MallocException : public ErrnoException { + public: + explicit MallocException(std::size_t requested) throw(); + ~MallocException() throw(); +}; + +void *MallocOrThrow(std::size_t requested); +void *CallocOrThrow(std::size_t requested); + +/* Unfortunately, defining the operator* for void * makes the compiler complain. + * So scoped is specialized to void. This includes the functionality common to + * both, namely everything except reference. 
+ */
+template <class T, class Closer> class scoped_base {
+  public:
+    explicit scoped_base(T *p = NULL) : p_(p) {}
+
+    ~scoped_base() { Closer::Close(p_); }
+
+    void reset(T *p = NULL) {
+      // The temporary adopts the old pointer and closes it on destruction.
+      scoped_base other(p_);
+      p_ = p;
+    }
+
+    T *get() { return p_; }
+    const T *get() const { return p_; }
+
+    T *operator->() { return p_; }
+    const T *operator->() const { return p_; }
+
+    T *release() {
+      T *ret = p_;
+      p_ = NULL;
+      return ret;
+    }
+
+  protected:
+    T *p_;
+
+  private:
+    scoped_base(const scoped_base &);
+    scoped_base &operator=(const scoped_base &);
+};
+
+template <class T, class Closer> class scoped : public scoped_base<T, Closer> {
+  public:
+    explicit scoped(T *p = NULL) : scoped_base<T, Closer>(p) {}
+
+    T &operator*() { return *scoped_base<T, Closer>::p_; }
+    const T &operator*() const { return *scoped_base<T, Closer>::p_; }
+};
+
+template <class Closer> class scoped<void, Closer> : public scoped_base<void, Closer> {
+  public:
+    explicit scoped(void *p = NULL) : scoped_base<void, Closer>(p) {}
+};
+
+/* Closer for C functions like std::free and cmph cleanup functions */
+template <class T, void (*clean)(T*)> struct scoped_c_forward {
+  static void Close(T *p) { clean(p); }
+};
+// Call a C function to delete stuff
+template <class T, void (*clean)(T*)> class scoped_c : public scoped<T, scoped_c_forward<T, clean> > {
+  public:
+    explicit scoped_c(T *p = NULL) : scoped<T, scoped_c_forward<T, clean> >(p) {}
+};
+
+class scoped_malloc : public scoped_c<void, std::free> {
+  public:
+    explicit scoped_malloc(void *p = NULL) : scoped_c<void, std::free>(p) {}
+
+    void call_realloc(std::size_t to);
+};
+
+/* scoped_array using delete[] */
+struct scoped_delete_array_forward {
+  template <class T> static void Close(T *p) { delete [] p; }
+};
+// Hat tip to boost.
+template <class T> class scoped_array : public scoped<T, scoped_delete_array_forward> {
+  public:
+    explicit scoped_array(T *p = NULL) : scoped<T, scoped_delete_array_forward>(p) {}
+
+    T &operator[](std::size_t idx) { return scoped<T, scoped_delete_array_forward>::p_[idx]; }
+    const T &operator[](std::size_t idx) const { return scoped<T, scoped_delete_array_forward>::p_[idx]; }
+};
+
+/* scoped_ptr using delete. If only there were a template typedef.
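+   Example (added for illustration): scoped_ptr<Foo> p(new Foo); the object
+   is deleted when p leaves scope, or detached with p.release().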
*/ +struct scoped_delete_forward { + template static void Close(T *p) { delete p; } +}; +template class scoped_ptr : public scoped { + public: + explicit scoped_ptr(T *p = NULL) : scoped(p) {} +}; + +} // namespace util + +#endif // UTIL_SCOPED_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/sized_iterator.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/sized_iterator.hh new file mode 100644 index 0000000000000000000000000000000000000000..75f6886f77e29628942ecc9da519b763c7d6d2d2 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/sized_iterator.hh @@ -0,0 +1,120 @@ +#ifndef UTIL_SIZED_ITERATOR_H +#define UTIL_SIZED_ITERATOR_H + +#include "util/proxy_iterator.hh" + +#include +#include +#include + +#include +#include + +namespace util { + +class SizedInnerIterator { + public: + SizedInnerIterator() {} + + SizedInnerIterator(void *ptr, std::size_t size) : ptr_(static_cast(ptr)), size_(size) {} + + bool operator==(const SizedInnerIterator &other) const { + return ptr_ == other.ptr_; + } + bool operator<(const SizedInnerIterator &other) const { + return ptr_ < other.ptr_; + } + SizedInnerIterator &operator+=(std::ptrdiff_t amount) { + ptr_ += amount * size_; + return *this; + } + std::ptrdiff_t operator-(const SizedInnerIterator &other) const { + return (ptr_ - other.ptr_) / size_; + } + + const void *Data() const { return ptr_; } + void *Data() { return ptr_; } + std::size_t EntrySize() const { return size_; } + + friend void swap(SizedInnerIterator &first, SizedInnerIterator &second) { + std::swap(first.ptr_, second.ptr_); + std::swap(first.size_, second.size_); + } + + private: + uint8_t *ptr_; + std::size_t size_; +}; + +class SizedProxy { + public: + SizedProxy() {} + + SizedProxy(void *ptr, std::size_t size) : inner_(ptr, size) {} + + operator std::string() const { + return std::string(reinterpret_cast(inner_.Data()), inner_.EntrySize()); + } + + SizedProxy &operator=(const SizedProxy &from) { + memcpy(inner_.Data(), from.inner_.Data(), inner_.EntrySize()); + return *this; + } + + SizedProxy &operator=(const std::string &from) { + memcpy(inner_.Data(), from.data(), inner_.EntrySize()); + return *this; + } + + const void *Data() const { return inner_.Data(); } + void *Data() { return inner_.Data(); } + + friend void swap(SizedProxy first, SizedProxy second) { + std::swap_ranges( + static_cast(first.inner_.Data()), + static_cast(first.inner_.Data()) + first.inner_.EntrySize(), + static_cast(second.inner_.Data())); + } + + private: + friend class util::ProxyIterator; + + typedef std::string value_type; + + typedef SizedInnerIterator InnerIterator; + + InnerIterator &Inner() { return inner_; } + const InnerIterator &Inner() const { return inner_; } + InnerIterator inner_; +}; + +typedef ProxyIterator SizedIterator; + +inline SizedIterator SizedIt(void *ptr, std::size_t size) { return SizedIterator(SizedProxy(ptr, size)); } + +// Useful wrapper for a comparison function i.e. sort. 
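+// Illustrative sketch (not from the original source): sort n packed 8-byte
+// records in place, comparing on their leading byte.
+//
+//   struct FirstByteLess {
+//     bool operator()(const void *a, const void *b) const {
+//       return *static_cast<const uint8_t*>(a) < *static_cast<const uint8_t*>(b);
+//     }
+//   };
+//   std::sort(util::SizedIt(base, 8),
+//             util::SizedIt(static_cast<uint8_t*>(base) + n * 8, 8),
+//             util::SizedCompare<FirstByteLess>());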
+template class SizedCompare : public std::binary_function { + public: + explicit SizedCompare(const Delegate &delegate = Delegate()) : delegate_(delegate) {} + + bool operator()(const Proxy &first, const Proxy &second) const { + return delegate_(first.Data(), second.Data()); + } + bool operator()(const Proxy &first, const std::string &second) const { + return delegate_(first.Data(), second.data()); + } + bool operator()(const std::string &first, const Proxy &second) const { + return delegate_(first.data(), second.Data()); + } + bool operator()(const std::string &first, const std::string &second) const { + return delegate_(first.data(), second.data()); + } + + const Delegate &GetDelegate() const { return delegate_; } + + private: + const Delegate delegate_; +}; + +} // namespace util +#endif // UTIL_SIZED_ITERATOR_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/sorted_uniform.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/sorted_uniform.hh new file mode 100644 index 0000000000000000000000000000000000000000..a3f6d021dc99f364a7c8e9f176cf6decee2b955a --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/sorted_uniform.hh @@ -0,0 +1,106 @@ +#ifndef UTIL_SORTED_UNIFORM_H +#define UTIL_SORTED_UNIFORM_H + +#include +#include + +#include +#include + +namespace util { + +template class IdentityAccessor { + public: + typedef T Key; + T operator()(const T *in) const { return *in; } +}; + +struct Pivot64 { + static inline std::size_t Calc(uint64_t off, uint64_t range, std::size_t width) { + std::size_t ret = static_cast(static_cast(off) / static_cast(range) * static_cast(width)); + // Cap for floating point rounding + return (ret < width) ? ret : width - 1; + } +}; + +// Use when off * width is <2^64. This is guaranteed when each of them is actually a 32-bit value. +struct Pivot32 { + static inline std::size_t Calc(uint64_t off, uint64_t range, uint64_t width) { + return static_cast((off * width) / (range + 1)); + } +}; + +// Usage: PivotSelect::T +template struct PivotSelect; +template <> struct PivotSelect<8> { typedef Pivot64 T; }; +template <> struct PivotSelect<4> { typedef Pivot32 T; }; +template <> struct PivotSelect<2> { typedef Pivot32 T; }; + +/* Binary search. */ +template bool BinaryFind( + const Accessor &accessor, + Iterator begin, + Iterator end, + const typename Accessor::Key key, Iterator &out) { + while (end > begin) { + Iterator pivot(begin + (end - begin) / 2); + typename Accessor::Key mid(accessor(pivot)); + if (mid < key) { + begin = pivot + 1; + } else if (mid > key) { + end = pivot; + } else { + out = pivot; + return true; + } + } + return false; +} + +// Search the range [before_it + 1, after_it - 1] for key. +// Preconditions: +// before_v <= key <= after_v +// before_v <= all values in the range [before_it + 1, after_it - 1] <= after_v +// range is sorted. 
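+// The pivot below interpolates the key's offset into the remaining width,
+// giving roughly O(log log n) probes on uniformly distributed keys instead of
+// the O(log n) of plain binary search.
+//
+// Illustrative call (the template arguments, stripped by extraction above,
+// are the iterator, accessor, and pivot types):
+//
+//   const uint64_t *pos;
+//   bool hit = util::SortedUniformFind<const uint64_t*,
+//       util::IdentityAccessor<uint64_t>, util::Pivot64>(
+//       util::IdentityAccessor<uint64_t>(), array, array + size, key, pos);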
+template bool BoundedSortedUniformFind( + const Accessor &accessor, + Iterator before_it, typename Accessor::Key before_v, + Iterator after_it, typename Accessor::Key after_v, + const typename Accessor::Key key, Iterator &out) { + while (after_it - before_it > 1) { + Iterator pivot(before_it + (1 + Pivot::Calc(key - before_v, after_v - before_v, after_it - before_it - 1))); + typename Accessor::Key mid(accessor(pivot)); + if (mid < key) { + before_it = pivot; + before_v = mid; + } else if (mid > key) { + after_it = pivot; + after_v = mid; + } else { + out = pivot; + return true; + } + } + return false; +} + +template bool SortedUniformFind(const Accessor &accessor, Iterator begin, Iterator end, const typename Accessor::Key key, Iterator &out) { + if (begin == end) return false; + typename Accessor::Key below(accessor(begin)); + if (key <= below) { + if (key == below) { out = begin; return true; } + return false; + } + // Make the range [begin, end]. + --end; + typename Accessor::Key above(accessor(end)); + if (key >= above) { + if (key == above) { out = end; return true; } + return false; + } + return BoundedSortedUniformFind(accessor, begin, below, end, above, key, out); +} + +} // namespace util + +#endif // UTIL_SORTED_UNIFORM_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/block.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/block.hh new file mode 100644 index 0000000000000000000000000000000000000000..aa7e28bb10498b6162f14ba8c8f947af4f67dfcf --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/block.hh @@ -0,0 +1,92 @@ +#ifndef UTIL_STREAM_BLOCK_H +#define UTIL_STREAM_BLOCK_H + +#include +#include + +namespace util { +namespace stream { + +/** + * Encapsulates a block of memory. + */ +class Block { + public: + + /** + * Constructs an empty block. + */ + Block() : mem_(NULL), valid_size_(0) {} + + /** + * Constructs a block that encapsulates a segment of memory. + * + * @param[in] mem The segment of memory to encapsulate + * @param[in] size The size of the memory segment in bytes + */ + Block(void *mem, std::size_t size) : mem_(mem), valid_size_(size) {} + + /** + * Set the number of bytes in this block that should be interpreted as valid. + * + * @param[in] to Number of bytes + */ + void SetValidSize(std::size_t to) { valid_size_ = to; } + + /** + * Gets the number of bytes in this block that should be interpreted as valid. + * This is important because read might fill in less than Allocated at EOF. + */ + std::size_t ValidSize() const { return valid_size_; } + + /** Gets a void pointer to the memory underlying this block. */ + void *Get() { return mem_; } + + /** Gets a const void pointer to the memory underlying this block. */ + const void *Get() const { return mem_; } + + + /** + * Gets a const void pointer to the end of the valid section of memory + * encapsulated by this block. + */ + const void *ValidEnd() const { + return reinterpret_cast(mem_) + valid_size_; + } + + /** + * Returns true if this block encapsulates a valid (non-NULL) block of memory. + * + * This method is a user-defined implicit conversion function to boolean; + * among other things, this method enables bare instances of this class + * to be used as the condition of an if statement. + */ + operator bool() const { return mem_ != NULL; } + + /** + * Returns true if this block is empty. + * + * In other words, if Get()==NULL, this method will return true. 
+ */ + bool operator!() const { return mem_ == NULL; } + + private: + friend class Link; + + /** + * Points this block's memory at NULL. + * + * This class defines poison as a block whose memory pointer is NULL. + */ + void SetToPoison() { + mem_ = NULL; + } + + void *mem_; + std::size_t valid_size_; +}; + +} // namespace stream +} // namespace util + +#endif // UTIL_STREAM_BLOCK_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/chain.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/chain.hh new file mode 100644 index 0000000000000000000000000000000000000000..5086508607bdc02132f2db4ab244797fd0857a3f --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/chain.hh @@ -0,0 +1,339 @@ +#ifndef UTIL_STREAM_CHAIN_H +#define UTIL_STREAM_CHAIN_H + +#include "util/stream/block.hh" +#include "util/stream/config.hh" +#include "util/stream/multi_progress.hh" +#include "util/scoped.hh" + +#include +#include + +#include + +#include + +namespace util { +template class PCQueue; +namespace stream { + +class ChainConfigException : public Exception { + public: + ChainConfigException() throw(); + ~ChainConfigException() throw(); +}; + +class Chain; + +/** + * Encapsulates a @ref PCQueue "producer queue" and a @ref PCQueue "consumer queue" within a @ref Chain "chain". + * + * Specifies position in chain for Link constructor. + */ +class ChainPosition { + public: + const Chain &GetChain() const { return *chain_; } + private: + friend class Chain; + friend class Link; + ChainPosition(PCQueue &in, PCQueue &out, Chain *chain, MultiProgress &progress) + : in_(&in), out_(&out), chain_(chain), progress_(progress.Add()) {} + + PCQueue *in_, *out_; + + Chain *chain_; + + WorkerProgress progress_; +}; + + +/** + * Encapsulates a worker thread processing data at a given position in the chain. + * + * Each instance of this class owns one boost thread in which the worker is Run(). + */ +class Thread { + public: + + /** + * Constructs a new Thread in which the provided Worker is Run(). + * + * Position is usually ChainPosition but if there are multiple streams involved, this can be ChainPositions. + * + * After a call to this constructor, the provided worker will be running within a boost thread owned by the newly constructed Thread object. + */ + template Thread(const Position &position, const Worker &worker) + : thread_(boost::ref(*this), position, worker) {} + + ~Thread(); + + /** + * Launches the provided worker in this object's boost thread. + * + * This method is called automatically by this class's @ref Thread() "constructor". + */ + template void operator()(const Position &position, Worker &worker) { + try { + worker.Run(position); + } catch (const std::exception &e) { + UnhandledException(e); + } + } + + private: + void UnhandledException(const std::exception &e); + + boost::thread thread_; +}; + +/** + * This resets blocks to full valid size. Used to close the loop in Chain by recycling blocks. + */ +class Recycler { + public: + /** + * Resets the blocks in the chain such that the blocks' respective valid sizes match the chain's block size. + * + * @see Block::SetValidSize() + * @see Chain::BlockSize() + */ + void Run(const ChainPosition &position); +}; + +extern const Recycler kRecycle; +class WriteAndRecycle; +class PWriteAndRecycle; + +/** + * Represents a sequence of workers, through which @ref Block "blocks" can pass. 
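+ *
+ * A typical pipeline (illustrative sketch; the fds and worker are
+ * hypothetical):
+ * @code
+ * util::stream::Chain chain(util::stream::ChainConfig(sizeof(uint64_t), 2, 1 << 26));
+ * chain >> util::stream::Read(in_fd) >> worker >> util::stream::WriteAndRecycle(out_fd);
+ * @endcode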
+ */ +class Chain { + private: + template struct CheckForRun { + typedef Chain type; + }; + + public: + + /** + * Constructs a configured Chain. + * + * @param config Specifies how to configure the Chain. + */ + explicit Chain(const ChainConfig &config); + + /** + * Destructs a Chain. + * + * This method waits for the chain's threads to complete, + * and frees the memory held by this chain. + */ + ~Chain(); + + void ActivateProgress() { + assert(!Running()); + progress_.Activate(); + } + + void SetProgressTarget(uint64_t target) { + progress_.SetTarget(target); + } + + /** + * Gets the number of bytes in each record of a Block. + * + * @see ChainConfig::entry_size + */ + std::size_t EntrySize() const { + return config_.entry_size; + } + + /** + * Gets the inital @ref Block::ValidSize "valid size" for @ref Block "blocks" in this chain. + * + * @see Block::ValidSize + */ + std::size_t BlockSize() const { + return block_size_; + } + + /** Two ways to add to the chain: Add() or operator>>. */ + ChainPosition Add(); + + /** + * Adds a new worker to this chain, + * and runs that worker in a new Thread owned by this chain. + * + * The worker must have a Run method that accepts a position argument. + * + * @see Thread::operator()() + */ + template typename CheckForRun::type &operator>>(const Worker &worker) { + assert(!complete_called_); + threads_.push_back(new Thread(Add(), worker)); + return *this; + } + + /** + * Adds a new worker to this chain (but avoids copying that worker), + * and runs that worker in a new Thread owned by this chain. + * + * The worker must have a Run method that accepts a position argument. + * + * @see Thread::operator()() + */ + template typename CheckForRun::type &operator>>(const boost::reference_wrapper &worker) { + assert(!complete_called_); + threads_.push_back(new Thread(Add(), worker)); + return *this; + } + + // Note that Link and Stream also define operator>> outside this class. + + // To complete the loop, call CompleteLoop(), >> kRecycle, or the destructor. + void CompleteLoop() { + threads_.push_back(new Thread(Complete(), kRecycle)); + } + + /** + * Adds a Recycler worker to this chain, + * and runs that worker in a new Thread owned by this chain. + */ + Chain &operator>>(const Recycler &) { + CompleteLoop(); + return *this; + } + + /** + * Adds a WriteAndRecycle worker to this chain, + * and runs that worker in a new Thread owned by this chain. + */ + Chain &operator>>(const WriteAndRecycle &writer); + Chain &operator>>(const PWriteAndRecycle &writer); + + // Chains are reusable. Call Wait to wait for everything to finish and free memory. + void Wait(bool release_memory = true); + + // Waits for the current chain to complete (if any) then starts again. + void Start(); + + bool Running() const { return !queues_.empty(); } + + private: + ChainPosition Complete(); + + ChainConfig config_; + + std::size_t block_size_; + + scoped_malloc memory_; + + boost::ptr_vector > queues_; + + bool complete_called_; + + boost::ptr_vector threads_; + + MultiProgress progress_; +}; + +// Create the link in the worker thread using the position token. +/** + * Represents a C++ style iterator over @ref Block "blocks". + */ +class Link { + public: + + // Either default construct and Init or just construct all at once. + + /** + * Constructs an @ref Init "initialized" link. + * + * @see Init + */ + explicit Link(const ChainPosition &position); + + /** + * Constructs a link that must subsequently be @ref Init "initialized". 
+ * + * @see Init + */ + Link(); + + /** + * Initializes the link with the input @ref PCQueue "consumer queue" and output @ref PCQueue "producer queue" at a given @ref ChainPosition "position" in the @ref Chain "chain". + * + * @see Link() + */ + void Init(const ChainPosition &position); + + /** + * Destructs the link object. + * + * If necessary, this method will pass a poison block + * to this link's output @ref PCQueue "producer queue". + * + * @see Block::SetToPoison() + */ + ~Link(); + + /** + * Gets a reference to the @ref Block "block" at this link. + */ + Block &operator*() { return current_; } + + /** + * Gets a const reference to the @ref Block "block" at this link. + */ + const Block &operator*() const { return current_; } + + /** + * Gets a pointer to the @ref Block "block" at this link. + */ + Block *operator->() { return ¤t_; } + + /** + * Gets a const pointer to the @ref Block "block" at this link. + */ + const Block *operator->() const { return ¤t_; } + + /** + * Gets the link at the next @ref ChainPosition "position" in the @ref Chain "chain". + */ + Link &operator++(); + + /** + * Returns true if the @ref Block "block" at this link encapsulates a valid (non-NULL) block of memory. + * + * This method is a user-defined implicit conversion function to boolean; + * among other things, this method enables bare instances of this class + * to be used as the condition of an if statement. + */ + operator bool() const { return current_; } + + /** + * @ref Block::SetToPoison() "Poisons" the @ref Block "block" at this link, + * and passes this now-poisoned block to this link's output @ref PCQueue "producer queue". + * + * @see Block::SetToPoison() + */ + void Poison(); + + private: + Block current_; + PCQueue *in_, *out_; + + bool poisoned_; + + WorkerProgress progress_; +}; + +inline Chain &operator>>(Chain &chain, Link &link) { + link.Init(chain.Add()); + return chain; +} + +} // namespace stream +} // namespace util + +#endif // UTIL_STREAM_CHAIN_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/config.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/config.hh new file mode 100644 index 0000000000000000000000000000000000000000..6bad36bc5a701864b5a7546108184bdd9698ed7f --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/config.hh @@ -0,0 +1,63 @@ +#ifndef UTIL_STREAM_CONFIG_H +#define UTIL_STREAM_CONFIG_H + +#include +#include + +namespace util { namespace stream { + +/** + * Represents how a chain should be configured. + */ +struct ChainConfig { + + /** Constructs an configuration with underspecified (or default) parameters. */ + ChainConfig() {} + + /** + * Constructs a chain configuration object. + * + * @param [in] in_entry_size Number of bytes in each record. + * @param [in] in_block_count Number of blocks in the chain. + * @param [in] in_total_memory Total number of bytes available to the chain. + * This value will be divided amongst the blocks in the chain. + */ + ChainConfig(std::size_t in_entry_size, std::size_t in_block_count, std::size_t in_total_memory) + : entry_size(in_entry_size), block_count(in_block_count), total_memory(in_total_memory) {} + + /** + * Number of bytes in each record. + */ + std::size_t entry_size; + + /** + * Number of blocks in the chain. + */ + std::size_t block_count; + + /** + * Total number of bytes available to the chain. + * This value will be divided amongst the blocks in the chain. + * Chain's constructor will make this a multiple of entry_size. 
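+   * For example (an added illustration), ChainConfig(8, 2, 1 << 20) describes
+   * two blocks of 8-byte entries splitting one mebibyte of memory.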
+ */ + std::size_t total_memory; +}; + + +/** + * Represents how a sorter should be configured. + */ +struct SortConfig { + + /** Filename prefix where temporary files should be placed. */ + std::string temp_prefix; + + /** Size of each input/output buffer. */ + std::size_t buffer_size; + + /** Total memory to use when running alone. */ + std::size_t total_memory; +}; + +}} // namespaces +#endif // UTIL_STREAM_CONFIG_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/io.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/io.hh new file mode 100644 index 0000000000000000000000000000000000000000..8dae2cbff705d7956ff9de77d230d416876bdbe2 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/io.hh @@ -0,0 +1,87 @@ +#ifndef UTIL_STREAM_IO_H +#define UTIL_STREAM_IO_H + +#include "util/exception.hh" +#include "util/file.hh" + +namespace util { +namespace stream { + +class ChainPosition; + +class ReadSizeException : public util::Exception { + public: + ReadSizeException() throw(); + ~ReadSizeException() throw(); +}; + +class Read { + public: + explicit Read(int fd) : file_(fd) {} + void Run(const ChainPosition &position); + private: + int file_; +}; + +// Like read but uses pread so that the file can be accessed from multiple threads. +class PRead { + public: + explicit PRead(int fd, bool take_own = false) : file_(fd), own_(take_own) {} + void Run(const ChainPosition &position); + private: + int file_; + bool own_; +}; + +class Write { + public: + explicit Write(int fd) : file_(fd) {} + void Run(const ChainPosition &position); + private: + int file_; +}; + +// It's a common case that stuff is written and then recycled. So rather than +// spawn another thread to Recycle, this combines the two roles. +class WriteAndRecycle { + public: + explicit WriteAndRecycle(int fd) : file_(fd) {} + void Run(const ChainPosition &position); + private: + int file_; +}; + +class PWriteAndRecycle { + public: + explicit PWriteAndRecycle(int fd) : file_(fd) {} + void Run(const ChainPosition &position); + private: + int file_; +}; + + +// Reuse the same file over and over again to buffer output. +class FileBuffer { + public: + explicit FileBuffer(int fd) : file_(fd) {} + + PWriteAndRecycle Sink() const { + util::SeekOrThrow(file_.get(), 0); + return PWriteAndRecycle(file_.get()); + } + + PRead Source() const { + return PRead(file_.get()); + } + + uint64_t Size() const { + return SizeOrThrow(file_.get()); + } + + private: + scoped_fd file_; +}; + +} // namespace stream +} // namespace util +#endif // UTIL_STREAM_IO_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/line_input.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/line_input.hh new file mode 100644 index 0000000000000000000000000000000000000000..a870a6648494775d7c1169e17e2b0a375e984803 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/line_input.hh @@ -0,0 +1,22 @@ +#ifndef UTIL_STREAM_LINE_INPUT_H +#define UTIL_STREAM_LINE_INPUT_H +namespace util {namespace stream { + +class ChainPosition; + +/* Worker that reads input into blocks, ensuring that blocks contain whole + * lines. Assumes that the maximum size of a line is less than the block size + */ +class LineInput { + public: + // Takes ownership upon thread execution. 
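+  // Illustrative sketch (not from the original source): feed whole lines to a
+  // downstream worker.
+  //
+  //   util::stream::Chain chain(config);
+  //   chain >> util::stream::LineInput(fd) >> consumer >> util::stream::kRecycle;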
+ explicit LineInput(int fd); + + void Run(const ChainPosition &position); + + private: + int fd_; +}; + +}} // namespaces +#endif // UTIL_STREAM_LINE_INPUT_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/multi_progress.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/multi_progress.hh new file mode 100644 index 0000000000000000000000000000000000000000..82e698a59c32cdcd839293332781eaf0e2232d17 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/multi_progress.hh @@ -0,0 +1,90 @@ +/* Progress bar suitable for chains of workers */ +#ifndef UTIL_STREAM_MULTI_PROGRESS_H +#define UTIL_STREAM_MULTI_PROGRESS_H + +#include + +#include + +#include + +namespace util { namespace stream { + +class WorkerProgress; + +class MultiProgress { + public: + static const unsigned char kWidth = 100; + + MultiProgress(); + + ~MultiProgress(); + + // Turns on showing (requires SetTarget too). + void Activate(); + + void SetTarget(uint64_t complete); + + WorkerProgress Add(); + + void Finished(); + + private: + friend class WorkerProgress; + void Milestone(WorkerProgress &worker); + + bool active_; + + uint64_t complete_; + + boost::mutex mutex_; + + // \0 at the end. + char display_[kWidth + 1]; + + std::size_t character_handout_; + + MultiProgress(const MultiProgress &); + MultiProgress &operator=(const MultiProgress &); +}; + +class WorkerProgress { + public: + // Default contrutor must be initialized with operator= later. + WorkerProgress() : parent_(NULL) {} + + // Not threadsafe for the same worker by default. + WorkerProgress &operator++() { + if (++current_ >= next_) { + parent_->Milestone(*this); + } + return *this; + } + + WorkerProgress &operator+=(uint64_t amount) { + current_ += amount; + if (current_ >= next_) { + parent_->Milestone(*this); + } + return *this; + } + + private: + friend class MultiProgress; + WorkerProgress(uint64_t next, MultiProgress &parent, char character) + : current_(0), next_(next), parent_(&parent), stone_(0), character_(character) {} + + uint64_t current_, next_; + + MultiProgress *parent_; + + // Previous milestone reached. + unsigned char stone_; + + // Character to display in bar. + char character_; +}; + +}} // namespaces + +#endif // UTIL_STREAM_MULTI_PROGRESS_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/multi_stream.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/multi_stream.hh new file mode 100644 index 0000000000000000000000000000000000000000..0ee7fab6fbb9374c2ce919be7cc65965322e49fc --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/multi_stream.hh @@ -0,0 +1,127 @@ +#ifndef UTIL_STREAM_MULTI_STREAM_H +#define UTIL_STREAM_MULTI_STREAM_H + +#include "util/fixed_array.hh" +#include "util/scoped.hh" +#include "util/stream/chain.hh" +#include "util/stream/stream.hh" + +#include +#include + +#include +#include + +namespace util { namespace stream { + +class Chains; + +class ChainPositions : public util::FixedArray { + public: + ChainPositions() {} + + void Init(Chains &chains); + + explicit ChainPositions(Chains &chains) { + Init(chains); + } +}; + +class Chains : public util::FixedArray { + private: + template struct CheckForRun { + typedef Chains type; + }; + + public: + // Must call Init. 
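+  // Illustrative sketch (not from the original source): several parallel
+  // chains driven by one worker that takes ChainPositions.
+  //
+  //   util::stream::Chains chains(n);  // n = number of parallel streams
+  //   /* configure one Chain per stream */
+  //   chains >> multi_stream_worker >> util::stream::kRecycle;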
+ Chains() {} + + explicit Chains(std::size_t limit) : util::FixedArray(limit) {} + + template typename CheckForRun::type &operator>>(const Worker &worker) { + threads_.push_back(new util::stream::Thread(ChainPositions(*this), worker)); + return *this; + } + + template typename CheckForRun::type &operator>>(const boost::reference_wrapper &worker) { + threads_.push_back(new util::stream::Thread(ChainPositions(*this), worker)); + return *this; + } + + Chains &operator>>(const util::stream::Recycler &recycler) { + for (util::stream::Chain *i = begin(); i != end(); ++i) + *i >> recycler; + return *this; + } + + void Wait(bool release_memory = true) { + threads_.clear(); + for (util::stream::Chain *i = begin(); i != end(); ++i) { + i->Wait(release_memory); + } + } + + private: + boost::ptr_vector threads_; + + Chains(const Chains &); + void operator=(const Chains &); +}; + +inline void ChainPositions::Init(Chains &chains) { + util::FixedArray::Init(chains.size()); + for (util::stream::Chain *i = chains.begin(); i != chains.end(); ++i) { + // use "placement new" syntax to initalize ChainPosition in an already-allocated memory location + new (end()) util::stream::ChainPosition(i->Add()); Constructed(); + } +} + +inline Chains &operator>>(Chains &chains, ChainPositions &positions) { + positions.Init(chains); + return chains; +} + +template class GenericStreams : public util::FixedArray { + private: + typedef util::FixedArray P; + public: + GenericStreams() {} + + // This puts a dummy T at the beginning (useful to algorithms that need to reference something at the beginning). + void InitWithDummy(const ChainPositions &positions) { + P::Init(positions.size() + 1); + new (P::end()) T(); // use "placement new" syntax to initalize T in an already-allocated memory location + P::Constructed(); + for (const util::stream::ChainPosition *i = positions.begin(); i != positions.end(); ++i) { + P::push_back(*i); + } + } + + // Limit restricts to positions[0,limit) + void Init(const ChainPositions &positions, std::size_t limit) { + P::Init(limit); + for (const util::stream::ChainPosition *i = positions.begin(); i != positions.begin() + limit; ++i) { + P::push_back(*i); + } + } + void Init(const ChainPositions &positions) { + Init(positions, positions.size()); + } + + GenericStreams(const ChainPositions &positions) { + Init(positions); + } +}; + +template inline Chains &operator>>(Chains &chains, GenericStreams &streams) { + ChainPositions positions; + chains >> positions; + streams.Init(positions); + return chains; +} + +typedef GenericStreams Streams; + +}} // namespaces +#endif // UTIL_STREAM_MULTI_STREAM_H diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/sort.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/sort.hh new file mode 100644 index 0000000000000000000000000000000000000000..9082cfddeced4df952b1f0d6aaa2a4eba48ff8e5 --- /dev/null +++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/sort.hh @@ -0,0 +1,550 @@ +/* Usage: + * Sort sorter(temp, compare); + * Chain(config) >> Read(file) >> sorter.Unsorted(); + * Stream stream; + * Chain chain(config) >> sorter.Sorted(internal_config, lazy_config) >> stream; + * + * Note that sorter must outlive any threads that use Unsorted or Sorted. + * + * Combiners take the form: + * bool operator()(void *into, const void *option, const Compare &compare) const + * which returns true iff a combination happened. The sorting algorithm + * guarantees compare(into, option). 
But it does not guarantee + * compare(option, into). + * Currently, combining is only done in merge steps, not during on-the-fly + * sort. Use a hash table for that. + */ + +#ifndef UTIL_STREAM_SORT_H +#define UTIL_STREAM_SORT_H + +#include "util/stream/chain.hh" +#include "util/stream/config.hh" +#include "util/stream/io.hh" +#include "util/stream/stream.hh" +#include "util/stream/timer.hh" + +#include "util/file.hh" +#include "util/scoped.hh" +#include "util/sized_iterator.hh" + +#include +#include +#include +#include + +namespace util { +namespace stream { + +struct NeverCombine { + template bool operator()(const void *, const void *, const Compare &) const { + return false; + } +}; + +// Manage the offsets of sorted blocks in a file. +class Offsets { + public: + explicit Offsets(int fd) : log_(fd) { + Reset(); + } + + int File() const { return log_; } + + void Append(uint64_t length) { + if (!length) return; + ++block_count_; + if (length == cur_.length) { + ++cur_.run; + return; + } + WriteOrThrow(log_, &cur_, sizeof(Entry)); + cur_.length = length; + cur_.run = 1; + } + + void FinishedAppending() { + WriteOrThrow(log_, &cur_, sizeof(Entry)); + SeekOrThrow(log_, sizeof(Entry)); // Skip 0,0 at beginning. + cur_.run = 0; + if (block_count_) { + ReadOrThrow(log_, &cur_, sizeof(Entry)); + assert(cur_.length); + assert(cur_.run); + } + } + + uint64_t RemainingBlocks() const { return block_count_; } + + uint64_t TotalOffset() const { return output_sum_; } + + uint64_t PeekSize() const { + return cur_.length; + } + + uint64_t NextSize() { + assert(block_count_); + uint64_t ret = cur_.length; + output_sum_ += ret; + + --cur_.run; + --block_count_; + if (!cur_.run && block_count_) { + ReadOrThrow(log_, &cur_, sizeof(Entry)); + assert(cur_.length); + assert(cur_.run); + } + return ret; + } + + void Reset() { + SeekOrThrow(log_, 0); + ResizeOrThrow(log_, 0); + cur_.length = 0; + cur_.run = 0; + block_count_ = 0; + output_sum_ = 0; + } + + private: + int log_; + + struct Entry { + uint64_t length; + uint64_t run; + }; + Entry cur_; + + uint64_t block_count_; + + uint64_t output_sum_; +}; + +// A priority queue of entries backed by file buffers +template class MergeQueue { + public: + MergeQueue(int fd, std::size_t buffer_size, std::size_t entry_size, const Compare &compare) + : queue_(Greater(compare)), in_(fd), buffer_size_(buffer_size), entry_size_(entry_size) {} + + void Push(void *base, uint64_t offset, uint64_t amount) { + queue_.push(Entry(base, in_, offset, amount, buffer_size_)); + } + + const void *Top() const { + return queue_.top().Current(); + } + + void Pop() { + Entry top(queue_.top()); + queue_.pop(); + if (top.Increment(in_, buffer_size_, entry_size_)) + queue_.push(top); + } + + std::size_t Size() const { + return queue_.size(); + } + + bool Empty() const { + return queue_.empty(); + } + + private: + // Priority queue contains these entries. 
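+  // Each Entry below streams one sorted block from the backing file through a
+  // fixed-size buffer, refilling via ErsatzPRead as the merge drains it.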
+// A priority queue of entries backed by file buffers.
+template <class Compare> class MergeQueue {
+ public:
+ MergeQueue(int fd, std::size_t buffer_size, std::size_t entry_size, const Compare &compare)
+ : queue_(Greater(compare)), in_(fd), buffer_size_(buffer_size), entry_size_(entry_size) {}
+
+ void Push(void *base, uint64_t offset, uint64_t amount) {
+ queue_.push(Entry(base, in_, offset, amount, buffer_size_));
+ }
+
+ const void *Top() const {
+ return queue_.top().Current();
+ }
+
+ void Pop() {
+ Entry top(queue_.top());
+ queue_.pop();
+ if (top.Increment(in_, buffer_size_, entry_size_))
+ queue_.push(top);
+ }
+
+ std::size_t Size() const {
+ return queue_.size();
+ }
+
+ bool Empty() const {
+ return queue_.empty();
+ }
+
+ private:
+ // Priority queue contains these entries.
+ class Entry {
+ public:
+ Entry() {}
+
+ Entry(void *base, int fd, uint64_t offset, uint64_t amount, std::size_t buf_size) {
+ offset_ = offset;
+ remaining_ = amount;
+ buffer_end_ = static_cast<uint8_t*>(base) + buf_size;
+ Read(fd, buf_size);
+ }
+
+ bool Increment(int fd, std::size_t buf_size, std::size_t entry_size) {
+ current_ += entry_size;
+ if (current_ != buffer_end_) return true;
+ return Read(fd, buf_size);
+ }
+
+ const void *Current() const { return current_; }
+
+ private:
+ bool Read(int fd, std::size_t buf_size) {
+ current_ = buffer_end_ - buf_size;
+ std::size_t amount;
+ if (static_cast<uint64_t>(buf_size) < remaining_) {
+ amount = buf_size;
+ } else if (!remaining_) {
+ return false;
+ } else {
+ amount = remaining_;
+ buffer_end_ = current_ + remaining_;
+ }
+ ErsatzPRead(fd, current_, amount, offset_);
+ offset_ += amount;
+ assert(current_ <= buffer_end_);
+ remaining_ -= amount;
+ return true;
+ }
+
+ // Buffer
+ uint8_t *current_, *buffer_end_;
+ // File
+ uint64_t remaining_, offset_;
+ };
+
+ // Wrapper comparison function for queue entries.
+ class Greater : public std::binary_function<const Entry &, const Entry &, bool> {
+ public:
+ explicit Greater(const Compare &compare) : compare_(compare) {}
+
+ bool operator()(const Entry &first, const Entry &second) const {
+ return compare_(second.Current(), first.Current());
+ }
+
+ private:
+ const Compare compare_;
+ };
+
+ typedef std::priority_queue<Entry, std::deque<Entry>, Greater> Queue;
+ Queue queue_;
+
+ const int in_;
+ const std::size_t buffer_size_;
+ const std::size_t entry_size_;
+};
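+// Editorial aside: std::priority_queue is a max-heap, so Greater flips the
+// argument order passed to compare_, keeping the *smallest* record on top,
+// which is what a k-way merge consumes first. The same trick with plain ints:
+//
+//   std::priority_queue<int, std::vector<int>, std::greater<int> > q;
+//   q.push(3); q.push(1); q.push(2);
+//   assert(q.top() == 1); // smallest first, as the merge loop expects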
+/* A worker object that merges. If the number of pieces to merge exceeds the
+ * arity, it outputs multiple sorted blocks, recording them to out_offsets.
+ * However, users will only ever see a single sorted block of output because
+ * Sort ensures the arity is higher than the number of pieces before
+ * returning this.
+ */
+template <class Compare, class Combine> class MergingReader {
+ public:
+ MergingReader(int in, Offsets *in_offsets, Offsets *out_offsets, std::size_t buffer_size, std::size_t total_memory, const Compare &compare, const Combine &combine) :
+ compare_(compare), combine_(combine),
+ in_(in),
+ in_offsets_(in_offsets), out_offsets_(out_offsets),
+ buffer_size_(buffer_size), total_memory_(total_memory) {}
+
+ void Run(const ChainPosition &position) {
+ Run(position, false);
+ }
+
+ void Run(const ChainPosition &position, bool assert_one) {
+ // Special case: nothing to read.
+ if (!in_offsets_->RemainingBlocks()) {
+ Link l(position);
+ l.Poison();
+ return;
+ }
+ // If there's just one block, just read it through.
+ if (in_offsets_->RemainingBlocks() == 1) {
+ // Sequencing is important.
+ uint64_t offset = in_offsets_->TotalOffset();
+ uint64_t amount = in_offsets_->NextSize();
+ ReadSingle(offset, amount, position);
+ if (out_offsets_) out_offsets_->Append(amount);
+ return;
+ }
+
+ Stream str(position);
+ scoped_malloc buffer(MallocOrThrow(total_memory_));
+ uint8_t *const buffer_end = static_cast<uint8_t*>(buffer.get()) + total_memory_;
+
+ const std::size_t entry_size = position.GetChain().EntrySize();
+
+ while (in_offsets_->RemainingBlocks()) {
+ // Use bigger buffers if there's less remaining.
+ uint64_t per_buffer = static_cast<uint64_t>(std::max<std::size_t>(
+ buffer_size_,
+ static_cast<std::size_t>((static_cast<uint64_t>(total_memory_) / in_offsets_->RemainingBlocks()))));
+ per_buffer -= per_buffer % entry_size;
+ assert(per_buffer);
+
+ // Populate the queue.
+ MergeQueue<Compare> queue(in_, per_buffer, entry_size, compare_);
+ for (uint8_t *buf = static_cast<uint8_t*>(buffer.get());
+ in_offsets_->RemainingBlocks() && (buf + std::min(per_buffer, in_offsets_->PeekSize()) <= buffer_end);) {
+ uint64_t offset = in_offsets_->TotalOffset();
+ uint64_t size = in_offsets_->NextSize();
+ queue.Push(buf, offset, size);
+ buf += static_cast<std::size_t>(std::min(size, per_buffer));
+ }
+ // This shouldn't happen, but it's probably better to die than to loop indefinitely.
+ if (queue.Size() < 2 && in_offsets_->RemainingBlocks()) {
+ std::cerr << "Bug in sort implementation: not merging at least two stripes." << std::endl;
+ abort();
+ }
+ if (assert_one && in_offsets_->RemainingBlocks()) {
+ std::cerr << "Bug in sort implementation: should only be one merge group for lazy sort" << std::endl;
+ abort();
+ }
+
+ uint64_t written = 0;
+ // Merge, including combiner support.
+ memcpy(str.Get(), queue.Top(), entry_size);
+ for (queue.Pop(); !queue.Empty(); queue.Pop()) {
+ if (!combine_(str.Get(), queue.Top(), compare_)) {
+ ++written; ++str;
+ memcpy(str.Get(), queue.Top(), entry_size);
+ }
+ }
+ ++written; ++str;
+ if (out_offsets_)
+ out_offsets_->Append(written * entry_size);
+ }
+ str.Poison();
+ }
+
+ private:
+ void ReadSingle(uint64_t offset, const uint64_t size, const ChainPosition &position) {
+ // Special case: only one block to read.
+ const uint64_t end = offset + size;
+ const uint64_t block_size = position.GetChain().BlockSize();
+ Link l(position);
+ for (; offset + block_size < end; ++l, offset += block_size) {
+ ErsatzPRead(in_, l->Get(), block_size, offset);
+ l->SetValidSize(block_size);
+ }
+ ErsatzPRead(in_, l->Get(), end - offset, offset);
+ l->SetValidSize(end - offset);
+ (++l).Poison();
+ return;
+ }
+
+ Compare compare_;
+ Combine combine_;
+
+ int in_;
+
+ protected:
+ Offsets *in_offsets_;
+
+ private:
+ Offsets *out_offsets_;
+
+ std::size_t buffer_size_;
+ std::size_t total_memory_;
+};
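+// Editorial worked example for the buffer split in MergingReader::Run, with
+// made-up numbers: total_memory_ = 64 MiB, buffer_size_ = 1 MiB, and 5
+// remaining blocks give per_buffer = max(1 MiB, 64 MiB / 5) ~= 12.8 MiB,
+// rounded down to a multiple of entry_size so a read never splits a record.
+// As blocks are merged away, the same memory is divided among fewer inputs,
+// so each surviving input gets a larger buffer and the reader seeks less.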
+// The lazy step owns the remaining files. This keeps track of them.
+template <class Compare, class Combine> class OwningMergingReader : public MergingReader<Compare, Combine> {
+ private:
+ typedef MergingReader<Compare, Combine> P;
+ public:
+ OwningMergingReader(int data, const Offsets &offsets, std::size_t buffer, std::size_t lazy, const Compare &compare, const Combine &combine)
+ : P(data, NULL, NULL, buffer, lazy, compare, combine),
+ data_(data),
+ offsets_(offsets) {}
+
+ void Run(const ChainPosition &position) {
+ P::in_offsets_ = &offsets_;
+ scoped_fd data(data_);
+ scoped_fd offsets_file(offsets_.File());
+ P::Run(position, true);
+ }
+
+ private:
+ int data_;
+ Offsets offsets_;
+};
+
+// Don't use this directly. Worker that sorts blocks.
+template <class Compare> class BlockSorter {
+ public:
+ BlockSorter(Offsets &offsets, const Compare &compare) :
+ offsets_(&offsets), compare_(compare) {}
+
+ void Run(const ChainPosition &position) {
+ const std::size_t entry_size = position.GetChain().EntrySize();
+ for (Link link(position); link; ++link) {
+ // Record the size of each block in a separate file.
+ offsets_->Append(link->ValidSize());
+ void *end = static_cast<uint8_t*>(link->Get()) + link->ValidSize();
+#if defined(_WIN32) || defined(_WIN64)
+ std::stable_sort
+#else
+ std::sort
+#endif
+ (SizedIt(link->Get(), entry_size),
+ SizedIt(end, entry_size),
+ compare_);
+ }
+ offsets_->FinishedAppending();
+ }
+
+ private:
+ Offsets *offsets_;
+ SizedCompare<Compare> compare_;
+};
+
+class BadSortConfig : public Exception {
+ public:
+ BadSortConfig() throw() {}
+ ~BadSortConfig() throw() {}
+};
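+// Editorial sketch of the first pass these workers implement; it mirrors the
+// wiring in the Sort constructor below. Read and WriteAndRecycle come from
+// io.hh in this tree; the fds and chain_config are hypothetical. Each chain
+// block is sorted in RAM, its size logged to Offsets, and the sorted run
+// appended to a temporary file:
+//
+//   Chain unsorted(chain_config);
+//   unsorted >> Read(input_fd)
+//            >> BlockSorter<Compare>(offsets, compare)
+//            >> WriteAndRecycle(temp_fd);
+//   unsorted.Wait();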
+/** Sort */
+template <class Compare, class Combine = NeverCombine> class Sort {
+ public:
+ /** Constructs an object capable of sorting */
+ Sort(Chain &in, const SortConfig &config, const Compare &compare = Compare(), const Combine &combine = Combine())
+ : config_(config),
+ data_(MakeTemp(config.temp_prefix)),
+ offsets_file_(MakeTemp(config.temp_prefix)), offsets_(offsets_file_.get()),
+ compare_(compare), combine_(combine),
+ entry_size_(in.EntrySize()) {
+ UTIL_THROW_IF(!entry_size_, BadSortConfig, "Sorting entries of size 0");
+ // Make buffer_size a multiple of the entry_size.
+ config_.buffer_size -= config_.buffer_size % entry_size_;
+ UTIL_THROW_IF(!config_.buffer_size, BadSortConfig, "Sort buffer too small");
+ UTIL_THROW_IF(config_.total_memory < config_.buffer_size * 4, BadSortConfig, "Sorting memory " << config_.total_memory << " is too small for four buffers (two read and two write).");
+ in >> BlockSorter<Compare>(offsets_, compare_) >> WriteAndRecycle(data_.get());
+ }
+
+ uint64_t Size() const {
+ return SizeOrThrow(data_.get());
+ }
+
+ // Do merge sort, terminating when lazy merge could be done with the
+ // specified memory. Return the minimum memory necessary to do lazy merge.
+ std::size_t Merge(std::size_t lazy_memory) {
+ if (offsets_.RemainingBlocks() <= 1) return 0;
+ const uint64_t lazy_arity = std::max<uint64_t>(1, lazy_memory / config_.buffer_size);
+ uint64_t size = Size();
+ /* No overflow because
+ * offsets_.RemainingBlocks() * config_.buffer_size <= lazy_memory ||
+ * size < lazy_memory
+ */
+ if (offsets_.RemainingBlocks() <= lazy_arity || size <= static_cast<uint64_t>(lazy_memory))
+ return std::min<std::size_t>(size, offsets_.RemainingBlocks() * config_.buffer_size);
+
+ scoped_fd data2(MakeTemp(config_.temp_prefix));
+ int fd_in = data_.get(), fd_out = data2.get();
+ scoped_fd offsets2_file(MakeTemp(config_.temp_prefix));
+ Offsets offsets2(offsets2_file.get());
+ Offsets *offsets_in = &offsets_, *offsets_out = &offsets2;
+
+ // Double buffered writing.
+ ChainConfig chain_config;
+ chain_config.entry_size = entry_size_;
+ chain_config.block_count = 2;
+ chain_config.total_memory = config_.buffer_size * 2;
+ Chain chain(chain_config);
+
+ while (offsets_in->RemainingBlocks() > lazy_arity) {
+ if (size <= static_cast<uint64_t>(lazy_memory)) break;
+ std::size_t reading_memory = config_.total_memory - 2 * config_.buffer_size;
+ if (size < static_cast<uint64_t>(reading_memory)) {
+ reading_memory = static_cast<std::size_t>(size);
+ }
+ SeekOrThrow(fd_in, 0);
+ chain >>
+ MergingReader<Compare, Combine>(
+ fd_in,
+ offsets_in, offsets_out,
+ config_.buffer_size,
+ reading_memory,
+ compare_, combine_) >>
+ WriteAndRecycle(fd_out);
+ chain.Wait();
+ offsets_out->FinishedAppending();
+ ResizeOrThrow(fd_in, 0);
+ offsets_in->Reset();
+ std::swap(fd_in, fd_out);
+ std::swap(offsets_in, offsets_out);
+ size = SizeOrThrow(fd_in);
+ }
+
+ SeekOrThrow(fd_in, 0);
+ if (fd_in == data2.get()) {
+ data_.reset(data2.release());
+ offsets_file_.reset(offsets2_file.release());
+ offsets_ = offsets2;
+ }
+ if (offsets_.RemainingBlocks() <= 1) return 0;
+ // No overflow because the while loop exited.
+ return std::min<std::size_t>(size, offsets_.RemainingBlocks() * static_cast<uint64_t>(config_.buffer_size));
+ }
+
+ // Output to chain, using at most this amount of memory for lazy merge
+ // sort.
+ void Output(Chain &out, std::size_t lazy_memory) {
+ Merge(lazy_memory);
+ out.SetProgressTarget(Size());
+ out >> OwningMergingReader<Compare, Combine>(data_.get(), offsets_, config_.buffer_size, lazy_memory, compare_, combine_);
+ data_.release();
+ offsets_file_.release();
+ }
+
+ /* If a pipeline step is reading sorted input and writing to a different
+ * sort order, then there's a trade-off between using RAM to read lazily
+ * (avoiding copying the file) and using RAM to increase block size and,
+ * therefore, decrease the number of merge sort passes in the next
+ * iteration.
+ *
+ * Merge sort takes log_{arity}(pieces) passes. Thus, each time the chain
+ * block size is multiplied by arity, the number of output passes decreases
+ * by one. Up to a constant, then, log_{arity}(chain) is the number of
+ * passes saved. Chain simply divides the memory evenly over all blocks.
+ *
+ * Lazy sort saves this many passes (up to a constant):
+ * log_{arity}((memory-lazy)/block_count) + 1
+ * Non-lazy sort saves this many passes (up to the same constant):
+ * log_{arity}(memory/block_count)
+ * Add log_{arity}(block_count) to both:
+ * log_{arity}(memory-lazy) + 1 versus log_{arity}(memory)
+ * Take arity to the power of both sides (arity > 1):
+ * (memory - lazy)*arity versus memory
+ * Solve for lazy:
+ * lazy = memory * (arity - 1) / arity
+ */
+ std::size_t DefaultLazy() {
+ float arity = static_cast<float>(config_.total_memory / config_.buffer_size);
+ return static_cast<std::size_t>(static_cast<float>(config_.total_memory) * (arity - 1.0) / arity);
+ }
+
+ // Same as Output with the default lazy memory setting.
+ void Output(Chain &out) {
+ Output(out, DefaultLazy());
+ }
+
+ // Completely merge sort and transfer ownership to the caller.
+ int StealCompleted() {
+ // Merge all the way.
+ Merge(0);
+ SeekOrThrow(data_.get(), 0);
+ offsets_file_.reset();
+ return data_.release();
+ }
+
+ private:
+ SortConfig config_;
+
+ scoped_fd data_;
+
+ scoped_fd offsets_file_;
+ Offsets offsets_;
+
+ const Compare compare_;
+ const Combine combine_;
+ const std::size_t entry_size_;
+};
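+// Editorial worked example of DefaultLazy above, with made-up numbers:
+// total_memory = 1024 MiB and buffer_size = 64 MiB give arity = 16, so
+// lazy = 1024 MiB * (16 - 1) / 16 = 960 MiB is reserved for the lazy merge
+// and the remaining 64 MiB stays with the chain, equalizing the number of
+// merge passes saved on each side of the trade-off described above.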
+// Returns the number of bytes to be read on demand.
+template <class Compare, class Combine> uint64_t BlockingSort(Chain &chain, const SortConfig &config, const Compare &compare = Compare(), const Combine &combine = NeverCombine()) {
+ Sort<Compare, Combine> sorter(chain, config, compare, combine);
+ chain.Wait(true);
+ uint64_t size = sorter.Size();
+ sorter.Output(chain);
+ return size;
+}
+
+} // namespace stream
+} // namespace util
+
+#endif // UTIL_STREAM_SORT_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/stream.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/stream.hh
new file mode 100644
index 0000000000000000000000000000000000000000..7ea1c9f700f36a1f3c15d196a498d6f6a20cc61e
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/stream.hh
@@ -0,0 +1,77 @@
+#ifndef UTIL_STREAM_STREAM_H
+#define UTIL_STREAM_STREAM_H
+
+#include "util/stream/chain.hh"
+
+#include <boost/noncopyable.hpp>
+
+#include <assert.h>
+#include <stdint.h>
+
+namespace util {
+namespace stream {
+
+class Stream : boost::noncopyable {
+ public:
+ Stream() : current_(NULL), end_(NULL) {}
+
+ void Init(const ChainPosition &position) {
+ entry_size_ = position.GetChain().EntrySize();
+ block_size_ = position.GetChain().BlockSize();
+ block_it_.Init(position);
+ StartBlock();
+ }
+
+ explicit Stream(const ChainPosition &position) {
+ Init(position);
+ }
+
+ operator bool() const { return current_ != NULL; }
+ bool operator!() const { return current_ == NULL; }
+
+ const void *Get() const { return current_; }
+ void *Get() { return current_; }
+
+ void Poison() {
+ block_it_->SetValidSize(current_ - static_cast<uint8_t*>(block_it_->Get()));
+ ++block_it_;
+ block_it_.Poison();
+ }
+
+ Stream &operator++() {
+ assert(*this);
+ assert(current_ < end_);
+ current_ += entry_size_;
+ if (current_ == end_) {
+ ++block_it_;
+ StartBlock();
+ }
+ return *this;
+ }
+
+ private:
+ void StartBlock() {
+ for (; block_it_ && !block_it_->ValidSize(); ++block_it_) {}
+ current_ = static_cast<uint8_t*>(block_it_->Get());
+ end_ = current_ + block_it_->ValidSize();
+ }
+
+ // The following are pointers to raw memory:
+ // current_ is the current record;
+ // end_ is the end of the block (so we know when to move to the next block).
+ uint8_t *current_, *end_;
+
+ std::size_t entry_size_;
+ std::size_t block_size_;
+
+ Link block_it_;
+};
+
+inline Chain &operator>>(Chain &chain, Stream &stream) {
+ stream.Init(chain.Add());
+ return chain;
+}
+
+} // namespace stream
+} // namespace util
+#endif // UTIL_STREAM_STREAM_H
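Stream is the single-threaded view of one chain. A minimal sketch of the canonical read loop, assuming Chain, the ChainConfig(entry_size, block_count, total_memory) constructor, and Read behave as declared elsewhere in this tree; input_fd and the uint64_t record type are hypothetical:

    util::stream::ChainConfig config(sizeof(uint64_t), 2, 1 << 20);
    util::stream::Chain chain(config);
    chain >> util::stream::Read(input_fd); // producer thread fills blocks

    for (util::stream::Stream s(chain.Add()); s; ++s) {
      const uint64_t value = *static_cast<const uint64_t*>(s.Get());
      // ... consume value; operator++ advances by EntrySize() ...
    }
    chain.Wait(); // the loop ends when the producer poisons the chain

The Stream itself never copies records: Get() points directly into the chain's block memory, and StartBlock/operator++ simply walk that memory block by block.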
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/timer.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/timer.hh
new file mode 100644
index 0000000000000000000000000000000000000000..06488a17e8784831dac676d64c11521aeb5be8e9
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/stream/timer.hh
@@ -0,0 +1,16 @@
+#ifndef UTIL_STREAM_TIMER_H
+#define UTIL_STREAM_TIMER_H
+
+// Sorry Jon, this was adding library dependencies in Moses and people complained.
+
+/*#include <boost/version.hpp>
+
+#if BOOST_VERSION >= 104800
+#include <boost/timer/timer.hpp>
+#define UTIL_TIMER(str) boost::timer::auto_cpu_timer timer(std::cerr, 1, (str))
+#else
+//#warning Using Boost older than 1.48. Timing information will not be available.*/
+#define UTIL_TIMER(str)
+//#endif
+
+#endif // UTIL_STREAM_TIMER_H
diff --git a/cc-multilingual-main/cc_net/third_party/kenlm/include/util/usage.hh b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/usage.hh
new file mode 100644
index 0000000000000000000000000000000000000000..e578b0a65ef4e4f0d6070c23eefcad9aa0b13c18
--- /dev/null
+++ b/cc-multilingual-main/cc_net/third_party/kenlm/include/util/usage.hh
@@ -0,0 +1,21 @@
+#ifndef UTIL_USAGE_H
+#define UTIL_USAGE_H
+#include <cstddef>
+#include <iosfwd>
+#include <string>
+
+#include <stdint.h>
+
+namespace util {
+// Time in seconds since the process started. Zero on unsupported platforms.
+double WallTime();
+
+void PrintUsage(std::ostream &to);
+
+// Determine how much physical memory there is. Return 0 on failure.
+uint64_t GuessPhysicalMemory();
+
+// Parse a size like unix sort. Sadly, this means the default multiplier is K.
+uint64_t ParseSize(const std::string &arg);
+} // namespace util
+#endif // UTIL_USAGE_H
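ParseSize is easiest to pin down by example. A hedged sketch of the expected behavior: only the default-K rule is stated in the header itself, and the exact suffix set and binary (1024-based) scaling are assumptions here, borrowed from the unix sort convention the comment names:

    #include "util/usage.hh"
    #include <cassert>

    int main() {
      // A bare number is assumed to default to the K multiplier, per the comment.
      assert(util::ParseSize("500") == 500ULL << 10);
      // Explicit suffixes are assumed to scale like unix sort's -S option.
      assert(util::ParseSize("16M") == 16ULL << 20);
      assert(util::ParseSize("2G") == 2ULL << 30);
      return 0;
    }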