applied-ai-018 committed commit 8d593ed (verified) · Parent(s): d7c1d59

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.

Files changed (50)
  1. cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/mac.yml +30 -0
  2. cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/ubuntu.yml +27 -0
  3. cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/windows.yml +25 -0
  4. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/bhiksha.hh +123 -0
  5. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/binary_format.hh +106 -0
  6. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/blank.hh +43 -0
  7. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/adjust_counts.hh +72 -0
  8. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/discount.hh +26 -0
  9. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/hash_gamma.hh +19 -0
  10. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/header_info.hh +24 -0
  11. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/initial_probabilities.hh +42 -0
  12. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/interpolate.hh +34 -0
  13. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/ngram_stream.hh +58 -0
  14. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/output.hh +89 -0
  15. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/pipeline.hh +74 -0
  16. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/sort.hh +244 -0
  17. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/config.hh +124 -0
  18. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/enumerate_vocab.hh +28 -0
  19. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/facade.hh +73 -0
  20. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/arpa_io.hh +114 -0
  21. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/count_io.hh +89 -0
  22. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/format.hh +250 -0
  23. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/phrase.hh +168 -0
  24. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/thread.hh +167 -0
  25. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/vocab.hh +133 -0
  26. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/wrapper.hh +56 -0
  27. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/interpolate/arpa_to_stream.hh +38 -0
  28. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/left.hh +216 -0
  29. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/lm_exception.hh +50 -0
  30. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/max_order.hh +13 -0
  31. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model.hh +156 -0
  32. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model_type.hh +23 -0
  33. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/neural/wordvecs.hh +38 -0
  34. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/ngram_query.hh +110 -0
  35. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/partial.hh +167 -0
  36. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/quantize.hh +233 -0
  37. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/read_arpa.hh +95 -0
  38. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/return.hh +42 -0
  39. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_hashed.hh +192 -0
  40. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_trie.hh +130 -0
  41. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/sizes.hh +17 -0
  42. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/state.hh +125 -0
  43. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie.hh +146 -0
  44. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie_sort.hh +114 -0
  45. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value.hh +157 -0
  46. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value_build.hh +97 -0
  47. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/virtual_interface.hh +160 -0
  48. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/vocab.hh +249 -0
  49. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/weights.hh +22 -0
  50. cc-multilingual-main/cc_net/third_party/kenlm/include/lm/word_index.hh +14 -0
cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/mac.yml ADDED
@@ -0,0 +1,30 @@
+ name: Mac
+
+ on:
+   push:
+     branches: master
+   pull_request:
+     branches: master
+
+ jobs:
+   build:
+     runs-on: macOS-latest
+
+     steps:
+     - uses: actions/checkout@v2
+     - name: Install Boost
+       run: |
+         brew install boost
+         brew install libomp
+         brew install eigen
+     - name: cmake
+       run: |
+         cmake -E make_directory build
+         cd build
+         cmake ..
+     - name: Compile
+       working-directory: build
+       run: cmake --build . -j2
+     - name: Test
+       working-directory: build
+       run: ctest -j2
cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/ubuntu.yml ADDED
@@ -0,0 +1,27 @@
+ name: Ubuntu
+
+ on:
+   push:
+     branches: [master]
+   pull_request:
+     branches: [master]
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+
+     steps:
+     - uses: actions/checkout@v2
+     - name: dependencies
+       run: sudo apt-get install -y build-essential libboost-all-dev cmake zlib1g-dev libbz2-dev liblzma-dev
+     - name: cmake
+       run: |
+         cmake -E make_directory build
+         cd build
+         cmake -DCOMPILE_TESTS=ON ..
+     - name: Compile
+       working-directory: build
+       run: cmake --build . -j2
+     - name: Test
+       working-directory: build
+       run: ctest -j2
cc-multilingual-main/cc_net/third_party/kenlm/.github/workflows/windows.yml ADDED
@@ -0,0 +1,25 @@
+ name: Windows
+
+ on:
+   push:
+     branches: [master]
+   pull_request:
+     branches: [master]
+
+ jobs:
+   build:
+     runs-on: windows-latest
+
+     steps:
+     - uses: actions/checkout@v2
+     - name: cmake
+       run: |
+         cmake -E make_directory build
+         cd build
+         cmake -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" ..
+     - name: Compile
+       working-directory: build
+       run: cmake --build . -j2
+     - name: Test
+       working-directory: build
+       run: ctest -j2
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/bhiksha.hh ADDED
@@ -0,0 +1,123 @@
+ /* Simple implementation of
+  * @inproceedings{bhikshacompression,
+  * author={Bhiksha Raj and Ed Whittaker},
+  * year={2003},
+  * title={Lossless Compression of Language Model Structure and Word Identifiers},
+  * booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing},
+  * pages={388--391},
+  * }
+  *
+  * Currently only used for next pointers.
+  */
+
+ #ifndef LM_BHIKSHA_H
+ #define LM_BHIKSHA_H
+
+ #include "lm/model_type.hh"
+ #include "lm/trie.hh"
+ #include "util/bit_packing.hh"
+ #include "util/sorted_uniform.hh"
+
+ #include <algorithm>
+
+ #include <stdint.h>
+ #include <assert.h>
+
+ namespace lm {
+ namespace ngram {
+ struct Config;
+ class BinaryFormat;
+
+ namespace trie {
+
+ class DontBhiksha {
+   public:
+     static const ModelType kModelTypeAdd = static_cast<ModelType>(0);
+
+     static void UpdateConfigFromBinary(const BinaryFormat &, uint64_t, Config &/*config*/) {}
+
+     static uint64_t Size(uint64_t /*max_offset*/, uint64_t /*max_next*/, const Config &/*config*/) { return 0; }
+
+     static uint8_t InlineBits(uint64_t /*max_offset*/, uint64_t max_next, const Config &/*config*/) {
+       return util::RequiredBits(max_next);
+     }
+
+     DontBhiksha(const void *base, uint64_t max_offset, uint64_t max_next, const Config &config);
+
+     void ReadNext(const void *base, uint64_t bit_offset, uint64_t /*index*/, uint8_t total_bits, NodeRange &out) const {
+       out.begin = util::ReadInt57(base, bit_offset, next_.bits, next_.mask);
+       out.end = util::ReadInt57(base, bit_offset + total_bits, next_.bits, next_.mask);
+       //assert(out.end >= out.begin);
+     }
+
+     void WriteNext(void *base, uint64_t bit_offset, uint64_t /*index*/, uint64_t value) {
+       util::WriteInt57(base, bit_offset, next_.bits, value);
+     }
+
+     void FinishedLoading(const Config &/*config*/) {}
+
+     uint8_t InlineBits() const { return next_.bits; }
+
+   private:
+     util::BitsMask next_;
+ };
+
+ class ArrayBhiksha {
+   public:
+     static const ModelType kModelTypeAdd = kArrayAdd;
+
+     static void UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config);
+
+     static uint64_t Size(uint64_t max_offset, uint64_t max_next, const Config &config);
+
+     static uint8_t InlineBits(uint64_t max_offset, uint64_t max_next, const Config &config);
+
+     ArrayBhiksha(void *base, uint64_t max_offset, uint64_t max_value, const Config &config);
+
+     void ReadNext(const void *base, uint64_t bit_offset, uint64_t index, uint8_t total_bits, NodeRange &out) const {
+       // Some assertions are commented out because they are expensive.
+       // assert(*offset_begin_ == 0);
+       // std::upper_bound returns the first element that is greater. Want the
+       // last element that is <= to the index.
+       const uint64_t *begin_it = std::upper_bound(offset_begin_, offset_end_, index) - 1;
+       // Since *offset_begin_ == 0, the position should be in range.
+       // assert(begin_it >= offset_begin_);
+       const uint64_t *end_it;
+       for (end_it = begin_it + 1; (end_it < offset_end_) && (*end_it <= index + 1); ++end_it) {}
+       // assert(end_it == std::upper_bound(offset_begin_, offset_end_, index + 1));
+       --end_it;
+       // assert(end_it >= begin_it);
+       out.begin = ((begin_it - offset_begin_) << next_inline_.bits) |
+           util::ReadInt57(base, bit_offset, next_inline_.bits, next_inline_.mask);
+       out.end = ((end_it - offset_begin_) << next_inline_.bits) |
+           util::ReadInt57(base, bit_offset + total_bits, next_inline_.bits, next_inline_.mask);
+       // If this fails, consider rebuilding your model using KenLM after 1e333d786b748555e8f368d2bbba29a016c98052
+       assert(out.end >= out.begin);
+     }
+
+     void WriteNext(void *base, uint64_t bit_offset, uint64_t index, uint64_t value) {
+       uint64_t encode = value >> next_inline_.bits;
+       for (; write_to_ <= offset_begin_ + encode; ++write_to_) *write_to_ = index;
+       util::WriteInt57(base, bit_offset, next_inline_.bits, value & next_inline_.mask);
+     }
+
+     void FinishedLoading(const Config &config);
+
+     uint8_t InlineBits() const { return next_inline_.bits; }
+
+   private:
+     const util::BitsMask next_inline_;
+
+     const uint64_t *const offset_begin_;
+     const uint64_t *const offset_end_;
+
+     uint64_t *write_to_;
+
+     void *original_base_;
+ };
+
+ } // namespace trie
+ } // namespace ngram
+ } // namespace lm
+
+ #endif // LM_BHIKSHA_H
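The ArrayBhiksha encoding is easier to see with concrete numbers. The following standalone sketch (not part of the commit) illustrates the underlying idea under simplified assumptions, using plain vectors instead of the class's bit-packed ReadInt57/WriteInt57 storage: because next pointers are monotone non-decreasing, each entry keeps only its low bits inline, and the high part is recovered from a small sorted offset array.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy illustration of the Bhiksha trick, not the class's real memory layout.
int main() {
  const unsigned bits = 4;                       // low bits kept inline per entry
  const uint64_t values[] = {3, 9, 17, 18, 40};  // monotone next pointers
  const uint64_t n = sizeof(values) / sizeof(values[0]);

  // Encode (mirrors WriteNext): offsets[h] = first index whose high part is >= h.
  std::vector<uint64_t> low(n);
  std::vector<uint64_t> offsets(1, 0);
  for (uint64_t i = 0; i < n; ++i) {
    low[i] = values[i] & ((1ULL << bits) - 1);
    while (offsets.size() <= (values[i] >> bits)) offsets.push_back(i);
  }

  // Decode (mirrors ReadNext): the last offset entry <= i recovers the high part.
  for (uint64_t i = 0; i < n; ++i) {
    const uint64_t *it =
        std::upper_bound(offsets.data(), offsets.data() + offsets.size(), i) - 1;
    uint64_t decoded = (static_cast<uint64_t>(it - offsets.data()) << bits) | low[i];
    std::cout << decoded << (decoded == values[i] ? " ok" : " MISMATCH") << '\n';
  }
  return 0;
}
```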
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/binary_format.hh ADDED
@@ -0,0 +1,106 @@
+ #ifndef LM_BINARY_FORMAT_H
+ #define LM_BINARY_FORMAT_H
+
+ #include "lm/config.hh"
+ #include "lm/model_type.hh"
+ #include "lm/read_arpa.hh"
+
+ #include "util/file_piece.hh"
+ #include "util/mmap.hh"
+ #include "util/scoped.hh"
+
+ #include <cstddef>
+ #include <vector>
+
+ #include <stdint.h>
+
+ namespace lm {
+ namespace ngram {
+
+ extern const char *kModelNames[6];
+
+ /* Inspect a file to determine if it is a binary lm. If not, return false.
+  * If so, return true and set recognized to the type. This is the only API in
+  * this header designed for use by decoder authors.
+  */
+ bool RecognizeBinary(const char *file, ModelType &recognized);
+
+ struct FixedWidthParameters {
+   unsigned char order;
+   float probing_multiplier;
+   // What type of model is this?
+   ModelType model_type;
+   // Does the end of the file have the actual strings in the vocabulary?
+   bool has_vocabulary;
+   unsigned int search_version;
+ };
+
+ // This is a macro instead of an inline function so constants can be assigned using it.
+ #define ALIGN8(a) ((std::ptrdiff_t(((a)-1)/8)+1)*8)
+
+ // Parameters stored in the header of a binary file.
+ struct Parameters {
+   FixedWidthParameters fixed;
+   std::vector<uint64_t> counts;
+ };
+
+ class BinaryFormat {
+   public:
+     explicit BinaryFormat(const Config &config);
+
+     // Reading a binary file:
+     // Takes ownership of fd
+     void InitializeBinary(int fd, ModelType model_type, unsigned int search_version, Parameters &params);
+     // Used to read parts of the file to update the config object before figuring out full size.
+     void ReadForConfig(void *to, std::size_t amount, uint64_t offset_excluding_header) const;
+     // Actually load the binary file and return a pointer to the beginning of the search area.
+     void *LoadBinary(std::size_t size);
+
+     uint64_t VocabStringReadingOffset() const {
+       assert(vocab_string_offset_ != kInvalidOffset);
+       return vocab_string_offset_;
+     }
+
+     // Writing a binary file or initializing in RAM from ARPA:
+     // Size for vocabulary.
+     void *SetupJustVocab(std::size_t memory_size, uint8_t order);
+     // Warning: can change the vocabulary base pointer.
+     void *GrowForSearch(std::size_t memory_size, std::size_t vocab_pad, void *&vocab_base);
+     // Warning: can change vocabulary and search base addresses.
+     void WriteVocabWords(const std::string &buffer, void *&vocab_base, void *&search_base);
+     // Write the header at the beginning of the file.
+     void FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts);
+
+   private:
+     void MapFile(void *&vocab_base, void *&search_base);
+
+     // Copied from configuration.
+     const Config::WriteMethod write_method_;
+     const char *write_mmap_;
+     util::LoadMethod load_method_;
+
+     // File behind memory, if any.
+     util::scoped_fd file_;
+
+     // If there is a file involved, a single mapping.
+     util::scoped_memory mapping_;
+
+     // If the data is only in memory, separately allocate each because the trie
+     // knows vocab's size before it knows search's size (because SRILM might
+     // have pruned).
+     util::scoped_memory memory_vocab_, memory_search_;
+
+     // Memory ranges. Note that these may not be contiguous and may not all
+     // exist.
+     std::size_t header_size_, vocab_size_, vocab_pad_;
+     // aka end of search.
+     uint64_t vocab_string_offset_;
+
+     static const uint64_t kInvalidOffset = (uint64_t)-1;
+ };
+
+ bool IsBinaryFormat(int fd);
+
+ } // namespace ngram
+ } // namespace lm
+ #endif // LM_BINARY_FORMAT_H
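RecognizeBinary is the one call in this header intended for decoder authors. A minimal sketch of how it might be used (not part of the commit; it assumes only the declaration above):

```cpp
#include <iostream>

#include "lm/binary_format.hh"

// Probe whether a file is a KenLM binary and, if so, which ModelType it holds.
int main(int argc, char *argv[]) {
  if (argc != 2) {
    std::cerr << "Usage: " << argv[0] << " model_file\n";
    return 1;
  }
  lm::ngram::ModelType type;
  if (lm::ngram::RecognizeBinary(argv[1], type)) {
    std::cout << "binary model, type " << static_cast<int>(type) << '\n';
  } else {
    std::cout << "not a binary model (likely an ARPA text file)\n";
  }
  return 0;
}
```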
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/blank.hh ADDED
@@ -0,0 +1,43 @@
+ #ifndef LM_BLANK_H
+ #define LM_BLANK_H
+
+ #include <limits>
+
+ #include <stdint.h>
+ #include <math.h>
+
+ namespace lm {
+ namespace ngram {
+
+ /* Suppose "foo bar" appears with zero backoff but there is no trigram
+  * beginning with these words. Then, when scoring "foo bar", the model could
+  * return out_state containing "bar" or even null context if "bar" also has no
+  * backoff and is never followed by another word. Then the backoff is set to
+  * kNoExtensionBackoff. If the n-gram might be extended, then out_state must
+  * contain the full n-gram, in which case kExtensionBackoff is set. In any
+  * case, if an n-gram has non-zero backoff, the full state is returned so
+  * backoff can be properly charged.
+  * These differ only in sign bit because the backoff is in fact zero in either
+  * case.
+  */
+ const float kNoExtensionBackoff = -0.0;
+ const float kExtensionBackoff = 0.0;
+ const uint64_t kNoExtensionQuant = 0;
+ const uint64_t kExtensionQuant = 1;
+
+ inline void SetExtension(float &backoff) {
+   if (backoff == kNoExtensionBackoff) backoff = kExtensionBackoff;
+ }
+
+ // This compiles down nicely.
+ inline bool HasExtension(const float &backoff) {
+   typedef union { float f; uint32_t i; } UnionValue;
+   UnionValue compare, interpret;
+   compare.f = kNoExtensionBackoff;
+   interpret.f = backoff;
+   return compare.i != interpret.i;
+ }
+
+ } // namespace ngram
+ } // namespace lm
+ #endif // LM_BLANK_H
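The header's comment leans on a subtle IEEE 754 fact: -0.0f and 0.0f compare equal under ==, so the extension flag costs no probability mass and must be read back through the bit pattern, which is exactly what HasExtension does. A standalone sketch of that distinction (it uses memcpy rather than the header's union, but the observable behavior is the same):

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  const float kNoExtensionBackoff = -0.0f;  // sign bit set
  const float kExtensionBackoff = 0.0f;     // sign bit clear

  // Equal as floats, so arithmetic treats both as a zero backoff.
  std::cout << (kNoExtensionBackoff == kExtensionBackoff) << '\n';  // prints 1

  // Distinguishable through the bit pattern: they differ only in the sign bit.
  uint32_t no_ext, ext;
  std::memcpy(&no_ext, &kNoExtensionBackoff, sizeof(no_ext));
  std::memcpy(&ext, &kExtensionBackoff, sizeof(ext));
  std::cout << (no_ext != ext) << '\n';  // prints 1
  return 0;
}
```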
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/adjust_counts.hh ADDED
@@ -0,0 +1,72 @@
+ #ifndef LM_BUILDER_ADJUST_COUNTS_H
+ #define LM_BUILDER_ADJUST_COUNTS_H
+
+ #include "lm/builder/discount.hh"
+ #include "lm/lm_exception.hh"
+ #include "util/exception.hh"
+
+ #include <vector>
+
+ #include <stdint.h>
+
+ namespace util { namespace stream { class ChainPositions; } }
+
+ namespace lm {
+ namespace builder {
+
+ class BadDiscountException : public util::Exception {
+   public:
+     BadDiscountException() throw();
+     ~BadDiscountException() throw();
+ };
+
+ struct DiscountConfig {
+   // Overrides discounts for orders [1,discount_override.size()].
+   std::vector<Discount> overwrite;
+   // If discounting fails for an order, copy them from here.
+   Discount fallback;
+   // What to do when discounts are out of range or would trigger division by
+   // zero. If it does something other than THROW_UP, use fallback_discount.
+   WarningAction bad_action;
+ };
+
+ /* Compute adjusted counts.
+  * Input: unique suffix sorted N-grams (and just the N-grams) with raw counts.
+  * Output: [1,N]-grams with adjusted counts.
+  * [1,N)-grams are in suffix order
+  * N-grams are in undefined order (they're going to be sorted anyway).
+  */
+ class AdjustCounts {
+   public:
+     // counts: output
+     // counts_pruned: output
+     // discounts: mostly output. If the input already has entries, they will be kept.
+     // prune_thresholds: input. n-grams with normal (not adjusted) count below this will be pruned.
+     AdjustCounts(
+         const std::vector<uint64_t> &prune_thresholds,
+         std::vector<uint64_t> &counts,
+         std::vector<uint64_t> &counts_pruned,
+         const std::vector<bool> &prune_words,
+         const DiscountConfig &discount_config,
+         std::vector<Discount> &discounts)
+       : prune_thresholds_(prune_thresholds), counts_(counts), counts_pruned_(counts_pruned),
+         prune_words_(prune_words), discount_config_(discount_config), discounts_(discounts)
+     {}
+
+     void Run(const util::stream::ChainPositions &positions);
+
+   private:
+     const std::vector<uint64_t> &prune_thresholds_;
+     std::vector<uint64_t> &counts_;
+     std::vector<uint64_t> &counts_pruned_;
+     const std::vector<bool> &prune_words_;
+
+     DiscountConfig discount_config_;
+     std::vector<Discount> &discounts_;
+ };
+
+ } // namespace builder
+ } // namespace lm
+
+ #endif // LM_BUILDER_ADJUST_COUNTS_H
+
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/discount.hh ADDED
@@ -0,0 +1,26 @@
+ #ifndef LM_BUILDER_DISCOUNT_H
+ #define LM_BUILDER_DISCOUNT_H
+
+ #include <algorithm>
+
+ #include <stdint.h>
+
+ namespace lm {
+ namespace builder {
+
+ struct Discount {
+   float amount[4];
+
+   float Get(uint64_t count) const {
+     return amount[std::min<uint64_t>(count, 3)];
+   }
+
+   float Apply(uint64_t count) const {
+     return static_cast<float>(count) - Get(count);
+   }
+ };
+
+ } // namespace builder
+ } // namespace lm
+
+ #endif // LM_BUILDER_DISCOUNT_H
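Discount::Get clamps the count at 3, so every count of three or more shares amount[3]. A standalone sketch with made-up discount values (the real amounts come from the count-of-counts statistics computed during estimation):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Re-statement of Discount from the header, with illustrative amounts.
struct Discount {
  float amount[4];
  float Get(uint64_t count) const { return amount[std::min<uint64_t>(count, 3)]; }
  float Apply(uint64_t count) const { return static_cast<float>(count) - Get(count); }
};

int main() {
  const Discount d = {{0.0f, 0.5f, 1.0f, 1.4f}};  // hypothetical D0..D3+
  std::cout << d.Apply(1) << '\n';   // 1 - 0.5 = 0.5
  std::cout << d.Apply(3) << '\n';   // 3 - 1.4 = 1.6
  std::cout << d.Apply(10) << '\n';  // 10 - 1.4 = 8.6 (counts >= 3 share amount[3])
  return 0;
}
```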
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/hash_gamma.hh ADDED
@@ -0,0 +1,19 @@
+ #ifndef LM_BUILDER_HASH_GAMMA__
+ #define LM_BUILDER_HASH_GAMMA__
+
+ #include <stdint.h>
+
+ namespace lm { namespace builder {
+
+ #pragma pack(push)
+ #pragma pack(4)
+
+ struct HashGamma {
+   uint64_t hash_value;
+   float gamma;
+ };
+
+ #pragma pack(pop)
+
+ }} // namespaces
+ #endif // LM_BUILDER_HASH_GAMMA__
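The #pragma pack(4) is what keeps this struct at 12 bytes: without it, the uint64_t member forces 8-byte alignment and 4 bytes of tail padding. A small demonstration (the 12/16 byte figures assume a typical 64-bit target):

```cpp
#include <cstdint>
#include <iostream>

#pragma pack(push)
#pragma pack(4)
struct PackedHashGamma { uint64_t hash_value; float gamma; };  // as in the header
#pragma pack(pop)

struct UnpackedHashGamma { uint64_t hash_value; float gamma; };  // default layout

int main() {
  std::cout << sizeof(PackedHashGamma) << '\n';    // 12 on typical targets
  std::cout << sizeof(UnpackedHashGamma) << '\n';  // 16 on typical targets
  return 0;
}
```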
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/header_info.hh ADDED
@@ -0,0 +1,24 @@
+ #ifndef LM_BUILDER_HEADER_INFO_H
+ #define LM_BUILDER_HEADER_INFO_H
+
+ #include <string>
+ #include <vector>
+ #include <stdint.h>
+
+ // Some configuration info that is used to add
+ // comments to the beginning of an ARPA file
+ struct HeaderInfo {
+   std::string input_file;
+   uint64_t token_count;
+   std::vector<uint64_t> counts_pruned;
+
+   HeaderInfo() {}
+
+   HeaderInfo(const std::string& input_file_in, uint64_t token_count_in, const std::vector<uint64_t> &counts_pruned_in)
+     : input_file(input_file_in), token_count(token_count_in), counts_pruned(counts_pruned_in) {}
+
+   // TODO: Add smoothing type
+   // TODO: More info if multiple models were interpolated
+ };
+
+ #endif
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/initial_probabilities.hh ADDED
@@ -0,0 +1,42 @@
+ #ifndef LM_BUILDER_INITIAL_PROBABILITIES_H
+ #define LM_BUILDER_INITIAL_PROBABILITIES_H
+
+ #include "lm/builder/discount.hh"
+ #include "util/stream/config.hh"
+
+ #include <vector>
+
+ namespace util { namespace stream { class Chains; } }
+
+ namespace lm {
+ namespace builder {
+
+ struct InitialProbabilitiesConfig {
+   // These should be small buffers to keep the adder from getting too far ahead
+   util::stream::ChainConfig adder_in;
+   util::stream::ChainConfig adder_out;
+   // SRILM doesn't normally interpolate unigrams.
+   bool interpolate_unigrams;
+ };
+
+ /* Compute initial (uninterpolated) probabilities
+  * primary: the normal chain of n-grams. Incoming is context sorted adjusted
+  * counts. Outgoing has uninterpolated probabilities for use by Interpolate.
+  * second_in: a second copy of the primary input. Discard the output.
+  * gamma_out: Computed gamma values are output on these chains in suffix order.
+  * The values are bare floats and should be buffered for interpolation to
+  * use.
+  */
+ void InitialProbabilities(
+     const InitialProbabilitiesConfig &config,
+     const std::vector<Discount> &discounts,
+     util::stream::Chains &primary,
+     util::stream::Chains &second_in,
+     util::stream::Chains &gamma_out,
+     const std::vector<uint64_t> &prune_thresholds,
+     bool prune_vocab);
+
+ } // namespace builder
+ } // namespace lm
+
+ #endif // LM_BUILDER_INITIAL_PROBABILITIES_H
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/interpolate.hh ADDED
@@ -0,0 +1,34 @@
+ #ifndef LM_BUILDER_INTERPOLATE_H
+ #define LM_BUILDER_INTERPOLATE_H
+
+ #include "util/stream/multi_stream.hh"
+
+ #include <vector>
+
+ #include <stdint.h>
+
+ namespace lm { namespace builder {
+
+ /* Interpolate step.
+  * Input: suffix sorted n-grams with (p_uninterpolated, gamma) from
+  * InitialProbabilities.
+  * Output: suffix sorted n-grams with complete probability
+  */
+ class Interpolate {
+   public:
+     // Normally vocab_size is the unigram count-1 (since p(<s>) = 0) but might
+     // be larger when the user specifies a consistent vocabulary size.
+     explicit Interpolate(uint64_t vocab_size, const util::stream::ChainPositions &backoffs, const std::vector<uint64_t> &prune_thresholds, bool prune_vocab, bool output_q_);
+
+     void Run(const util::stream::ChainPositions &positions);
+
+   private:
+     float uniform_prob_;
+     util::stream::ChainPositions backoffs_;
+     const std::vector<uint64_t> prune_thresholds_;
+     bool prune_vocab_;
+     bool output_q_;
+ };
+
+ }} // namespaces
+ #endif // LM_BUILDER_INTERPOLATE_H
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/ngram_stream.hh ADDED
@@ -0,0 +1,58 @@
+ #ifndef LM_BUILDER_NGRAM_STREAM_H
+ #define LM_BUILDER_NGRAM_STREAM_H
+
+ #include "lm/builder/ngram.hh"
+ #include "util/stream/chain.hh"
+ #include "util/stream/multi_stream.hh"
+ #include "util/stream/stream.hh"
+
+ #include <cstddef>
+
+ namespace lm { namespace builder {
+
+ class NGramStream {
+   public:
+     NGramStream() : gram_(NULL, 0) {}
+
+     NGramStream(const util::stream::ChainPosition &position) : gram_(NULL, 0) {
+       Init(position);
+     }
+
+     void Init(const util::stream::ChainPosition &position) {
+       stream_.Init(position);
+       gram_ = NGram(stream_.Get(), NGram::OrderFromSize(position.GetChain().EntrySize()));
+     }
+
+     NGram &operator*() { return gram_; }
+     const NGram &operator*() const { return gram_; }
+
+     NGram *operator->() { return &gram_; }
+     const NGram *operator->() const { return &gram_; }
+
+     void *Get() { return stream_.Get(); }
+     const void *Get() const { return stream_.Get(); }
+
+     operator bool() const { return stream_; }
+     bool operator!() const { return !stream_; }
+     void Poison() { stream_.Poison(); }
+
+     NGramStream &operator++() {
+       ++stream_;
+       gram_.ReBase(stream_.Get());
+       return *this;
+     }
+
+   private:
+     NGram gram_;
+     util::stream::Stream stream_;
+ };
+
+ inline util::stream::Chain &operator>>(util::stream::Chain &chain, NGramStream &str) {
+   str.Init(chain.Add());
+   return chain;
+ }
+
+ typedef util::stream::GenericStreams<NGramStream> NGramStreams;
+
+ }} // namespaces
+ #endif // LM_BUILDER_NGRAM_STREAM_H
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/output.hh ADDED
@@ -0,0 +1,89 @@
+ #ifndef LM_BUILDER_OUTPUT_H
+ #define LM_BUILDER_OUTPUT_H
+
+ #include "lm/builder/header_info.hh"
+ #include "util/file.hh"
+
+ #include <boost/ptr_container/ptr_vector.hpp>
+ #include <boost/utility.hpp>
+
+ #include <map>
+
+ namespace util { namespace stream { class Chains; class ChainPositions; } }
+
+ /* Outputs from lmplz: ARPA, sharded files, etc. */
+ namespace lm { namespace builder {
+
+ // These are different types of hooks. Values should be consecutive to enable a vector lookup.
+ enum HookType {
+   COUNT_HOOK, // Raw N-gram counts, highest order only.
+   PROB_PARALLEL_HOOK, // Probability and backoff (or just q). Output must process the orders in parallel or there will be a deadlock.
+   PROB_SEQUENTIAL_HOOK, // Probability and backoff (or just q). Output can process orders any way it likes. This requires writing the data to disk then reading. Useful for ARPA files, which put unigrams first etc.
+   NUMBER_OF_HOOKS // Keep this last so we know how many values there are.
+ };
+
+ class Output;
+
+ class OutputHook {
+   public:
+     explicit OutputHook(HookType hook_type) : type_(hook_type), master_(NULL) {}
+
+     virtual ~OutputHook();
+
+     virtual void Apply(util::stream::Chains &chains);
+
+     virtual void Run(const util::stream::ChainPositions &positions) = 0;
+
+   protected:
+     const HeaderInfo &GetHeader() const;
+     int GetVocabFD() const;
+
+   private:
+     friend class Output;
+     const HookType type_;
+     const Output *master_;
+ };
+
+ class Output : boost::noncopyable {
+   public:
+     Output() {}
+
+     // Takes ownership.
+     void Add(OutputHook *hook) {
+       hook->master_ = this;
+       outputs_[hook->type_].push_back(hook);
+     }
+
+     bool Have(HookType hook_type) const {
+       return !outputs_[hook_type].empty();
+     }
+
+     void SetVocabFD(int to) { vocab_fd_ = to; }
+     int GetVocabFD() const { return vocab_fd_; }
+
+     void SetHeader(const HeaderInfo &header) { header_ = header; }
+     const HeaderInfo &GetHeader() const { return header_; }
+
+     void Apply(HookType hook_type, util::stream::Chains &chains) {
+       for (boost::ptr_vector<OutputHook>::iterator entry = outputs_[hook_type].begin(); entry != outputs_[hook_type].end(); ++entry) {
+         entry->Apply(chains);
+       }
+     }
+
+   private:
+     boost::ptr_vector<OutputHook> outputs_[NUMBER_OF_HOOKS];
+     int vocab_fd_;
+     HeaderInfo header_;
+ };
+
+ inline const HeaderInfo &OutputHook::GetHeader() const {
+   return master_->GetHeader();
+ }
+
+ inline int OutputHook::GetVocabFD() const {
+   return master_->GetVocabFD();
+ }
+
+ }} // namespaces
+
+ #endif // LM_BUILDER_OUTPUT_H
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/pipeline.hh ADDED
@@ -0,0 +1,74 @@
+ #ifndef LM_BUILDER_PIPELINE_H
+ #define LM_BUILDER_PIPELINE_H
+
+ #include "lm/builder/adjust_counts.hh"
+ #include "lm/builder/initial_probabilities.hh"
+ #include "lm/builder/header_info.hh"
+ #include "lm/lm_exception.hh"
+ #include "lm/word_index.hh"
+ #include "util/stream/config.hh"
+ #include "util/file_piece.hh"
+
+ #include <string>
+ #include <cstddef>
+
+ namespace lm { namespace builder {
+
+ class Output;
+
+ struct PipelineConfig {
+   std::size_t order;
+   std::string vocab_file;
+   util::stream::SortConfig sort;
+   InitialProbabilitiesConfig initial_probs;
+   util::stream::ChainConfig read_backoffs;
+
+   // Estimated vocabulary size. Used for sizing CorpusCount memory and
+   // initial probing hash table sizing, also in CorpusCount.
+   lm::WordIndex vocab_estimate;
+
+   // Minimum block size to tolerate.
+   std::size_t minimum_block;
+
+   // Number of blocks to use. This will be overridden to 1 if everything fits.
+   std::size_t block_count;
+
+   // n-gram count thresholds for pruning. A 0 value means no pruning for the
+   // corresponding n-gram order.
+   std::vector<uint64_t> prune_thresholds; //mjd
+   bool prune_vocab;
+   std::string prune_vocab_file;
+
+   // What to do with discount failures.
+   DiscountConfig discount;
+
+   // Compute collapsed q values instead of probability and backoff
+   bool output_q;
+
+   /* Computing the perplexity of LMs with different vocabularies is hard. For
+    * example, the lowest perplexity is attained by a unigram model that
+    * predicts p(<unk>) = 1 and has no other vocabulary. Also, linearly
+    * interpolated models will sum to more than 1 because <unk> is duplicated
+    * (SRI just pretends p(<unk>) = 0 for these purposes, which makes it sum to
+    * 1 but comes with its own problems). This option will make the vocabulary
+    * a particular size by replicating <unk> multiple times for purposes of
+    * computing vocabulary size. It has no effect if the actual vocabulary is
+    * larger. This parameter serves the same purpose as IRSTLM's "dub".
+    */
+   uint64_t vocab_size_for_unk;
+
+   /* What to do the first time <s>, </s>, or <unk> appears in the input. If
+    * this is anything but THROW_UP, then the symbol will always be treated as
+    * whitespace.
+    */
+   WarningAction disallowed_symbol_action;
+
+   const std::string &TempPrefix() const { return sort.temp_prefix; }
+   std::size_t TotalMemory() const { return sort.total_memory; }
+ };
+
+ // Takes ownership of text_file and out_arpa.
+ void Pipeline(PipelineConfig &config, int text_file, Output &output);
+
+ }} // namespaces
+ #endif // LM_BUILDER_PIPELINE_H
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/builder/sort.hh ADDED
@@ -0,0 +1,244 @@
+ #ifndef LM_BUILDER_SORT_H
+ #define LM_BUILDER_SORT_H
+
+ #include "lm/builder/ngram_stream.hh"
+ #include "lm/builder/ngram.hh"
+ #include "lm/word_index.hh"
+ #include "util/stream/sort.hh"
+
+ #include "util/stream/timer.hh"
+
+ #include <functional>
+ #include <string>
+
+ namespace lm {
+ namespace builder {
+
+ /**
+  * Abstract parent class for defining custom n-gram comparators.
+  */
+ template <class Child> class Comparator : public std::binary_function<const void *, const void *, bool> {
+   public:
+
+     /**
+      * Constructs a comparator capable of comparing two n-grams.
+      *
+      * @param order Number of words in each n-gram
+      */
+     explicit Comparator(std::size_t order) : order_(order) {}
+
+     /**
+      * Applies the comparator using the Compare method that must be defined in any class that inherits from this class.
+      *
+      * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+      * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+      *
+      * @see ContextOrder::Compare
+      * @see PrefixOrder::Compare
+      * @see SuffixOrder::Compare
+      */
+     inline bool operator()(const void *lhs, const void *rhs) const {
+       return static_cast<const Child*>(this)->Compare(static_cast<const WordIndex*>(lhs), static_cast<const WordIndex*>(rhs));
+     }
+
+     /** Gets the n-gram order defined for this comparator. */
+     std::size_t Order() const { return order_; }
+
+   protected:
+     std::size_t order_;
+ };
+
+
+ /**
+  * N-gram comparator that compares n-grams according to their reverse (suffix) order.
+  *
+  * This comparator compares n-grams lexicographically, one word at a time,
+  * beginning with the last word of each n-gram and ending with the first word of each n-gram.
+  *
+  * Some examples of n-gram comparisons as defined by this comparator:
+  * - a b c == a b c
+  * - a b c < a b d
+  * - a b c > a d b
+  * - a b c > a b b
+  * - a b c > x a c
+  * - a b c < x y z
+  */
+ class SuffixOrder : public Comparator<SuffixOrder> {
+   public:
+
+     /**
+      * Constructs a comparator capable of comparing two n-grams.
+      *
+      * @param order Number of words in each n-gram
+      */
+     explicit SuffixOrder(std::size_t order) : Comparator<SuffixOrder>(order) {}
+
+     /**
+      * Compares two n-grams lexicographically, one word at a time,
+      * beginning with the last word of each n-gram and ending with the first word of each n-gram.
+      *
+      * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+      * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+      */
+     inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
+       for (std::size_t i = order_ - 1; i != 0; --i) {
+         if (lhs[i] != rhs[i])
+           return lhs[i] < rhs[i];
+       }
+       return lhs[0] < rhs[0];
+     }
+
+     static const unsigned kMatchOffset = 1;
+ };
+
+
+ /**
+  * N-gram comparator that compares n-grams according to the reverse (suffix) order of the n-gram context.
+  *
+  * This comparator compares n-grams lexicographically, one word at a time,
+  * beginning with the penultimate word of each n-gram and ending with the first word of each n-gram;
+  * finally, this comparator compares the last word of each n-gram.
+  *
+  * Some examples of n-gram comparisons as defined by this comparator:
+  * - a b c == a b c
+  * - a b c < a b d
+  * - a b c < a d b
+  * - a b c > a b b
+  * - a b c > x a c
+  * - a b c < x y z
+  */
+ class ContextOrder : public Comparator<ContextOrder> {
+   public:
+
+     /**
+      * Constructs a comparator capable of comparing two n-grams.
+      *
+      * @param order Number of words in each n-gram
+      */
+     explicit ContextOrder(std::size_t order) : Comparator<ContextOrder>(order) {}
+
+     /**
+      * Compares two n-grams lexicographically, one word at a time,
+      * beginning with the penultimate word of each n-gram and ending with the first word of each n-gram;
+      * finally, this comparator compares the last word of each n-gram.
+      *
+      * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+      * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+      */
+     inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
+       for (int i = order_ - 2; i >= 0; --i) {
+         if (lhs[i] != rhs[i])
+           return lhs[i] < rhs[i];
+       }
+       return lhs[order_ - 1] < rhs[order_ - 1];
+     }
+ };
+
+ /**
+  * N-gram comparator that compares n-grams according to their natural (prefix) order.
+  *
+  * This comparator compares n-grams lexicographically, one word at a time,
+  * beginning with the first word of each n-gram and ending with the last word of each n-gram.
+  *
+  * Some examples of n-gram comparisons as defined by this comparator:
+  * - a b c == a b c
+  * - a b c < a b d
+  * - a b c < a d b
+  * - a b c > a b b
+  * - a b c < x a c
+  * - a b c < x y z
+  */
+ class PrefixOrder : public Comparator<PrefixOrder> {
+   public:
+
+     /**
+      * Constructs a comparator capable of comparing two n-grams.
+      *
+      * @param order Number of words in each n-gram
+      */
+     explicit PrefixOrder(std::size_t order) : Comparator<PrefixOrder>(order) {}
+
+     /**
+      * Compares two n-grams lexicographically, one word at a time,
+      * beginning with the first word of each n-gram and ending with the last word of each n-gram.
+      *
+      * @param lhs A pointer to the n-gram on the left-hand side of the comparison
+      * @param rhs A pointer to the n-gram on the right-hand side of the comparison
+      */
+     inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
+       for (std::size_t i = 0; i < order_; ++i) {
+         if (lhs[i] != rhs[i])
+           return lhs[i] < rhs[i];
+       }
+       return false;
+     }
+
+     static const unsigned kMatchOffset = 0;
+ };
+
+ // Sum counts for the same n-gram.
+ struct AddCombiner {
+   bool operator()(void *first_void, const void *second_void, const SuffixOrder &compare) const {
+     NGram first(first_void, compare.Order());
+     // There isn't a const version of NGram.
+     NGram second(const_cast<void*>(second_void), compare.Order());
+     if (memcmp(first.begin(), second.begin(), sizeof(WordIndex) * compare.Order())) return false;
+     first.Count() += second.Count();
+     return true;
+   }
+ };
+
+ // The combiner is only used on a single chain, so I didn't bother to allow
+ // that template.
+ /**
+  * Represents an @ref util::FixedArray "array" capable of storing @ref util::stream::Sort "Sort" objects.
+  *
+  * In the anticipated use case, an instance of this class will maintain one @ref util::stream::Sort "Sort" object
+  * for each n-gram order (ranging from 1 up to the maximum n-gram order being processed).
+  * Used in this manner, it enables the n-grams of each order to be sorted in parallel.
+  *
+  * @tparam Compare An @ref Comparator "ngram comparator" to use during sorting.
+  */
+ template <class Compare> class Sorts : public util::FixedArray<util::stream::Sort<Compare> > {
+   private:
+     typedef util::stream::Sort<Compare> S;
+     typedef util::FixedArray<S> P;
+
+   public:
+
+     /**
+      * Constructs, but does not initialize.
+      *
+      * @ref util::FixedArray::Init() "Init" must be called before use.
+      *
+      * @see util::FixedArray::Init()
+      */
+     Sorts() {}
+
+     /**
+      * Constructs an @ref util::FixedArray "array" capable of storing a fixed number of @ref util::stream::Sort "Sort" objects.
+      *
+      * @param number The maximum number of @ref util::stream::Sort "sorters" that can be held by this @ref util::FixedArray "array"
+      * @see util::FixedArray::FixedArray()
+      */
+     explicit Sorts(std::size_t number) : util::FixedArray<util::stream::Sort<Compare> >(number) {}
+
+     /**
+      * Constructs a new @ref util::stream::Sort "Sort" object which is stored in this @ref util::FixedArray "array".
+      *
+      * The new @ref util::stream::Sort "Sort" object is constructed using the provided @ref util::stream::SortConfig "SortConfig" and @ref Comparator "ngram comparator";
+      * once constructed, a new worker @ref util::stream::Thread "thread" (owned by the @ref util::stream::Chain "chain") will sort the n-gram data stored
+      * in the @ref util::stream::Block "blocks" of the provided @ref util::stream::Chain "chain".
+      *
+      * @see util::stream::Sort::Sort()
+      * @see util::stream::Chain::operator>>()
+      */
+     void push_back(util::stream::Chain &chain, const util::stream::SortConfig &config, const Compare &compare) {
+       new (P::end()) S(chain, config, compare); // use "placement new" syntax to initialize S in an already-allocated memory location
+       P::Constructed();
+     }
+ };
+
+ } // namespace builder
+ } // namespace lm
+
+ #endif // LM_BUILDER_SORT_H
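The doc comments above give example orderings in terms of words; the same comparisons can be checked directly on WordIndex arrays. This standalone sketch (not from the commit) re-states the two comparison loops and verifies one pair from the examples; WordIndex is unsigned in lm/word_index.hh, so uint32_t stands in for it here:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

typedef uint32_t WordIndex;  // stand-in for the typedef in lm/word_index.hh

// Trimmed copies of the Compare loops from SuffixOrder and PrefixOrder.
bool SuffixLess(const WordIndex *lhs, const WordIndex *rhs, std::size_t order) {
  for (std::size_t i = order - 1; i != 0; --i)
    if (lhs[i] != rhs[i]) return lhs[i] < rhs[i];
  return lhs[0] < rhs[0];
}

bool PrefixLess(const WordIndex *lhs, const WordIndex *rhs, std::size_t order) {
  for (std::size_t i = 0; i < order; ++i)
    if (lhs[i] != rhs[i]) return lhs[i] < rhs[i];
  return false;
}

int main() {
  // Encode a=0, b=1, c=2, x=23: the pair "a b c" vs "x a c" from the examples.
  const WordIndex abc[] = {0, 1, 2}, xac[] = {23, 0, 2};
  std::cout << SuffixLess(abc, xac, 3) << '\n';  // 0: a b c > x a c in suffix order
  std::cout << PrefixLess(abc, xac, 3) << '\n';  // 1: a b c < x a c in prefix order
  return 0;
}
```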
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/config.hh ADDED
@@ -0,0 +1,124 @@
+ #ifndef LM_CONFIG_H
+ #define LM_CONFIG_H
+
+ #include "lm/lm_exception.hh"
+ #include "util/mmap.hh"
+
+ #include <iosfwd>
+ #include <string>
+ #include <vector>
+
+ /* Configuration for ngram model. Separate header to reduce pollution. */
+
+ namespace lm {
+
+ class EnumerateVocab;
+
+ namespace ngram {
+
+ struct Config {
+   // EFFECTIVE FOR BOTH ARPA AND BINARY READS
+
+   // (default true) print progress bar to messages
+   bool show_progress;
+
+   // Where to log messages including the progress bar. Set to NULL for
+   // silence.
+   std::ostream *messages;
+
+   std::ostream *ProgressMessages() const {
+     return show_progress ? messages : 0;
+   }
+
+   // This will be called with every string in the vocabulary by the
+   // constructor; it need only exist for the lifetime of the constructor.
+   // See enumerate_vocab.hh for more detail. Config does not take ownership;
+   // just delete/let it go out of scope after the constructor exits.
+   EnumerateVocab *enumerate_vocab;
+
+
+   // ONLY EFFECTIVE WHEN READING ARPA
+
+   // What to do when <unk> isn't in the provided model.
+   WarningAction unknown_missing;
+   // What to do when <s> or </s> is missing from the model.
+   // If THROW_UP, the exception will be of type util::SpecialWordMissingException.
+   WarningAction sentence_marker_missing;
+
+   // What to do with a positive log probability. For COMPLAIN and SILENT, map
+   // to 0.
+   WarningAction positive_log_probability;
+
+   // The probability to substitute for <unk> if it's missing from the model.
+   // No effect if the model has <unk> or unknown_missing == THROW_UP.
+   float unknown_missing_logprob;
+
+   // Size multiplier for probing hash table. Must be > 1. Space is linear in
+   // this. Time is probing_multiplier / (probing_multiplier - 1). No effect
+   // for sorted variant.
+   // If you find yourself setting this to a low number, consider using the
+   // TrieModel which has lower memory consumption.
+   float probing_multiplier;
+
+   // Amount of memory to use for building. The actual memory usage will be
+   // higher since this just sets sort buffer size. Only applies to trie
+   // models.
+   std::size_t building_memory;
+
+   // Template for temporary directory appropriate for passing to mkdtemp.
+   // The characters XXXXXX are appended before passing to mkdtemp. Only
+   // applies to trie. If empty, defaults to write_mmap. If that's NULL,
+   // defaults to input file name.
+   std::string temporary_directory_prefix;
+
+   // Level of complaining to do when loading from ARPA instead of binary format.
+   enum ARPALoadComplain {ALL, EXPENSIVE, NONE};
+   ARPALoadComplain arpa_complain;
+
+   // While loading an ARPA file, also write out this binary format file. Set
+   // to NULL to disable.
+   const char *write_mmap;
+
+   enum WriteMethod {
+     WRITE_MMAP, // Map the file directly.
+     WRITE_AFTER // Write after we're done.
+   };
+   WriteMethod write_method;
+
+   // Include the vocab in the binary file? Only effective if write_mmap != NULL.
+   bool include_vocab;
+
+
+   // Left rest options. Only used when the model includes rest costs.
+   enum RestFunction {
+     REST_MAX, // Maximum of any score to the left
+     REST_LOWER, // Use lower-order files given below.
+   };
+   RestFunction rest_function;
+   // Only used for REST_LOWER.
+   std::vector<std::string> rest_lower_files;
+
+
+   // Quantization options. Only effective for QuantTrieModel. One value is
+   // reserved for each of prob and backoff, so 2^bits - 1 buckets will be used
+   // to quantize (and one of the remaining backoffs will be 0).
+   uint8_t prob_bits, backoff_bits;
+
+   // Bhiksha compression (simple form). Only works with trie.
+   uint8_t pointer_bhiksha_bits;
+
+
+   // ONLY EFFECTIVE WHEN READING BINARY
+
+   // How to get the giant array into memory: lazy mmap, populate, read etc.
+   // See util/mmap.hh for details of MapMethod.
+   util::LoadMethod load_method;
+
+
+   // Set defaults.
+   Config();
+ };
+
+ } /* namespace ngram */ } /* namespace lm */
+
+ #endif // LM_CONFIG_H
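A hedged sketch of filling in a Config before loading a model: the fields are exactly the ones declared above, and the Config() constructor supplies the documented defaults for everything left untouched. The helper name is hypothetical, not KenLM API:

```cpp
#include <cstddef>

#include "lm/config.hh"

// Hypothetical helper: a quiet configuration that also dumps a binary file
// while the ARPA input is being read.
lm::ngram::Config QuietBinaryWritingConfig(const char *binary_out) {
  lm::ngram::Config config;        // constructor sets defaults
  config.show_progress = false;    // no progress bar...
  config.messages = NULL;          // ...and no log messages at all
  config.write_mmap = binary_out;  // while reading ARPA, also write this binary file
  config.include_vocab = true;     // only effective because write_mmap != NULL
  return config;
}
```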
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/enumerate_vocab.hh ADDED
@@ -0,0 +1,28 @@
+ #ifndef LM_ENUMERATE_VOCAB_H
+ #define LM_ENUMERATE_VOCAB_H
+
+ #include "lm/word_index.hh"
+ #include "util/string_piece.hh"
+
+ namespace lm {
+
+ /* If you need the actual strings in the vocabulary, inherit from this class
+  * and implement Add. Then put a pointer in Config.enumerate_vocab; it does
+  * not take ownership. Add is called once per vocab word. index starts at 0
+  * and increases by 1 each time. This is only used by the Model constructor;
+  * the pointer is not retained by the class.
+  */
+ class EnumerateVocab {
+   public:
+     virtual ~EnumerateVocab() {}
+
+     virtual void Add(WordIndex index, const StringPiece &str) = 0;
+
+   protected:
+     EnumerateVocab() {}
+ };
+
+ } // namespace lm
+
+ #endif // LM_ENUMERATE_VOCAB_H
+
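A minimal sketch of a subclass that records every vocabulary string, assuming only the interface declared above; the class name is hypothetical. A pointer to an instance would go in Config::enumerate_vocab before constructing the model:

```cpp
#include <string>
#include <vector>

#include "lm/enumerate_vocab.hh"

// Hypothetical collector: Add is called with index 0, 1, 2, ... in order,
// so push_back keeps words_[index] lined up with the model's WordIndex.
class CollectVocab : public lm::EnumerateVocab {
 public:
  void Add(lm::WordIndex index, const StringPiece &str) {
    words_.push_back(std::string(str.data(), str.size()));
  }
  const std::vector<std::string> &Words() const { return words_; }

 private:
  std::vector<std::string> words_;
};
```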
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/facade.hh ADDED
@@ -0,0 +1,73 @@
+ #ifndef LM_FACADE_H
+ #define LM_FACADE_H
+
+ #include "lm/virtual_interface.hh"
+ #include "util/string_piece.hh"
+
+ #include <string>
+
+ namespace lm {
+ namespace base {
+
+ // Common model interface that depends on knowing the specific classes.
+ // Curiously recurring template pattern.
+ template <class Child, class StateT, class VocabularyT> class ModelFacade : public Model {
+   public:
+     typedef StateT State;
+     typedef VocabularyT Vocabulary;
+
+     /* Translate from void* to State */
+     FullScoreReturn BaseFullScore(const void *in_state, const WordIndex new_word, void *out_state) const {
+       return static_cast<const Child*>(this)->FullScore(
+           *reinterpret_cast<const State*>(in_state),
+           new_word,
+           *reinterpret_cast<State*>(out_state));
+     }
+
+     FullScoreReturn BaseFullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, void *out_state) const {
+       return static_cast<const Child*>(this)->FullScoreForgotState(
+           context_rbegin,
+           context_rend,
+           new_word,
+           *reinterpret_cast<State*>(out_state));
+     }
+
+     // Default Score function calls FullScore. Model can override this.
+     float Score(const State &in_state, const WordIndex new_word, State &out_state) const {
+       return static_cast<const Child*>(this)->FullScore(in_state, new_word, out_state).prob;
+     }
+
+     float BaseScore(const void *in_state, const WordIndex new_word, void *out_state) const {
+       return static_cast<const Child*>(this)->Score(
+           *reinterpret_cast<const State*>(in_state),
+           new_word,
+           *reinterpret_cast<State*>(out_state));
+     }
+
+     const State &BeginSentenceState() const { return begin_sentence_; }
+     const State &NullContextState() const { return null_context_; }
+     const Vocabulary &GetVocabulary() const { return *static_cast<const Vocabulary*>(&BaseVocabulary()); }
+
+   protected:
+     ModelFacade() : Model(sizeof(State)) {}
+
+     virtual ~ModelFacade() {}
+
+     // begin_sentence and null_context can disappear after. vocab should stay.
+     void Init(const State &begin_sentence, const State &null_context, const Vocabulary &vocab, unsigned char order) {
+       begin_sentence_ = begin_sentence;
+       null_context_ = null_context;
+       begin_sentence_memory_ = &begin_sentence_;
+       null_context_memory_ = &null_context_;
+       base_vocab_ = &vocab;
+       order_ = order;
+     }
+
+   private:
+     State begin_sentence_, null_context_;
+ };
+
+ } // namespace base
+ } // namespace lm
+
+ #endif // LM_FACADE_H
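ModelFacade relies on the curiously recurring template pattern: the base class static_casts `this` to the derived type, so what looks like virtual dispatch is resolved at compile time. A standalone toy of the same pattern (names are illustrative only, not KenLM types):

```cpp
#include <iostream>

// The base knows its derived type as a template parameter and calls into it
// directly: no vtable lookup on the hot scoring path.
template <class Child> class Facade {
 public:
  int BaseScore(int word) const {
    return static_cast<const Child*>(this)->Score(word);  // static dispatch
  }
};

class ToyModel : public Facade<ToyModel> {
 public:
  int Score(int word) const { return word * 2; }  // stands in for FullScore
};

int main() {
  ToyModel m;
  std::cout << m.BaseScore(21) << '\n';  // 42, resolved at compile time
  return 0;
}
```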
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/arpa_io.hh ADDED
@@ -0,0 +1,114 @@
+ #ifndef LM_FILTER_ARPA_IO_H
+ #define LM_FILTER_ARPA_IO_H
+ /* Input and output for ARPA format language model files.
+  */
+ #include "lm/read_arpa.hh"
+ #include "util/exception.hh"
+ #include "util/string_piece.hh"
+ #include "util/tokenize_piece.hh"
+
+ #include <boost/noncopyable.hpp>
+ #include <boost/scoped_array.hpp>
+
+ #include <fstream>
+ #include <string>
+ #include <vector>
+
+ #include <string.h>
+ #include <stdint.h>
+
+ namespace util { class FilePiece; }
+
+ namespace lm {
+
+ class ARPAInputException : public util::Exception {
+   public:
+     explicit ARPAInputException(const StringPiece &message) throw();
+     explicit ARPAInputException(const StringPiece &message, const StringPiece &line) throw();
+     virtual ~ARPAInputException() throw();
+ };
+
+ class ARPAOutputException : public util::ErrnoException {
+   public:
+     ARPAOutputException(const char *prefix, const std::string &file_name) throw();
+     virtual ~ARPAOutputException() throw();
+
+     const std::string &File() const throw() { return file_name_; }
+
+   private:
+     const std::string file_name_;
+ };
+
+ // Handling for the counts of n-grams at the beginning of ARPA files.
+ size_t SizeNeededForCounts(const std::vector<uint64_t> &number);
+
+ /* Writes an ARPA file. This has to be seekable so the counts can be written
+  * at the end. Hence, I just have it own a std::fstream instead of accepting
+  * a separately held std::ostream. TODO: use the fast one from estimation.
+  */
+ class ARPAOutput : boost::noncopyable {
+   public:
+     explicit ARPAOutput(const char *name, size_t buffer_size = 65536);
+
+     void ReserveForCounts(std::streampos reserve);
+
+     void BeginLength(unsigned int length);
+
+     void AddNGram(const StringPiece &line) {
+       try {
+         file_ << line << '\n';
+       } catch (const std::ios_base::failure &f) {
+         throw ARPAOutputException("Writing an n-gram", file_name_);
+       }
+       ++fast_counter_;
+     }
+
+     void AddNGram(const StringPiece &ngram, const StringPiece &line) {
+       AddNGram(line);
+     }
+
+     template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+       AddNGram(line);
+     }
+
+     void EndLength(unsigned int length);
+
+     void Finish();
+
+   private:
+     const std::string file_name_;
+     boost::scoped_array<char> buffer_;
+     std::fstream file_;
+     size_t fast_counter_;
+     std::vector<uint64_t> counts_;
+ };
+
+
+ template <class Output> void ReadNGrams(util::FilePiece &in, unsigned int length, uint64_t number, Output &out) {
+   ReadNGramHeader(in, length);
+   out.BeginLength(length);
+   for (uint64_t i = 0; i < number; ++i) {
+     StringPiece line = in.ReadLine();
+     util::TokenIter<util::SingleCharacter> tabber(line, '\t');
+     if (!tabber) throw ARPAInputException("blank line", line);
+     if (!++tabber) throw ARPAInputException("no tab", line);
+
+     out.AddNGram(*tabber, line);
+   }
+   out.EndLength(length);
+ }
+
+ template <class Output> void ReadARPA(util::FilePiece &in_lm, Output &out) {
+   std::vector<uint64_t> number;
+   ReadARPACounts(in_lm, number);
+   out.ReserveForCounts(SizeNeededForCounts(number));
+   for (unsigned int i = 0; i < number.size(); ++i) {
+     ReadNGrams(in_lm, i + 1, number[i], out);
+   }
+   ReadEnd(in_lm);
+   out.Finish();
+ }
+
+ } // namespace lm
+
+ #endif // LM_FILTER_ARPA_IO_H
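Because ReadARPA is a template, any visitor works as long as it provides the callbacks the template actually calls: ReserveForCounts, BeginLength, AddNGram, EndLength, and Finish. A sketch of a minimal visitor that just tallies n-grams per order; CountByOrder is hypothetical, while ReadARPA, util::FilePiece, and the callback names come from the sources above:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

#include "lm/filter/arpa_io.hh"
#include "util/file_piece.hh"

class CountByOrder {
 public:
  void ReserveForCounts(std::streampos) {}  // nothing to reserve: no output file
  void BeginLength(unsigned int) { counts_.push_back(0); }
  void AddNGram(const StringPiece & /*ngram*/, const StringPiece & /*line*/) {
    ++counts_.back();
  }
  void EndLength(unsigned int length) {
    std::cout << length << "-grams: " << counts_.back() << '\n';
  }
  void Finish() {}

 private:
  std::vector<uint64_t> counts_;
};

int main(int argc, char *argv[]) {
  if (argc != 2) return 1;
  util::FilePiece in(argv[1]);  // the ARPA file to scan
  CountByOrder out;
  lm::ReadARPA(in, out);
  return 0;
}
```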
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/count_io.hh ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ #ifndef LM_FILTER_COUNT_IO_H
+ #define LM_FILTER_COUNT_IO_H
+
+ #include <fstream>
+ #include <iostream>
+ #include <string>
+
+ #include "util/fake_ofstream.hh"
+ #include "util/file.hh"
+ #include "util/file_piece.hh"
+
+ namespace lm {
+
+ class CountOutput : boost::noncopyable {
+ public:
+ explicit CountOutput(const char *name) : file_(util::CreateOrThrow(name)) {}
+
+ void AddNGram(const StringPiece &line) {
+ file_ << line << '\n';
+ }
+
+ template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+ AddNGram(line);
+ }
+
+ void AddNGram(const StringPiece &ngram, const StringPiece &line) {
+ AddNGram(line);
+ }
+
+ private:
+ util::FakeOFStream file_;
+ };
+
+ class CountBatch {
+ public:
+ explicit CountBatch(std::streamsize initial_read)
+ : initial_read_(initial_read) {
+ buffer_.reserve(initial_read);
+ }
+
+ void Read(std::istream &in) {
+ buffer_.resize(initial_read_);
+ in.read(&*buffer_.begin(), initial_read_);
+ buffer_.resize(in.gcount());
+ char got;
+ while (in.get(got) && got != '\n')
+ buffer_.push_back(got);
+ }
+
+ template <class Output> void Send(Output &out) {
+ for (util::TokenIter<util::SingleCharacter> line(StringPiece(&*buffer_.begin(), buffer_.size()), '\n'); line; ++line) {
+ util::TokenIter<util::SingleCharacter> tabber(*line, '\t');
+ if (!tabber) {
+ std::cerr << "Warning: empty n-gram count line being removed\n";
+ continue;
+ }
+ util::TokenIter<util::SingleCharacter, true> words(*tabber, ' ');
+ if (!words) {
+ std::cerr << "Line has a tab but no words.\n";
+ continue;
+ }
+ out.AddNGram(words, util::TokenIter<util::SingleCharacter, true>::end(), *line);
+ }
+ }
+
+ private:
+ std::streamsize initial_read_;
+
+ // This could have been a std::string but that's less happy with raw writes.
+ std::vector<char> buffer_;
+ };
+
+ template <class Output> void ReadCount(util::FilePiece &in_file, Output &out) {
+ try {
+ while (true) {
+ StringPiece line = in_file.ReadLine();
+ util::TokenIter<util::SingleCharacter> tabber(line, '\t');
+ if (!tabber) {
+ std::cerr << "Warning: empty n-gram count line being removed\n";
+ continue;
+ }
+ out.AddNGram(*tabber, line);
+ }
+ } catch (const util::EndOfFileException &e) {}
+ }
+
+ } // namespace lm
+
+ #endif // LM_FILTER_COUNT_IO_H
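The count-format counterpart works the same way. A minimal sketch (not part of this commit; file names are illustrative): each input line is an n-gram, a tab, then a count, and ReadCount forwards non-blank lines until end of file.

```cpp
#include "lm/filter/count_io.hh"
#include "util/file_piece.hh"

int main() {
  util::FilePiece in("counts.txt");       // hypothetical "n-gram\tcount" file
  lm::CountOutput out("counts.copy.txt"); // hypothetical destination
  lm::ReadCount(in, out);                 // loops until EndOfFileException
  return 0;
}
```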
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/format.hh ADDED
@@ -0,0 +1,250 @@
+ #ifndef LM_FILTER_FORMAT_H
+ #define LM_FILTER_FORMAT_H
+
+ #include "lm/filter/arpa_io.hh"
+ #include "lm/filter/count_io.hh"
+
+ #include <boost/lexical_cast.hpp>
+ #include <boost/ptr_container/ptr_vector.hpp>
+
+ #include <iosfwd>
+
+ namespace lm {
+
+ template <class Single> class MultipleOutput {
+ private:
+ typedef boost::ptr_vector<Single> Singles;
+ typedef typename Singles::iterator SinglesIterator;
+
+ public:
+ MultipleOutput(const char *prefix, size_t number) {
+ files_.reserve(number);
+ std::string tmp;
+ for (unsigned int i = 0; i < number; ++i) {
+ tmp = prefix;
+ tmp += boost::lexical_cast<std::string>(i);
+ files_.push_back(new Single(tmp.c_str()));
+ }
+ }
+
+ void AddNGram(const StringPiece &line) {
+ for (SinglesIterator i = files_.begin(); i != files_.end(); ++i)
+ i->AddNGram(line);
+ }
+
+ template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+ for (SinglesIterator i = files_.begin(); i != files_.end(); ++i)
+ i->AddNGram(begin, end, line);
+ }
+
+ void SingleAddNGram(size_t offset, const StringPiece &line) {
+ files_[offset].AddNGram(line);
+ }
+
+ template <class Iterator> void SingleAddNGram(size_t offset, const Iterator &begin, const Iterator &end, const StringPiece &line) {
+ files_[offset].AddNGram(begin, end, line);
+ }
+
+ protected:
+ Singles files_;
+ };
+
+ class MultipleARPAOutput : public MultipleOutput<ARPAOutput> {
+ public:
+ MultipleARPAOutput(const char *prefix, size_t number) : MultipleOutput<ARPAOutput>(prefix, number) {}
+
+ void ReserveForCounts(std::streampos reserve) {
+ for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+ i->ReserveForCounts(reserve);
+ }
+
+ void BeginLength(unsigned int length) {
+ for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+ i->BeginLength(length);
+ }
+
+ void EndLength(unsigned int length) {
+ for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+ i->EndLength(length);
+ }
+
+ void Finish() {
+ for (boost::ptr_vector<ARPAOutput>::iterator i = files_.begin(); i != files_.end(); ++i)
+ i->Finish();
+ }
+ };
+
+ template <class Filter, class Output> class DispatchInput {
+ public:
+ DispatchInput(Filter &filter, Output &output) : filter_(filter), output_(output) {}
+
+ /* template <class Iterator> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line) {
+ filter_.AddNGram(begin, end, line, output_);
+ }*/
+
+ void AddNGram(const StringPiece &ngram, const StringPiece &line) {
+ filter_.AddNGram(ngram, line, output_);
+ }
+
+ protected:
+ Filter &filter_;
+ Output &output_;
+ };
+
+ template <class Filter, class Output> class DispatchARPAInput : public DispatchInput<Filter, Output> {
+ private:
+ typedef DispatchInput<Filter, Output> B;
+
+ public:
+ DispatchARPAInput(Filter &filter, Output &output) : B(filter, output) {}
+
+ void ReserveForCounts(std::streampos reserve) { B::output_.ReserveForCounts(reserve); }
+ void BeginLength(unsigned int length) { B::output_.BeginLength(length); }
+
+ void EndLength(unsigned int length) {
+ B::filter_.Flush();
+ B::output_.EndLength(length);
+ }
+ void Finish() { B::output_.Finish(); }
+ };
+
+ struct ARPAFormat {
+ typedef ARPAOutput Output;
+ typedef MultipleARPAOutput Multiple;
+ static void Copy(util::FilePiece &in, Output &out) {
+ ReadARPA(in, out);
+ }
+ template <class Filter, class Out> static void RunFilter(util::FilePiece &in, Filter &filter, Out &output) {
+ DispatchARPAInput<Filter, Out> dispatcher(filter, output);
+ ReadARPA(in, dispatcher);
+ }
+ };
+
+ struct CountFormat {
+ typedef CountOutput Output;
+ typedef MultipleOutput<Output> Multiple;
+ static void Copy(util::FilePiece &in, Output &out) {
+ ReadCount(in, out);
+ }
+ template <class Filter, class Out> static void RunFilter(util::FilePiece &in, Filter &filter, Out &output) {
+ DispatchInput<Filter, Out> dispatcher(filter, output);
+ ReadCount(in, dispatcher);
+ }
+ };
+
+ /* For multithreading, the buffer classes hold batches of filter inputs and
+ * outputs in memory. The strings get reused a lot, so keep them around
+ * instead of clearing each time.
+ */
+ class InputBuffer {
+ public:
+ InputBuffer() : actual_(0) {}
+
+ void Reserve(size_t size) { lines_.reserve(size); }
+
+ template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) {
+ if (lines_.size() == actual_) lines_.resize(lines_.size() + 1);
+ // TODO avoid this copy.
+ std::string &copied = lines_[actual_].line;
+ copied.assign(line.data(), line.size());
+ lines_[actual_].ngram.set(copied.data() + (ngram.data() - line.data()), ngram.size());
+ ++actual_;
+ }
+
+ template <class Filter, class Output> void CallFilter(Filter &filter, Output &output) const {
+ for (std::vector<Line>::const_iterator i = lines_.begin(); i != lines_.begin() + actual_; ++i) {
+ filter.AddNGram(i->ngram, i->line, output);
+ }
+ }
+
+ void Clear() { actual_ = 0; }
+ bool Empty() { return actual_ == 0; }
+ size_t Size() { return actual_; }
+
+ private:
+ struct Line {
+ std::string line;
+ StringPiece ngram;
+ };
+
+ size_t actual_;
+
+ std::vector<Line> lines_;
+ };
+
+ class BinaryOutputBuffer {
+ public:
+ BinaryOutputBuffer() {}
+
+ void Reserve(size_t size) {
+ lines_.reserve(size);
+ }
+
+ void AddNGram(const StringPiece &line) {
+ lines_.push_back(line);
+ }
+
+ template <class Output> void Flush(Output &output) {
+ for (std::vector<StringPiece>::const_iterator i = lines_.begin(); i != lines_.end(); ++i) {
+ output.AddNGram(*i);
+ }
+ lines_.clear();
+ }
+
+ private:
+ std::vector<StringPiece> lines_;
+ };
+
+ class MultipleOutputBuffer {
+ public:
+ MultipleOutputBuffer() : last_(NULL) {}
+
+ void Reserve(size_t size) {
+ annotated_.reserve(size);
+ }
+
+ void AddNGram(const StringPiece &line) {
+ annotated_.resize(annotated_.size() + 1);
+ annotated_.back().line = line;
+ }
+
+ void SingleAddNGram(size_t offset, const StringPiece &line) {
+ if ((line.data() == last_.data()) && (line.length() == last_.length())) {
+ annotated_.back().systems.push_back(offset);
+ } else {
+ annotated_.resize(annotated_.size() + 1);
+ annotated_.back().systems.push_back(offset);
+ annotated_.back().line = line;
+ last_ = line;
+ }
+ }
+
+ template <class Output> void Flush(Output &output) {
+ for (std::vector<Annotated>::const_iterator i = annotated_.begin(); i != annotated_.end(); ++i) {
+ if (i->systems.empty()) {
+ output.AddNGram(i->line);
+ } else {
+ for (std::vector<size_t>::const_iterator j = i->systems.begin(); j != i->systems.end(); ++j) {
+ output.SingleAddNGram(*j, i->line);
+ }
+ }
+ }
+ annotated_.clear();
+ }
+
+ private:
+ struct Annotated {
+ // If this is empty, send to all systems.
+ // A filter should never send to all systems and send to a single one.
+ std::vector<size_t> systems;
+ StringPiece line;
+ };
+
+ StringPiece last_;
+
+ std::vector<Annotated> annotated_;
+ };
+
+ } // namespace lm
+
+ #endif // LM_FILTER_FORMAT_H
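The Format structs tie a reader to a filter through the Dispatch classes. A sketch of that contract with a hypothetical pass-through filter (not part of this commit); a real filter would test the n-gram instead of always forwarding.

```cpp
#include "lm/filter/format.hh"
#include "util/file_piece.hh"

// Hypothetical filter satisfying the interface DispatchInput expects.
struct PassEverything {
  template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &out) {
    out.AddNGram(line);  // A real filter would inspect ngram here.
  }
  void Flush() const {}
};

int main() {
  util::FilePiece in("counts.txt");          // illustrative input
  lm::CountFormat::Output out("kept.txt");   // CountOutput
  PassEverything filter;
  // RunFilter wraps the filter in DispatchInput and drives ReadCount.
  lm::CountFormat::RunFilter(in, filter, out);
  return 0;
}
```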
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/phrase.hh ADDED
@@ -0,0 +1,168 @@
+ #ifndef LM_FILTER_PHRASE_H
+ #define LM_FILTER_PHRASE_H
+
+ #include "util/murmur_hash.hh"
+ #include "util/string_piece.hh"
+ #include "util/tokenize_piece.hh"
+
+ #include <boost/unordered_map.hpp>
+
+ #include <iosfwd>
+ #include <vector>
+
+ #define LM_FILTER_PHRASE_METHOD(caps, lower) \
+ bool Find##caps(Hash key, const std::vector<unsigned int> *&out) const {\
+ Table::const_iterator i(table_.find(key));\
+ if (i==table_.end()) return false; \
+ out = &i->second.lower; \
+ return true; \
+ }
+
+ namespace lm {
+ namespace phrase {
+
+ typedef uint64_t Hash;
+
+ class Substrings {
+ private:
+ /* This is the value in a hash table where the key is a string. It indicates
+ * four sets of sentences:
+ * substring is sentences with a phrase containing the key as a substring.
+ * left is sentences with a phrase that begins with the key (left aligned).
+ * right is sentences with a phrase that ends with the key (right aligned).
+ * phrase is sentences where the key is a phrase.
+ * Each set is encoded as a vector of sentence ids in increasing order.
+ */
+ struct SentenceRelation {
+ std::vector<unsigned int> substring, left, right, phrase;
+ };
+ /* Most of the CPU is hash table lookups, so let's not complicate it with
+ * vector equality comparisons. If a collision happens, the SentenceRelation
+ * structure will contain the union of sentence ids over the colliding strings.
+ * In that case, the filter will be slightly more permissive.
+ * The key here is the same as boost's hash of std::vector<std::string>.
+ */
+ typedef boost::unordered_map<Hash, SentenceRelation> Table;
+
+ public:
+ Substrings() {}
+
+ /* If the string isn't a substring of any phrase, return NULL. Otherwise,
+ * return a pointer to std::vector<unsigned int> listing sentences with
+ * matching phrases. This set may be empty for Left, Right, or Phrase.
+ * Example: const std::vector<unsigned int> *FindSubstring(Hash key)
+ */
+ LM_FILTER_PHRASE_METHOD(Substring, substring)
+ LM_FILTER_PHRASE_METHOD(Left, left)
+ LM_FILTER_PHRASE_METHOD(Right, right)
+ LM_FILTER_PHRASE_METHOD(Phrase, phrase)
+
+ #pragma GCC diagnostic ignored "-Wuninitialized" // end != finish so there's always an initialization
+ // sentence_id must be non-decreasing. Iterators are over words in the phrase.
+ template <class Iterator> void AddPhrase(unsigned int sentence_id, const Iterator &begin, const Iterator &end) {
+ // Iterate over all substrings.
+ for (Iterator start = begin; start != end; ++start) {
+ Hash hash = 0;
+ SentenceRelation *relation;
+ for (Iterator finish = start; finish != end; ++finish) {
+ hash = util::MurmurHashNative(&hash, sizeof(uint64_t), *finish);
+ // Now hash is of [start, finish].
+ relation = &table_[hash];
+ AppendSentence(relation->substring, sentence_id);
+ if (start == begin) AppendSentence(relation->left, sentence_id);
+ }
+ AppendSentence(relation->right, sentence_id);
+ if (start == begin) AppendSentence(relation->phrase, sentence_id);
+ }
+ }
+
+ private:
+ void AppendSentence(std::vector<unsigned int> &vec, unsigned int sentence_id) {
+ if (vec.empty() || vec.back() != sentence_id) vec.push_back(sentence_id);
+ }
+
+ Table table_;
+ };
+
+ // Read a file with one sentence per line containing tab-delimited phrases of
+ // space-separated words.
+ unsigned int ReadMultiple(std::istream &in, Substrings &out);
+
+ namespace detail {
+ extern const StringPiece kEndSentence;
+
+ template <class Iterator> void MakeHashes(Iterator i, const Iterator &end, std::vector<Hash> &hashes) {
+ hashes.clear();
+ if (i == end) return;
+ // TODO: check strict phrase boundaries after <s> and before </s>. For now, just skip tags.
+ if ((i->data()[0] == '<') && (i->data()[i->size() - 1] == '>')) {
+ ++i;
+ }
+ for (; i != end && (*i != kEndSentence); ++i) {
+ hashes.push_back(util::MurmurHashNative(i->data(), i->size()));
+ }
+ }
+
+ class Vertex;
+ class Arc;
+
+ class ConditionCommon {
+ protected:
+ ConditionCommon(const Substrings &substrings);
+ ConditionCommon(const ConditionCommon &from);
+
+ ~ConditionCommon();
+
+ detail::Vertex &MakeGraph();
+
+ // Temporaries in PassNGram and Evaluate to avoid reallocation.
+ std::vector<Hash> hashes_;
+
+ private:
+ std::vector<detail::Vertex> vertices_;
+ std::vector<detail::Arc> arcs_;
+
+ const Substrings &substrings_;
+ };
+
+ } // namespace detail
+
+ class Union : public detail::ConditionCommon {
+ public:
+ explicit Union(const Substrings &substrings) : detail::ConditionCommon(substrings) {}
+
+ template <class Iterator> bool PassNGram(const Iterator &begin, const Iterator &end) {
+ detail::MakeHashes(begin, end, hashes_);
+ return hashes_.empty() || Evaluate();
+ }
+
+ private:
+ bool Evaluate();
+ };
+
+ class Multiple : public detail::ConditionCommon {
+ public:
+ explicit Multiple(const Substrings &substrings) : detail::ConditionCommon(substrings) {}
+
+ template <class Iterator, class Output> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line, Output &output) {
+ detail::MakeHashes(begin, end, hashes_);
+ if (hashes_.empty()) {
+ output.AddNGram(line);
+ } else {
+ Evaluate(line, output);
+ }
+ }
+
+ template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) {
+ AddNGram(util::TokenIter<util::SingleCharacter, true>(ngram, ' '), util::TokenIter<util::SingleCharacter, true>::end(), line, output);
+ }
+
+ void Flush() const {}
+
+ private:
+ template <class Output> void Evaluate(const StringPiece &line, Output &output);
+ };
+
+ } // namespace phrase
+ } // namespace lm
+ #endif // LM_FILTER_PHRASE_H
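The Substrings table keys each phrase substring by chaining MurmurHash over per-word hashes, so the key for [start, finish] is computed incrementally from the key for [start, finish-1]. A sketch of that chaining in isolation (hypothetical helper, not part of this commit; word hashes are assumed to be util::MurmurHashNative over the token bytes, as in detail::MakeHashes).

```cpp
#include "util/murmur_hash.hh"

#include <stdint.h>
#include <vector>

// Hypothetical helper: the table key for a run of words, given their hashes.
uint64_t KeyFor(const std::vector<uint64_t> &word_hashes) {
  uint64_t hash = 0;
  for (std::vector<uint64_t>::const_iterator i = word_hashes.begin(); i != word_hashes.end(); ++i) {
    // Same chaining as AddPhrase: hash the previous value, seeded by the word hash.
    hash = util::MurmurHashNative(&hash, sizeof(uint64_t), *i);
  }
  return hash;
}
```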
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/thread.hh ADDED
@@ -0,0 +1,167 @@
+ #ifndef LM_FILTER_THREAD_H
+ #define LM_FILTER_THREAD_H
+
+ #include "util/thread_pool.hh"
+
+ #include <boost/utility/in_place_factory.hpp>
+
+ #include <deque>
+ #include <stack>
+
+ namespace lm {
+
+ template <class OutputBuffer> class ThreadBatch {
+ public:
+ ThreadBatch() {}
+
+ void Reserve(size_t size) {
+ input_.Reserve(size);
+ output_.Reserve(size);
+ }
+
+ // File reading thread.
+ InputBuffer &Fill(uint64_t sequence) {
+ sequence_ = sequence;
+ // Why wait until now to clear instead of after output? free in the same
+ // thread as allocated.
+ input_.Clear();
+ return input_;
+ }
+
+ // Filter worker thread.
+ template <class Filter> void CallFilter(Filter &filter) {
+ input_.CallFilter(filter, output_);
+ }
+
+ uint64_t Sequence() const { return sequence_; }
+
+ // File writing thread.
+ template <class RealOutput> void Flush(RealOutput &output) {
+ output_.Flush(output);
+ }
+
+ private:
+ InputBuffer input_;
+ OutputBuffer output_;
+
+ uint64_t sequence_;
+ };
+
+ template <class Batch, class Filter> class FilterWorker {
+ public:
+ typedef Batch *Request;
+
+ FilterWorker(const Filter &filter, util::PCQueue<Request> &done) : filter_(filter), done_(done) {}
+
+ void operator()(Request request) {
+ request->CallFilter(filter_);
+ done_.Produce(request);
+ }
+
+ private:
+ Filter filter_;
+
+ util::PCQueue<Request> &done_;
+ };
+
+ // There should only be one OutputWorker.
+ template <class Batch, class Output> class OutputWorker {
+ public:
+ typedef Batch *Request;
+
+ OutputWorker(Output &output, util::PCQueue<Request> &done) : output_(output), done_(done), base_sequence_(0) {}
+
+ void operator()(Request request) {
+ assert(request->Sequence() >= base_sequence_);
+ // Assemble the output in order.
+ uint64_t pos = request->Sequence() - base_sequence_;
+ if (pos >= ordering_.size()) {
+ ordering_.resize(pos + 1, NULL);
+ }
+ ordering_[pos] = request;
+ while (!ordering_.empty() && ordering_.front()) {
+ ordering_.front()->Flush(output_);
+ done_.Produce(ordering_.front());
+ ordering_.pop_front();
+ ++base_sequence_;
+ }
+ }
+
+ private:
+ Output &output_;
+
+ util::PCQueue<Request> &done_;
+
+ std::deque<Request> ordering_;
+
+ uint64_t base_sequence_;
+ };
+
+ template <class Filter, class OutputBuffer, class RealOutput> class Controller : boost::noncopyable {
+ private:
+ typedef ThreadBatch<OutputBuffer> Batch;
+
+ public:
+ Controller(size_t batch_size, size_t queue, size_t workers, const Filter &filter, RealOutput &output)
+ : batch_size_(batch_size), queue_size_(queue),
+ batches_(queue),
+ to_read_(queue),
+ output_(queue, 1, boost::in_place(boost::ref(output), boost::ref(to_read_)), NULL),
+ filter_(queue, workers, boost::in_place(boost::ref(filter), boost::ref(output_.In())), NULL),
+ sequence_(0) {
+ for (size_t i = 0; i < queue; ++i) {
+ batches_[i].Reserve(batch_size);
+ local_read_.push(&batches_[i]);
+ }
+ NewInput();
+ }
+
+ void AddNGram(const StringPiece &ngram, const StringPiece &line, RealOutput &output) {
+ input_->AddNGram(ngram, line, output);
+ if (input_->Size() == batch_size_) {
+ FlushInput();
+ NewInput();
+ }
+ }
+
+ void Flush() {
+ FlushInput();
+ while (local_read_.size() < queue_size_) {
+ MoveRead();
+ }
+ NewInput();
+ }
+
+ private:
+ void FlushInput() {
+ if (input_->Empty()) return;
+ filter_.Produce(local_read_.top());
+ local_read_.pop();
+ if (local_read_.empty()) MoveRead();
+ }
+
+ void NewInput() {
+ input_ = &local_read_.top()->Fill(sequence_++);
+ }
+
+ void MoveRead() {
+ local_read_.push(to_read_.Consume());
+ }
+
+ const size_t batch_size_;
+ const size_t queue_size_;
+
+ std::vector<Batch> batches_;
+
+ util::PCQueue<Batch*> to_read_;
+ std::stack<Batch*> local_read_;
+ util::ThreadPool<OutputWorker<Batch, RealOutput> > output_;
+ util::ThreadPool<FilterWorker<Batch, Filter> > filter_;
+
+ uint64_t sequence_;
+ InputBuffer *input_;
+ };
+
+ } // namespace lm
+
+ #endif // LM_FILTER_THREAD_H
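Putting the pieces together: the Controller owns the thread pools, so a caller only constructs it and feeds AddNGram. A sketch (not part of this commit) using the filter and buffer types from the sibling headers; the batch, queue, and worker counts are illustrative.

```cpp
#include "lm/filter/format.hh"  // defines InputBuffer, which thread.hh expects
#include "lm/filter/thread.hh"
#include "lm/filter/vocab.hh"
#include "lm/filter/wrapper.hh"

void RunThreaded(const lm::vocab::Single::Words &words, lm::CountOutput &out) {
  typedef lm::BinaryFilter<lm::vocab::Single> Filter;
  Filter filter((lm::vocab::Single(words)));
  // 512 n-grams per batch, 2 batches in flight, 4 filter worker threads.
  lm::Controller<Filter, lm::BinaryOutputBuffer, lm::CountOutput>
      controller(512, 2, 4, filter, out);
  // Feed lines with controller.AddNGram(ngram, line, out), then
  // controller.Flush() to drain the remaining batches in order.
}
```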
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/vocab.hh ADDED
@@ -0,0 +1,133 @@
+ #ifndef LM_FILTER_VOCAB_H
+ #define LM_FILTER_VOCAB_H
+
+ // Vocabulary-based filters for language models.
+
+ #include "util/multi_intersection.hh"
+ #include "util/string_piece.hh"
+ #include "util/string_piece_hash.hh"
+ #include "util/tokenize_piece.hh"
+
+ #include <boost/noncopyable.hpp>
+ #include <boost/range/iterator_range.hpp>
+ #include <boost/unordered/unordered_map.hpp>
+ #include <boost/unordered/unordered_set.hpp>
+
+ #include <string>
+ #include <vector>
+
+ namespace lm {
+ namespace vocab {
+
+ void ReadSingle(std::istream &in, boost::unordered_set<std::string> &out);
+
+ // Read one sentence vocabulary per line. Return the number of sentences.
+ unsigned int ReadMultiple(std::istream &in, boost::unordered_map<std::string, std::vector<unsigned int> > &out);
+
+ /* Is this a special tag like <s> or <UNK>? This actually includes anything
+ * surrounded with < and >, which most tokenizers separate for real words, so
+ * this should not catch real words as it looks at a single token.
+ */
+ inline bool IsTag(const StringPiece &value) {
+ // The parser should never give an empty string.
+ assert(!value.empty());
+ return (value.data()[0] == '<' && value.data()[value.size() - 1] == '>');
+ }
+
+ class Single {
+ public:
+ typedef boost::unordered_set<std::string> Words;
+
+ explicit Single(const Words &vocab) : vocab_(vocab) {}
+
+ template <class Iterator> bool PassNGram(const Iterator &begin, const Iterator &end) {
+ for (Iterator i = begin; i != end; ++i) {
+ if (IsTag(*i)) continue;
+ if (FindStringPiece(vocab_, *i) == vocab_.end()) return false;
+ }
+ return true;
+ }
+
+ private:
+ const Words &vocab_;
+ };
+
+ class Union {
+ public:
+ typedef boost::unordered_map<std::string, std::vector<unsigned int> > Words;
+
+ explicit Union(const Words &vocabs) : vocabs_(vocabs) {}
+
+ template <class Iterator> bool PassNGram(const Iterator &begin, const Iterator &end) {
+ sets_.clear();
+
+ for (Iterator i(begin); i != end; ++i) {
+ if (IsTag(*i)) continue;
+ Words::const_iterator found(FindStringPiece(vocabs_, *i));
+ if (vocabs_.end() == found) return false;
+ sets_.push_back(boost::iterator_range<const unsigned int*>(&*found->second.begin(), &*found->second.end()));
+ }
+ return (sets_.empty() || util::FirstIntersection(sets_));
+ }
+
+ private:
+ const Words &vocabs_;
+
+ std::vector<boost::iterator_range<const unsigned int*> > sets_;
+ };
+
+ class Multiple {
+ public:
+ typedef boost::unordered_map<std::string, std::vector<unsigned int> > Words;
+
+ Multiple(const Words &vocabs) : vocabs_(vocabs) {}
+
+ private:
+ // Callback from AllIntersection that does AddNGram.
+ template <class Output> class Callback {
+ public:
+ Callback(Output &out, const StringPiece &line) : out_(out), line_(line) {}
+
+ void operator()(unsigned int index) {
+ out_.SingleAddNGram(index, line_);
+ }
+
+ private:
+ Output &out_;
+ const StringPiece &line_;
+ };
+
+ public:
+ template <class Iterator, class Output> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line, Output &output) {
+ sets_.clear();
+ for (Iterator i(begin); i != end; ++i) {
+ if (IsTag(*i)) continue;
+ Words::const_iterator found(FindStringPiece(vocabs_, *i));
+ if (vocabs_.end() == found) return;
+ sets_.push_back(boost::iterator_range<const unsigned int*>(&*found->second.begin(), &*found->second.end()));
+ }
+ if (sets_.empty()) {
+ output.AddNGram(line);
+ return;
+ }
+
+ Callback<Output> cb(output, line);
+ util::AllIntersection(sets_, cb);
+ }
+
+ template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) {
+ AddNGram(util::TokenIter<util::SingleCharacter, true>(ngram, ' '), util::TokenIter<util::SingleCharacter, true>::end(), line, output);
+ }
+
+ void Flush() const {}
+
+ private:
+ const Words &vocabs_;
+
+ std::vector<boost::iterator_range<const unsigned int*> > sets_;
+ };
+
+ } // namespace vocab
+ } // namespace lm
+
+ #endif // LM_FILTER_VOCAB_H
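A sketch of the single-vocabulary filter in isolation (hypothetical helper, not part of this commit), tokenizing the n-gram the same way the AddNGram overloads in this header do.

```cpp
#include "lm/filter/vocab.hh"

// Hypothetical helper: true iff every non-tag word of the space-separated
// n-gram appears in the vocabulary.
bool Keep(const lm::vocab::Single::Words &vocab, const StringPiece &ngram) {
  lm::vocab::Single filter(vocab);
  return filter.PassNGram(
      util::TokenIter<util::SingleCharacter, true>(ngram, ' '),
      util::TokenIter<util::SingleCharacter, true>::end());
}
```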
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/filter/wrapper.hh ADDED
@@ -0,0 +1,56 @@
+ #ifndef LM_FILTER_WRAPPER_H
+ #define LM_FILTER_WRAPPER_H
+
+ #include "util/string_piece.hh"
+ #include "util/tokenize_piece.hh"
+
+ #include <algorithm>
+ #include <string>
+ #include <vector>
+
+ namespace lm {
+
+ // Provide a single-output filter with the same interface as a
+ // multiple-output filter so clients code against one interface.
+ template <class Binary> class BinaryFilter {
+ public:
+ // Binary modes are just references (and a set) and it makes the API cleaner to copy them.
+ explicit BinaryFilter(Binary binary) : binary_(binary) {}
+
+ template <class Iterator, class Output> void AddNGram(const Iterator &begin, const Iterator &end, const StringPiece &line, Output &output) {
+ if (binary_.PassNGram(begin, end))
+ output.AddNGram(line);
+ }
+
+ template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) {
+ AddNGram(util::TokenIter<util::SingleCharacter, true>(ngram, ' '), util::TokenIter<util::SingleCharacter, true>::end(), line, output);
+ }
+
+ void Flush() const {}
+
+ private:
+ Binary binary_;
+ };
+
+ // Wrap another filter to pay attention only to context words.
+ template <class FilterT> class ContextFilter {
+ public:
+ typedef FilterT Filter;
+
+ explicit ContextFilter(Filter &backend) : backend_(backend) {}
+
+ template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) {
+ // Find beginning of string or last space.
+ const char *last_space;
+ for (last_space = ngram.data() + ngram.size() - 1; last_space > ngram.data() && *last_space != ' '; --last_space) {}
+ backend_.AddNGram(StringPiece(ngram.data(), last_space - ngram.data()), line, output);
+ }
+
+ void Flush() const {}
+
+ private:
+ Filter backend_;
+ };
+
+ } // namespace lm
+
+ #endif // LM_FILTER_WRAPPER_H
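A sketch of stacking the two wrappers (not part of this commit): ContextFilter strips the rightmost word, then BinaryFilter applies a yes/no vocabulary test to what remains. The output type is illustrative.

```cpp
#include "lm/filter/count_io.hh"
#include "lm/filter/vocab.hh"
#include "lm/filter/wrapper.hh"

void FilterContexts(const lm::vocab::Single::Words &vocab,
                    const StringPiece &ngram, const StringPiece &line,
                    lm::CountOutput &out) {
  lm::BinaryFilter<lm::vocab::Single> inner((lm::vocab::Single(vocab)));
  lm::ContextFilter<lm::BinaryFilter<lm::vocab::Single> > filter(inner);
  filter.AddNGram(ngram, line, out);  // Drops the last word before testing.
}
```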
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/interpolate/arpa_to_stream.hh ADDED
@@ -0,0 +1,38 @@
+ #include "lm/read_arpa.hh"
+ #include "util/file_piece.hh"
+
+ #include <vector>
+
+ #include <stdint.h>
+
+ namespace util { namespace stream { class ChainPositions; } }
+
+ namespace lm {
+
+ namespace ngram {
+ template <class T> class GrowableVocab;
+ class WriteUniqueWords;
+ } // namespace ngram
+
+ namespace interpolate {
+
+ class ARPAToStream {
+ public:
+ // Takes ownership of fd.
+ explicit ARPAToStream(int fd, ngram::GrowableVocab<ngram::WriteUniqueWords> &vocab);
+
+ std::size_t Order() const { return counts_.size(); }
+
+ const std::vector<uint64_t> &Counts() const { return counts_; }
+
+ void Run(const util::stream::ChainPositions &positions);
+
+ private:
+ util::FilePiece in_;
+
+ std::vector<uint64_t> counts_;
+
+ ngram::GrowableVocab<ngram::WriteUniqueWords> &vocab_;
+ };
+
+ }} // namespaces
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/left.hh ADDED
@@ -0,0 +1,216 @@
+ /* Efficient left and right language model state for sentence fragments.
+ * Intended usage:
+ * Store ChartState with every chart entry.
+ * To do a rule application:
+ * 1. Make a ChartState object for your new entry.
+ * 2. Construct RuleScore.
+ * 3. Going from left to right, call Terminal or NonTerminal.
+ * For terminals, just pass the vocab id.
+ * For non-terminals, pass that non-terminal's ChartState.
+ * If your decoder expects scores inclusive of subtree scores (i.e. you
+ * label entries with the highest-scoring path), pass the non-terminal's
+ * score as prob.
+ * If your decoder expects relative scores and will walk the chart later,
+ * pass prob = 0.0.
+ * In other words, the only effect of prob is that it gets added to the
+ * returned log probability.
+ * 4. Call Finish. It returns the log probability.
+ *
+ * There are a couple more details:
+ * Do not pass <s> to Terminal as it is formally not a word in the sentence,
+ * only context. Instead, call BeginSentence. If called, it should be the
+ * first call after RuleScore is constructed (since <s> is always the
+ * leftmost).
+ *
+ * If the leftmost RHS is a non-terminal, it's faster to call BeginNonTerminal.
+ *
+ * Hashing and sorting comparison operators are provided. All state objects
+ * are POD. If you intend to use memcmp on raw state objects, you must call
+ * ZeroRemaining first, as the value of array entries beyond length is
+ * otherwise undefined.
+ *
+ * Usage is of course not limited to chart decoding. Anything that generates
+ * sentence fragments missing left context could benefit. For example, a
+ * phrase-based decoder could pre-score phrases, storing ChartState with each
+ * phrase, even if hypotheses are generated left-to-right.
+ */
+
+ #ifndef LM_LEFT_H
+ #define LM_LEFT_H
+
+ #include "lm/max_order.hh"
+ #include "lm/state.hh"
+ #include "lm/return.hh"
+
+ #include "util/murmur_hash.hh"
+
+ #include <algorithm>
+
+ namespace lm {
+ namespace ngram {
+
+ template <class M> class RuleScore {
+ public:
+ explicit RuleScore(const M &model, ChartState &out) : model_(model), out_(&out), left_done_(false), prob_(0.0) {
+ out.left.length = 0;
+ out.right.length = 0;
+ }
+
+ void BeginSentence() {
+ out_->right = model_.BeginSentenceState();
+ // out_->left is empty.
+ left_done_ = true;
+ }
+
+ void Terminal(WordIndex word) {
+ State copy(out_->right);
+ FullScoreReturn ret(model_.FullScore(copy, word, out_->right));
+ if (left_done_) { prob_ += ret.prob; return; }
+ if (ret.independent_left) {
+ prob_ += ret.prob;
+ left_done_ = true;
+ return;
+ }
+ out_->left.pointers[out_->left.length++] = ret.extend_left;
+ prob_ += ret.rest;
+ if (out_->right.length != copy.length + 1)
+ left_done_ = true;
+ }
+
+ // Faster version of NonTerminal for the case where the rule begins with a non-terminal.
+ void BeginNonTerminal(const ChartState &in, float prob = 0.0) {
+ prob_ = prob;
+ *out_ = in;
+ left_done_ = in.left.full;
+ }
+
+ void NonTerminal(const ChartState &in, float prob = 0.0) {
+ prob_ += prob;
+
+ if (!in.left.length) {
+ if (in.left.full) {
+ for (const float *i = out_->right.backoff; i < out_->right.backoff + out_->right.length; ++i) prob_ += *i;
+ left_done_ = true;
+ out_->right = in.right;
+ }
+ return;
+ }
+
+ if (!out_->right.length) {
+ out_->right = in.right;
+ if (left_done_) {
+ prob_ += model_.UnRest(in.left.pointers, in.left.pointers + in.left.length, 1);
+ return;
+ }
+ if (out_->left.length) {
+ left_done_ = true;
+ } else {
+ out_->left = in.left;
+ left_done_ = in.left.full;
+ }
+ return;
+ }
+
+ float backoffs[KENLM_MAX_ORDER - 1], backoffs2[KENLM_MAX_ORDER - 1];
+ float *back = backoffs, *back2 = backoffs2;
+ unsigned char next_use = out_->right.length;
+
+ // First word
+ if (ExtendLeft(in, next_use, 1, out_->right.backoff, back)) return;
+
+ // Words after the first, so extending a bigram to begin with
+ for (unsigned char extend_length = 2; extend_length <= in.left.length; ++extend_length) {
+ if (ExtendLeft(in, next_use, extend_length, back, back2)) return;
+ std::swap(back, back2);
+ }
+
+ if (in.left.full) {
+ for (const float *i = back; i != back + next_use; ++i) prob_ += *i;
+ left_done_ = true;
+ out_->right = in.right;
+ return;
+ }
+
+ // Right state was minimized, so it's already independent of the new words to the left.
+ if (in.right.length < in.left.length) {
+ out_->right = in.right;
+ return;
+ }
+
+ // Shift existing words down.
+ for (WordIndex *i = out_->right.words + next_use - 1; i >= out_->right.words; --i) {
+ *(i + in.right.length) = *i;
+ }
+ // Add words from in.right.
+ std::copy(in.right.words, in.right.words + in.right.length, out_->right.words);
+ // Assemble backoff composed of the existing state's backoff followed by the new state's backoff.
+ std::copy(in.right.backoff, in.right.backoff + in.right.length, out_->right.backoff);
+ std::copy(back, back + next_use, out_->right.backoff + in.right.length);
+ out_->right.length = in.right.length + next_use;
+ }
+
+ float Finish() {
+ // An N-1-gram might extend left and right but we should still set full to true because it's an N-1-gram.
+ out_->left.full = left_done_ || (out_->left.length == model_.Order() - 1);
+ return prob_;
+ }
+
+ void Reset() {
+ prob_ = 0.0;
+ left_done_ = false;
+ out_->left.length = 0;
+ out_->right.length = 0;
+ }
+ void Reset(ChartState &replacement) {
+ out_ = &replacement;
+ Reset();
+ }
+
+ private:
+ bool ExtendLeft(const ChartState &in, unsigned char &next_use, unsigned char extend_length, const float *back_in, float *back_out) {
+ ProcessRet(model_.ExtendLeft(
+ out_->right.words, out_->right.words + next_use, // Words to extend into
+ back_in, // Backoffs to use
+ in.left.pointers[extend_length - 1], extend_length, // Words to be extended
+ back_out, // Backoffs for the next score
+ next_use)); // Length of n-gram to use in next scoring.
+ if (next_use != out_->right.length) {
+ left_done_ = true;
+ if (!next_use) {
+ // Early exit.
+ out_->right = in.right;
+ prob_ += model_.UnRest(in.left.pointers + extend_length, in.left.pointers + in.left.length, extend_length + 1);
+ return true;
+ }
+ }
+ // Continue scoring.
+ return false;
+ }
+
+ void ProcessRet(const FullScoreReturn &ret) {
+ if (left_done_) {
+ prob_ += ret.prob;
+ return;
+ }
+ if (ret.independent_left) {
+ prob_ += ret.prob;
+ left_done_ = true;
+ return;
+ }
+ out_->left.pointers[out_->left.length++] = ret.extend_left;
+ prob_ += ret.rest;
+ }
+
+ const M &model_;
+
+ ChartState *out_;
+
+ bool left_done_;
+
+ float prob_;
+ };
+
+ } // namespace ngram
+ } // namespace lm
+
+ #endif // LM_LEFT_H
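A sketch of the intended usage described at the top of this header (not part of this commit): score a rule with a terminal on each side of one non-terminal, assuming the default probing model and a previously built child ChartState.

```cpp
#include "lm/left.hh"
#include "lm/model.hh"

float ScoreRule(const lm::ngram::Model &model,
                const lm::ngram::ChartState &child, float child_score,
                lm::ngram::ChartState &out) {
  lm::ngram::RuleScore<lm::ngram::Model> scorer(model, out);
  const lm::ngram::Vocabulary &vocab = model.GetVocabulary();
  scorer.Terminal(vocab.Index("the"));
  scorer.NonTerminal(child, child_score);  // Subtree score folded into the total.
  scorer.Terminal(vocab.Index("ate"));
  return scorer.Finish();  // log10 probability, inclusive of child_score.
}
```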
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/lm_exception.hh ADDED
@@ -0,0 +1,50 @@
+ #ifndef LM_LM_EXCEPTION_H
+ #define LM_LM_EXCEPTION_H
+
+ // Named to avoid conflict with util/exception.hh.
+
+ #include "util/exception.hh"
+ #include "util/string_piece.hh"
+
+ #include <exception>
+ #include <string>
+
+ namespace lm {
+
+ typedef enum {THROW_UP, COMPLAIN, SILENT} WarningAction;
+
+ class ConfigException : public util::Exception {
+ public:
+ ConfigException() throw();
+ ~ConfigException() throw();
+ };
+
+ class LoadException : public util::Exception {
+ public:
+ virtual ~LoadException() throw();
+
+ protected:
+ LoadException() throw();
+ };
+
+ class FormatLoadException : public LoadException {
+ public:
+ FormatLoadException() throw();
+ ~FormatLoadException() throw();
+ };
+
+ class VocabLoadException : public LoadException {
+ public:
+ virtual ~VocabLoadException() throw();
+ VocabLoadException() throw();
+ };
+
+ class SpecialWordMissingException : public VocabLoadException {
+ public:
+ explicit SpecialWordMissingException() throw();
+ ~SpecialWordMissingException() throw();
+ };
+
+ } // namespace lm
+
+ #endif // LM_LM_EXCEPTION
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/max_order.hh ADDED
@@ -0,0 +1,13 @@
+ #ifndef LM_MAX_ORDER_H
+ #define LM_MAX_ORDER_H
+ /* IF YOUR BUILD SYSTEM PASSES -DKENLM_MAX_ORDER, THEN CHANGE THE BUILD SYSTEM.
+ * If not, this is the default maximum order.
+ * Having this limit means that State can be
+ * (kMaxOrder - 1) * sizeof(float) bytes instead of
+ * sizeof(float*) + (kMaxOrder - 1) * sizeof(float) + malloc overhead
+ */
+ #ifndef KENLM_ORDER_MESSAGE
+ #define KENLM_ORDER_MESSAGE "If your build system supports changing KENLM_MAX_ORDER, change it there and recompile. In the KenLM tarball or Moses, use e.g. `bjam --max-kenlm-order=6 -a'. Otherwise, edit lm/max_order.hh."
+ #endif
+
+ #endif // LM_MAX_ORDER_H
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model.hh ADDED
@@ -0,0 +1,156 @@
+ #ifndef LM_MODEL_H
+ #define LM_MODEL_H
+
+ #include "lm/bhiksha.hh"
+ #include "lm/binary_format.hh"
+ #include "lm/config.hh"
+ #include "lm/facade.hh"
+ #include "lm/quantize.hh"
+ #include "lm/search_hashed.hh"
+ #include "lm/search_trie.hh"
+ #include "lm/state.hh"
+ #include "lm/value.hh"
+ #include "lm/vocab.hh"
+ #include "lm/weights.hh"
+
+ #include "util/murmur_hash.hh"
+
+ #include <algorithm>
+ #include <vector>
+
+ #include <string.h>
+
+ namespace util { class FilePiece; }
+
+ namespace lm {
+ namespace ngram {
+ namespace detail {
+
+ // Should return the same results as SRI.
+ // ModelFacade typedefs Vocabulary so we use VocabularyT to avoid naming conflicts.
+ template <class Search, class VocabularyT> class GenericModel : public base::ModelFacade<GenericModel<Search, VocabularyT>, State, VocabularyT> {
+ private:
+ typedef base::ModelFacade<GenericModel<Search, VocabularyT>, State, VocabularyT> P;
+ public:
+ // This is the model type returned by RecognizeBinary.
+ static const ModelType kModelType;
+
+ static const unsigned int kVersion = Search::kVersion;
+
+ /* Get the size of memory that will be mapped given ngram counts. This
+ * does not include small non-mapped control structures, such as this class
+ * itself.
+ */
+ static uint64_t Size(const std::vector<uint64_t> &counts, const Config &config = Config());
+
+ /* Load the model from a file. It may be an ARPA or binary file. Binary
+ * files must have the format expected by this class or you'll get an
+ * exception. So TrieModel can only load ARPA or binary created by
+ * TrieModel. To classify binary files, call RecognizeBinary in
+ * lm/binary_format.hh.
+ */
+ explicit GenericModel(const char *file, const Config &config = Config());
+
+ /* Score p(new_word | in_state) and incorporate new_word into out_state.
+ * Note that in_state and out_state must be different references:
+ * &in_state != &out_state.
+ */
+ FullScoreReturn FullScore(const State &in_state, const WordIndex new_word, State &out_state) const;
+
+ /* Slower call without in_state. Try to remember state, but sometimes it
+ * would cost too much memory or your decoder isn't setup properly.
+ * To use this function, make an array of WordIndex containing the context
+ * vocabulary ids in reverse order. Then, pass the bounds of the array:
+ * [context_rbegin, context_rend). The new_word is not part of the context
+ * array unless you intend to repeat words.
+ */
+ FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const;
+
+ /* Get the state for a context. Don't use this if you can avoid it. Use
+ * BeginSentenceState or NullContextState and extend from those. If
+ * you're only going to use this state to call FullScore once, use
+ * FullScoreForgotState.
+ * To use this function, make an array of WordIndex containing the context
+ * vocabulary ids in reverse order. Then, pass the bounds of the array:
+ * [context_rbegin, context_rend).
+ */
+ void GetState(const WordIndex *context_rbegin, const WordIndex *context_rend, State &out_state) const;
+
+ /* More efficient version of FullScore where a partial n-gram has already
+ * been scored.
+ * NOTE: THE RETURNED .rest AND .prob ARE RELATIVE TO THE .rest RETURNED BEFORE.
+ */
+ FullScoreReturn ExtendLeft(
+ // Additional context in reverse order. This will update add_rend to
+ const WordIndex *add_rbegin, const WordIndex *add_rend,
+ // Backoff weights to use.
+ const float *backoff_in,
+ // extend_left returned by a previous query.
+ uint64_t extend_pointer,
+ // Length of n-gram that the pointer corresponds to.
+ unsigned char extend_length,
+ // Where to write additional backoffs for [extend_length + 1, min(Order() - 1, return.ngram_length)]
+ float *backoff_out,
+ // Amount of additional content that should be considered by the next call.
+ unsigned char &next_use) const;
+
+ /* Return probabilities minus rest costs for an array of pointers. The
+ * first length should be the length of the n-gram to which pointers_begin
+ * points.
+ */
+ float UnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const {
+ // Compiler should optimize this if away.
+ return Search::kDifferentRest ? InternalUnRest(pointers_begin, pointers_end, first_length) : 0.0;
+ }
+
+ private:
+ FullScoreReturn ScoreExceptBackoff(const WordIndex *const context_rbegin, const WordIndex *const context_rend, const WordIndex new_word, State &out_state) const;
+
+ // Score bigrams and above. Do not include backoff.
+ void ResumeScore(const WordIndex *context_rbegin, const WordIndex *const context_rend, unsigned char starting_order_minus_2, typename Search::Node &node, float *backoff_out, unsigned char &next_use, FullScoreReturn &ret) const;
+
+ // Appears after Size in the cc file.
+ void SetupMemory(void *start, const std::vector<uint64_t> &counts, const Config &config);
+
+ void InitializeFromARPA(int fd, const char *file, const Config &config);
+
+ float InternalUnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const;
+
+ BinaryFormat backing_;
+
+ VocabularyT vocab_;
+
+ Search search_;
+ };
+
+ } // namespace detail
+
+ // Instead of typedef, inherit. This allows the Model etc to be forward declared.
+ // Oh the joys of C and C++.
+ #define LM_COMMA() ,
+ #define LM_NAME_MODEL(name, from)\
+ class name : public from {\
+ public:\
+ name(const char *file, const Config &config = Config()) : from(file, config) {}\
+ };
+
+ LM_NAME_MODEL(ProbingModel, detail::GenericModel<detail::HashedSearch<BackoffValue> LM_COMMA() ProbingVocabulary>);
+ LM_NAME_MODEL(RestProbingModel, detail::GenericModel<detail::HashedSearch<RestValue> LM_COMMA() ProbingVocabulary>);
+ LM_NAME_MODEL(TrieModel, detail::GenericModel<trie::TrieSearch<DontQuantize LM_COMMA() trie::DontBhiksha> LM_COMMA() SortedVocabulary>);
+ LM_NAME_MODEL(ArrayTrieModel, detail::GenericModel<trie::TrieSearch<DontQuantize LM_COMMA() trie::ArrayBhiksha> LM_COMMA() SortedVocabulary>);
+ LM_NAME_MODEL(QuantTrieModel, detail::GenericModel<trie::TrieSearch<SeparatelyQuantize LM_COMMA() trie::DontBhiksha> LM_COMMA() SortedVocabulary>);
+ LM_NAME_MODEL(QuantArrayTrieModel, detail::GenericModel<trie::TrieSearch<SeparatelyQuantize LM_COMMA() trie::ArrayBhiksha> LM_COMMA() SortedVocabulary>);
+
+ // Default implementation. No real reason for it to be the default.
+ typedef ::lm::ngram::ProbingVocabulary Vocabulary;
+ typedef ProbingModel Model;
+
+ /* Autorecognize the file type, load, and return the virtual base class. Don't
+ * use the virtual base class if you can avoid it. Instead, use the above
+ * classes as template arguments to your own virtual feature function.*/
+ base::Model *LoadVirtual(const char *file_name, const Config &config = Config(), ModelType if_arpa = PROBING);
+
+ } // namespace ngram
+ } // namespace lm
+
+ #endif // LM_MODEL_H
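A minimal scoring loop against the default probing model, in the style of KenLM's usual example (not part of this commit; the file name is illustrative).

```cpp
#include "lm/model.hh"

#include <iostream>
#include <sstream>
#include <string>

int main() {
  lm::ngram::Model model("file.arpa");  // ARPA or binary, per the comment above
  lm::ngram::State state(model.BeginSentenceState()), out_state;
  const lm::ngram::Vocabulary &vocab = model.GetVocabulary();
  std::istringstream words("language modeling is fun .");
  std::string word;
  float total = 0.0;
  while (words >> word) {
    total += model.Score(state, vocab.Index(word), out_state);
    state = out_state;  // carry the context forward
  }
  total += model.Score(state, vocab.EndSentence(), out_state);
  std::cout << "Total log10 probability: " << total << '\n';
  return 0;
}
```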
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/model_type.hh ADDED
@@ -0,0 +1,23 @@
+ #ifndef LM_MODEL_TYPE_H
+ #define LM_MODEL_TYPE_H
+
+ namespace lm {
+ namespace ngram {
+
+ /* Not the best numbering system, but it grew this way for historical reasons
+ * and I want to preserve existing binary files. */
+ typedef enum {PROBING=0, REST_PROBING=1, TRIE=2, QUANT_TRIE=3, ARRAY_TRIE=4, QUANT_ARRAY_TRIE=5} ModelType;
+
+ // Historical names.
+ const ModelType HASH_PROBING = PROBING;
+ const ModelType TRIE_SORTED = TRIE;
+ const ModelType QUANT_TRIE_SORTED = QUANT_TRIE;
+ const ModelType ARRAY_TRIE_SORTED = ARRAY_TRIE;
+ const ModelType QUANT_ARRAY_TRIE_SORTED = QUANT_ARRAY_TRIE;
+
+ const static ModelType kQuantAdd = static_cast<ModelType>(QUANT_TRIE - TRIE);
+ const static ModelType kArrayAdd = static_cast<ModelType>(ARRAY_TRIE - TRIE);
+
+ } // namespace ngram
+ } // namespace lm
+ #endif // LM_MODEL_TYPE_H
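The offsets at the bottom exist so trie variants can be composed arithmetically rather than enumerated case by case. A sketch of the identity they encode (not part of this commit).

```cpp
#include "lm/model_type.hh"

#include <cassert>

void CheckComposition() {
  using namespace lm::ngram;
  // Base trie plus quantization offset plus Bhiksha-array offset:
  // 2 + 1 + 2 == 5 == QUANT_ARRAY_TRIE.
  ModelType composed = static_cast<ModelType>(TRIE + kQuantAdd + kArrayAdd);
  assert(composed == QUANT_ARRAY_TRIE);
}
```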
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/neural/wordvecs.hh ADDED
@@ -0,0 +1,38 @@
+ #ifndef LM_NEURAL_WORDVECS_H
+ #define LM_NEURAL_WORDVECS_H
+
+ #include "util/scoped.hh"
+ #include "lm/vocab.hh"
+
+ #include <Eigen/Dense>
+
+ namespace util { class FilePiece; }
+
+ namespace lm {
+ namespace neural {
+
+ class WordVecs {
+ public:
+ // Columns of the matrix are word vectors. The column index is the word.
+ typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> Storage;
+
+ /* The file should begin with a line stating the number of word vectors and
+ * the length of the vectors. Then it's followed by lines containing a
+ * word followed by floating-point values.
+ */
+ explicit WordVecs(util::FilePiece &in);
+
+ const Storage &Vectors() const { return vecs_; }
+
+ WordIndex Index(StringPiece str) const { return vocab_.Index(str); }
+
+ private:
+ util::scoped_malloc vocab_backing_;
+ ngram::ProbingVocabulary vocab_;
+
+ Storage vecs_;
+ };
+
+ }} // namespaces
+
+ #endif // LM_NEURAL_WORDVECS_H
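A loading sketch (not part of this commit). The data file is illustrative and follows the format in the constructor comment: a header line with the number of vectors and their length, then one word plus floats per line.

```cpp
// Example vectors.txt (hypothetical):
//   2 3
//   cat 0.1 0.2 0.3
//   dog 0.4 0.5 0.6
#include "lm/neural/wordvecs.hh"
#include "util/file_piece.hh"

void Demo() {
  util::FilePiece in("vectors.txt");
  lm::neural::WordVecs vecs(in);
  lm::WordIndex cat = vecs.Index("cat");
  // Columns are word vectors, so .col(word) is the embedding.
  Eigen::VectorXf embedding = vecs.Vectors().col(cat);
  (void)embedding;
}
```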
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/ngram_query.hh ADDED
@@ -0,0 +1,110 @@
+ #ifndef LM_NGRAM_QUERY_H
+ #define LM_NGRAM_QUERY_H
+
+ #include "lm/enumerate_vocab.hh"
+ #include "lm/model.hh"
+ #include "util/file_piece.hh"
+ #include "util/usage.hh"
+
+ #include <cstdlib>
+ #include <iostream>
+ #include <ostream>
+ #include <istream>
+ #include <string>
+
+ #include <math.h>
+
+ namespace lm {
+ namespace ngram {
+
+ struct BasicPrint {
+ void Word(StringPiece, WordIndex, const FullScoreReturn &) const {}
+ void Line(uint64_t oov, float total) const {
+ std::cout << "Total: " << total << " OOV: " << oov << '\n';
+ }
+ void Summary(double, double, uint64_t, uint64_t) {}
+
+ };
+
+ struct FullPrint : public BasicPrint {
+ void Word(StringPiece surface, WordIndex vocab, const FullScoreReturn &ret) const {
+ std::cout << surface << '=' << vocab << ' ' << static_cast<unsigned int>(ret.ngram_length) << ' ' << ret.prob << '\t';
+ }
+
+ void Summary(double ppl_including_oov, double ppl_excluding_oov, uint64_t corpus_oov, uint64_t corpus_tokens) {
+ std::cout <<
+ "Perplexity including OOVs:\t" << ppl_including_oov << "\n"
+ "Perplexity excluding OOVs:\t" << ppl_excluding_oov << "\n"
+ "OOVs:\t" << corpus_oov << "\n"
+ "Tokens:\t" << corpus_tokens << '\n'
+ ;
+ }
+ };
+
+ template <class Model, class Printer> void Query(const Model &model, bool sentence_context) {
+ Printer printer;
+ typename Model::State state, out;
+ lm::FullScoreReturn ret;
+ StringPiece word;
+
+ util::FilePiece in(0);
+
+ double corpus_total = 0.0;
+ double corpus_total_oov_only = 0.0;
+ uint64_t corpus_oov = 0;
+ uint64_t corpus_tokens = 0;
+
+ while (true) {
+ state = sentence_context ? model.BeginSentenceState() : model.NullContextState();
+ float total = 0.0;
+ uint64_t oov = 0;
+
+ while (in.ReadWordSameLine(word)) {
+ lm::WordIndex vocab = model.GetVocabulary().Index(word);
+ ret = model.FullScore(state, vocab, out);
+ if (vocab == model.GetVocabulary().NotFound()) {
+ ++oov;
+ corpus_total_oov_only += ret.prob;
+ }
+ total += ret.prob;
+ printer.Word(word, vocab, ret);
+ ++corpus_tokens;
+ state = out;
+ }
+ // If people don't have a newline after their last query, this won't add a </s>.
+ // Sue me.
+ try {
+ UTIL_THROW_IF('\n' != in.get(), util::Exception, "FilePiece is confused.");
+ } catch (const util::EndOfFileException &e) { break; }
+ if (sentence_context) {
+ ret = model.FullScore(state, model.GetVocabulary().EndSentence(), out);
+ total += ret.prob;
+ ++corpus_tokens;
+ printer.Word("</s>", model.GetVocabulary().EndSentence(), ret);
+ }
+ printer.Line(oov, total);
+ corpus_total += total;
+ corpus_oov += oov;
+ }
+ printer.Summary(
+ pow(10.0, -(corpus_total / static_cast<double>(corpus_tokens))), // PPL including OOVs
+ pow(10.0, -((corpus_total - corpus_total_oov_only) / static_cast<double>(corpus_tokens - corpus_oov))), // PPL excluding OOVs
+ corpus_oov,
+ corpus_tokens);
+ }
+
+ template <class Model> void Query(const char *file, const Config &config, bool sentence_context, bool show_words) {
+ Model model(file, config);
+ if (show_words) {
+ Query<Model, FullPrint>(model, sentence_context);
+ } else {
+ Query<Model, BasicPrint>(model, sentence_context);
+ }
+ }
+
+ } // namespace ngram
+ } // namespace lm
+
+ #endif // LM_NGRAM_QUERY_H
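The query binary reduces to one call of the second Query overload. A sketch of such a driver (not part of this commit; the file name and flags are illustrative), reading sentences from stdin as FilePiece(0) above does.

```cpp
#include "lm/ngram_query.hh"

int main() {
  lm::ngram::Config config;
  // sentence_context=true wraps each line in <s> ... </s>;
  // show_words=false selects BasicPrint over FullPrint.
  lm::ngram::Query<lm::ngram::Model>("file.binary", config, true, false);
  return 0;
}
```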
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/partial.hh ADDED
@@ -0,0 +1,167 @@
1
+ #ifndef LM_PARTIAL_H
2
+ #define LM_PARTIAL_H
3
+
4
+ #include "lm/return.hh"
5
+ #include "lm/state.hh"
6
+
7
+ #include <algorithm>
8
+
9
+ #include <assert.h>
10
+
11
+ namespace lm {
12
+ namespace ngram {
13
+
14
+ struct ExtendReturn {
15
+ float adjust;
16
+ bool make_full;
17
+ unsigned char next_use;
18
+ };
19
+
20
+ template <class Model> ExtendReturn ExtendLoop(
21
+ const Model &model,
22
+ unsigned char seen, const WordIndex *add_rbegin, const WordIndex *add_rend, const float *backoff_start,
23
+ const uint64_t *pointers, const uint64_t *pointers_end,
24
+ uint64_t *&pointers_write,
25
+ float *backoff_write) {
26
+ unsigned char add_length = add_rend - add_rbegin;
27
+
28
+ float backoff_buf[2][KENLM_MAX_ORDER - 1];
29
+ float *backoff_in = backoff_buf[0], *backoff_out = backoff_buf[1];
30
+ std::copy(backoff_start, backoff_start + add_length, backoff_in);
31
+
32
+ ExtendReturn value;
33
+ value.make_full = false;
34
+ value.adjust = 0.0;
35
+ value.next_use = add_length;
36
+
37
+ unsigned char i = 0;
38
+ unsigned char length = pointers_end - pointers;
39
+ // pointers_write is NULL means that the existing left state is full, so we should use completed probabilities.
40
+ if (pointers_write) {
41
+ // Using full context, writing to new left state.
42
+ for (; i < length; ++i) {
43
+ FullScoreReturn ret(model.ExtendLeft(
44
+ add_rbegin, add_rbegin + value.next_use,
45
+ backoff_in,
46
+ pointers[i], i + seen + 1,
47
+ backoff_out,
48
+ value.next_use));
49
+ std::swap(backoff_in, backoff_out);
50
+ if (ret.independent_left) {
51
+ value.adjust += ret.prob;
52
+ value.make_full = true;
53
+ ++i;
54
+ break;
55
+ }
56
+ value.adjust += ret.rest;
57
+ *pointers_write++ = ret.extend_left;
58
+ if (value.next_use != add_length) {
59
+ value.make_full = true;
60
+ ++i;
61
+ break;
62
+ }
63
+ }
64
+ }
65
+ // Using some of the new context.
66
+ for (; i < length && value.next_use; ++i) {
67
+ FullScoreReturn ret(model.ExtendLeft(
68
+ add_rbegin, add_rbegin + value.next_use,
69
+ backoff_in,
70
+ pointers[i], i + seen + 1,
71
+ backoff_out,
72
+ value.next_use));
73
+ std::swap(backoff_in, backoff_out);
74
+ value.adjust += ret.prob;
75
+ }
76
+ float unrest = model.UnRest(pointers + i, pointers_end, i + seen + 1);
77
+ // Using none of the new context.
78
+ value.adjust += unrest;
79
+
80
+ std::copy(backoff_in, backoff_in + value.next_use, backoff_write);
81
+ return value;
82
+ }
83
+
84
+ template <class Model> float RevealBefore(const Model &model, const Right &reveal, const unsigned char seen, bool reveal_full, Left &left, Right &right) {
85
+ assert(seen < reveal.length || reveal_full);
86
+ uint64_t *pointers_write = reveal_full ? NULL : left.pointers;
87
+ float backoff_buffer[KENLM_MAX_ORDER - 1];
88
+ ExtendReturn value(ExtendLoop(
89
+ model,
90
+ seen, reveal.words + seen, reveal.words + reveal.length, reveal.backoff + seen,
91
+ left.pointers, left.pointers + left.length,
92
+ pointers_write,
93
+ left.full ? backoff_buffer : (right.backoff + right.length)));
94
+ if (reveal_full) {
95
+ left.length = 0;
96
+ value.make_full = true;
97
+ } else {
98
+ left.length = pointers_write - left.pointers;
99
+ value.make_full |= (left.length == model.Order() - 1);
100
+ }
101
+ if (left.full) {
102
+ for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i];
103
+ } else {
104
+ // If left wasn't full when it came in, put words into right state.
105
+ std::copy(reveal.words + seen, reveal.words + seen + value.next_use, right.words + right.length);
106
+ right.length += value.next_use;
107
+ left.full = value.make_full || (right.length == model.Order() - 1);
108
+ }
109
+ return value.adjust;
110
+ }
111
+
112
+ template <class Model> float RevealAfter(const Model &model, Left &left, Right &right, const Left &reveal, unsigned char seen) {
113
+ assert(seen < reveal.length || reveal.full);
114
+ uint64_t *pointers_write = left.full ? NULL : (left.pointers + left.length);
115
+ ExtendReturn value(ExtendLoop(
116
+ model,
117
+ seen, right.words, right.words + right.length, right.backoff,
118
+ reveal.pointers + seen, reveal.pointers + reveal.length,
119
+ pointers_write,
120
+ right.backoff));
121
+ if (reveal.full) {
122
+ for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += right.backoff[i];
123
+ right.length = 0;
124
+ value.make_full = true;
125
+ } else {
126
+ right.length = value.next_use;
127
+ value.make_full |= (right.length == model.Order() - 1);
128
+ }
129
+ if (!left.full) {
130
+ left.length = pointers_write - left.pointers;
131
+ left.full = value.make_full || (left.length == model.Order() - 1);
132
+ }
133
+ return value.adjust;
134
+ }
135
+
136
+ template <class Model> float Subsume(const Model &model, Left &first_left, const Right &first_right, const Left &second_left, Right &second_right, const unsigned int between_length) {
137
+ assert(first_right.length < KENLM_MAX_ORDER);
138
+ assert(second_left.length < KENLM_MAX_ORDER);
139
+ assert(between_length < KENLM_MAX_ORDER - 1);
140
+ uint64_t *pointers_write = first_left.full ? NULL : (first_left.pointers + first_left.length);
141
+ float backoff_buffer[KENLM_MAX_ORDER - 1];
142
+ ExtendReturn value(ExtendLoop(
143
+ model,
144
+ between_length, first_right.words, first_right.words + first_right.length, first_right.backoff,
145
+ second_left.pointers, second_left.pointers + second_left.length,
146
+ pointers_write,
147
+ second_left.full ? backoff_buffer : (second_right.backoff + second_right.length)));
148
+ if (second_left.full) {
149
+ for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i];
150
+ } else {
151
+ std::copy(first_right.words, first_right.words + value.next_use, second_right.words + second_right.length);
152
+ second_right.length += value.next_use;
153
+ value.make_full |= (second_right.length == model.Order() - 1);
154
+ }
155
+ if (!first_left.full) {
156
+ first_left.length = pointers_write - first_left.pointers;
157
+ first_left.full = value.make_full || second_left.full || (first_left.length == model.Order() - 1);
158
+ }
159
+ assert(first_left.length < KENLM_MAX_ORDER);
160
+ assert(second_right.length < KENLM_MAX_ORDER);
161
+ return value.adjust;
162
+ }
163
+
164
+ } // namespace ngram
165
+ } // namespace lm
166
+
167
+ #endif // LM_PARTIAL_H
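A minimal usage sketch for these helpers, assuming a loaded model and two ChartState fragments already filled by scoring adjacent spans; the model path is hypothetical:

    lm::ngram::Model model("file.arpa");   // hypothetical path
    lm::ngram::ChartState a, b;            // assume filled elsewhere for adjacent spans
    // Glue the spans: a.left becomes the combined left state, b.right the
    // combined right state; the return value is the log10 score adjustment.
    float adjust = lm::ngram::Subsume(model, a.left, a.right, b.left, b.right, 0);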
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/quantize.hh ADDED
@@ -0,0 +1,233 @@
1
+ #ifndef LM_QUANTIZE_H
2
+ #define LM_QUANTIZE_H
3
+
4
+ #include "lm/blank.hh"
5
+ #include "lm/config.hh"
6
+ #include "lm/max_order.hh"
7
+ #include "lm/model_type.hh"
8
+ #include "util/bit_packing.hh"
9
+
10
+ #include <algorithm>
11
+ #include <vector>
12
+
13
+ #include <stdint.h>
14
+
15
+ #include <iostream>
16
+
17
+ namespace lm {
18
+ namespace ngram {
19
+
20
+ struct Config;
21
+ class BinaryFormat;
22
+
23
+ /* Store values directly and don't quantize. */
24
+ class DontQuantize {
25
+ public:
26
+ static const ModelType kModelTypeAdd = static_cast<ModelType>(0);
27
+ static void UpdateConfigFromBinary(const BinaryFormat &, uint64_t, Config &) {}
28
+ static uint64_t Size(uint8_t /*order*/, const Config &/*config*/) { return 0; }
29
+ static uint8_t MiddleBits(const Config &/*config*/) { return 63; }
30
+ static uint8_t LongestBits(const Config &/*config*/) { return 31; }
31
+
32
+ class MiddlePointer {
33
+ public:
34
+ MiddlePointer(const DontQuantize & /*quant*/, unsigned char /*order_minus_2*/, util::BitAddress address) : address_(address) {}
35
+
36
+ MiddlePointer() : address_(NULL, 0) {}
37
+
38
+ bool Found() const {
39
+ return address_.base != NULL;
40
+ }
41
+
42
+ float Prob() const {
43
+ return util::ReadNonPositiveFloat31(address_.base, address_.offset);
44
+ }
45
+
46
+ float Backoff() const {
47
+ return util::ReadFloat32(address_.base, address_.offset + 31);
48
+ }
49
+
50
+ float Rest() const { return Prob(); }
51
+
52
+ void Write(float prob, float backoff) {
53
+ util::WriteNonPositiveFloat31(address_.base, address_.offset, prob);
54
+ util::WriteFloat32(address_.base, address_.offset + 31, backoff);
55
+ }
56
+
57
+ private:
58
+ util::BitAddress address_;
59
+ };
60
+
61
+ class LongestPointer {
62
+ public:
63
+ explicit LongestPointer(const DontQuantize &/*quant*/, util::BitAddress address) : address_(address) {}
64
+
65
+ LongestPointer() : address_(NULL, 0) {}
66
+
67
+ bool Found() const {
68
+ return address_.base != NULL;
69
+ }
70
+
71
+ float Prob() const {
72
+ return util::ReadNonPositiveFloat31(address_.base, address_.offset);
73
+ }
74
+
75
+ void Write(float prob) {
76
+ util::WriteNonPositiveFloat31(address_.base, address_.offset, prob);
77
+ }
78
+
79
+ private:
80
+ util::BitAddress address_;
81
+ };
82
+
83
+ DontQuantize() {}
84
+
85
+ void SetupMemory(void * /*start*/, unsigned char /*order*/, const Config & /*config*/) {}
86
+
87
+ static const bool kTrain = false;
88
+ // These should never be called because kTrain is false.
89
+ void Train(uint8_t /*order*/, std::vector<float> &/*prob*/, std::vector<float> &/*backoff*/) {}
90
+ void TrainProb(uint8_t, std::vector<float> &/*prob*/) {}
91
+
92
+ void FinishedLoading(const Config &) {}
93
+ };
94
+
95
+ class SeparatelyQuantize {
96
+ private:
97
+ class Bins {
98
+ public:
99
+ // Sigh, C++ requires a default constructor here.
100
+ Bins() {}
101
+
102
+ Bins(uint8_t bits, float *begin) : begin_(begin), end_(begin_ + (1ULL << bits)), bits_(bits), mask_((1ULL << bits) - 1) {}
103
+
104
+ float *Populate() { return begin_; }
105
+
106
+ uint64_t EncodeProb(float value) const {
107
+ return Encode(value, 0);
108
+ }
109
+
110
+ uint64_t EncodeBackoff(float value) const {
111
+ if (value == 0.0) {
112
+ return HasExtension(value) ? kExtensionQuant : kNoExtensionQuant;
113
+ }
114
+ return Encode(value, 2);
115
+ }
116
+
117
+ float Decode(std::size_t off) const { return begin_[off]; }
118
+
119
+ uint8_t Bits() const { return bits_; }
120
+
121
+ uint64_t Mask() const { return mask_; }
122
+
123
+ private:
124
+ uint64_t Encode(float value, size_t reserved) const {
125
+ const float *above = std::lower_bound(static_cast<const float*>(begin_) + reserved, end_, value);
126
+ if (above == begin_ + reserved) return reserved;
127
+ if (above == end_) return end_ - begin_ - 1;
128
+ return above - begin_ - (value - *(above - 1) < *above - value);
129
+ }
130
+
131
+ float *begin_;
132
+ const float *end_;
133
+ uint8_t bits_;
134
+ uint64_t mask_;
135
+ };
136
+
137
+ public:
138
+ static const ModelType kModelTypeAdd = kQuantAdd;
139
+
140
+ static void UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config);
141
+
142
+ static uint64_t Size(uint8_t order, const Config &config) {
143
+ uint64_t longest_table = (static_cast<uint64_t>(1) << static_cast<uint64_t>(config.prob_bits)) * sizeof(float);
144
+ uint64_t middle_table = (static_cast<uint64_t>(1) << static_cast<uint64_t>(config.backoff_bits)) * sizeof(float) + longest_table;
145
+ // unigrams are currently not quantized so no need for a table.
146
+ return (order - 2) * middle_table + longest_table + /* for the bit counts and alignment padding */ 8;
147
+ }
148
+
149
+ static uint8_t MiddleBits(const Config &config) { return config.prob_bits + config.backoff_bits; }
150
+ static uint8_t LongestBits(const Config &config) { return config.prob_bits; }
151
+
152
+ class MiddlePointer {
153
+ public:
154
+ MiddlePointer(const SeparatelyQuantize &quant, unsigned char order_minus_2, const util::BitAddress &address) : bins_(quant.GetTables(order_minus_2)), address_(address) {}
155
+
156
+ MiddlePointer() : address_(NULL, 0) {}
157
+
158
+ bool Found() const { return address_.base != NULL; }
159
+
160
+ float Prob() const {
161
+ return ProbBins().Decode(util::ReadInt25(address_.base, address_.offset + BackoffBins().Bits(), ProbBins().Bits(), ProbBins().Mask()));
162
+ }
163
+
164
+ float Backoff() const {
165
+ return BackoffBins().Decode(util::ReadInt25(address_.base, address_.offset, BackoffBins().Bits(), BackoffBins().Mask()));
166
+ }
167
+
168
+ float Rest() const { return Prob(); }
169
+
170
+ void Write(float prob, float backoff) const {
171
+ util::WriteInt57(address_.base, address_.offset, ProbBins().Bits() + BackoffBins().Bits(),
172
+ (ProbBins().EncodeProb(prob) << BackoffBins().Bits()) | BackoffBins().EncodeBackoff(backoff));
173
+ }
174
+
175
+ private:
176
+ const Bins &ProbBins() const { return bins_[0]; }
177
+ const Bins &BackoffBins() const { return bins_[1]; }
178
+ const Bins *bins_;
179
+
180
+ util::BitAddress address_;
181
+ };
182
+
183
+ class LongestPointer {
184
+ public:
185
+ LongestPointer(const SeparatelyQuantize &quant, const util::BitAddress &address) : table_(&quant.LongestTable()), address_(address) {}
186
+
187
+ LongestPointer() : address_(NULL, 0) {}
188
+
189
+ bool Found() const { return address_.base != NULL; }
190
+
191
+ void Write(float prob) const {
192
+ util::WriteInt25(address_.base, address_.offset, table_->Bits(), table_->EncodeProb(prob));
193
+ }
194
+
195
+ float Prob() const {
196
+ return table_->Decode(util::ReadInt25(address_.base, address_.offset, table_->Bits(), table_->Mask()));
197
+ }
198
+
199
+ private:
200
+ const Bins *table_;
201
+ util::BitAddress address_;
202
+ };
203
+
204
+ SeparatelyQuantize() {}
205
+
206
+ void SetupMemory(void *start, unsigned char order, const Config &config);
207
+
208
+ static const bool kTrain = true;
209
+ // Assumes 0.0 is removed from backoff.
210
+ void Train(uint8_t order, std::vector<float> &prob, std::vector<float> &backoff);
211
+ // Train just probabilities (for longest order).
212
+ void TrainProb(uint8_t order, std::vector<float> &prob);
213
+
214
+ void FinishedLoading(const Config &config);
215
+
216
+ const Bins *GetTables(unsigned char order_minus_2) const { return tables_[order_minus_2]; }
217
+
218
+ const Bins &LongestTable() const { return longest_; }
219
+
220
+ private:
221
+ Bins tables_[KENLM_MAX_ORDER - 1][2];
222
+
223
+ Bins longest_;
224
+
225
+ uint8_t *actual_base_;
226
+
227
+ uint8_t prob_bits_, backoff_bits_;
228
+ };
229
+
230
+ } // namespace ngram
231
+ } // namespace lm
232
+
233
+ #endif // LM_QUANTIZE_H
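Bins::Encode maps a float to the index of the nearest trained bin center via std::lower_bound; a standalone sketch of that lookup, with made-up bin centers:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<float> centers = {-5.0f, -3.0f, -1.5f, -0.5f}; // ascending, as Train() produces
      float value = -2.0f;
      const float *begin = centers.data(), *end = begin + centers.size();
      const float *above = std::lower_bound(begin, end, value);
      std::size_t index;
      if (above == begin) index = 0;
      else if (above == end) index = centers.size() - 1;
      else index = (above - begin) - ((value - *(above - 1) < *above - value) ? 1 : 0);
      std::printf("%.2f -> bin %zu (center %.2f)\n", value, index, centers[index]);
    }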
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/read_arpa.hh ADDED
@@ -0,0 +1,95 @@
1
+ #ifndef LM_READ_ARPA_H
2
+ #define LM_READ_ARPA_H
3
+
4
+ #include "lm/lm_exception.hh"
5
+ #include "lm/word_index.hh"
6
+ #include "lm/weights.hh"
7
+ #include "util/file_piece.hh"
8
+
9
+ #include <cstddef>
10
+ #include <iosfwd>
11
+ #include <vector>
12
+
13
+ namespace lm {
14
+
15
+ void ReadARPACounts(util::FilePiece &in, std::vector<uint64_t> &number);
16
+ void ReadNGramHeader(util::FilePiece &in, unsigned int length);
17
+
18
+ void ReadBackoff(util::FilePiece &in, Prob &weights);
19
+ void ReadBackoff(util::FilePiece &in, float &backoff);
20
+ inline void ReadBackoff(util::FilePiece &in, ProbBackoff &weights) {
21
+ ReadBackoff(in, weights.backoff);
22
+ }
23
+ inline void ReadBackoff(util::FilePiece &in, RestWeights &weights) {
24
+ ReadBackoff(in, weights.backoff);
25
+ }
26
+
27
+ void ReadEnd(util::FilePiece &in);
28
+
29
+ extern const bool kARPASpaces[256];
30
+
31
+ // Positive log probability warning.
32
+ class PositiveProbWarn {
33
+ public:
34
+ PositiveProbWarn() : action_(THROW_UP) {}
35
+
36
+ explicit PositiveProbWarn(WarningAction action) : action_(action) {}
37
+
38
+ void Warn(float prob);
39
+
40
+ private:
41
+ WarningAction action_;
42
+ };
43
+
44
+ template <class Voc, class Weights> void Read1Gram(util::FilePiece &f, Voc &vocab, Weights *unigrams, PositiveProbWarn &warn) {
45
+ try {
46
+ float prob = f.ReadFloat();
47
+ if (prob > 0.0) {
48
+ warn.Warn(prob);
49
+ prob = 0.0;
50
+ }
51
+ UTIL_THROW_IF(f.get() != '\t', FormatLoadException, "Expected tab after probability");
52
+ WordIndex word = vocab.Insert(f.ReadDelimited(kARPASpaces));
53
+ Weights &w = unigrams[word];
54
+ w.prob = prob;
55
+ ReadBackoff(f, w);
56
+ } catch(util::Exception &e) {
57
+ e << " in the 1-gram at byte " << f.Offset();
58
+ throw;
59
+ }
60
+ }
61
+
62
+ template <class Voc, class Weights> void Read1Grams(util::FilePiece &f, std::size_t count, Voc &vocab, Weights *unigrams, PositiveProbWarn &warn) {
63
+ ReadNGramHeader(f, 1);
64
+ for (std::size_t i = 0; i < count; ++i) {
65
+ Read1Gram(f, vocab, unigrams, warn);
66
+ }
67
+ vocab.FinishedLoading(unigrams);
68
+ }
69
+
70
+ // Read ngram, write vocab ids to indices_out.
71
+ template <class Voc, class Weights, class Iterator> void ReadNGram(util::FilePiece &f, const unsigned char n, const Voc &vocab, Iterator indices_out, Weights &weights, PositiveProbWarn &warn) {
72
+ try {
73
+ weights.prob = f.ReadFloat();
74
+ if (weights.prob > 0.0) {
75
+ warn.Warn(weights.prob);
76
+ weights.prob = 0.0;
77
+ }
78
+ for (unsigned char i = 0; i < n; ++i, ++indices_out) {
79
+ StringPiece word(f.ReadDelimited(kARPASpaces));
80
+ WordIndex index = vocab.Index(word);
81
+ *indices_out = index;
82
+ // Check for words mapped to <unk> that are not the string <unk>.
83
+ UTIL_THROW_IF(index == 0 /* mapped to <unk> */ && (word != StringPiece("<unk>", 5)) && (word != StringPiece("<UNK>", 5)),
84
+ FormatLoadException, "Word " << word << " was not seen in the unigrams (which are supposed to list the entire vocabulary) but appears");
85
+ }
86
+ ReadBackoff(f, weights);
87
+ } catch(util::Exception &e) {
88
+ e << " in the " << static_cast<unsigned int>(n) << "-gram at byte " << f.Offset();
89
+ throw;
90
+ }
91
+ }
92
+
93
+ } // namespace lm
94
+
95
+ #endif // LM_READ_ARPA_H
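A hedged sketch of the header-reading flow these functions implement; "model.arpa" is a placeholder path:

    util::FilePiece f("model.arpa");
    std::vector<uint64_t> counts;
    lm::ReadARPACounts(f, counts);   // parses the \data\ section: counts[n-1] = number of n-grams
    lm::ReadNGramHeader(f, 1);       // consumes the "\1-grams:" header
    // Read1Grams / ReadNGram then iterate counts[n-1] entries per order,
    // and ReadEnd(f) verifies the trailing "\end\" marker.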
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/return.hh ADDED
@@ -0,0 +1,42 @@
1
+ #ifndef LM_RETURN_H
2
+ #define LM_RETURN_H
3
+
4
+ #include <stdint.h>
5
+
6
+ namespace lm {
7
+ /* Structure returned by scoring routines. */
8
+ struct FullScoreReturn {
9
+ // log10 probability
10
+ float prob;
11
+
12
+ /* The length of n-gram matched. Do not use this for recombination.
13
+ * Consider a model containing only the following n-grams:
14
+ * -1 foo
15
+ * -3.14 bar
16
+ * -2.718 baz -5
17
+ * -6 foo bar
18
+ *
19
+ * If you score ``bar'' then ngram_length is 1 and recombination state is the
20
+ * empty string because bar has zero backoff and does not extend to the
21
+ * right.
22
+ * If you score ``foo'' then ngram_length is 1 and recombination state is
23
+ * ``foo''.
24
+ *
25
+ * Ideally, keep output states around and compare them. Failing that,
26
+ * get out_state.ValidLength() and use that length for recombination.
27
+ */
28
+ unsigned char ngram_length;
29
+
30
+ /* Left extension information. If independent_left is set, then prob is
31
+ * independent of words to the left (up to additional backoff). Otherwise,
32
+ * extend_left indicates how to efficiently extend further to the left.
33
+ */
34
+ bool independent_left;
35
+ uint64_t extend_left; // Defined only if independent_left
36
+
37
+ // Rest cost for extension to the left.
38
+ float rest;
39
+ };
40
+
41
+ } // namespace lm
42
+ #endif // LM_RETURN_H
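Sketch of how a decoder typically consumes this struct; model, states, and word are assumed to exist, and FullScore is the Model API declared in model.hh:

    lm::FullScoreReturn ret = model.FullScore(in_state, word, out_state);
    total += ret.prob;                  // log10 p(word | context)
    if (ret.independent_left) {
      // Additional left context cannot change ret.prob (beyond backoff),
      // so hypotheses can recombine more aggressively.
    }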
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_hashed.hh ADDED
@@ -0,0 +1,192 @@
1
+ #ifndef LM_SEARCH_HASHED_H
2
+ #define LM_SEARCH_HASHED_H
3
+
4
+ #include "lm/model_type.hh"
5
+ #include "lm/config.hh"
6
+ #include "lm/read_arpa.hh"
7
+ #include "lm/return.hh"
8
+ #include "lm/weights.hh"
9
+
10
+ #include "util/bit_packing.hh"
11
+ #include "util/probing_hash_table.hh"
12
+
13
+ #include <algorithm>
14
+ #include <iostream>
15
+ #include <vector>
16
+
17
+ namespace util { class FilePiece; }
18
+
19
+ namespace lm {
20
+ namespace ngram {
21
+ class BinaryFormat;
22
+ class ProbingVocabulary;
23
+ namespace detail {
24
+
25
+ inline uint64_t CombineWordHash(uint64_t current, const WordIndex next) {
26
+ uint64_t ret = (current * 8978948897894561157ULL) ^ (static_cast<uint64_t>(1 + next) * 17894857484156487943ULL);
27
+ return ret;
28
+ }
29
+
30
+ #pragma pack(push)
31
+ #pragma pack(4)
32
+ struct ProbEntry {
33
+ uint64_t key;
34
+ Prob value;
35
+ typedef uint64_t Key;
36
+ typedef Prob Value;
37
+ uint64_t GetKey() const {
38
+ return key;
39
+ }
40
+ };
41
+
42
+ #pragma pack(pop)
43
+
44
+ class LongestPointer {
45
+ public:
46
+ explicit LongestPointer(const float &to) : to_(&to) {}
47
+
48
+ LongestPointer() : to_(NULL) {}
49
+
50
+ bool Found() const {
51
+ return to_ != NULL;
52
+ }
53
+
54
+ float Prob() const {
55
+ return *to_;
56
+ }
57
+
58
+ private:
59
+ const float *to_;
60
+ };
61
+
62
+ template <class Value> class HashedSearch {
63
+ public:
64
+ typedef uint64_t Node;
65
+
66
+ typedef typename Value::ProbingProxy UnigramPointer;
67
+ typedef typename Value::ProbingProxy MiddlePointer;
68
+ typedef ::lm::ngram::detail::LongestPointer LongestPointer;
69
+
70
+ static const ModelType kModelType = Value::kProbingModelType;
71
+ static const bool kDifferentRest = Value::kDifferentRest;
72
+ static const unsigned int kVersion = 0;
73
+
74
+ // TODO: move probing_multiplier here with next binary file format update.
75
+ static void UpdateConfigFromBinary(const BinaryFormat &, const std::vector<uint64_t> &, uint64_t, Config &) {}
76
+
77
+ static uint64_t Size(const std::vector<uint64_t> &counts, const Config &config) {
78
+ uint64_t ret = Unigram::Size(counts[0]);
79
+ for (unsigned char n = 1; n < counts.size() - 1; ++n) {
80
+ ret += Middle::Size(counts[n], config.probing_multiplier);
81
+ }
82
+ return ret + Longest::Size(counts.back(), config.probing_multiplier);
83
+ }
84
+
85
+ uint8_t *SetupMemory(uint8_t *start, const std::vector<uint64_t> &counts, const Config &config);
86
+
87
+ void InitializeFromARPA(const char *file, util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, ProbingVocabulary &vocab, BinaryFormat &backing);
88
+
89
+ unsigned char Order() const {
90
+ return middle_.size() + 2;
91
+ }
92
+
93
+ typename Value::Weights &UnknownUnigram() { return unigram_.Unknown(); }
94
+
95
+ UnigramPointer LookupUnigram(WordIndex word, Node &next, bool &independent_left, uint64_t &extend_left) const {
96
+ extend_left = static_cast<uint64_t>(word);
97
+ next = extend_left;
98
+ UnigramPointer ret(unigram_.Lookup(word));
99
+ independent_left = ret.IndependentLeft();
100
+ return ret;
101
+ }
102
+
103
+ MiddlePointer Unpack(uint64_t extend_pointer, unsigned char extend_length, Node &node) const {
104
+ node = extend_pointer;
105
+ return MiddlePointer(middle_[extend_length - 2].MustFind(extend_pointer)->value);
106
+ }
107
+
108
+ MiddlePointer LookupMiddle(unsigned char order_minus_2, WordIndex word, Node &node, bool &independent_left, uint64_t &extend_pointer) const {
109
+ node = CombineWordHash(node, word);
110
+ typename Middle::ConstIterator found;
111
+ if (!middle_[order_minus_2].Find(node, found)) {
112
+ independent_left = true;
113
+ return MiddlePointer();
114
+ }
115
+ extend_pointer = node;
116
+ MiddlePointer ret(found->value);
117
+ independent_left = ret.IndependentLeft();
118
+ return ret;
119
+ }
120
+
121
+ LongestPointer LookupLongest(WordIndex word, const Node &node) const {
122
+ // Sign bit is always on because longest n-grams do not extend left.
123
+ typename Longest::ConstIterator found;
124
+ if (!longest_.Find(CombineWordHash(node, word), found)) return LongestPointer();
125
+ return LongestPointer(found->value.prob);
126
+ }
127
+
128
+ // Generate a node without necessarily checking that it actually exists.
129
+ // Optionally return false if it's known not to exist.
130
+ bool FastMakeNode(const WordIndex *begin, const WordIndex *end, Node &node) const {
131
+ assert(begin != end);
132
+ node = static_cast<Node>(*begin);
133
+ for (const WordIndex *i = begin + 1; i < end; ++i) {
134
+ node = CombineWordHash(node, *i);
135
+ }
136
+ return true;
137
+ }
138
+
139
+ private:
140
+ // Interpret config's rest cost build policy and pass the right template argument to ApplyBuild.
141
+ void DispatchBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const Config &config, const ProbingVocabulary &vocab, PositiveProbWarn &warn);
142
+
143
+ template <class Build> void ApplyBuild(util::FilePiece &f, const std::vector<uint64_t> &counts, const ProbingVocabulary &vocab, PositiveProbWarn &warn, const Build &build);
144
+
145
+ class Unigram {
146
+ public:
147
+ Unigram() {}
148
+
149
+ Unigram(void *start, uint64_t count) :
150
+ unigram_(static_cast<typename Value::Weights*>(start))
151
+ #ifdef DEBUG
152
+ , count_(count)
153
+ #endif
154
+ {}
155
+
156
+ static uint64_t Size(uint64_t count) {
157
+ return (count + 1) * sizeof(typename Value::Weights); // +1 for hallucinate <unk>
158
+ }
159
+
160
+ const typename Value::Weights &Lookup(WordIndex index) const {
161
+ #ifdef DEBUG
162
+ assert(index < count_);
163
+ #endif
164
+ return unigram_[index];
165
+ }
166
+
167
+ typename Value::Weights &Unknown() { return unigram_[0]; }
168
+
169
+ // For building.
170
+ typename Value::Weights *Raw() { return unigram_; }
171
+
172
+ private:
173
+ typename Value::Weights *unigram_;
174
+ #ifdef DEBUG
175
+ uint64_t count_;
176
+ #endif
177
+ };
178
+
179
+ Unigram unigram_;
180
+
181
+ typedef util::ProbingHashTable<typename Value::ProbingEntry, util::IdentityHash> Middle;
182
+ std::vector<Middle> middle_;
183
+
184
+ typedef util::ProbingHashTable<ProbEntry, util::IdentityHash> Longest;
185
+ Longest longest_;
186
+ };
187
+
188
+ } // namespace detail
189
+ } // namespace ngram
190
+ } // namespace lm
191
+
192
+ #endif // LM_SEARCH_HASHED_H
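The hashed search keys each n-gram by rolling CombineWordHash over its words, exactly as FastMakeNode does; in isolation, with made-up word ids:

    uint64_t node = static_cast<uint64_t>(42);            // unigram id seeds the key
    node = lm::ngram::detail::CombineWordHash(node, 7);   // key for the bigram (42, 7)
    node = lm::ngram::detail::CombineWordHash(node, 13);  // key for the trigram (42, 7, 13)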
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/search_trie.hh ADDED
@@ -0,0 +1,130 @@
1
+ #ifndef LM_SEARCH_TRIE_H
2
+ #define LM_SEARCH_TRIE_H
3
+
4
+ #include "lm/config.hh"
5
+ #include "lm/model_type.hh"
6
+ #include "lm/return.hh"
7
+ #include "lm/trie.hh"
8
+ #include "lm/weights.hh"
9
+
10
+ #include "util/file.hh"
11
+ #include "util/file_piece.hh"
12
+
13
+ #include <vector>
14
+ #include <cstdlib>
15
+
16
+ #include <assert.h>
17
+
18
+ namespace lm {
19
+ namespace ngram {
20
+ class BinaryFormat;
21
+ class SortedVocabulary;
22
+ namespace trie {
23
+
24
+ template <class Quant, class Bhiksha> class TrieSearch;
25
+ class SortedFiles;
26
+ template <class Quant, class Bhiksha> void BuildTrie(SortedFiles &files, std::vector<uint64_t> &counts, const Config &config, TrieSearch<Quant, Bhiksha> &out, Quant &quant, SortedVocabulary &vocab, BinaryFormat &backing);
27
+
28
+ template <class Quant, class Bhiksha> class TrieSearch {
29
+ public:
30
+ typedef NodeRange Node;
31
+
32
+ typedef ::lm::ngram::trie::UnigramPointer UnigramPointer;
33
+ typedef typename Quant::MiddlePointer MiddlePointer;
34
+ typedef typename Quant::LongestPointer LongestPointer;
35
+
36
+ static const bool kDifferentRest = false;
37
+
38
+ static const ModelType kModelType = static_cast<ModelType>(TRIE_SORTED + Quant::kModelTypeAdd + Bhiksha::kModelTypeAdd);
39
+
40
+ static const unsigned int kVersion = 1;
41
+
42
+ static void UpdateConfigFromBinary(const BinaryFormat &file, const std::vector<uint64_t> &counts, uint64_t offset, Config &config) {
43
+ Quant::UpdateConfigFromBinary(file, offset, config);
44
+ // Currently the unigram pointers are not compressed, so there will only be a header for order > 2.
45
+ if (counts.size() > 2)
46
+ Bhiksha::UpdateConfigFromBinary(file, offset + Quant::Size(counts.size(), config) + Unigram::Size(counts[0]), config);
47
+ }
48
+
49
+ static uint64_t Size(const std::vector<uint64_t> &counts, const Config &config) {
50
+ uint64_t ret = Quant::Size(counts.size(), config) + Unigram::Size(counts[0]);
51
+ for (unsigned char i = 1; i < counts.size() - 1; ++i) {
52
+ ret += Middle::Size(Quant::MiddleBits(config), counts[i], counts[0], counts[i+1], config);
53
+ }
54
+ return ret + Longest::Size(Quant::LongestBits(config), counts.back(), counts[0]);
55
+ }
56
+
57
+ TrieSearch() : middle_begin_(NULL), middle_end_(NULL) {}
58
+
59
+ ~TrieSearch() { FreeMiddles(); }
60
+
61
+ uint8_t *SetupMemory(uint8_t *start, const std::vector<uint64_t> &counts, const Config &config);
62
+
63
+ void InitializeFromARPA(const char *file, util::FilePiece &f, std::vector<uint64_t> &counts, const Config &config, SortedVocabulary &vocab, BinaryFormat &backing);
64
+
65
+ unsigned char Order() const {
66
+ return middle_end_ - middle_begin_ + 2;
67
+ }
68
+
69
+ ProbBackoff &UnknownUnigram() { return unigram_.Unknown(); }
70
+
71
+ UnigramPointer LookupUnigram(WordIndex word, Node &next, bool &independent_left, uint64_t &extend_left) const {
72
+ extend_left = static_cast<uint64_t>(word);
73
+ UnigramPointer ret(unigram_.Find(word, next));
74
+ independent_left = (next.begin == next.end);
75
+ return ret;
76
+ }
77
+
78
+ MiddlePointer Unpack(uint64_t extend_pointer, unsigned char extend_length, Node &node) const {
79
+ return MiddlePointer(quant_, extend_length - 2, middle_begin_[extend_length - 2].ReadEntry(extend_pointer, node));
80
+ }
81
+
82
+ MiddlePointer LookupMiddle(unsigned char order_minus_2, WordIndex word, Node &node, bool &independent_left, uint64_t &extend_left) const {
83
+ util::BitAddress address(middle_begin_[order_minus_2].Find(word, node, extend_left));
84
+ independent_left = (address.base == NULL) || (node.begin == node.end);
85
+ return MiddlePointer(quant_, order_minus_2, address);
86
+ }
87
+
88
+ LongestPointer LookupLongest(WordIndex word, const Node &node) const {
89
+ return LongestPointer(quant_, longest_.Find(word, node));
90
+ }
91
+
92
+ bool FastMakeNode(const WordIndex *begin, const WordIndex *end, Node &node) const {
93
+ assert(begin != end);
94
+ bool independent_left;
95
+ uint64_t ignored;
96
+ LookupUnigram(*begin, node, independent_left, ignored);
97
+ for (const WordIndex *i = begin + 1; i < end; ++i) {
98
+ if (independent_left || !LookupMiddle(i - begin - 1, *i, node, independent_left, ignored).Found()) return false;
99
+ }
100
+ return true;
101
+ }
102
+
103
+ private:
104
+ friend void BuildTrie<Quant, Bhiksha>(SortedFiles &files, std::vector<uint64_t> &counts, const Config &config, TrieSearch<Quant, Bhiksha> &out, Quant &quant, SortedVocabulary &vocab, BinaryFormat &backing);
105
+
106
+ // Middles are managed manually so we can delay construction and they don't have to be copyable.
107
+ void FreeMiddles() {
108
+ for (const Middle *i = middle_begin_; i != middle_end_; ++i) {
109
+ i->~Middle();
110
+ }
111
+ std::free(middle_begin_);
112
+ }
113
+
114
+ typedef trie::BitPackedMiddle<Bhiksha> Middle;
115
+
116
+ typedef trie::BitPackedLongest Longest;
117
+ Longest longest_;
118
+
119
+ Middle *middle_begin_, *middle_end_;
120
+ Quant quant_;
121
+
122
+ typedef ::lm::ngram::trie::Unigram Unigram;
123
+ Unigram unigram_;
124
+ };
125
+
126
+ } // namespace trie
127
+ } // namespace ngram
128
+ } // namespace lm
129
+
130
+ #endif // LM_SEARCH_TRIE_H
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/sizes.hh ADDED
@@ -0,0 +1,17 @@
1
+ #ifndef LM_SIZES_H
2
+ #define LM_SIZES_H
3
+
4
+ #include <vector>
5
+
6
+ #include <stdint.h>
7
+
8
+ namespace lm { namespace ngram {
9
+
10
+ struct Config;
11
+
12
+ void ShowSizes(const std::vector<uint64_t> &counts, const lm::ngram::Config &config);
13
+ void ShowSizes(const std::vector<uint64_t> &counts);
14
+ void ShowSizes(const char *file, const lm::ngram::Config &config);
15
+
16
+ }} // namespaces
17
+ #endif // LM_SIZES_H
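Usage sketch; the counts are invented:

    std::vector<uint64_t> counts;   // e.g. from lm::ReadARPACounts
    counts.push_back(1000000);      // unigrams
    counts.push_back(5000000);      // bigrams
    counts.push_back(8000000);      // trigrams
    lm::ngram::Config config;
    lm::ngram::ShowSizes(counts, config);  // prints projected memory per data structure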
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/state.hh ADDED
@@ -0,0 +1,125 @@
1
+ #ifndef LM_STATE_H
2
+ #define LM_STATE_H
3
+
4
+ #include "lm/max_order.hh"
5
+ #include "lm/word_index.hh"
6
+ #include "util/murmur_hash.hh"
7
+
8
+ #include <string.h>
9
+
10
+ namespace lm {
11
+ namespace ngram {
12
+
13
+ // This is a POD but if you want memcmp to return the same as operator==, call
14
+ // ZeroRemaining first.
15
+ class State {
16
+ public:
17
+ bool operator==(const State &other) const {
18
+ if (length != other.length) return false;
19
+ return !memcmp(words, other.words, length * sizeof(WordIndex));
20
+ }
21
+
22
+ // Three way comparison function.
23
+ int Compare(const State &other) const {
24
+ if (length != other.length) return length < other.length ? -1 : 1;
25
+ return memcmp(words, other.words, length * sizeof(WordIndex));
26
+ }
27
+
28
+ bool operator<(const State &other) const {
29
+ if (length != other.length) return length < other.length;
30
+ return memcmp(words, other.words, length * sizeof(WordIndex)) < 0;
31
+ }
32
+
33
+ // Call this before using raw memcmp.
34
+ void ZeroRemaining() {
35
+ for (unsigned char i = length; i < KENLM_MAX_ORDER - 1; ++i) {
36
+ words[i] = 0;
37
+ backoff[i] = 0.0;
38
+ }
39
+ }
40
+
41
+ unsigned char Length() const { return length; }
42
+
43
+ // You shouldn't need to touch anything below this line, but the members are public so FullState will qualify as a POD.
44
+ // This order minimizes total size of the struct if WordIndex is 64 bit, float is 32 bit, and alignment of 64 bit integers is 64 bit.
45
+ WordIndex words[KENLM_MAX_ORDER - 1];
46
+ float backoff[KENLM_MAX_ORDER - 1];
47
+ unsigned char length;
48
+ };
49
+
50
+ typedef State Right;
51
+
52
+ inline uint64_t hash_value(const State &state, uint64_t seed = 0) {
53
+ return util::MurmurHashNative(state.words, sizeof(WordIndex) * state.length, seed);
54
+ }
55
+
56
+ struct Left {
57
+ bool operator==(const Left &other) const {
58
+ return
59
+ length == other.length &&
60
+ (!length || (pointers[length - 1] == other.pointers[length - 1] && full == other.full));
61
+ }
62
+
63
+ int Compare(const Left &other) const {
64
+ if (length < other.length) return -1;
65
+ if (length > other.length) return 1;
66
+ if (length == 0) return 0; // Must be full.
67
+ if (pointers[length - 1] > other.pointers[length - 1]) return 1;
68
+ if (pointers[length - 1] < other.pointers[length - 1]) return -1;
69
+ return (int)full - (int)other.full;
70
+ }
71
+
72
+ bool operator<(const Left &other) const {
73
+ return Compare(other) == -1;
74
+ }
75
+
76
+ void ZeroRemaining() {
77
+ for (uint64_t * i = pointers + length; i < pointers + KENLM_MAX_ORDER - 1; ++i)
78
+ *i = 0;
79
+ }
80
+
81
+ uint64_t pointers[KENLM_MAX_ORDER - 1];
82
+ unsigned char length;
83
+ bool full;
84
+ };
85
+
86
+ inline uint64_t hash_value(const Left &left) {
87
+ unsigned char add[2];
88
+ add[0] = left.length;
89
+ add[1] = left.full;
90
+ return util::MurmurHashNative(add, 2, left.length ? left.pointers[left.length - 1] : 0);
91
+ }
92
+
93
+ struct ChartState {
94
+ bool operator==(const ChartState &other) const {
95
+ return (right == other.right) && (left == other.left);
96
+ }
97
+
98
+ int Compare(const ChartState &other) const {
99
+ int lres = left.Compare(other.left);
100
+ if (lres) return lres;
101
+ return right.Compare(other.right);
102
+ }
103
+
104
+ bool operator<(const ChartState &other) const {
105
+ return Compare(other) < 0;
106
+ }
107
+
108
+ void ZeroRemaining() {
109
+ left.ZeroRemaining();
110
+ right.ZeroRemaining();
111
+ }
112
+
113
+ Left left;
114
+ State right;
115
+ };
116
+
117
+ inline uint64_t hash_value(const ChartState &state) {
118
+ return hash_value(state.right, hash_value(state.left));
119
+ }
120
+
121
+
122
+ } // namespace ngram
123
+ } // namespace lm
124
+
125
+ #endif // LM_STATE_H
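Recombination sketch: hypotheses whose States compare equal can merge. The model is assumed loaded; Score and GetVocabulary are the facade API from model.hh:

    #include <cassert>
    lm::ngram::State s1, s2;
    model.Score(model.BeginSentenceState(), model.GetVocabulary().Index("the"), s1);
    model.Score(model.BeginSentenceState(), model.GetVocabulary().Index("the"), s2);
    uint64_t h = lm::ngram::hash_value(s1);  // bucket key for a hypothesis table
    assert(s1 == s2);                        // identical contexts recombine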
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie.hh ADDED
@@ -0,0 +1,146 @@
1
+ #ifndef LM_TRIE_H
2
+ #define LM_TRIE_H
3
+
4
+ #include "lm/weights.hh"
5
+ #include "lm/word_index.hh"
6
+ #include "util/bit_packing.hh"
7
+
8
+ #include <cstddef>
9
+
10
+ #include <stdint.h>
11
+
12
+ namespace lm {
13
+ namespace ngram {
14
+ struct Config;
15
+ namespace trie {
16
+
17
+ struct NodeRange {
18
+ uint64_t begin, end;
19
+ };
20
+
21
+ // TODO: if the number of unigrams is a concern, also bit pack these records.
22
+ struct UnigramValue {
23
+ ProbBackoff weights;
24
+ uint64_t next;
25
+ uint64_t Next() const { return next; }
26
+ };
27
+
28
+ class UnigramPointer {
29
+ public:
30
+ explicit UnigramPointer(const ProbBackoff &to) : to_(&to) {}
31
+
32
+ UnigramPointer() : to_(NULL) {}
33
+
34
+ bool Found() const { return to_ != NULL; }
35
+
36
+ float Prob() const { return to_->prob; }
37
+ float Backoff() const { return to_->backoff; }
38
+ float Rest() const { return Prob(); }
39
+
40
+ private:
41
+ const ProbBackoff *to_;
42
+ };
43
+
44
+ class Unigram {
45
+ public:
46
+ Unigram() {}
47
+
48
+ void Init(void *start) {
49
+ unigram_ = static_cast<UnigramValue*>(start);
50
+ }
51
+
52
+ static uint64_t Size(uint64_t count) {
53
+ // +1 in case unknown doesn't appear. +1 for the final next.
54
+ return (count + 2) * sizeof(UnigramValue);
55
+ }
56
+
57
+ const ProbBackoff &Lookup(WordIndex index) const { return unigram_[index].weights; }
58
+
59
+ ProbBackoff &Unknown() { return unigram_[0].weights; }
60
+
61
+ UnigramValue *Raw() {
62
+ return unigram_;
63
+ }
64
+
65
+ UnigramPointer Find(WordIndex word, NodeRange &next) const {
66
+ UnigramValue *val = unigram_ + word;
67
+ next.begin = val->next;
68
+ next.end = (val+1)->next;
69
+ return UnigramPointer(val->weights);
70
+ }
71
+
72
+ private:
73
+ UnigramValue *unigram_;
74
+ };
75
+
76
+ class BitPacked {
77
+ public:
78
+ BitPacked() {}
79
+
80
+ uint64_t InsertIndex() const {
81
+ return insert_index_;
82
+ }
83
+
84
+ protected:
85
+ static uint64_t BaseSize(uint64_t entries, uint64_t max_vocab, uint8_t remaining_bits);
86
+
87
+ void BaseInit(void *base, uint64_t max_vocab, uint8_t remaining_bits);
88
+
89
+ uint8_t word_bits_;
90
+ uint8_t total_bits_;
91
+ uint64_t word_mask_;
92
+
93
+ uint8_t *base_;
94
+
95
+ uint64_t insert_index_, max_vocab_;
96
+ };
97
+
98
+ template <class Bhiksha> class BitPackedMiddle : public BitPacked {
99
+ public:
100
+ static uint64_t Size(uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_next, const Config &config);
101
+
102
+ // next_source need not be initialized.
103
+ BitPackedMiddle(void *base, uint8_t quant_bits, uint64_t entries, uint64_t max_vocab, uint64_t max_next, const BitPacked &next_source, const Config &config);
104
+
105
+ util::BitAddress Insert(WordIndex word);
106
+
107
+ void FinishedLoading(uint64_t next_end, const Config &config);
108
+
109
+ util::BitAddress Find(WordIndex word, NodeRange &range, uint64_t &pointer) const;
110
+
111
+ util::BitAddress ReadEntry(uint64_t pointer, NodeRange &range) {
112
+ uint64_t addr = pointer * total_bits_;
113
+ addr += word_bits_;
114
+ bhiksha_.ReadNext(base_, addr + quant_bits_, pointer, total_bits_, range);
115
+ return util::BitAddress(base_, addr);
116
+ }
117
+
118
+ private:
119
+ uint8_t quant_bits_;
120
+ Bhiksha bhiksha_;
121
+
122
+ const BitPacked *next_source_;
123
+ };
124
+
125
+ class BitPackedLongest : public BitPacked {
126
+ public:
127
+ static uint64_t Size(uint8_t quant_bits, uint64_t entries, uint64_t max_vocab) {
128
+ return BaseSize(entries, max_vocab, quant_bits);
129
+ }
130
+
131
+ BitPackedLongest() {}
132
+
133
+ void Init(void *base, uint8_t quant_bits, uint64_t max_vocab) {
134
+ BaseInit(base, max_vocab, quant_bits);
135
+ }
136
+
137
+ util::BitAddress Insert(WordIndex word);
138
+
139
+ util::BitAddress Find(WordIndex word, const NodeRange &node) const;
140
+ };
141
+
142
+ } // namespace trie
143
+ } // namespace ngram
144
+ } // namespace lm
145
+
146
+ #endif // LM_TRIE_H
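Sketch of the level-by-level narrowing that Unigram::Find starts; a populated Unigram and a word id are assumed:

    lm::ngram::trie::NodeRange range;
    lm::ngram::trie::UnigramPointer ptr = unigram.Find(word, range);
    // range.begin/range.end now delimit the bigram records whose first word
    // is `word`; an empty range (begin == end) means no right extension, and
    // the middle levels repeat the same find-and-narrow step.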
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/trie_sort.hh ADDED
@@ -0,0 +1,114 @@
1
+ // Step of trie builder: create sorted files.
2
+
3
+ #ifndef LM_TRIE_SORT_H
4
+ #define LM_TRIE_SORT_H
5
+
6
+ #include "lm/max_order.hh"
7
+ #include "lm/word_index.hh"
8
+
9
+ #include "util/file.hh"
10
+ #include "util/scoped.hh"
11
+
12
+ #include <cstddef>
13
+ #include <functional>
14
+ #include <string>
15
+ #include <vector>
16
+
17
+ #include <stdint.h>
18
+
19
+ namespace util {
20
+ class FilePiece;
21
+ } // namespace util
22
+
23
+ namespace lm {
24
+ class PositiveProbWarn;
25
+ namespace ngram {
26
+ class SortedVocabulary;
27
+ struct Config;
28
+
29
+ namespace trie {
30
+
31
+ class EntryCompare : public std::binary_function<const void*, const void*, bool> {
32
+ public:
33
+ explicit EntryCompare(unsigned char order) : order_(order) {}
34
+
35
+ bool operator()(const void *first_void, const void *second_void) const {
36
+ const WordIndex *first = static_cast<const WordIndex*>(first_void);
37
+ const WordIndex *second = static_cast<const WordIndex*>(second_void);
38
+ const WordIndex *end = first + order_;
39
+ for (; first != end; ++first, ++second) {
40
+ if (*first < *second) return true;
41
+ if (*first > *second) return false;
42
+ }
43
+ return false;
44
+ }
45
+ private:
46
+ unsigned char order_;
47
+ };
48
+
49
+ class RecordReader {
50
+ public:
51
+ RecordReader() : remains_(true) {}
52
+
53
+ void Init(FILE *file, std::size_t entry_size);
54
+
55
+ void *Data() { return data_.get(); }
56
+ const void *Data() const { return data_.get(); }
57
+
58
+ RecordReader &operator++() {
59
+ std::size_t ret = fread(data_.get(), entry_size_, 1, file_);
60
+ if (!ret) {
61
+ UTIL_THROW_IF(!feof(file_), util::ErrnoException, "Error reading temporary file");
62
+ remains_ = false;
63
+ }
64
+ return *this;
65
+ }
66
+
67
+ operator bool() const { return remains_; }
68
+
69
+ void Rewind();
70
+
71
+ std::size_t EntrySize() const { return entry_size_; }
72
+
73
+ void Overwrite(const void *start, std::size_t amount);
74
+
75
+ private:
76
+ FILE *file_;
77
+
78
+ util::scoped_malloc data_;
79
+
80
+ bool remains_;
81
+
82
+ std::size_t entry_size_;
83
+ };
84
+
85
+ class SortedFiles {
86
+ public:
87
+ // Build from ARPA
88
+ SortedFiles(const Config &config, util::FilePiece &f, std::vector<uint64_t> &counts, std::size_t buffer, const std::string &file_prefix, SortedVocabulary &vocab);
89
+
90
+ int StealUnigram() {
91
+ return unigram_.release();
92
+ }
93
+
94
+ FILE *Full(unsigned char order) {
95
+ return full_[order - 2].get();
96
+ }
97
+
98
+ FILE *Context(unsigned char of_order) {
99
+ return context_[of_order - 2].get();
100
+ }
101
+
102
+ private:
103
+ void ConvertToSorted(util::FilePiece &f, const SortedVocabulary &vocab, const std::vector<uint64_t> &counts, const std::string &prefix, unsigned char order, PositiveProbWarn &warn, void *mem, std::size_t mem_size);
104
+
105
+ util::scoped_fd unigram_;
106
+
107
+ util::scoped_FILE full_[KENLM_MAX_ORDER - 1], context_[KENLM_MAX_ORDER - 1];
108
+ };
109
+
110
+ } // namespace trie
111
+ } // namespace ngram
112
+ } // namespace lm
113
+
114
+ #endif // LM_TRIE_SORT_H
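EntryCompare orders raw records of `order` WordIndex ids lexicographically; in isolation:

    lm::WordIndex a[2] = {3, 7}, b[2] = {3, 9};
    lm::ngram::trie::EntryCompare cmp(2 /* order */);
    bool less = cmp(a, b);   // true: (3, 7) sorts before (3, 9)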
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value.hh ADDED
@@ -0,0 +1,157 @@
1
+ #ifndef LM_VALUE_H
2
+ #define LM_VALUE_H
3
+
4
+ #include "lm/model_type.hh"
5
+ #include "lm/value_build.hh"
6
+ #include "lm/weights.hh"
7
+ #include "util/bit_packing.hh"
8
+
9
+ #include <stdint.h>
10
+
11
+ namespace lm {
12
+ namespace ngram {
13
+
14
+ // Template proxy for probing unigrams and middle.
15
+ template <class Weights> class GenericProbingProxy {
16
+ public:
17
+ explicit GenericProbingProxy(const Weights &to) : to_(&to) {}
18
+
19
+ GenericProbingProxy() : to_(0) {}
20
+
21
+ bool Found() const { return to_ != 0; }
22
+
23
+ float Prob() const {
24
+ util::FloatEnc enc;
25
+ enc.f = to_->prob;
26
+ enc.i |= util::kSignBit;
27
+ return enc.f;
28
+ }
29
+
30
+ float Backoff() const { return to_->backoff; }
31
+
32
+ bool IndependentLeft() const {
33
+ util::FloatEnc enc;
34
+ enc.f = to_->prob;
35
+ return enc.i & util::kSignBit;
36
+ }
37
+
38
+ protected:
39
+ const Weights *to_;
40
+ };
41
+
42
+ // Basic proxy for trie unigrams.
43
+ template <class Weights> class GenericTrieUnigramProxy {
44
+ public:
45
+ explicit GenericTrieUnigramProxy(const Weights &to) : to_(&to) {}
46
+
47
+ GenericTrieUnigramProxy() : to_(0) {}
48
+
49
+ bool Found() const { return to_ != 0; }
50
+ float Prob() const { return to_->prob; }
51
+ float Backoff() const { return to_->backoff; }
52
+ float Rest() const { return Prob(); }
53
+
54
+ protected:
55
+ const Weights *to_;
56
+ };
57
+
58
+ struct BackoffValue {
59
+ typedef ProbBackoff Weights;
60
+ static const ModelType kProbingModelType = PROBING;
61
+
62
+ class ProbingProxy : public GenericProbingProxy<Weights> {
63
+ public:
64
+ explicit ProbingProxy(const Weights &to) : GenericProbingProxy<Weights>(to) {}
65
+ ProbingProxy() {}
66
+ float Rest() const { return Prob(); }
67
+ };
68
+
69
+ class TrieUnigramProxy : public GenericTrieUnigramProxy<Weights> {
70
+ public:
71
+ explicit TrieUnigramProxy(const Weights &to) : GenericTrieUnigramProxy<Weights>(to) {}
72
+ TrieUnigramProxy() {}
73
+ float Rest() const { return Prob(); }
74
+ };
75
+
76
+ struct ProbingEntry {
77
+ typedef uint64_t Key;
78
+ typedef Weights Value;
79
+ uint64_t key;
80
+ ProbBackoff value;
81
+ uint64_t GetKey() const { return key; }
82
+ };
83
+
84
+ struct TrieUnigramValue {
85
+ Weights weights;
86
+ uint64_t next;
87
+ uint64_t Next() const { return next; }
88
+ };
89
+
90
+ const static bool kDifferentRest = false;
91
+
92
+ template <class Model, class C> void Callback(const Config &, unsigned int, typename Model::Vocabulary &, C &callback) {
93
+ NoRestBuild build;
94
+ callback(build);
95
+ }
96
+ };
97
+
98
+ struct RestValue {
99
+ typedef RestWeights Weights;
100
+ static const ModelType kProbingModelType = REST_PROBING;
101
+
102
+ class ProbingProxy : public GenericProbingProxy<RestWeights> {
103
+ public:
104
+ explicit ProbingProxy(const Weights &to) : GenericProbingProxy<RestWeights>(to) {}
105
+ ProbingProxy() {}
106
+ float Rest() const { return to_->rest; }
107
+ };
108
+
109
+ class TrieUnigramProxy : public GenericTrieUnigramProxy<Weights> {
110
+ public:
111
+ explicit TrieUnigramProxy(const Weights &to) : GenericTrieUnigramProxy<Weights>(to) {}
112
+ TrieUnigramProxy() {}
113
+ float Rest() const { return to_->rest; }
114
+ };
115
+
116
+ // gcc 4.1 doesn't properly pack dependent types :-(.
117
+ #pragma pack(push)
118
+ #pragma pack(4)
119
+ struct ProbingEntry {
120
+ typedef uint64_t Key;
121
+ typedef Weights Value;
122
+ Key key;
123
+ Value value;
124
+ Key GetKey() const { return key; }
125
+ };
126
+
127
+ struct TrieUnigramValue {
128
+ Weights weights;
129
+ uint64_t next;
130
+ uint64_t Next() const { return next; }
131
+ };
132
+ #pragma pack(pop)
133
+
134
+ const static bool kDifferentRest = true;
135
+
136
+ template <class Model, class C> void Callback(const Config &config, unsigned int order, typename Model::Vocabulary &vocab, C &callback) {
137
+ switch (config.rest_function) {
138
+ case Config::REST_MAX:
139
+ {
140
+ MaxRestBuild build;
141
+ callback(build);
142
+ }
143
+ break;
144
+ case Config::REST_LOWER:
145
+ {
146
+ LowerRestBuild<Model> build(config, order, vocab);
147
+ callback(build);
148
+ }
149
+ break;
150
+ }
151
+ }
152
+ };
153
+
154
+ } // namespace ngram
155
+ } // namespace lm
156
+
157
+ #endif // LM_VALUE_H
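The probing proxies borrow the sign bit of the stored prob as the independent-left flag; the trick in isolation:

    util::FloatEnc enc;
    enc.f = -0.25f;                  // true log10 prob (always non-positive)
    enc.i &= ~util::kSignBit;        // building clears the sign to mark "extends left"
    bool independent = enc.i & util::kSignBit;  // false: left context still matters
    enc.i |= util::kSignBit;         // Prob() restores the sign before returning
    float prob = enc.f;              // -0.25 again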
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/value_build.hh ADDED
@@ -0,0 +1,97 @@
1
+ #ifndef LM_VALUE_BUILD_H
2
+ #define LM_VALUE_BUILD_H
3
+
4
+ #include "lm/weights.hh"
5
+ #include "lm/word_index.hh"
6
+ #include "util/bit_packing.hh"
7
+
8
+ #include <vector>
9
+
10
+ namespace lm {
11
+ namespace ngram {
12
+
13
+ struct Config;
14
+ struct BackoffValue;
15
+ struct RestValue;
16
+
17
+ class NoRestBuild {
18
+ public:
19
+ typedef BackoffValue Value;
20
+
21
+ NoRestBuild() {}
22
+
23
+ void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {}
24
+ void SetRest(const WordIndex *, unsigned int, const ProbBackoff &) const {}
25
+
26
+ template <class Second> bool MarkExtends(ProbBackoff &weights, const Second &) const {
27
+ util::UnsetSign(weights.prob);
28
+ return false;
29
+ }
30
+
31
+ // Probing doesn't need to go back to unigram.
32
+ const static bool kMarkEvenLower = false;
33
+ };
34
+
35
+ class MaxRestBuild {
36
+ public:
37
+ typedef RestValue Value;
38
+
39
+ MaxRestBuild() {}
40
+
41
+ void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {}
42
+ void SetRest(const WordIndex *, unsigned int, RestWeights &weights) const {
43
+ weights.rest = weights.prob;
44
+ util::SetSign(weights.rest);
45
+ }
46
+
47
+ bool MarkExtends(RestWeights &weights, const RestWeights &to) const {
48
+ util::UnsetSign(weights.prob);
49
+ if (weights.rest >= to.rest) return false;
50
+ weights.rest = to.rest;
51
+ return true;
52
+ }
53
+ bool MarkExtends(RestWeights &weights, const Prob &to) const {
54
+ util::UnsetSign(weights.prob);
55
+ if (weights.rest >= to.prob) return false;
56
+ weights.rest = to.prob;
57
+ return true;
58
+ }
59
+
60
+ // Probing does need to go back to unigram.
61
+ const static bool kMarkEvenLower = true;
62
+ };
63
+
64
+ template <class Model> class LowerRestBuild {
65
+ public:
66
+ typedef RestValue Value;
67
+
68
+ LowerRestBuild(const Config &config, unsigned int order, const typename Model::Vocabulary &vocab);
69
+
70
+ ~LowerRestBuild();
71
+
72
+ void SetRest(const WordIndex *, unsigned int, const Prob &/*prob*/) const {}
73
+ void SetRest(const WordIndex *vocab_ids, unsigned int n, RestWeights &weights) const {
74
+ typename Model::State ignored;
75
+ if (n == 1) {
76
+ weights.rest = unigrams_[*vocab_ids];
77
+ } else {
78
+ weights.rest = models_[n-2]->FullScoreForgotState(vocab_ids + 1, vocab_ids + n, *vocab_ids, ignored).prob;
79
+ }
80
+ }
81
+
82
+ template <class Second> bool MarkExtends(RestWeights &weights, const Second &) const {
83
+ util::UnsetSign(weights.prob);
84
+ return false;
85
+ }
86
+
87
+ const static bool kMarkEvenLower = false;
88
+
89
+ std::vector<float> unigrams_;
90
+
91
+ std::vector<const Model*> models_;
92
+ };
93
+
94
+ } // namespace ngram
95
+ } // namespace lm
96
+
97
+ #endif // LM_VALUE_BUILD_H
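What MaxRestBuild::MarkExtends does, in isolation; the weights are invented:

    lm::RestWeights w;
    w.prob = -2.0f; w.rest = -2.0f; w.backoff = 0.0f;
    lm::Prob to;
    to.prob = -1.5f;
    lm::ngram::MaxRestBuild build;
    bool raised = build.MarkExtends(w, to);  // true: w.rest rises to -1.5, and the
    // sign bit of w.prob is cleared to flag that the entry extends left.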
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/virtual_interface.hh ADDED
@@ -0,0 +1,160 @@
1
+ #ifndef LM_VIRTUAL_INTERFACE_H
2
+ #define LM_VIRTUAL_INTERFACE_H
3
+
4
+ #include "lm/return.hh"
5
+ #include "lm/word_index.hh"
6
+ #include "util/string_piece.hh"
7
+
8
+ #include <string>
9
+ #include <string.h>
10
+
11
+ namespace lm {
12
+ namespace base {
13
+
14
+ template <class T, class U, class V> class ModelFacade;
15
+
16
+ /* Vocabulary interface. Call Index(string) and get a word index for use in
17
+ * calling Model. It provides faster convenience functions for <s>, </s>, and
18
+ * <unk> although you can also find these using Index.
19
+ *
20
+ * Some models do not load the mapping from index to string. If you need this,
21
+ * check if the model Vocabulary class implements such a function and access it
22
+ * directly.
23
+ *
24
+ * The Vocabulary object is always owned by the Model and can be retrieved from
25
+ * the Model using BaseVocabulary() for this abstract interface or
26
+ * GetVocabulary() for the actual implementation (in which case you'll need the
27
+ * actual implementation of the Model too).
28
+ */
29
+ class Vocabulary {
30
+ public:
31
+ virtual ~Vocabulary();
32
+
33
+ WordIndex BeginSentence() const { return begin_sentence_; }
34
+ WordIndex EndSentence() const { return end_sentence_; }
35
+ WordIndex NotFound() const { return not_found_; }
36
+
37
+ /* Most implementations allow StringPiece lookups and need only override
38
+ * Index(StringPiece). SRI requires null termination and overrides all
39
+ * three methods.
40
+ */
41
+ virtual WordIndex Index(const StringPiece &str) const = 0;
42
+ virtual WordIndex Index(const std::string &str) const {
43
+ return Index(StringPiece(str));
44
+ }
45
+ virtual WordIndex Index(const char *str) const {
46
+ return Index(StringPiece(str));
47
+ }
48
+
49
+ protected:
50
+ // Call SetSpecial afterward.
51
+ Vocabulary() {}
52
+
53
+ Vocabulary(WordIndex begin_sentence, WordIndex end_sentence, WordIndex not_found) {
54
+ SetSpecial(begin_sentence, end_sentence, not_found);
55
+ }
56
+
57
+ void SetSpecial(WordIndex begin_sentence, WordIndex end_sentence, WordIndex not_found);
58
+
59
+ WordIndex begin_sentence_, end_sentence_, not_found_;
60
+
61
+ private:
62
+ // Disable copy constructors. They're private and undefined.
63
+ // Ersatz boost::noncopyable.
64
+ Vocabulary(const Vocabulary &);
65
+ Vocabulary &operator=(const Vocabulary &);
66
+ };
67
+
68
+ /* There are two ways to access a Model.
69
+ *
70
+ *
71
+ * OPTION 1: Access the Model directly (e.g. lm::ngram::Model in model.hh).
72
+ *
73
+ * Every Model implements the scoring function:
74
+ * float Score(
75
+ * const Model::State &in_state,
76
+ * const WordIndex new_word,
77
+ * Model::State &out_state) const;
78
+ *
79
+ * It can also return the length of n-gram matched by the model:
80
+ * FullScoreReturn FullScore(
81
+ * const Model::State &in_state,
82
+ * const WordIndex new_word,
83
+ * Model::State &out_state) const;
84
+ *
85
+ *
86
+ * There are also accessor functions:
87
+ * const State &BeginSentenceState() const;
88
+ * const State &NullContextState() const;
89
+ * const Vocabulary &GetVocabulary() const;
90
+ * unsigned int Order() const;
91
+ *
92
+ * NB: In case you're wondering why the model implementation looks like it's
93
+ * missing these methods, see facade.hh.
94
+ *
95
+ * This is the fastest way to use a model and presents a normal State class to
96
+ * be included in a hypothesis state structure.
97
+ *
98
+ *
99
+ * OPTION 2: Use the virtual interface below.
100
+ *
101
+ * The virtual interface allows you to decide which Model to use at runtime
102
+ * without templatizing everything on the Model type. However, each Model has
103
+ * its own State class, so a single State cannot be efficiently provided (it
104
+ * would require using the maximum memory of any Model's State or memory
105
+ * allocation with each lookup). This means you become responsible for
106
+ * allocating memory with size StateSize() and passing it to the Score or
107
+ * FullScore functions provided here.
108
+ *
109
+ * For example, cdec has a std::string containing the entire state of a
110
+ * hypothesis. It can reserve StateSize bytes in this string for the model
111
+ * state.
112
+ *
113
+ * All the State objects are POD, so it's ok to use raw memory for storing
114
+ * State.
115
+ * in_state and out_state must not have the same address.
116
+ */
117
+ class Model {
118
+ public:
119
+ virtual ~Model();
120
+
121
+ size_t StateSize() const { return state_size_; }
122
+ const void *BeginSentenceMemory() const { return begin_sentence_memory_; }
123
+ void BeginSentenceWrite(void *to) const { memcpy(to, begin_sentence_memory_, StateSize()); }
124
+ const void *NullContextMemory() const { return null_context_memory_; }
125
+ void NullContextWrite(void *to) const { memcpy(to, null_context_memory_, StateSize()); }
126
+
127
+ // Requires in_state != out_state
128
+ virtual float BaseScore(const void *in_state, const WordIndex new_word, void *out_state) const = 0;
129
+
130
+ // Requires in_state != out_state
131
+ virtual FullScoreReturn BaseFullScore(const void *in_state, const WordIndex new_word, void *out_state) const = 0;
132
+
133
+ // Prefer to use FullScore. The context words should be provided in reverse order.
134
+ virtual FullScoreReturn BaseFullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, void *out_state) const = 0;
135
+
136
+ unsigned char Order() const { return order_; }
137
+
138
+ const Vocabulary &BaseVocabulary() const { return *base_vocab_; }
139
+
140
+ private:
141
+ template <class T, class U, class V> friend class ModelFacade;
142
+ explicit Model(size_t state_size) : state_size_(state_size) {}
143
+
144
+ const size_t state_size_;
145
+ const void *begin_sentence_memory_, *null_context_memory_;
146
+
147
+ const Vocabulary *base_vocab_;
148
+
149
+ unsigned char order_;
150
+
151
+ // Disable copy constructors. They're private and undefined.
152
+ // Ersatz boost::noncopyable.
153
+ Model(const Model &);
154
+ Model &operator=(const Model &);
155
+ };
156
+
157
+ } // namespace base
158
+ } // namespace lm
159
+
160
+ #endif // LM_VIRTUAL_INTERFACE_H
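Hedged sketch of OPTION 2, scoring through the virtual base; lm::ngram::LoadVirtual from model.hh is one way to obtain the pointer, and the binary path is hypothetical:

    lm::base::Model *m = lm::ngram::LoadVirtual("file.binary");
    std::vector<char> in(m->StateSize()), out(m->StateSize());
    m->BeginSentenceWrite(&in[0]);
    lm::WordIndex w = m->BaseVocabulary().Index("example");
    float score = m->BaseScore(&in[0], w, &out[0]);   // in != out, as required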
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/vocab.hh ADDED
@@ -0,0 +1,249 @@
+ #ifndef LM_VOCAB_H
+ #define LM_VOCAB_H
+
+ #include "lm/enumerate_vocab.hh"
+ #include "lm/lm_exception.hh"
+ #include "lm/virtual_interface.hh"
+ #include "util/fake_ofstream.hh"
+ #include "util/murmur_hash.hh"
+ #include "util/pool.hh"
+ #include "util/probing_hash_table.hh"
+ #include "util/sorted_uniform.hh"
+ #include "util/string_piece.hh"
+
+ #include <limits>
+ #include <string>
+ #include <vector>
+
+ namespace lm {
+ struct ProbBackoff;
+ class EnumerateVocab;
+
+ namespace ngram {
+ struct Config;
+
+ namespace detail {
+ uint64_t HashForVocab(const char *str, std::size_t len);
+ inline uint64_t HashForVocab(const StringPiece &str) {
+ return HashForVocab(str.data(), str.length());
+ }
+ struct ProbingVocabularyHeader;
+ } // namespace detail
+
+ class WriteWordsWrapper : public EnumerateVocab {
+ public:
+ WriteWordsWrapper(EnumerateVocab *inner);
+
+ ~WriteWordsWrapper();
+
+ void Add(WordIndex index, const StringPiece &str);
+
+ const std::string &Buffer() const { return buffer_; }
+
+ private:
+ EnumerateVocab *inner_;
+
+ std::string buffer_;
+ };
+
+ // Vocabulary based on sorted uniform find storing only uint64_t values and using their offsets as indices.
+ class SortedVocabulary : public base::Vocabulary {
+ public:
+ SortedVocabulary();
+
+ WordIndex Index(const StringPiece &str) const {
+ const uint64_t *found;
+ if (util::BoundedSortedUniformFind<const uint64_t*, util::IdentityAccessor<uint64_t>, util::Pivot64>(
+ util::IdentityAccessor<uint64_t>(),
+ begin_ - 1, 0,
+ end_, std::numeric_limits<uint64_t>::max(),
+ detail::HashForVocab(str), found)) {
+ return found - begin_ + 1; // +1 because <unk> is 0 and does not appear in the lookup table.
+ } else {
+ return 0;
+ }
+ }
+
+ // Size for purposes of file writing
+ static uint64_t Size(uint64_t entries, const Config &config);
+
+ // Vocab words are [0, Bound()). Only valid after FinishedLoading/LoadedBinary.
+ WordIndex Bound() const { return bound_; }
+
+ // Everything else is for populating. I'm too lazy to hide and friend these, but you'll only get a const reference anyway.
+ void SetupMemory(void *start, std::size_t allocated, std::size_t entries, const Config &config);
+
+ void Relocate(void *new_start);
+
+ void ConfigureEnumerate(EnumerateVocab *to, std::size_t max_entries);
+
+ WordIndex Insert(const StringPiece &str);
+
+ // Reorders reorder_vocab so that the IDs are sorted.
+ void FinishedLoading(ProbBackoff *reorder_vocab);
+
+ // Trie stores the correct counts including <unk> in the header. If this was previously sized based on a count excluding <unk>, padding with 8 bytes will make it the correct size based on a count including <unk>.
+ std::size_t UnkCountChangePadding() const { return SawUnk() ? 0 : sizeof(uint64_t); }
+
+ bool SawUnk() const { return saw_unk_; }
+
+ void LoadedBinary(bool have_words, int fd, EnumerateVocab *to, uint64_t offset);
+
+ private:
+ uint64_t *begin_, *end_;
+
+ WordIndex bound_;
+
+ bool saw_unk_;
+
+ EnumerateVocab *enumerate_;
+
+ // Actual strings. Used only when loading from ARPA and enumerate_ != NULL
+ util::Pool string_backing_;
+
+ std::vector<StringPiece> strings_to_enumerate_;
+ };
+
+ #pragma pack(push)
+ #pragma pack(4)
+ struct ProbingVocabularyEntry {
+ uint64_t key;
+ WordIndex value;
+
+ typedef uint64_t Key;
+ uint64_t GetKey() const { return key; }
+ void SetKey(uint64_t to) { key = to; }
+
+ static ProbingVocabularyEntry Make(uint64_t key, WordIndex value) {
+ ProbingVocabularyEntry ret;
+ ret.key = key;
+ ret.value = value;
+ return ret;
+ }
+ };
+ #pragma pack(pop)
+
+ // Vocabulary storing a map from uint64_t to WordIndex.
+ class ProbingVocabulary : public base::Vocabulary {
+ public:
+ ProbingVocabulary();
+
+ WordIndex Index(const StringPiece &str) const {
+ Lookup::ConstIterator i;
+ return lookup_.Find(detail::HashForVocab(str), i) ? i->value : 0;
+ }
+
+ static uint64_t Size(uint64_t entries, float probing_multiplier);
+ // This just unwraps Config to get the probing_multiplier.
+ static uint64_t Size(uint64_t entries, const Config &config);
+
+ // Vocab words are [0, Bound()).
+ WordIndex Bound() const { return bound_; }
+
+ // Everything else is for populating. I'm too lazy to hide and friend these, but you'll only get a const reference anyway.
+ void SetupMemory(void *start, std::size_t allocated);
+ void SetupMemory(void *start, std::size_t allocated, std::size_t /*entries*/, const Config &/*config*/) {
+ SetupMemory(start, allocated);
+ }
+
+ void Relocate(void *new_start);
+
+ void ConfigureEnumerate(EnumerateVocab *to, std::size_t max_entries);
+
+ WordIndex Insert(const StringPiece &str);
+
+ template <class Weights> void FinishedLoading(Weights * /*reorder_vocab*/) {
+ FinishedLoading();
+ }
+ void FinishedLoading();
+
+ std::size_t UnkCountChangePadding() const { return 0; }
+
+ bool SawUnk() const { return saw_unk_; }
+
+ void LoadedBinary(bool have_words, int fd, EnumerateVocab *to, uint64_t offset);
+
+ private:
+ typedef util::ProbingHashTable<ProbingVocabularyEntry, util::IdentityHash> Lookup;
+
+ Lookup lookup_;
+
+ WordIndex bound_;
+
+ bool saw_unk_;
+
+ EnumerateVocab *enumerate_;
+
+ detail::ProbingVocabularyHeader *header_;
+ };
+
+ void MissingUnknown(const Config &config) throw(SpecialWordMissingException);
+ void MissingSentenceMarker(const Config &config, const char *str) throw(SpecialWordMissingException);
+
+ template <class Vocab> void CheckSpecials(const Config &config, const Vocab &vocab) throw(SpecialWordMissingException) {
+ if (!vocab.SawUnk()) MissingUnknown(config);
+ if (vocab.BeginSentence() == vocab.NotFound()) MissingSentenceMarker(config, "<s>");
+ if (vocab.EndSentence() == vocab.NotFound()) MissingSentenceMarker(config, "</s>");
+ }
+
+ class WriteUniqueWords {
+ public:
+ explicit WriteUniqueWords(int fd) : word_list_(fd) {}
+
+ void operator()(const StringPiece &word) {
+ word_list_ << word << '\0';
+ }
+
+ private:
+ util::FakeOFStream word_list_;
+ };
+
+ class NoOpUniqueWords {
+ public:
+ NoOpUniqueWords() {}
+ void operator()(const StringPiece &word) {}
+ };
+
+ template <class NewWordAction = NoOpUniqueWords> class GrowableVocab {
+ public:
+ static std::size_t MemUsage(WordIndex content) {
+ return Lookup::MemUsage(content > 2 ? content : 2);
+ }
+
+ // Does not take ownership of new_word_construct.
+ template <class NewWordConstruct> GrowableVocab(WordIndex initial_size, const NewWordConstruct &new_word_construct = NewWordAction())
+ : lookup_(initial_size), new_word_(new_word_construct) {
+ FindOrInsert("<unk>"); // Force 0
+ FindOrInsert("<s>"); // Force 1
+ FindOrInsert("</s>"); // Force 2
+ }
+
+ WordIndex Index(const StringPiece &str) const {
+ Lookup::ConstIterator i;
+ return lookup_.Find(detail::HashForVocab(str), i) ? i->value : 0;
+ }
+
+ WordIndex FindOrInsert(const StringPiece &word) {
+ ProbingVocabularyEntry entry = ProbingVocabularyEntry::Make(util::MurmurHashNative(word.data(), word.size()), Size());
+ Lookup::MutableIterator it;
+ if (!lookup_.FindOrInsert(entry, it)) {
+ new_word_(word);
+ UTIL_THROW_IF(Size() >= std::numeric_limits<lm::WordIndex>::max(), VocabLoadException, "Too many vocabulary words. Change WordIndex to uint64_t in lm/word_index.hh");
+ }
+ return it->value;
+ }
+
+ WordIndex Size() const { return lookup_.Size(); }
+
+ private:
+ typedef util::AutoProbing<ProbingVocabularyEntry, util::IdentityHash> Lookup;
+
+ Lookup lookup_;
+
+ NewWordAction new_word_;
+ };
+
+ } // namespace ngram
+ } // namespace lm
+
+ #endif // LM_VOCAB_H
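As a quick illustration of the GrowableVocab declared above (a sketch, not part of the header): the constructor forces <unk>, <s>, and </s> to IDs 0, 1, and 2, FindOrInsert assigns the next free ID to unseen words, and Index falls back to 0 (<unk>) for words never inserted. The NewWordAction functor, NoOpUniqueWords by default, fires once per newly inserted word.

```cpp
// Sketch: assigning IDs with the default (no-op) new-word action.
#include "lm/vocab.hh"

#include <iostream>

int main() {
  lm::ngram::GrowableVocab<> vocab(64, lm::ngram::NoOpUniqueWords());
  lm::WordIndex apple = vocab.FindOrInsert("apple");    // first free ID: 3
  lm::WordIndex banana = vocab.FindOrInsert("banana");  // 4
  lm::WordIndex again = vocab.FindOrInsert("apple");    // existing entry: 3 again
  std::cout << apple << ' ' << banana << ' ' << again << '\n';
  std::cout << vocab.Size() << '\n';          // 5: <unk>, <s>, </s>, apple, banana
  std::cout << vocab.Index("cherry") << '\n'; // 0, i.e. <unk>: never inserted
}
```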
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/weights.hh ADDED
@@ -0,0 +1,22 @@
+ #ifndef LM_WEIGHTS_H
+ #define LM_WEIGHTS_H
+
+ // Weights for n-grams. Probability and possibly a backoff.
+
+ namespace lm {
+ struct Prob {
+ float prob;
+ };
+ // No inheritance so this will be a POD.
+ struct ProbBackoff {
+ float prob;
+ float backoff;
+ };
+ struct RestWeights {
+ float prob;
+ float backoff;
+ float rest;
+ };
+
+ } // namespace lm
+ #endif // LM_WEIGHTS_H
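For orientation (not part of the header): prob and backoff hold log10 values as read from an ARPA file, and the backoff weight enters scoring through the usual backoff recursion. The sketch below is illustrative only; Context, Find, BackoffOf, and Shorten are hypothetical stand-ins, not KenLM API.

```cpp
// Illustrative only: how a ProbBackoff pair participates in backoff scoring.
// All quantities are log10, so "multiplying" probabilities is addition.
// (Base case on the empty context omitted for brevity.)
float Score(const Context &context, lm::WordIndex word) {
  if (const lm::ProbBackoff *hit = Find(context, word))  // hypothetical lookup
    return hit->prob;                                    // full n-gram found
  // Not found: pay the context's backoff weight, retry with a shorter context.
  return BackoffOf(context) + Score(Shorten(context), word);
}
```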
cc-multilingual-main/cc_net/third_party/kenlm/include/lm/word_index.hh ADDED
@@ -0,0 +1,14 @@
+ // Separate header because this is used often.
+ #ifndef LM_WORD_INDEX_H
+ #define LM_WORD_INDEX_H
+
+ #include <limits.h>
+
+ namespace lm {
+ typedef unsigned int WordIndex;
+ const WordIndex kMaxWordIndex = UINT_MAX;
+ } // namespace lm
+
+ typedef lm::WordIndex LMWordIndex;
+
+ #endif