applied-ai-018 committed on
Commit 4b6852c · verified · 1 Parent(s): 3573e61

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CMakeDetermineCompilerABI_C.bin +3 -0
  3. cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CMakeDetermineCompilerABI_CXX.bin +3 -0
  4. cc-multilingual-main/cc_net/third_party/kenlm/build/lib/libkenlm.a +3 -0
  5. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/CMakeLists.txt +59 -0
  6. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/README.md +47 -0
  7. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/TODO +5 -0
  8. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/adjust_counts.cc +350 -0
  9. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/adjust_counts.hh +72 -0
  10. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/adjust_counts_test.cc +112 -0
  11. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/combine_counts.hh +31 -0
  12. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/corpus_count.cc +287 -0
  13. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/corpus_count.hh +56 -0
  14. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/corpus_count_test.cc +85 -0
  15. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/count_ngrams_main.cc +99 -0
  16. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/debug_print.hh +70 -0
  17. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/discount.hh +26 -0
  18. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/dump_counts_main.cc +36 -0
  19. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/hash_gamma.hh +19 -0
  20. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/header_info.hh +28 -0
  21. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/initial_probabilities.cc +306 -0
  22. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/initial_probabilities.hh +45 -0
  23. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/interpolate.cc +166 -0
  24. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/interpolate.hh +37 -0
  25. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/lmplz_main.cc +220 -0
  26. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/output.cc +52 -0
  27. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/output.hh +85 -0
  28. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/payload.hh +48 -0
  29. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/pipeline.cc +385 -0
  30. cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/pipeline.hh +76 -0
  31. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.1 +0 -0
  32. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.2 +0 -0
  33. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.3 +0 -0
  34. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.kenlm_intermediate +3 -0
  35. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.vocab +0 -0
  36. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.1 +0 -0
  37. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.2 +0 -0
  38. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.3 +0 -0
  39. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.kenlm_intermediate +3 -0
  40. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.vocab +0 -0
  41. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/generate.sh +9 -0
  42. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.1 +0 -0
  43. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.2 +0 -0
  44. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.3 +0 -0
  45. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.kenlm_intermediate +3 -0
  46. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.vocab +0 -0
  47. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.1 +0 -0
  48. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.2 +0 -0
  49. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.3 +0 -0
  50. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.kenlm_intermediate +3 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ cc-multilingual-main/cc_net/third_party/kenlm/build/lib/libkenlm.a filter=lfs diff=lfs merge=lfs -text
cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CMakeDetermineCompilerABI_C.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef64be9d81b97e019f77c3d838930e9679d8d8f33d6d1f4b5840714e45987128
+ size 15968
cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CMakeDetermineCompilerABI_CXX.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f642dd64274b2cd71b9149b574006831c3103420c752b32d177540a7296d5c3
+ size 15992
cc-multilingual-main/cc_net/third_party/kenlm/build/lib/libkenlm.a ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50924f3535df3e0116dddc11d5e709d3f489be1d58731b5cab9c60ab63f099ed
+ size 1359220
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/CMakeLists.txt ADDED
@@ -0,0 +1,59 @@
+ # This CMake file was created by Lane Schwartz <[email protected]>
+
+ # Explicitly list the source files for this subdirectory
+ #
+ # If you add any source files to this subdirectory
+ # that should be included in the kenlm library,
+ # (this excludes any unit test files)
+ # you should add them to the following list:
+ #
+ # In order to set correct paths to these files
+ # in case this variable is referenced by CMake files in the parent directory,
+ # we prefix all files with ${CMAKE_CURRENT_SOURCE_DIR}.
+ #
+ set(KENLM_BUILDER_SOURCE
+   ${CMAKE_CURRENT_SOURCE_DIR}/adjust_counts.cc
+   ${CMAKE_CURRENT_SOURCE_DIR}/corpus_count.cc
+   ${CMAKE_CURRENT_SOURCE_DIR}/initial_probabilities.cc
+   ${CMAKE_CURRENT_SOURCE_DIR}/interpolate.cc
+   ${CMAKE_CURRENT_SOURCE_DIR}/output.cc
+   ${CMAKE_CURRENT_SOURCE_DIR}/pipeline.cc
+ )
+
+
+ # Group these objects together for later use.
+ #
+ # Given add_library(foo OBJECT ${my_foo_sources}),
+ # refer to these objects as $<TARGET_OBJECTS:foo>
+ #
+ add_library(kenlm_builder ${KENLM_BUILDER_SOURCE})
+
+ target_link_libraries(kenlm_builder PUBLIC kenlm kenlm_util Threads::Threads)
+ # Since headers are relative to `include/kenlm` at install time, not just `include`
+ target_include_directories(kenlm_builder PUBLIC $<INSTALL_INTERFACE:include/kenlm>)
+
+ AddExes(EXES lmplz
+         LIBRARIES kenlm_builder kenlm kenlm_util Threads::Threads)
+ AddExes(EXES count_ngrams
+         LIBRARIES kenlm_builder kenlm kenlm_util Threads::Threads)
+
+ install(
+   TARGETS kenlm_builder
+   EXPORT kenlmTargets
+   RUNTIME DESTINATION bin
+   LIBRARY DESTINATION lib
+   ARCHIVE DESTINATION lib
+   INCLUDES DESTINATION include
+ )
+
+ if(BUILD_TESTING)
+
+   # Explicitly list the Boost test files to be compiled
+   set(KENLM_BOOST_TESTS_LIST
+     adjust_counts_test
+     corpus_count_test
+   )
+
+   AddTests(TESTS ${KENLM_BOOST_TESTS_LIST}
+            LIBRARIES kenlm_builder kenlm kenlm_util Threads::Threads)
+ endif()
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/README.md ADDED
@@ -0,0 +1,47 @@
+ Dependencies
+ ============
+
+ Boost >= 1.42.0 is required.
+
+ For Ubuntu,
+ ```bash
+ sudo apt-get install libboost1.48-all-dev
+ ```
+
+ Alternatively, you can download, compile, and install it yourself:
+
+ ```bash
+ wget http://sourceforge.net/projects/boost/files/boost/1.52.0/boost_1_52_0.tar.gz/download -O boost_1_52_0.tar.gz
+ tar -xvzf boost_1_52_0.tar.gz
+ cd boost_1_52_0
+ ./bootstrap.sh
+ ./b2
+ sudo ./b2 install
+ ```
+
+ Local install options (in a user-space prefix directory) are also possible. See http://www.boost.org/doc/libs/1_52_0/doc/html/bbv2/installation.html.
+
+
+ Building
+ ========
+
+ ```bash
+ bjam
+ ```
+ Your distribution might package bjam and boost-build separately from Boost. Both are required.
+
+ Usage
+ =====
+
+ Run
+ ```bash
+ $ bin/lmplz
+ ```
+ to see command line arguments.
+
+ Running
+ =======
+
+ ```bash
+ bin/lmplz -o 5 <text >text.arpa
+ ```
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/TODO ADDED
@@ -0,0 +1,5 @@
+ More tests!
+ Sharding.
+ Some way to manage all the crazy config options.
+ Option to build the binary file directly.
+ Interpolation of different orders.
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/adjust_counts.cc ADDED
@@ -0,0 +1,350 @@
+ #include "adjust_counts.hh"
+ #include "../common/ngram_stream.hh"
+ #include "payload.hh"
+
+ #include <algorithm>
+ #include <iostream>
+ #include <limits>
+
+ namespace lm { namespace builder {
+
+ BadDiscountException::BadDiscountException() throw() {}
+ BadDiscountException::~BadDiscountException() throw() {}
+
+ namespace {
+ // Return last word in full that is different.
+ const WordIndex* FindDifference(const NGram<BuildingPayload> &full, const NGram<BuildingPayload> &lower_last) {
+   const WordIndex *cur_word = full.end() - 1;
+   const WordIndex *pre_word = lower_last.end() - 1;
+   // Find last difference.
+   for (; pre_word >= lower_last.begin() && *pre_word == *cur_word; --cur_word, --pre_word) {}
+   return cur_word;
+ }
+
+ class StatCollector {
+   public:
+     StatCollector(std::size_t order, std::vector<uint64_t> &counts, std::vector<uint64_t> &counts_pruned, std::vector<Discount> &discounts)
+       : orders_(order), full_(orders_.back()), counts_(counts), counts_pruned_(counts_pruned), discounts_(discounts) {
+       memset(&orders_[0], 0, sizeof(OrderStat) * order);
+     }
+
+     ~StatCollector() {}
+
+     void CalculateDiscounts(const DiscountConfig &config) {
+       counts_.resize(orders_.size());
+       counts_pruned_.resize(orders_.size());
+       for (std::size_t i = 0; i < orders_.size(); ++i) {
+         const OrderStat &s = orders_[i];
+         counts_[i] = s.count;
+         counts_pruned_[i] = s.count_pruned;
+       }
+
+       discounts_ = config.overwrite;
+       discounts_.resize(orders_.size());
+       for (std::size_t i = config.overwrite.size(); i < orders_.size(); ++i) {
+         const OrderStat &s = orders_[i];
+         try {
+           for (unsigned j = 1; j < 4; ++j) {
+             // TODO: Specialize error message for j == 3, meaning 3+
+             UTIL_THROW_IF(s.n[j] == 0, BadDiscountException, "Could not calculate Kneser-Ney discounts for "
+               << (i+1) << "-grams with adjusted count " << (j+1) << " because we didn't observe any "
+               << (i+1) << "-grams with adjusted count " << j << "; Is this small or artificial data?\n"
+               << "Try deduplicating the input. To override this error for e.g. a class-based model, rerun with --discount_fallback\n");
+           }
+
+           // See equation (26) in Chen and Goodman.
+           discounts_[i].amount[0] = 0.0;
+           float y = static_cast<float>(s.n[1]) / static_cast<float>(s.n[1] + 2.0 * s.n[2]);
+           for (unsigned j = 1; j < 4; ++j) {
+             discounts_[i].amount[j] = static_cast<float>(j) - static_cast<float>(j + 1) * y * static_cast<float>(s.n[j+1]) / static_cast<float>(s.n[j]);
+             UTIL_THROW_IF(discounts_[i].amount[j] < 0.0 || discounts_[i].amount[j] > j, BadDiscountException, "ERROR: " << (i+1) << "-gram discount out of range for adjusted count " << j << ": " << discounts_[i].amount[j] << ". This means modified Kneser-Ney smoothing thinks something is weird about your data. To override this error for e.g. a class-based model, rerun with --discount_fallback\n");
+           }
+         } catch (const BadDiscountException &) {
+           switch (config.bad_action) {
+             case THROW_UP:
+               throw;
+             case COMPLAIN:
+               std::cerr << "Substituting fallback discounts for order " << i << ": D1=" << config.fallback.amount[1] << " D2=" << config.fallback.amount[2] << " D3+=" << config.fallback.amount[3] << std::endl;
+             case SILENT:
+               break;
+           }
+           discounts_[i] = config.fallback;
+         }
+       }
+     }
+
+     void Add(std::size_t order_minus_1, uint64_t count, bool pruned = false) {
+       OrderStat &stat = orders_[order_minus_1];
+       ++stat.count;
+       if (!pruned)
+         ++stat.count_pruned;
+       if (count < 5) ++stat.n[count];
+     }
+
+     void AddFull(uint64_t count, bool pruned = false) {
+       ++full_.count;
+       if (!pruned)
+         ++full_.count_pruned;
+       if (count < 5) ++full_.n[count];
+     }
+
+   private:
+     struct OrderStat {
+       // n_1 in equation 26 of Chen and Goodman etc
+       uint64_t n[5];
+       uint64_t count;
+       uint64_t count_pruned;
+     };
+
+     std::vector<OrderStat> orders_;
+     OrderStat &full_;
+
+     std::vector<uint64_t> &counts_;
+     std::vector<uint64_t> &counts_pruned_;
+     std::vector<Discount> &discounts_;
+ };
+
+ // Reads all entries in order like NGramStream does.
+ // But deletes any entries that have <s> in the 1st (not 0th) position on the
+ // way out by putting other entries in their place. This disrupts the sort
+ // order but we don't care because the data is going to be sorted again.
+ class CollapseStream {
+   public:
+     CollapseStream(const util::stream::ChainPosition &position, uint64_t prune_threshold, const std::vector<bool>& prune_words) :
+       current_(NULL, NGram<BuildingPayload>::OrderFromSize(position.GetChain().EntrySize())),
+       prune_threshold_(prune_threshold),
+       prune_words_(prune_words),
+       block_(position) {
+       StartBlock();
+     }
+
+     const NGram<BuildingPayload> &operator*() const { return current_; }
+     const NGram<BuildingPayload> *operator->() const { return &current_; }
+
+     operator bool() const { return block_; }
+
+     CollapseStream &operator++() {
+       assert(block_);
+
+       if (current_.begin()[1] == kBOS && current_.Base() < copy_from_) {
+         memcpy(current_.Base(), copy_from_, current_.TotalSize());
+         UpdateCopyFrom();
+
+         // Mark highest order n-grams for later pruning
+         if(current_.Value().count <= prune_threshold_) {
+           current_.Value().Mark();
+         }
+
+         if(!prune_words_.empty()) {
+           for(WordIndex* i = current_.begin(); i != current_.end(); i++) {
+             if(prune_words_[*i]) {
+               current_.Value().Mark();
+               break;
+             }
+           }
+         }
+
+       }
+
+       current_.NextInMemory();
+       uint8_t *block_base = static_cast<uint8_t*>(block_->Get());
+       if (current_.Base() == block_base + block_->ValidSize()) {
+         block_->SetValidSize(copy_from_ + current_.TotalSize() - block_base);
+         ++block_;
+         StartBlock();
+       }
+
+       // Mark highest order n-grams for later pruning
+       if(current_.Value().count <= prune_threshold_) {
+         current_.Value().Mark();
+       }
+
+       if(!prune_words_.empty()) {
+         for(WordIndex* i = current_.begin(); i != current_.end(); i++) {
+           if(prune_words_[*i]) {
+             current_.Value().Mark();
+             break;
+           }
+         }
+       }
+
+       return *this;
+     }
+
+   private:
+     void StartBlock() {
+       for (; ; ++block_) {
+         if (!block_) return;
+         if (block_->ValidSize()) break;
+       }
+       current_.ReBase(block_->Get());
+       copy_from_ = static_cast<uint8_t*>(block_->Get()) + block_->ValidSize();
+       UpdateCopyFrom();
+
+       // Mark highest order n-grams for later pruning
+       if(current_.Value().count <= prune_threshold_) {
+         current_.Value().Mark();
+       }
+
+       if(!prune_words_.empty()) {
+         for(WordIndex* i = current_.begin(); i != current_.end(); i++) {
+           if(prune_words_[*i]) {
+             current_.Value().Mark();
+             break;
+           }
+         }
+       }
+
+     }
+
+     // Find last without bos.
+     void UpdateCopyFrom() {
+       for (copy_from_ -= current_.TotalSize(); copy_from_ >= current_.Base(); copy_from_ -= current_.TotalSize()) {
+         if (NGram<BuildingPayload>(copy_from_, current_.Order()).begin()[1] != kBOS) break;
+       }
+     }
+
+     NGram<BuildingPayload> current_;
+
+     // Goes backwards in the block
+     uint8_t *copy_from_;
+     uint64_t prune_threshold_;
+     const std::vector<bool>& prune_words_;
+     util::stream::Link block_;
+ };
+
+ } // namespace
+
+ void AdjustCounts::Run(const util::stream::ChainPositions &positions) {
+   const std::size_t order = positions.size();
+   StatCollector stats(order, counts_, counts_pruned_, discounts_);
+   if (order == 1) {
+
+     // Only unigrams. Just collect stats.
+     for (NGramStream<BuildingPayload> full(positions[0]); full; ++full) {
+
+       // Do not prune <s> </s> <unk>
+       if(*full->begin() > 2) {
+         if(full->Value().count <= prune_thresholds_[0])
+           full->Value().Mark();
+
+         if(!prune_words_.empty() && prune_words_[*full->begin()])
+           full->Value().Mark();
+       }
+
+       stats.AddFull(full->Value().UnmarkedCount(), full->Value().IsMarked());
+     }
+
+     stats.CalculateDiscounts(discount_config_);
+     return;
+   }
+
+   NGramStreams<BuildingPayload> streams;
+   streams.Init(positions, positions.size() - 1);
+
+   CollapseStream full(positions[positions.size() - 1], prune_thresholds_.back(), prune_words_);
+
+   // Initialization: <unk> has count 0 and so does <s>.
+   NGramStream<BuildingPayload> *lower_valid = streams.begin();
+   const NGramStream<BuildingPayload> *const streams_begin = streams.begin();
+   streams[0]->Value().count = 0;
+   *streams[0]->begin() = kUNK;
+   stats.Add(0, 0);
+   (++streams[0])->Value().count = 0;
+   *streams[0]->begin() = kBOS;
+   // <s> is not in stats yet because it will get put in later.
+
+   // This keeps track of actual counts for lower orders. It is not output
+   // (only adjusted counts are), but used to determine pruning.
+   std::vector<uint64_t> actual_counts(positions.size(), 0);
+   // Something of a hack: don't prune <s>.
+   actual_counts[0] = std::numeric_limits<uint64_t>::max();
+
+   // Iterate over full (the stream of the highest order ngrams)
+   for (; full; ++full) {
+     const WordIndex *different = FindDifference(*full, **lower_valid);
+     std::size_t same = full->end() - 1 - different;
+
+     // STEP 1: Output all the n-grams that changed.
+     for (; lower_valid >= streams.begin() + same; --lower_valid) {
+       uint64_t order_minus_1 = lower_valid - streams_begin;
+       if(actual_counts[order_minus_1] <= prune_thresholds_[order_minus_1])
+         (*lower_valid)->Value().Mark();
+
+       if(!prune_words_.empty()) {
+         for(WordIndex* i = (*lower_valid)->begin(); i != (*lower_valid)->end(); i++) {
+           if(prune_words_[*i]) {
+             (*lower_valid)->Value().Mark();
+             break;
+           }
+         }
+       }
+
+       stats.Add(order_minus_1, (*lower_valid)->Value().UnmarkedCount(), (*lower_valid)->Value().IsMarked());
+       ++*lower_valid;
+     }
+
+     // STEP 2: Update n-grams that still match.
+     // n-grams that match get count from the full entry.
+     for (std::size_t i = 0; i < same; ++i) {
+       actual_counts[i] += full->Value().UnmarkedCount();
+     }
+     // Increment the number of unique extensions for the longest match.
+     if (same) ++streams[same - 1]->Value().count;
+
+     // STEP 3: Initialize new n-grams.
+     // This is here because bos is also const WordIndex *, so copy gets
+     // consistent argument types.
+     const WordIndex *full_end = full->end();
+     // Initialize and mark as valid up to bos.
+     const WordIndex *bos;
+     for (bos = different; (bos > full->begin()) && (*bos != kBOS); --bos) {
+       NGramStream<BuildingPayload> &to = *++lower_valid;
+       std::copy(bos, full_end, to->begin());
+       to->Value().count = 1;
+       actual_counts[lower_valid - streams_begin] = full->Value().UnmarkedCount();
+     }
+     // Now bos indicates where <s> is or is the 0th word of full.
+     if (bos != full->begin()) {
+       // There is an <s> beyond the 0th word.
+       NGramStream<BuildingPayload> &to = *++lower_valid;
+       std::copy(bos, full_end, to->begin());
+
+       // Anything that begins with <s> has full non adjusted count.
+       to->Value().count = full->Value().UnmarkedCount();
+       actual_counts[lower_valid - streams_begin] = full->Value().UnmarkedCount();
+     } else {
+       stats.AddFull(full->Value().UnmarkedCount(), full->Value().IsMarked());
+     }
+     assert(lower_valid >= &streams[0]);
+   }
+
+   // The above loop outputs n-grams when it observes changes. This outputs
+   // the last n-grams.
+   for (NGramStream<BuildingPayload> *s = streams.begin(); s <= lower_valid; ++s) {
+     uint64_t lower_count = actual_counts[(*s)->Order() - 1];
+     if(lower_count <= prune_thresholds_[(*s)->Order() - 1])
+       (*s)->Value().Mark();
+
+     if(!prune_words_.empty()) {
+       for(WordIndex* i = (*s)->begin(); i != (*s)->end(); i++) {
+         if(prune_words_[*i]) {
+           (*s)->Value().Mark();
+           break;
+         }
+       }
+     }
+
+     stats.Add(s - streams.begin(), lower_count, (*s)->Value().IsMarked());
+     ++*s;
+   }
+   // Poison everyone! Except the N-grams which were already poisoned by the input.
+   for (NGramStream<BuildingPayload> *s = streams.begin(); s != streams.end(); ++s)
+     s->Poison();
+
+   stats.CalculateDiscounts(discount_config_);
+
+   // NOTE: See special early-return case for unigrams near the top of this function
+ }
+
+ }} // namespaces
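A reading aid for `CalculateDiscounts` above: the loop implements the closed-form modified Kneser-Ney discount estimates of equation (26) in Chen and Goodman, with n_j standing for the code's `s.n[j]`, the number of n-grams of the current order whose adjusted count is exactly j:

```latex
Y = \frac{n_1}{n_1 + 2 n_2}, \qquad
D_0 = 0, \qquad
D_j = j - (j + 1)\, Y\, \frac{n_{j+1}}{n_j} \quad \text{for } j = 1, 2, 3
```

D_3 is the "D3+" value applied to every adjusted count of three or more; the `UTIL_THROW_IF` guards reject n_j = 0 (a division by zero) and any D_j outside [0, j].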
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/adjust_counts.hh ADDED
@@ -0,0 +1,72 @@
+ #ifndef LM_BUILDER_ADJUST_COUNTS_H
+ #define LM_BUILDER_ADJUST_COUNTS_H
+
+ #include "discount.hh"
+ #include "../lm_exception.hh"
+ #include "../../util/exception.hh"
+
+ #include <vector>
+
+ #include <stdint.h>
+
+ namespace util { namespace stream { class ChainPositions; } }
+
+ namespace lm {
+ namespace builder {
+
+ class BadDiscountException : public util::Exception {
+   public:
+     BadDiscountException() throw();
+     ~BadDiscountException() throw();
+ };
+
+ struct DiscountConfig {
+   // Overrides discounts for orders [1,discount_override.size()].
+   std::vector<Discount> overwrite;
+   // If discounting fails for an order, copy them from here.
+   Discount fallback;
+   // What to do when discounts are out of range or would trigger division by
+   // zero. If it does something other than THROW_UP, use fallback_discount.
+   WarningAction bad_action;
+ };
+
+ /* Compute adjusted counts.
+  * Input: unique suffix sorted N-grams (and just the N-grams) with raw counts.
+  * Output: [1,N]-grams with adjusted counts.
+  *   [1,N)-grams are in suffix order
+  *   N-grams are in undefined order (they're going to be sorted anyway).
+  */
+ class AdjustCounts {
+   public:
+     // counts: output
+     // counts_pruned: output
+     // discounts: mostly output. If the input already has entries, they will be kept.
+     // prune_thresholds: input. n-grams with normal (not adjusted) count below this will be pruned.
+     AdjustCounts(
+         const std::vector<uint64_t> &prune_thresholds,
+         std::vector<uint64_t> &counts,
+         std::vector<uint64_t> &counts_pruned,
+         const std::vector<bool> &prune_words,
+         const DiscountConfig &discount_config,
+         std::vector<Discount> &discounts)
+       : prune_thresholds_(prune_thresholds), counts_(counts), counts_pruned_(counts_pruned),
+         prune_words_(prune_words), discount_config_(discount_config), discounts_(discounts)
+     {}
+
+     void Run(const util::stream::ChainPositions &positions);
+
+   private:
+     const std::vector<uint64_t> &prune_thresholds_;
+     std::vector<uint64_t> &counts_;
+     std::vector<uint64_t> &counts_pruned_;
+     const std::vector<bool> &prune_words_;
+
+     DiscountConfig discount_config_;
+     std::vector<Discount> &discounts_;
+ };
+
+ } // namespace builder
+ } // namespace lm
+
+ #endif // LM_BUILDER_ADJUST_COUNTS_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/adjust_counts_test.cc ADDED
@@ -0,0 +1,112 @@
+ #include "adjust_counts.hh"
+
+ #include "../common/ngram_stream.hh"
+ #include "payload.hh"
+ #include "../../util/scoped.hh"
+
+ #include <boost/thread/thread.hpp>
+ #define BOOST_TEST_MODULE AdjustCounts
+ #include <boost/test/unit_test.hpp>
+
+ namespace lm { namespace builder { namespace {
+
+ class KeepCopy {
+   public:
+     KeepCopy() : size_(0) {}
+
+     void Run(const util::stream::ChainPosition &position) {
+       for (util::stream::Link link(position); link; ++link) {
+         mem_.call_realloc(size_ + link->ValidSize());
+         memcpy(static_cast<uint8_t*>(mem_.get()) + size_, link->Get(), link->ValidSize());
+         size_ += link->ValidSize();
+       }
+     }
+
+     uint8_t *Get() { return static_cast<uint8_t*>(mem_.get()); }
+     std::size_t Size() const { return size_; }
+
+   private:
+     util::scoped_malloc mem_;
+     std::size_t size_;
+ };
+
+ struct Gram4 {
+   WordIndex ids[4];
+   uint64_t count;
+ };
+
+ class WriteInput {
+   public:
+     void Run(const util::stream::ChainPosition &position) {
+       NGramStream<BuildingPayload> input(position);
+       Gram4 grams[] = {
+         {{0,0,0,0},10},
+         {{0,0,3,0},3},
+         // bos
+         {{1,1,1,2},5},
+         {{0,0,3,2},5},
+       };
+       for (size_t i = 0; i < sizeof(grams) / sizeof(Gram4); ++i, ++input) {
+         memcpy(input->begin(), grams[i].ids, sizeof(WordIndex) * 4);
+         input->Value().count = grams[i].count;
+       }
+       input.Poison();
+     }
+ };
+
+ BOOST_AUTO_TEST_CASE(Simple) {
+   KeepCopy outputs[4];
+   std::vector<uint64_t> counts;
+   std::vector<Discount> discount;
+   {
+     util::stream::ChainConfig config;
+     config.total_memory = 100;
+     config.block_count = 1;
+     util::stream::Chains chains(4);
+     for (unsigned i = 0; i < 4; ++i) {
+       config.entry_size = NGram<BuildingPayload>::TotalSize(i + 1);
+       chains.push_back(config);
+     }
+
+     chains[3] >> WriteInput();
+     util::stream::ChainPositions for_adjust(chains);
+     for (unsigned i = 0; i < 4; ++i) {
+       chains[i] >> boost::ref(outputs[i]);
+     }
+     chains >> util::stream::kRecycle;
+     std::vector<uint64_t> counts_pruned(4);
+     std::vector<uint64_t> prune_thresholds(4);
+     DiscountConfig discount_config;
+     discount_config.fallback = Discount();
+     discount_config.bad_action = THROW_UP;
+     BOOST_CHECK_THROW(AdjustCounts(prune_thresholds, counts, counts_pruned, std::vector<bool>(), discount_config, discount).Run(for_adjust), BadDiscountException);
+   }
+   BOOST_REQUIRE_EQUAL(4UL, counts.size());
+   BOOST_CHECK_EQUAL(4UL, counts[0]);
+   // These are no longer set because the discounts are bad.
+   /* BOOST_CHECK_EQUAL(4UL, counts[1]);
+   BOOST_CHECK_EQUAL(3UL, counts[2]);
+   BOOST_CHECK_EQUAL(3UL, counts[3]);*/
+   BOOST_REQUIRE_EQUAL(NGram<BuildingPayload>::TotalSize(1) * 4, outputs[0].Size());
+   NGram<BuildingPayload> uni(outputs[0].Get(), 1);
+   BOOST_CHECK_EQUAL(kUNK, *uni.begin());
+   BOOST_CHECK_EQUAL(0ULL, uni.Value().count);
+   uni.NextInMemory();
+   BOOST_CHECK_EQUAL(kBOS, *uni.begin());
+   BOOST_CHECK_EQUAL(0ULL, uni.Value().count);
+   uni.NextInMemory();
+   BOOST_CHECK_EQUAL(0UL, *uni.begin());
+   BOOST_CHECK_EQUAL(2ULL, uni.Value().count);
+   uni.NextInMemory();
+   BOOST_CHECK_EQUAL(2ULL, uni.Value().count);
+   BOOST_CHECK_EQUAL(2UL, *uni.begin());
+
+   BOOST_REQUIRE_EQUAL(NGram<BuildingPayload>::TotalSize(2) * 4, outputs[1].Size());
+   NGram<BuildingPayload> bi(outputs[1].Get(), 2);
+   BOOST_CHECK_EQUAL(0UL, *bi.begin());
+   BOOST_CHECK_EQUAL(0UL, *(bi.begin() + 1));
+   BOOST_CHECK_EQUAL(1ULL, bi.Value().count);
+   bi.NextInMemory();
+ }
+
+ }}} // namespaces
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/combine_counts.hh ADDED
@@ -0,0 +1,31 @@
+ #ifndef LM_BUILDER_COMBINE_COUNTS_H
+ #define LM_BUILDER_COMBINE_COUNTS_H
+
+ #include "payload.hh"
+ #include "../common/ngram.hh"
+ #include "../common/compare.hh"
+ #include "../word_index.hh"
+ #include "../../util/stream/sort.hh"
+
+ #include <functional>
+ #include <string>
+
+ namespace lm {
+ namespace builder {
+
+ // Sum counts for the same n-gram.
+ struct CombineCounts {
+   bool operator()(void *first_void, const void *second_void, const SuffixOrder &compare) const {
+     NGram<BuildingPayload> first(first_void, compare.Order());
+     // There isn't a const version of NGram.
+     NGram<BuildingPayload> second(const_cast<void*>(second_void), compare.Order());
+     if (memcmp(first.begin(), second.begin(), sizeof(WordIndex) * compare.Order())) return false;
+     first.Value().count += second.Value().count;
+     return true;
+   }
+ };
+
+ } // namespace builder
+ } // namespace lm
+
+ #endif // LM_BUILDER_COMBINE_COUNTS_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/corpus_count.cc ADDED
@@ -0,0 +1,287 @@
+ #include "corpus_count.hh"
+
+ #include "payload.hh"
+ #include "../common/ngram.hh"
+ #include "../lm_exception.hh"
+ #include "../vocab.hh"
+ #include "../word_index.hh"
+ #include "../../util/file_stream.hh"
+ #include "../../util/file.hh"
+ #include "../../util/file_piece.hh"
+ #include "../../util/murmur_hash.hh"
+ #include "../../util/probing_hash_table.hh"
+ #include "../../util/scoped.hh"
+ #include "../../util/stream/chain.hh"
+ #include "../../util/tokenize_piece.hh"
+
+ #include <functional>
+
+ #include <stdint.h>
+
+ namespace lm {
+ namespace builder {
+ namespace {
+
+ class DedupeHash : public std::unary_function<const WordIndex *, bool> {
+   public:
+     explicit DedupeHash(std::size_t order) : size_(order * sizeof(WordIndex)) {}
+
+     std::size_t operator()(const WordIndex *start) const {
+       return util::MurmurHashNative(start, size_);
+     }
+
+   private:
+     const std::size_t size_;
+ };
+
+ class DedupeEquals : public std::binary_function<const WordIndex *, const WordIndex *, bool> {
+   public:
+     explicit DedupeEquals(std::size_t order) : size_(order * sizeof(WordIndex)) {}
+
+     bool operator()(const WordIndex *first, const WordIndex *second) const {
+       return !memcmp(first, second, size_);
+     }
+
+   private:
+     const std::size_t size_;
+ };
+
+ struct DedupeEntry {
+   typedef WordIndex *Key;
+   Key GetKey() const { return key; }
+   void SetKey(WordIndex *to) { key = to; }
+   Key key;
+   static DedupeEntry Construct(WordIndex *at) {
+     DedupeEntry ret;
+     ret.key = at;
+     return ret;
+   }
+ };
+
+
+ // TODO: don't have this here, should be with probing hash table defaults?
+ const float kProbingMultiplier = 1.5;
+
+ typedef util::ProbingHashTable<DedupeEntry, DedupeHash, DedupeEquals> Dedupe;
+
+ class Writer {
+   public:
+     Writer(std::size_t order, const util::stream::ChainPosition &position, void *dedupe_mem, std::size_t dedupe_mem_size)
+       : block_(position), gram_(block_->Get(), order),
+         dedupe_invalid_(order, std::numeric_limits<WordIndex>::max()),
+         dedupe_(dedupe_mem, dedupe_mem_size, &dedupe_invalid_[0], DedupeHash(order), DedupeEquals(order)),
+         buffer_(new WordIndex[order - 1]),
+         block_size_(position.GetChain().BlockSize()) {
+       dedupe_.Clear();
+       assert(Dedupe::Size(position.GetChain().BlockSize() / position.GetChain().EntrySize(), kProbingMultiplier) == dedupe_mem_size);
+       if (order == 1) {
+         // Add special words. AdjustCounts is responsible if order != 1.
+         AddUnigramWord(kUNK);
+         AddUnigramWord(kBOS);
+       }
+     }
+
+     ~Writer() {
+       block_->SetValidSize(reinterpret_cast<const uint8_t*>(gram_.begin()) - static_cast<const uint8_t*>(block_->Get()));
+       (++block_).Poison();
+     }
+
+     // Write context with a bunch of <s>
+     void StartSentence() {
+       for (WordIndex *i = gram_.begin(); i != gram_.end() - 1; ++i) {
+         *i = kBOS;
+       }
+     }
+
+     void Append(WordIndex word) {
+       *(gram_.end() - 1) = word;
+       Dedupe::MutableIterator at;
+       bool found = dedupe_.FindOrInsert(DedupeEntry::Construct(gram_.begin()), at);
+       if (found) {
+         // Already present.
+         NGram<BuildingPayload> already(at->key, gram_.Order());
+         ++(already.Value().count);
+         // Shift left by one.
+         memmove(gram_.begin(), gram_.begin() + 1, sizeof(WordIndex) * (gram_.Order() - 1));
+         return;
+       }
+       // Complete the write.
+       gram_.Value().count = 1;
+       // Prepare the next n-gram.
+       if (reinterpret_cast<uint8_t*>(gram_.begin()) + gram_.TotalSize() != static_cast<uint8_t*>(block_->Get()) + block_size_) {
+         NGram<BuildingPayload> last(gram_);
+         gram_.NextInMemory();
+         std::copy(last.begin() + 1, last.end(), gram_.begin());
+         return;
+       }
+       // Block end. Need to store the context in a temporary buffer.
+       std::copy(gram_.begin() + 1, gram_.end(), buffer_.get());
+       dedupe_.Clear();
+       block_->SetValidSize(block_size_);
+       gram_.ReBase((++block_)->Get());
+       std::copy(buffer_.get(), buffer_.get() + gram_.Order() - 1, gram_.begin());
+     }
+
+   private:
+     void AddUnigramWord(WordIndex index) {
+       *gram_.begin() = index;
+       gram_.Value().count = 0;
+       gram_.NextInMemory();
+       if (gram_.Base() == static_cast<uint8_t*>(block_->Get()) + block_size_) {
+         block_->SetValidSize(block_size_);
+         gram_.ReBase((++block_)->Get());
+       }
+     }
+
+     util::stream::Link block_;
+
+     NGram<BuildingPayload> gram_;
+
+     // This is the memory behind the invalid value in dedupe_.
+     std::vector<WordIndex> dedupe_invalid_;
+     // Hash table combiner implementation.
+     Dedupe dedupe_;
+
+     // Small buffer to hold existing ngrams when shifting across a block boundary.
+     boost::scoped_array<WordIndex> buffer_;
+
+     const std::size_t block_size_;
+ };
+
+ } // namespace
+
+ float CorpusCount::DedupeMultiplier(std::size_t order) {
+   return kProbingMultiplier * static_cast<float>(sizeof(DedupeEntry)) / static_cast<float>(NGram<BuildingPayload>::TotalSize(order));
+ }
+
+ std::size_t CorpusCount::VocabUsage(std::size_t vocab_estimate) {
+   return ngram::GrowableVocab<ngram::WriteUniqueWords>::MemUsage(vocab_estimate);
+ }
+
+ CorpusCount::CorpusCount(util::FilePiece &from, int vocab_write, bool dynamic_vocab, uint64_t &token_count, WordIndex &type_count, std::vector<bool> &prune_words, const std::string& prune_vocab_filename, std::size_t entries_per_block, WarningAction disallowed_symbol)
+   : from_(from), vocab_write_(vocab_write), dynamic_vocab_(dynamic_vocab), token_count_(token_count), type_count_(type_count),
+     prune_words_(prune_words), prune_vocab_filename_(prune_vocab_filename),
+     dedupe_mem_size_(Dedupe::Size(entries_per_block, kProbingMultiplier)),
+     dedupe_mem_(util::MallocOrThrow(dedupe_mem_size_)),
+     disallowed_symbol_action_(disallowed_symbol) {
+ }
+
+ namespace {
+   void ComplainDisallowed(StringPiece word, WarningAction &action) {
+     switch (action) {
+       case SILENT:
+         return;
+       case COMPLAIN:
+         std::cerr << "Warning: " << word << " appears in the input. All instances of <s>, </s>, and <unk> will be interpreted as whitespace." << std::endl;
+         action = SILENT;
+         return;
+       case THROW_UP:
+         UTIL_THROW(FormatLoadException, "Special word " << word << " is not allowed in the corpus. I plan to support models containing <unk> in the future. Pass --skip_symbols to convert these symbols to whitespace.");
+     }
+   }
+
+   // Vocab ids are given in a precompiled hash table.
+   class VocabGiven {
+     public:
+       explicit VocabGiven(int fd) {
+         util::MapRead(util::POPULATE_OR_READ, fd, 0, util::CheckOverflow(util::SizeOrThrow(fd)), table_backing_);
+         // Leave space for header with size.
+         table_ = Table(static_cast<char*>(table_backing_.get()) + sizeof(uint64_t), table_backing_.size() - sizeof(uint64_t));
+         bos_ = FindOrInsert("<s>");
+         eos_ = FindOrInsert("</s>");
+       }
+
+       WordIndex FindOrInsert(const StringPiece &word) const {
+         Table::ConstIterator it;
+         if (table_.Find(util::MurmurHash64A(word.data(), word.size()), it)) {
+           return it->value;
+         } else {
+           return 0; // <unk>.
+         }
+       }
+
+       WordIndex Index(const StringPiece &word) const {
+         return FindOrInsert(word);
+       }
+
+       WordIndex Size() const {
+         return *static_cast<const uint64_t*>(table_backing_.get());
+       }
+
+       bool IsSpecial(WordIndex word) const {
+         return word == 0 || word == bos_ || word == eos_;
+       }
+
+     private:
+       util::scoped_memory table_backing_;
+
+       typedef util::ProbingHashTable<ngram::ProbingVocabularyEntry, util::IdentityHash> Table;
+       Table table_;
+
+       WordIndex bos_, eos_;
+   };
+ } // namespace
+
+ void CorpusCount::Run(const util::stream::ChainPosition &position) {
+   if (dynamic_vocab_) {
+     ngram::GrowableVocab<ngram::WriteUniqueWords> vocab(type_count_, vocab_write_);
+     RunWithVocab(position, vocab);
+   } else {
+     VocabGiven vocab(vocab_write_);
+     RunWithVocab(position, vocab);
+   }
+ }
+
+ template <class Vocab> void CorpusCount::RunWithVocab(const util::stream::ChainPosition &position, Vocab &vocab) {
+   token_count_ = 0;
+   type_count_ = 0;
+   const WordIndex end_sentence = vocab.FindOrInsert("</s>");
+   Writer writer(NGram<BuildingPayload>::OrderFromSize(position.GetChain().EntrySize()), position, dedupe_mem_.get(), dedupe_mem_size_);
+   uint64_t count = 0;
+   bool delimiters[256];
+   util::BoolCharacter::Build("\0\t\n\r ", delimiters);
+   StringPiece w;
+   while(true) {
+     writer.StartSentence();
+     while (from_.ReadWordSameLine(w, delimiters)) {
+       WordIndex word = vocab.FindOrInsert(w);
+       if (UTIL_UNLIKELY(vocab.IsSpecial(word))) {
+         ComplainDisallowed(w, disallowed_symbol_action_);
+         continue;
+       }
+       writer.Append(word);
+       ++count;
+     }
+     if (!from_.ReadLineOrEOF(w)) break;
+     writer.Append(end_sentence);
+   }
+   token_count_ = count;
+   type_count_ = vocab.Size();
+
+   // Create list of unigrams that are supposed to be pruned
+   if (!prune_vocab_filename_.empty()) {
+     try {
+       util::FilePiece prune_vocab_file(prune_vocab_filename_.c_str());
+
+       prune_words_.resize(vocab.Size(), true);
+       try {
+         while (true) {
+           StringPiece word(prune_vocab_file.ReadDelimited(delimiters));
+           prune_words_[vocab.Index(word)] = false;
+         }
+       } catch (const util::EndOfFileException &e) {}
+
+       // Never prune <unk>, <s>, </s>
+       prune_words_[kUNK] = false;
+       prune_words_[kBOS] = false;
+       prune_words_[kEOS] = false;
+
+     } catch (const util::Exception &e) {
+       std::cerr << e.what() << std::endl;
+       abort();
+     }
+   }
+ }
+
+ } // namespace builder
+ } // namespace lm
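To make the control flow of `Writer` above easier to follow: `StartSentence` pads the first `order - 1` slots with `<s>`, each `Append` drops the new word into the last slot, and after every append the context shifts left by one word (via `memmove` on a dedupe hit, or by copying into the next in-memory n-gram otherwise). A minimal sketch of just that sliding window, with the block and dedupe machinery stripped out (`Window` is a hypothetical class for illustration, not part of KenLM):

```cpp
#include <cstring>
#include <vector>
#include <stdint.h>

typedef uint32_t WordIndex;

// Sliding n-gram window: keep the last (order - 1) words as context and
// emit one complete n-gram per appended word, mirroring Writer's behavior.
class Window {
 public:
  explicit Window(std::size_t order) : gram_(order) {}

  // Pad the context with <s> so the first real word already forms a full n-gram.
  void StartSentence(WordIndex bos) {
    for (std::size_t i = 0; i + 1 < gram_.size(); ++i) gram_[i] = bos;
  }

  // Place the new word in the last slot, snapshot the completed n-gram,
  // then shift the context left by one to make room for the next word.
  const std::vector<WordIndex> &Append(WordIndex word) {
    gram_.back() = word;
    emitted_ = gram_;
    if (gram_.size() > 1)
      std::memmove(&gram_[0], &gram_[1], sizeof(WordIndex) * (gram_.size() - 1));
    return emitted_;
  }

 private:
  std::vector<WordIndex> gram_, emitted_;
};
```

For order 3, appending "looking" then "on" after `StartSentence` yields `<s> <s> looking` then `<s> looking on`, which is the pattern the expectations in corpus_count_test.cc below check for.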
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/corpus_count.hh ADDED
@@ -0,0 +1,56 @@
+ #ifndef LM_BUILDER_CORPUS_COUNT_H
+ #define LM_BUILDER_CORPUS_COUNT_H
+
+ #include "../lm_exception.hh"
+ #include "../word_index.hh"
+ #include "../../util/scoped.hh"
+
+ #include <cstddef>
+ #include <string>
+ #include <stdint.h>
+ #include <vector>
+
+ namespace util {
+   class FilePiece;
+   namespace stream {
+     class ChainPosition;
+   } // namespace stream
+ } // namespace util
+
+ namespace lm {
+ namespace builder {
+
+ class CorpusCount {
+   public:
+     // Memory usage will be DedupeMultiplier(order) * block_size + total_chain_size + unknown vocab_hash_size
+     static float DedupeMultiplier(std::size_t order);
+
+     // How much memory vocabulary will use based on estimated size of the vocab.
+     static std::size_t VocabUsage(std::size_t vocab_estimate);
+
+     // token_count: out.
+     // type_count aka vocabulary size. Initialize to an estimate. It is set to the exact value.
+     CorpusCount(util::FilePiece &from, int vocab_write, bool dynamic_vocab, uint64_t &token_count, WordIndex &type_count, std::vector<bool> &prune_words, const std::string& prune_vocab_filename, std::size_t entries_per_block, WarningAction disallowed_symbol);
+
+     void Run(const util::stream::ChainPosition &position);
+
+   private:
+     template <class Vocab> void RunWithVocab(const util::stream::ChainPosition &position, Vocab &vocab);
+
+     util::FilePiece &from_;
+     int vocab_write_;
+     bool dynamic_vocab_;
+     uint64_t &token_count_;
+     WordIndex &type_count_;
+     std::vector<bool> &prune_words_;
+     const std::string prune_vocab_filename_;
+
+     std::size_t dedupe_mem_size_;
+     util::scoped_malloc dedupe_mem_;
+
+     WarningAction disallowed_symbol_action_;
+ };
+
+ } // namespace builder
+ } // namespace lm
+ #endif // LM_BUILDER_CORPUS_COUNT_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/corpus_count_test.cc ADDED
@@ -0,0 +1,85 @@
+ #include "corpus_count.hh"
+
+ #include "payload.hh"
+ #include "../common/ngram_stream.hh"
+ #include "../common/ngram.hh"
+
+ #include "../../util/file.hh"
+ #include "../../util/file_piece.hh"
+ #include "../../util/tokenize_piece.hh"
+ #include "../../util/stream/chain.hh"
+ #include "../../util/stream/stream.hh"
+
+ #define BOOST_TEST_MODULE CorpusCountTest
+ #include <boost/test/unit_test.hpp>
+
+ namespace lm { namespace builder { namespace {
+
+ #define Check(str, cnt) { \
+   BOOST_REQUIRE(stream); \
+   w = stream->begin(); \
+   for (util::TokenIter<util::AnyCharacter, true> t(str, " "); t; ++t, ++w) { \
+     BOOST_CHECK_EQUAL(*t, v[*w]); \
+   } \
+   BOOST_CHECK_EQUAL((uint64_t)cnt, stream->Value().count); \
+   ++stream; \
+ }
+
+ class CheckAnswers {
+   public:
+     void Run(const util::stream::ChainPosition &position) {
+       NGramStream<BuildingPayload> stream(position);
+       const char *v[] = {"<unk>", "<s>", "</s>", "looking", "on", "a", "little", "more", "loin", "foo", "bar"};
+       WordIndex *w;
+
+       Check("<s> <s> looking", 1);
+       Check("<s> looking on", 1);
+       Check("looking on a", 1);
+       Check("on a little", 2);
+       Check("a little more", 2);
+       Check("little more loin", 2);
+       Check("more loin </s>", 2);
+       Check("<s> <s> on", 2);
+       Check("<s> on a", 1);
+       Check("<s> on foo", 1);
+       Check("on foo little", 1);
+       Check("foo little more", 1);
+       Check("little more loin", 1);
+       Check("more loin </s>", 1);
+       Check("<s> <s> bar", 1);
+       Check("<s> bar </s>", 1);
+       Check("<s> <s> </s>", 1);
+       BOOST_CHECK(!stream);
+     }
+ };
+
+ BOOST_AUTO_TEST_CASE(Short) {
+   util::scoped_fd input_file(util::MakeTemp("corpus_count_test_temp"));
+   const char input[] = "looking on a little more loin\non a little more loin\non foo little more loin\nbar\n\n";
+   // Blocks of 10 are
+   // looking on a little more loin </s> on a little[duplicate] more[duplicate] loin[duplicate] </s>[duplicate] on[duplicate] foo
+   // little more loin </s> bar </s> </s>
+
+   util::WriteOrThrow(input_file.get(), input, sizeof(input) - 1);
+   util::SeekOrThrow(input_file.get(), 0);
+   util::FilePiece input_piece(input_file.release(), "temp file");
+
+   util::stream::ChainConfig config;
+   config.entry_size = NGram<BuildingPayload>::TotalSize(3);
+   config.total_memory = config.entry_size * 20;
+   config.block_count = 2;
+
+   util::scoped_fd vocab(util::MakeTemp("corpus_count_test_vocab"));
+
+   uint64_t token_count;
+   WordIndex type_count = 10;
+   std::vector<bool> prune_words;
+   util::stream::Chain chain(config);
+   CorpusCount counter(input_piece, vocab.get(), true, token_count, type_count, prune_words, "", chain.BlockSize() / chain.EntrySize(), SILENT);
+   chain >> boost::ref(counter) >> CheckAnswers() >> util::stream::kRecycle;
+
+   chain.Wait();
+   BOOST_CHECK_EQUAL(11, type_count);
+ }
+
+ }}} // namespaces
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/count_ngrams_main.cc ADDED
@@ -0,0 +1,99 @@
+ #include "combine_counts.hh"
+ #include "corpus_count.hh"
+ #include "../common/compare.hh"
+ #include "../../util/stream/chain.hh"
+ #include "../../util/stream/io.hh"
+ #include "../../util/stream/sort.hh"
+ #include "../../util/file.hh"
+ #include "../../util/file_piece.hh"
+ #include "../../util/usage.hh"
+
+ #include <boost/program_options.hpp>
+
+ #include <string>
+
+ namespace {
+ class SizeNotify {
+   public:
+     SizeNotify(std::size_t &out) : behind_(out) {}
+
+     void operator()(const std::string &from) {
+       behind_ = util::ParseSize(from);
+     }
+
+   private:
+     std::size_t &behind_;
+ };
+
+ boost::program_options::typed_value<std::string> *SizeOption(std::size_t &to, const char *default_value) {
+   return boost::program_options::value<std::string>()->notifier(SizeNotify(to))->default_value(default_value);
+ }
+
+ } // namespace
+
+ int main(int argc, char *argv[]) {
+   namespace po = boost::program_options;
+   unsigned order;
+   std::size_t ram;
+   std::string temp_prefix, vocab_table, vocab_list;
+   po::options_description options("corpus count");
+   options.add_options()
+     ("help,h", po::bool_switch(), "Show this help message")
+     ("order,o", po::value<unsigned>(&order)->required(), "Order")
+     ("temp_prefix,T", po::value<std::string>(&temp_prefix)->default_value(util::DefaultTempDirectory()), "Temporary file prefix")
+     ("memory,S", SizeOption(ram, "80%"), "RAM")
+     ("read_vocab_table", po::value<std::string>(&vocab_table), "Vocabulary hash table to read. This should be a probing hash table with size at the beginning.")
+     ("write_vocab_list", po::value<std::string>(&vocab_list), "Vocabulary list to write as null-delimited strings.");
+
+   po::variables_map vm;
+   po::store(po::parse_command_line(argc, argv, options), vm);
+   if (argc == 1 || vm["help"].as<bool>()) {
+     std::cerr << "Counts n-grams from standard input.\n" << options << std::endl;
+     return 1;
+   }
+   po::notify(vm);
+
+   if (!(vocab_table.empty() ^ vocab_list.empty())) {
+     std::cerr << "Specify one of --read_vocab_table or --write_vocab_list for vocabulary handling." << std::endl;
+     return 1;
+   }
+
+   util::NormalizeTempPrefix(temp_prefix);
+
+   util::scoped_fd vocab_file(vocab_table.empty() ? util::CreateOrThrow(vocab_list.c_str()) : util::OpenReadOrThrow(vocab_table.c_str()));
+
+   std::size_t blocks = 2;
+   std::size_t remaining_size = ram - util::SizeOrThrow(vocab_file.get());
+
+   std::size_t memory_for_chain =
+     // This much memory to work with after vocab hash table.
+     static_cast<float>(remaining_size) /
+     // Solve for block size including the dedupe multiplier for one block.
+     (static_cast<float>(blocks) + lm::builder::CorpusCount::DedupeMultiplier(order)) *
+     // Chain likes memory expressed in terms of total memory.
+     static_cast<float>(blocks);
+   std::cerr << "Using " << memory_for_chain << " for chains." << std::endl;
+
+   util::stream::Chain chain(util::stream::ChainConfig(lm::NGram<uint64_t>::TotalSize(order), blocks, memory_for_chain));
+   util::FilePiece f(0, NULL, &std::cerr);
+   uint64_t token_count = 0;
+   lm::WordIndex type_count = 0;
+   std::vector<bool> empty_prune;
+   std::string empty_string;
+   lm::builder::CorpusCount counter(f, vocab_file.get(), vocab_table.empty(), token_count, type_count, empty_prune, empty_string, chain.BlockSize() / chain.EntrySize(), lm::THROW_UP);
+   chain >> boost::ref(counter);
+
+   util::stream::SortConfig sort_config;
+   sort_config.temp_prefix = temp_prefix;
+   sort_config.buffer_size = 64 * 1024 * 1024;
+   // Intended to run in parallel.
+   sort_config.total_memory = remaining_size;
+   util::stream::Sort<lm::SuffixOrder, lm::builder::CombineCounts> sorted(chain, sort_config, lm::SuffixOrder(order), lm::builder::CombineCounts());
+   chain.Wait(true);
+   util::stream::Chain chain2(util::stream::ChainConfig(lm::NGram<uint64_t>::TotalSize(order), blocks, sort_config.buffer_size));
+   sorted.Output(chain2);
+   // Inefficiently copies if there's only one block.
+   chain2 >> util::stream::WriteAndRecycle(1);
+   chain2.Wait(true);
+   return 0;
+ }
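The block-size arithmetic in `main` above is easier to see solved out: with `blocks` chain blocks and dedupe multiplier m = `DedupeMultiplier(order)`, the dedupe hash table costs m times one block, so the code solves remaining = (blocks + m) * block_size and hands the chain blocks * block_size. A standalone sketch (`ChainMemory` is a hypothetical helper name, mirroring the expression in `main`):

```cpp
#include <cstddef>

// Split the RAM left after the vocab hash table between `blocks` chain blocks
// and one dedupe hash table sized at dedupe_multiplier * block_size:
//   remaining = (blocks + dedupe_multiplier) * block_size
// The chain config wants total memory, i.e. blocks * block_size.
std::size_t ChainMemory(std::size_t remaining, std::size_t blocks, float dedupe_multiplier) {
  float block_size = static_cast<float>(remaining) /
                     (static_cast<float>(blocks) + dedupe_multiplier);
  return static_cast<std::size_t>(block_size * static_cast<float>(blocks));
}
```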
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/debug_print.hh ADDED
@@ -0,0 +1,70 @@
+ #ifndef LM_BUILDER_DEBUG_PRINT_H
+ #define LM_BUILDER_DEBUG_PRINT_H
+
+ #include "payload.hh"
+ #include "../common/print.hh"
+ #include "../common/ngram_stream.hh"
+ #include "../../util/file_stream.hh"
+ #include "../../util/file.hh"
+
+ #include <boost/lexical_cast.hpp>
+
+ namespace lm { namespace builder {
+ // Not defined, only specialized.
+ template <class T> void PrintPayload(util::FileStream &to, const BuildingPayload &payload);
+ template <> inline void PrintPayload<uint64_t>(util::FileStream &to, const BuildingPayload &payload) {
+   to << payload.count;
+ }
+ template <> inline void PrintPayload<Uninterpolated>(util::FileStream &to, const BuildingPayload &payload) {
+   to << log10(payload.uninterp.prob) << ' ' << log10(payload.uninterp.gamma);
+ }
+ template <> inline void PrintPayload<ProbBackoff>(util::FileStream &to, const BuildingPayload &payload) {
+   to << payload.complete.prob << ' ' << payload.complete.backoff;
+ }
+
+ // template parameter is the type stored.
+ template <class V> class Print {
+   public:
+     static void DumpSeparateFiles(const VocabReconstitute &vocab, const std::string &file_base, util::stream::Chains &chains) {
+       for (unsigned int i = 0; i < chains.size(); ++i) {
+         std::string file(file_base + boost::lexical_cast<std::string>(i));
+         chains[i] >> Print(vocab, util::CreateOrThrow(file.c_str()));
+       }
+     }
+
+     explicit Print(const VocabReconstitute &vocab, int fd) : vocab_(vocab), to_(fd) {}
+
+     void Run(const util::stream::ChainPositions &chains) {
+       util::scoped_fd fd(to_);
+       util::FileStream out(to_);
+       NGramStreams<BuildingPayload> streams(chains);
+       for (NGramStream<BuildingPayload> *s = streams.begin(); s != streams.end(); ++s) {
+         DumpStream(*s, out);
+       }
+     }
+
+     void Run(const util::stream::ChainPosition &position) {
+       util::scoped_fd fd(to_);
+       util::FileStream out(to_);
+       NGramStream<BuildingPayload> stream(position);
+       DumpStream(stream, out);
+     }
+
+   private:
+     void DumpStream(NGramStream<BuildingPayload> &stream, util::FileStream &to) {
+       for (; stream; ++stream) {
+         PrintPayload<V>(to, stream->Value());
+         for (const WordIndex *w = stream->begin(); w != stream->end(); ++w) {
+           to << ' ' << vocab_.Lookup(*w) << '=' << *w;
+         }
+         to << '\n';
+       }
+     }
+
+     const VocabReconstitute &vocab_;
+     int to_;
+ };
+
+ }} // namespaces
+
+ #endif // LM_BUILDER_DEBUG_PRINT_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/discount.hh ADDED
@@ -0,0 +1,26 @@
+ #ifndef LM_BUILDER_DISCOUNT_H
+ #define LM_BUILDER_DISCOUNT_H
+
+ #include <algorithm>
+
+ #include <stdint.h>
+
+ namespace lm {
+ namespace builder {
+
+ struct Discount {
+   float amount[4];
+
+   float Get(uint64_t count) const {
+     return amount[std::min<uint64_t>(count, 3)];
+   }
+
+   float Apply(uint64_t count) const {
+     return static_cast<float>(count) - Get(count);
+   }
+ };
+
+ } // namespace builder
+ } // namespace lm
+
+ #endif // LM_BUILDER_DISCOUNT_H
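A quick illustration of the clamping in `Get` (hypothetical discount values, not taken from the source): counts of 3 or more all map to `amount[3]`, which is why that slot is reported as "D3+" elsewhere in the builder:

```cpp
#include <cassert>
#include "discount.hh"  // the header above

int main() {
  lm::builder::Discount d;
  // Hypothetical discounts: amount[0] is always 0; D1, D2, D3+ come from estimation.
  d.amount[0] = 0.0f; d.amount[1] = 0.5f; d.amount[2] = 0.9f; d.amount[3] = 1.2f;
  assert(d.Get(2) == 0.9f);           // count 2 uses amount[2]
  assert(d.Get(7) == 1.2f);           // any count >= 3 uses amount[3], the "D3+" slot
  assert(d.Apply(7) == 7.0f - 1.2f);  // discounted (adjusted) count
  return 0;
}
```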
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/dump_counts_main.cc ADDED
@@ -0,0 +1,36 @@
+#include "../common/print.hh"
+#include "../word_index.hh"
+#include "../../util/file.hh"
+#include "../../util/read_compressed.hh"
+
+#include <boost/lexical_cast.hpp>
+
+#include <iostream>
+#include <vector>
+
+int main(int argc, char *argv[]) {
+  if (argc != 4) {
+    std::cerr << "Usage: " << argv[0] << " counts vocabulary order\n"
+      "The counts file contains records with 4-byte vocabulary ids followed by 8-byte\n"
+      "counts. Each record has order many vocabulary ids.\n"
+      "The vocabulary file contains the words delimited by NULL in order of id.\n"
+      "The vocabulary file may not be compressed because it is mmapped but the counts\n"
+      "file can be compressed.\n";
+    return 1;
+  }
+  util::ReadCompressed counts(util::OpenReadOrThrow(argv[1]));
+  util::scoped_fd vocab_file(util::OpenReadOrThrow(argv[2]));
+  lm::VocabReconstitute vocab(vocab_file.get());
+  unsigned int order = boost::lexical_cast<unsigned int>(argv[3]);
+  std::vector<char> record(sizeof(uint32_t) * order + sizeof(uint64_t));
+  while (std::size_t got = counts.ReadOrEOF(&*record.begin(), record.size())) {
+    UTIL_THROW_IF(got != record.size(), util::Exception, "Read " << got << " bytes at the end of file, which is not a complete record of length " << record.size());
+    const lm::WordIndex *words = reinterpret_cast<const lm::WordIndex*>(&*record.begin());
+    for (const lm::WordIndex *i = words; i != words + order; ++i) {
+      UTIL_THROW_IF(*i >= vocab.Size(), util::Exception, "Vocab ID " << *i << " is larger than the vocab file's maximum of " << vocab.Size() << ". Are you sure you have the right order and vocab file for these counts?");
+      std::cout << vocab.Lookup(*i) << ' ';
+    }
+    // TODO don't use std::cout because it is slow. Add fast uint64_t printing support to FileStream.
+    std::cout << *reinterpret_cast<const uint64_t*>(words + order) << '\n';
+  }
+}
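Each record in the counts file is a fixed-width byte string: `order` 4-byte vocabulary ids followed by one 8-byte count, with no delimiters, in native byte order. A hedged sketch of producing one such record for an order-3 model (the file name and ids here are invented for illustration):

    #include <cstdint>
    #include <cstdio>

    // Writes a single order-3 record in the layout dump_counts expects:
    // three 4-byte ids then an 8-byte count, all in native byte order.
    int main() {
      std::FILE *out = std::fopen("counts.bin", "wb");  // hypothetical file name
      if (!out) return 1;
      uint32_t ids[3] = {5, 17, 2};  // invented vocabulary ids
      uint64_t count = 42;
      std::fwrite(ids, sizeof(uint32_t), 3, out);
      std::fwrite(&count, sizeof(uint64_t), 1, out);
      std::fclose(out);
      return 0;
    }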
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/hash_gamma.hh ADDED
@@ -0,0 +1,19 @@
+#ifndef LM_BUILDER_HASH_GAMMA__
+#define LM_BUILDER_HASH_GAMMA__
+
+#include <stdint.h>
+
+namespace lm { namespace builder {
+
+#pragma pack(push)
+#pragma pack(4)
+
+struct HashGamma {
+  uint64_t hash_value;
+  float gamma;
+};
+
+#pragma pack(pop)
+
+}} // namespaces
+#endif // LM_BUILDER_HASH_GAMMA__
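The pack(4) directive matters for the stream format: on typical ABIs an unpacked struct would align the uint64_t to 8 bytes and pad the whole struct to 16 bytes, while packing to 4-byte boundaries makes it exactly 12, so arrays of HashGamma entries carry no padding. A small sketch that checks this assumption (the exact sizes are platform-dependent):

    #include <cstdint>
    #include <cstdio>

    #pragma pack(push)
    #pragma pack(4)
    struct PackedGamma { uint64_t hash_value; float gamma; };  // mirrors HashGamma
    #pragma pack(pop)

    struct UnpackedGamma { uint64_t hash_value; float gamma; };

    int main() {
      // Expected on typical platforms: 12 packed vs 16 unpacked.
      std::printf("packed=%zu unpacked=%zu\n", sizeof(PackedGamma), sizeof(UnpackedGamma));
      return 0;
    }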
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/header_info.hh ADDED
@@ -0,0 +1,28 @@
+#ifndef LM_BUILDER_HEADER_INFO_H
+#define LM_BUILDER_HEADER_INFO_H
+
+#include <string>
+#include <vector>
+#include <stdint.h>
+
+namespace lm { namespace builder {
+
+// Some configuration info that is used to add
+// comments to the beginning of an ARPA file
+struct HeaderInfo {
+  std::string input_file;
+  uint64_t token_count;
+  std::vector<uint64_t> counts_pruned;
+
+  HeaderInfo() {}
+
+  HeaderInfo(const std::string& input_file_in, uint64_t token_count_in, const std::vector<uint64_t> &counts_pruned_in)
+    : input_file(input_file_in), token_count(token_count_in), counts_pruned(counts_pruned_in) {}
+
+  // TODO: Add smoothing type
+  // TODO: More info if multiple models were interpolated
+};
+
+}} // namespaces
+
+#endif
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/initial_probabilities.cc ADDED
@@ -0,0 +1,306 @@
+#include "initial_probabilities.hh"
+
+#include "discount.hh"
+#include "hash_gamma.hh"
+#include "payload.hh"
+#include "../common/special.hh"
+#include "../common/ngram_stream.hh"
+#include "../../util/murmur_hash.hh"
+#include "../../util/file.hh"
+#include "../../util/stream/chain.hh"
+#include "../../util/stream/io.hh"
+#include "../../util/stream/stream.hh"
+
+#include <vector>
+
+namespace lm { namespace builder {
+
+namespace {
+struct BufferEntry {
+  // Gamma from page 20 of Chen and Goodman.
+  float gamma;
+  // \sum_w a(c w) for all w.
+  float denominator;
+};
+
+struct HashBufferEntry : public BufferEntry {
+  // Hash value of ngram. Used to join contexts with backoffs.
+  uint64_t hash_value;
+};
+
+// Reads all entries in order like NGramStream does.
+// But deletes any entries that have CutoffCount below or equal to pruning
+// threshold.
+class PruneNGramStream {
+  public:
+    PruneNGramStream(const util::stream::ChainPosition &position, const SpecialVocab &specials) :
+        current_(NULL, NGram<BuildingPayload>::OrderFromSize(position.GetChain().EntrySize())),
+        dest_(NULL, NGram<BuildingPayload>::OrderFromSize(position.GetChain().EntrySize())),
+        currentCount_(0),
+        block_(position),
+        specials_(specials)
+    {
+      StartBlock();
+    }
+
+    NGram<BuildingPayload> &operator*() { return current_; }
+    NGram<BuildingPayload> *operator->() { return &current_; }
+
+    operator bool() const {
+      return block_;
+    }
+
+    PruneNGramStream &operator++() {
+      assert(block_);
+      if(UTIL_UNLIKELY(current_.Order() == 1 && specials_.IsSpecial(*current_.begin())))
+        dest_.NextInMemory();
+      else if(currentCount_ > 0) {
+        if(dest_.Base() < current_.Base()) {
+          memcpy(dest_.Base(), current_.Base(), current_.TotalSize());
+        }
+        dest_.NextInMemory();
+      }
+
+      current_.NextInMemory();
+
+      uint8_t *block_base = static_cast<uint8_t*>(block_->Get());
+      if (current_.Base() == block_base + block_->ValidSize()) {
+        block_->SetValidSize(dest_.Base() - block_base);
+        ++block_;
+        StartBlock();
+        if (block_) {
+          currentCount_ = current_.Value().CutoffCount();
+        }
+      } else {
+        currentCount_ = current_.Value().CutoffCount();
+      }
+
+      return *this;
+    }
+
+  private:
+    void StartBlock() {
+      for (; ; ++block_) {
+        if (!block_) return;
+        if (block_->ValidSize()) break;
+      }
+      current_.ReBase(block_->Get());
+      currentCount_ = current_.Value().CutoffCount();
+
+      dest_.ReBase(block_->Get());
+    }
+
+    NGram<BuildingPayload> current_; // input iterator
+    NGram<BuildingPayload> dest_;    // output iterator
+
+    uint64_t currentCount_;
+
+    util::stream::Link block_;
+
+    const SpecialVocab specials_;
+};
+
+// Extract an array of HashGamma from an array of BufferEntry.
+class OnlyGamma {
+  public:
+    explicit OnlyGamma(bool pruning) : pruning_(pruning) {}
+
+    void Run(const util::stream::ChainPosition &position) {
+      for (util::stream::Link block_it(position); block_it; ++block_it) {
+        if(pruning_) {
+          const HashBufferEntry *in = static_cast<const HashBufferEntry*>(block_it->Get());
+          const HashBufferEntry *end = static_cast<const HashBufferEntry*>(block_it->ValidEnd());
+
+          // Just make it point to the beginning of the stream so it can be overwritten
+          // with HashGamma values. Do not attempt to interpret the values until set below.
+          HashGamma *out = static_cast<HashGamma*>(block_it->Get());
+          for (; in < end; out += 1, in += 1) {
+            // buffering, otherwise might overwrite values too early
+            float gamma_buf = in->gamma;
+            uint64_t hash_buf = in->hash_value;
+
+            out->gamma = gamma_buf;
+            out->hash_value = hash_buf;
+          }
+          block_it->SetValidSize((block_it->ValidSize() * sizeof(HashGamma)) / sizeof(HashBufferEntry));
+        }
+        else {
+          float *out = static_cast<float*>(block_it->Get());
+          const float *in = out;
+          const float *end = static_cast<const float*>(block_it->ValidEnd());
+          for (out += 1, in += 2; in < end; out += 1, in += 2) {
+            *out = *in;
+          }
+          block_it->SetValidSize(block_it->ValidSize() / 2);
+        }
+      }
+    }
+
+  private:
+    bool pruning_;
+};
+
+class AddRight {
+  public:
+    AddRight(const Discount &discount, const util::stream::ChainPosition &input, bool pruning)
+      : discount_(discount), input_(input), pruning_(pruning) {}
+
+    void Run(const util::stream::ChainPosition &output) {
+      NGramStream<BuildingPayload> in(input_);
+      util::stream::Stream out(output);
+
+      std::vector<WordIndex> previous(in->Order() - 1);
+      // Silly windows requires this workaround to just get an invalid pointer when empty.
+      void *const previous_raw = previous.empty() ? NULL : static_cast<void*>(&previous[0]);
+      const std::size_t size = sizeof(WordIndex) * previous.size();
+
+      for(; in; ++out) {
+        memcpy(previous_raw, in->begin(), size);
+        uint64_t denominator = 0;
+        uint64_t normalizer = 0;
+
+        uint64_t counts[4];
+        memset(counts, 0, sizeof(counts));
+        do {
+          denominator += in->Value().UnmarkedCount();
+
+          // Collect unused probability mass from pruning.
+          // Becomes 0 for unpruned ngrams.
+          normalizer += in->Value().UnmarkedCount() - in->Value().CutoffCount();
+
+          // Chen&Goodman do not mention counting based on cutoffs, but
+          // backoff becomes larger than 1 otherwise, so probably needs
+          // to count cutoffs. Counts normally without pruning.
+          if(in->Value().CutoffCount() > 0)
+            ++counts[std::min(in->Value().CutoffCount(), static_cast<uint64_t>(3))];
+
+        } while (++in && !memcmp(previous_raw, in->begin(), size));
+
+        BufferEntry &entry = *reinterpret_cast<BufferEntry*>(out.Get());
+        entry.denominator = static_cast<float>(denominator);
+        entry.gamma = 0.0;
+        for (unsigned i = 1; i <= 3; ++i) {
+          entry.gamma += discount_.Get(i) * static_cast<float>(counts[i]);
+        }
+
+        // Makes model sum to 1 with pruning (I hope).
+        entry.gamma += normalizer;
+
+        entry.gamma /= entry.denominator;
+
+        if(pruning_) {
+          // If pruning is enabled the stream actually contains HashBufferEntry, see InitialProbabilities(...),
+          // so add a hash value that identifies the current ngram.
+          static_cast<HashBufferEntry*>(&entry)->hash_value = util::MurmurHashNative(previous_raw, size);
+        }
+      }
+      out.Poison();
+    }
+
+  private:
+    const Discount &discount_;
+    const util::stream::ChainPosition input_;
+    bool pruning_;
+};
+
+class MergeRight {
+  public:
+    MergeRight(bool interpolate_unigrams, const util::stream::ChainPosition &from_adder, const Discount &discount, const SpecialVocab &specials)
+      : interpolate_unigrams_(interpolate_unigrams), from_adder_(from_adder), discount_(discount), specials_(specials) {}
+
+    // calculate the initial probability of each n-gram (before order-interpolation)
+    // Run() gets invoked once for each order
+    void Run(const util::stream::ChainPosition &primary) {
+      util::stream::Stream summed(from_adder_);
+
+      PruneNGramStream grams(primary, specials_);
+
+      // Without interpolation, the interpolation weight goes to <unk>.
+      if (grams->Order() == 1) {
+        BufferEntry sums(*static_cast<const BufferEntry*>(summed.Get()));
+        // Special case for <unk>
+        assert(*grams->begin() == kUNK);
+        float gamma_assign;
+        if (interpolate_unigrams_) {
+          // Default: treat <unk> like a zeroton.
+          gamma_assign = sums.gamma;
+          grams->Value().uninterp.prob = 0.0;
+        } else {
+          // SRI: give all the interpolation mass to <unk>
+          gamma_assign = 0.0;
+          grams->Value().uninterp.prob = sums.gamma;
+        }
+        grams->Value().uninterp.gamma = gamma_assign;
+
+        for (++grams; *grams->begin() != specials_.BOS(); ++grams) {
+          grams->Value().uninterp.prob = discount_.Apply(grams->Value().count) / sums.denominator;
+          grams->Value().uninterp.gamma = gamma_assign;
+        }
+
+        // Special case for <s>: probability 1.0. This allows <s> to be
+        // explicitly scored as part of the sentence without impacting
+        // probability and computes q correctly as b(<s>).
+        assert(*grams->begin() == specials_.BOS());
+        grams->Value().uninterp.prob = 1.0;
+        grams->Value().uninterp.gamma = 0.0;
+
+        while (++grams) {
+          grams->Value().uninterp.prob = discount_.Apply(grams->Value().count) / sums.denominator;
+          grams->Value().uninterp.gamma = gamma_assign;
+        }
+        ++summed;
+        return;
+      }
+
+      std::vector<WordIndex> previous(grams->Order() - 1);
+      const std::size_t size = sizeof(WordIndex) * previous.size();
+      for (; grams; ++summed) {
+        memcpy(&previous[0], grams->begin(), size);
+        const BufferEntry &sums = *static_cast<const BufferEntry*>(summed.Get());
+
+        do {
+          BuildingPayload &pay = grams->Value();
+          pay.uninterp.prob = discount_.Apply(grams->Value().UnmarkedCount()) / sums.denominator;
+          pay.uninterp.gamma = sums.gamma;
+        } while (++grams && !memcmp(&previous[0], grams->begin(), size));
+      }
+    }
+
+  private:
+    bool interpolate_unigrams_;
+    util::stream::ChainPosition from_adder_;
+    Discount discount_;
+    const SpecialVocab specials_;
+};
+
+} // namespace
+
+void InitialProbabilities(
+    const InitialProbabilitiesConfig &config,
+    const std::vector<Discount> &discounts,
+    util::stream::Chains &primary,
+    util::stream::Chains &second_in,
+    util::stream::Chains &gamma_out,
+    const std::vector<uint64_t> &prune_thresholds,
+    bool prune_vocab,
+    const SpecialVocab &specials) {
+  for (size_t i = 0; i < primary.size(); ++i) {
+    util::stream::ChainConfig gamma_config = config.adder_out;
+    if(prune_vocab || prune_thresholds[i] > 0)
+      gamma_config.entry_size = sizeof(HashBufferEntry);
+    else
+      gamma_config.entry_size = sizeof(BufferEntry);
+
+    util::stream::ChainPosition second(second_in[i].Add());
+    second_in[i] >> util::stream::kRecycle;
+    gamma_out.push_back(gamma_config);
+    gamma_out[i] >> AddRight(discounts[i], second, prune_vocab || prune_thresholds[i] > 0);
+
+    primary[i] >> MergeRight(config.interpolate_unigrams, gamma_out[i].Add(), discounts[i], specials);
+
+    // Don't bother with the OnlyGamma thread for something to discard.
+    if (i) gamma_out[i] >> OnlyGamma(prune_vocab || prune_thresholds[i] > 0);
+  }
+}
+
+}} // namespaces
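AddRight's backoff weight is the Chen-Goodman gamma for a context: the discount mass removed from that context's continuations divided by the context's total count, gamma(c) = (D1 N1(c) + D2 N2(c) + D3+ N3+(c)) / sum_w count(c w). A standalone numeric sketch of the same arithmetic, mirroring the loop in AddRight::Run (the continuation counts here are invented):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Invented continuation counts for one context: two singletons and one count of 5.
      uint64_t continuations[3] = {1, 1, 5};
      float discount[4] = {0.0f, 0.5f, 1.0f, 1.5f};  // index 0 unused, then D1, D2, D3+

      uint64_t denominator = 0;
      uint64_t n[4] = {0, 0, 0, 0};  // n[i]: continuations with count i (3 means 3+)
      for (int i = 0; i < 3; ++i) {
        denominator += continuations[i];
        ++n[std::min<uint64_t>(continuations[i], 3)];
      }
      float gamma = 0.0f;
      for (int i = 1; i <= 3; ++i) gamma += discount[i] * static_cast<float>(n[i]);
      gamma /= static_cast<float>(denominator);  // (0.5*2 + 1.5*1) / 7, about 0.357
      std::printf("gamma = %f\n", gamma);
      return 0;
    }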
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/initial_probabilities.hh ADDED
@@ -0,0 +1,45 @@
+#ifndef LM_BUILDER_INITIAL_PROBABILITIES_H
+#define LM_BUILDER_INITIAL_PROBABILITIES_H
+
+#include "discount.hh"
+#include "../word_index.hh"
+#include "../../util/stream/config.hh"
+
+#include <vector>
+
+namespace util { namespace stream { class Chains; } }
+
+namespace lm {
+class SpecialVocab;
+namespace builder {
+
+struct InitialProbabilitiesConfig {
+  // These should be small buffers to keep the adder from getting too far ahead
+  util::stream::ChainConfig adder_in;
+  util::stream::ChainConfig adder_out;
+  // SRILM doesn't normally interpolate unigrams.
+  bool interpolate_unigrams;
+};
+
+/* Compute initial (uninterpolated) probabilities
+ * primary: the normal chain of n-grams. Incoming is context sorted adjusted
+ *   counts. Outgoing has uninterpolated probabilities for use by Interpolate.
+ * second_in: a second copy of the primary input. Discard the output.
+ * gamma_out: Computed gamma values are output on these chains in suffix order.
+ *   The values are bare floats and should be buffered for interpolation to
+ *   use.
+ */
+void InitialProbabilities(
+    const InitialProbabilitiesConfig &config,
+    const std::vector<Discount> &discounts,
+    util::stream::Chains &primary,
+    util::stream::Chains &second_in,
+    util::stream::Chains &gamma_out,
+    const std::vector<uint64_t> &prune_thresholds,
+    bool prune_vocab,
+    const SpecialVocab &vocab);
+
+} // namespace builder
+} // namespace lm
+
+#endif // LM_BUILDER_INITIAL_PROBABILITIES_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/interpolate.cc ADDED
@@ -0,0 +1,166 @@
+#include "interpolate.hh"
+
+#include "hash_gamma.hh"
+#include "payload.hh"
+#include "../common/compare.hh"
+#include "../common/joint_order.hh"
+#include "../common/ngram_stream.hh"
+#include "../lm_exception.hh"
+#include "../../util/fixed_array.hh"
+#include "../../util/murmur_hash.hh"
+
+#include <iostream>
+#include <cassert>
+#include <cmath>
+
+namespace lm { namespace builder {
+namespace {
+
+/* Calculate q, the collapsed probability and backoff, as defined in
+ * @inproceedings{Heafield-rest,
+ *   author = {Kenneth Heafield and Philipp Koehn and Alon Lavie},
+ *   title = {Language Model Rest Costs and Space-Efficient Storage},
+ *   year = {2012},
+ *   month = {July},
+ *   booktitle = {Proceedings of the Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning},
+ *   address = {Jeju Island, Korea},
+ *   pages = {1169--1178},
+ *   url = {http://kheafield.com/professional/edinburgh/rest\_paper.pdf},
+ * }
+ * This is particularly convenient to calculate during interpolation because
+ * the needed backoff terms are already accessed at the same time.
+ */
+class OutputQ {
+  public:
+    explicit OutputQ(std::size_t order) : q_delta_(order) {}
+
+    void Gram(unsigned order_minus_1, float full_backoff, ProbBackoff &out) {
+      float &q_del = q_delta_[order_minus_1];
+      if (order_minus_1) {
+        // Divide by context's backoff (which comes in as out.backoff)
+        q_del = q_delta_[order_minus_1 - 1] / out.backoff * full_backoff;
+      } else {
+        q_del = full_backoff;
+      }
+      out.prob = log10f(out.prob * q_del);
+      // TODO: stop wastefully outputting this!
+      out.backoff = 0.0;
+    }
+
+  private:
+    // Product of backoffs in the numerator divided by backoffs in the
+    // denominator. Does not include
+    std::vector<float> q_delta_;
+};
+
+/* Default: output probability and backoff */
+class OutputProbBackoff {
+  public:
+    explicit OutputProbBackoff(std::size_t /*order*/) {}
+
+    void Gram(unsigned /*order_minus_1*/, float full_backoff, ProbBackoff &out) const {
+      // Correcting for numerical precision issues. Take that IRST.
+      out.prob = std::min(0.0f, log10f(out.prob));
+      out.backoff = log10f(full_backoff);
+    }
+};
+
+template <class Output> class Callback {
+  public:
+    Callback(float uniform_prob, const util::stream::ChainPositions &backoffs, const std::vector<uint64_t> &prune_thresholds, bool prune_vocab, const SpecialVocab &specials)
+      : backoffs_(backoffs.size()), probs_(backoffs.size() + 2),
+        prune_thresholds_(prune_thresholds),
+        prune_vocab_(prune_vocab),
+        output_(backoffs.size() + 1 /* order */),
+        specials_(specials) {
+      probs_[0] = uniform_prob;
+      for (std::size_t i = 0; i < backoffs.size(); ++i) {
+        backoffs_.push_back(backoffs[i]);
+      }
+    }
+
+    ~Callback() {
+      for (std::size_t i = 0; i < backoffs_.size(); ++i) {
+        if(prune_vocab_ || prune_thresholds_[i + 1] > 0)
+          while(backoffs_[i])
+            ++backoffs_[i];
+
+        if (backoffs_[i]) {
+          std::cerr << "Backoffs do not match for order " << (i + 1) << std::endl;
+          abort();
+        }
+      }
+    }
+
+    void Enter(unsigned order_minus_1, void *data) {
+      NGram<BuildingPayload> gram(data, order_minus_1 + 1);
+      BuildingPayload &pay = gram.Value();
+      pay.complete.prob = pay.uninterp.prob + pay.uninterp.gamma * probs_[order_minus_1];
+      probs_[order_minus_1 + 1] = pay.complete.prob;
+
+      float out_backoff;
+      if (order_minus_1 < backoffs_.size() && *(gram.end() - 1) != specials_.UNK() && *(gram.end() - 1) != specials_.EOS() && backoffs_[order_minus_1]) {
+        if(prune_vocab_ || prune_thresholds_[order_minus_1 + 1] > 0) {
+          // Compute hash value for current context
+          uint64_t current_hash = util::MurmurHashNative(gram.begin(), gram.Order() * sizeof(WordIndex));
+
+          const HashGamma *hashed_backoff = static_cast<const HashGamma*>(backoffs_[order_minus_1].Get());
+          while(current_hash != hashed_backoff->hash_value && ++backoffs_[order_minus_1])
+            hashed_backoff = static_cast<const HashGamma*>(backoffs_[order_minus_1].Get());
+
+          if(current_hash == hashed_backoff->hash_value) {
+            out_backoff = hashed_backoff->gamma;
+            ++backoffs_[order_minus_1];
+          } else {
+            // Has been pruned away so it is not a context anymore
+            out_backoff = 1.0;
+          }
+        } else {
+          out_backoff = *static_cast<const float*>(backoffs_[order_minus_1].Get());
+          ++backoffs_[order_minus_1];
+        }
+      } else {
+        // Not a context.
+        out_backoff = 1.0;
+      }
+
+      output_.Gram(order_minus_1, out_backoff, pay.complete);
+    }
+
+    void Exit(unsigned, void *) const {}
+
+  private:
+    util::FixedArray<util::stream::Stream> backoffs_;
+
+    std::vector<float> probs_;
+    const std::vector<uint64_t>& prune_thresholds_;
+    bool prune_vocab_;
+
+    Output output_;
+    const SpecialVocab specials_;
+};
+} // namespace
+
+Interpolate::Interpolate(uint64_t vocab_size, const util::stream::ChainPositions &backoffs, const std::vector<uint64_t>& prune_thresholds, bool prune_vocab, bool output_q, const SpecialVocab &specials)
+  : uniform_prob_(1.0 / static_cast<float>(vocab_size)), // Includes <unk> but excludes <s>.
+    backoffs_(backoffs),
+    prune_thresholds_(prune_thresholds),
+    prune_vocab_(prune_vocab),
+    output_q_(output_q),
+    specials_(specials) {}
+
+// perform order-wise interpolation
+void Interpolate::Run(const util::stream::ChainPositions &positions) {
+  assert(positions.size() == backoffs_.size() + 1);
+  if (output_q_) {
+    typedef Callback<OutputQ> C;
+    C callback(uniform_prob_, backoffs_, prune_thresholds_, prune_vocab_, specials_);
+    JointOrder<C, SuffixOrder>(positions, callback);
+  } else {
+    typedef Callback<OutputProbBackoff> C;
+    C callback(uniform_prob_, backoffs_, prune_thresholds_, prune_vocab_, specials_);
+    JointOrder<C, SuffixOrder>(positions, callback);
+  }
+}
+
+}} // namespaces
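Because Callback walks the n-grams in suffix order, by the time it enters an order-n gram the interpolated probability of that gram's order-(n-1) suffix is already cached in probs_, so the recursion p(w|c) = p_uninterp(w|c) + gamma(c) * p(w|suffix(c)) bottoms out at the uniform distribution 1/vocab_size. A tiny numeric sketch of two levels of that recursion (all values invented):

    #include <cstdio>

    int main() {
      float uniform = 1.0f / 1000.0f;  // pretend 1000-word vocabulary

      // Unigram: uninterpolated probability plus gamma times the uniform floor.
      float p1 = 0.01f + 0.2f * uniform;
      // Bigram interpolates against the unigram result, and so on up the orders.
      float p2 = 0.20f + 0.3f * p1;

      std::printf("p(word) = %f, p(word | context) = %f\n", p1, p2);
      return 0;
    }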
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/interpolate.hh ADDED
@@ -0,0 +1,37 @@
+#ifndef LM_BUILDER_INTERPOLATE_H
+#define LM_BUILDER_INTERPOLATE_H
+
+#include "../common/special.hh"
+#include "../word_index.hh"
+#include "../../util/stream/multi_stream.hh"
+
+#include <vector>
+
+#include <stdint.h>
+
+namespace lm { namespace builder {
+
+/* Interpolate step.
+ * Input: suffix sorted n-grams with (p_uninterpolated, gamma) from
+ *   InitialProbabilities.
+ * Output: suffix sorted n-grams with complete probability
+ */
+class Interpolate {
+  public:
+    // Normally vocab_size is the unigram count-1 (since p(<s>) = 0) but might
+    // be larger when the user specifies a consistent vocabulary size.
+    explicit Interpolate(uint64_t vocab_size, const util::stream::ChainPositions &backoffs, const std::vector<uint64_t> &prune_thresholds, bool prune_vocab, bool output_q, const SpecialVocab &specials);
+
+    void Run(const util::stream::ChainPositions &positions);
+
+  private:
+    float uniform_prob_;
+    util::stream::ChainPositions backoffs_;
+    const std::vector<uint64_t> prune_thresholds_;
+    bool prune_vocab_;
+    bool output_q_;
+    const SpecialVocab specials_;
+};
+
+}} // namespaces
+#endif // LM_BUILDER_INTERPOLATE_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/lmplz_main.cc ADDED
@@ -0,0 +1,220 @@
+#include "output.hh"
+#include "pipeline.hh"
+#include "../common/size_option.hh"
+#include "../lm_exception.hh"
+#include "../../util/file.hh"
+#include "../../util/file_piece.hh"
+#include "../../util/usage.hh"
+
+#include <iostream>
+
+#include <boost/program_options.hpp>
+#include <boost/version.hpp>
+#include <vector>
+
+namespace {
+
+// Parse and validate pruning thresholds, then return a vector of threshold
+// counts, one per n-gram order.
+std::vector<uint64_t> ParsePruning(const std::vector<std::string> &param, std::size_t order) {
+  // convert to vector of integers
+  std::vector<uint64_t> prune_thresholds;
+  prune_thresholds.reserve(order);
+  for (std::vector<std::string>::const_iterator it(param.begin()); it != param.end(); ++it) {
+    try {
+      prune_thresholds.push_back(boost::lexical_cast<uint64_t>(*it));
+    } catch(const boost::bad_lexical_cast &) {
+      UTIL_THROW(util::Exception, "Bad pruning threshold " << *it);
+    }
+  }
+
+  // Fill with zeros by default.
+  if (prune_thresholds.empty()) {
+    prune_thresholds.resize(order, 0);
+    return prune_thresholds;
+  }
+
+  // Validate the pruning thresholds if specified:
+  // throw if more thresholds were given than the model has orders.
+  UTIL_THROW_IF(prune_thresholds.size() > order, util::Exception, "You specified pruning thresholds for orders 1 through " << prune_thresholds.size() << " but the model only has order " << order);
+  // threshold for unigram can only be 0 (no pruning)
+
+  // Check that the thresholds are in non-decreasing order.
+  uint64_t lower_threshold = 0;
+  for (std::vector<uint64_t>::iterator it = prune_thresholds.begin(); it != prune_thresholds.end(); ++it) {
+    UTIL_THROW_IF(lower_threshold > *it, util::Exception, "Pruning thresholds should be in non-decreasing order. Otherwise substrings would be removed, which is bad for query-time data structures.");
+    lower_threshold = *it;
+  }
+
+  // Pad to all orders using the last value.
+  prune_thresholds.resize(order, prune_thresholds.back());
+  return prune_thresholds;
+}
+
+lm::builder::Discount ParseDiscountFallback(const std::vector<std::string> &param) {
+  lm::builder::Discount ret;
+  UTIL_THROW_IF(param.size() > 3, util::Exception, "Specify at most three fallback discounts: 1, 2, and 3+");
+  UTIL_THROW_IF(param.empty(), util::Exception, "Fallback discounting enabled, but no discount specified");
+  ret.amount[0] = 0.0;
+  for (unsigned i = 0; i < 3; ++i) {
+    float discount = boost::lexical_cast<float>(param[i < param.size() ? i : (param.size() - 1)]);
+    UTIL_THROW_IF(discount < 0.0 || discount > static_cast<float>(i+1), util::Exception, "The discount for count " << (i+1) << " was parsed as " << discount << " which is not in the range [0, " << (i+1) << "].");
+    ret.amount[i + 1] = discount;
+  }
+  return ret;
+}
+
+} // namespace
+
+int main(int argc, char *argv[]) {
+  try {
+    namespace po = boost::program_options;
+    po::options_description options("Language model building options");
+    lm::builder::PipelineConfig pipeline;
+
+    std::string text, intermediate, arpa;
+    std::vector<std::string> pruning;
+    std::vector<std::string> discount_fallback;
+    std::vector<std::string> discount_fallback_default;
+    discount_fallback_default.push_back("0.5");
+    discount_fallback_default.push_back("1");
+    discount_fallback_default.push_back("1.5");
+    bool verbose_header;
+
+    options.add_options()
+      ("help,h", po::bool_switch(), "Show this help message")
+      ("order,o", po::value<std::size_t>(&pipeline.order)
+#if BOOST_VERSION >= 104200
+        ->required()
+#endif
+        , "Order of the model")
+      ("interpolate_unigrams", po::value<bool>(&pipeline.initial_probs.interpolate_unigrams)->default_value(true)->implicit_value(true), "Interpolate the unigrams (default) as opposed to giving lots of mass to <unk> like SRI. If you want SRI's behavior with a large <unk> and the old lmplz default, use --interpolate_unigrams 0.")
+      ("skip_symbols", po::bool_switch(), "Treat <s>, </s>, and <unk> as whitespace instead of throwing an exception")
+      ("temp_prefix,T", po::value<std::string>(&pipeline.sort.temp_prefix)->default_value(util::DefaultTempDirectory()), "Temporary file prefix")
+      ("memory,S", lm::SizeOption(pipeline.sort.total_memory, util::GuessPhysicalMemory() ? "80%" : "1G"), "Sorting memory")
+      ("minimum_block", lm::SizeOption(pipeline.minimum_block, "8K"), "Minimum block size to allow")
+      ("sort_block", lm::SizeOption(pipeline.sort.buffer_size, "64M"), "Size of IO operations for sort (determines arity)")
+      ("block_count", po::value<std::size_t>(&pipeline.block_count)->default_value(2), "Block count (per order)")
+      ("vocab_estimate", po::value<lm::WordIndex>(&pipeline.vocab_estimate)->default_value(1000000), "Assume this vocabulary size for purposes of calculating memory in step 1 (corpus count) and pre-sizing the hash table")
+      ("vocab_pad", po::value<uint64_t>(&pipeline.vocab_size_for_unk)->default_value(0), "If the vocabulary is smaller than this value, pad with <unk> to reach this size. Requires --interpolate_unigrams")
+      ("verbose_header", po::bool_switch(&verbose_header), "Add a verbose header to the ARPA file that includes information such as token count, smoothing type, etc.")
+      ("text", po::value<std::string>(&text), "Read text from a file instead of stdin")
+      ("arpa", po::value<std::string>(&arpa), "Write ARPA to a file instead of stdout")
+      ("intermediate", po::value<std::string>(&intermediate), "Write ngrams to intermediate files. Turns off ARPA output (which can be reactivated by --arpa file). Forces --renumber on.")
+      ("renumber", po::bool_switch(&pipeline.renumber_vocabulary), "Renumber the vocabulary identifiers so that they are monotone with the hash of each string. This is consistent with the ordering used by the trie data structure.")
+      ("collapse_values", po::bool_switch(&pipeline.output_q), "Collapse probability and backoff into a single value, q that yields the same sentence-level probabilities. See http://kheafield.com/professional/edinburgh/rest_paper.pdf for more details, including a proof.")
+      ("prune", po::value<std::vector<std::string> >(&pruning)->multitoken(), "Prune n-grams with count less than or equal to the given threshold. Specify one value for each order i.e. 0 0 1 to prune singleton trigrams and above. The sequence of values must be non-decreasing and the last value applies to any remaining orders. Default is to not prune, which is equivalent to --prune 0.")
+      ("limit_vocab_file", po::value<std::string>(&pipeline.prune_vocab_file)->default_value(""), "Read allowed vocabulary separated by whitespace. N-grams that contain vocabulary items not in this list will be pruned. Can be combined with --prune arg")
+      ("discount_fallback", po::value<std::vector<std::string> >(&discount_fallback)->multitoken()->implicit_value(discount_fallback_default, "0.5 1 1.5"), "The closed-form estimate for Kneser-Ney discounts does not work without singletons or doubletons. It can also fail if these values are out of range. This option falls back to user-specified discounts when the closed-form estimate fails. Note that this option is generally a bad idea: you should deduplicate your corpus instead. However, class-based models need custom discounts because they lack singleton unigrams. Provide up to three discounts (for adjusted counts 1, 2, and 3+), which will be applied to all orders where the closed-form estimates fail.");
+    po::variables_map vm;
+    po::store(po::parse_command_line(argc, argv, options), vm);
+
+    if (argc == 1 || vm["help"].as<bool>()) {
+      std::cerr <<
+        "Builds unpruned language models with modified Kneser-Ney smoothing.\n\n"
+        "Please cite:\n"
+        "@inproceedings{Heafield-estimate,\n"
+        "  author = {Kenneth Heafield and Ivan Pouzyrevsky and Jonathan H. Clark and Philipp Koehn},\n"
+        "  title = {Scalable Modified {Kneser-Ney} Language Model Estimation},\n"
+        "  year = {2013},\n"
+        "  month = {8},\n"
+        "  booktitle = {Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics},\n"
+        "  address = {Sofia, Bulgaria},\n"
+        "  url = {http://kheafield.com/professional/edinburgh/estimate\\_paper.pdf},\n"
+        "}\n\n"
+        "Provide the corpus on stdin. The ARPA file will be written to stdout. Order of\n"
+        "the model (-o) is the only mandatory option. As this is an on-disk program,\n"
+        "setting the temporary file location (-T) and sorting memory (-S) is recommended.\n\n"
+        "Memory sizes are specified like GNU sort: a number followed by a unit character.\n"
+        "Valid units are \% for percentage of memory (supported platforms only) and (in\n"
+        "increasing powers of 1024): b, K, M, G, T, P, E, Z, Y. Default is K (*1024).\n";
+      uint64_t mem = util::GuessPhysicalMemory();
+      if (mem) {
+        std::cerr << "This machine has " << mem << " bytes of memory.\n\n";
+      } else {
+        std::cerr << "Unable to determine the amount of memory on this machine.\n\n";
+      }
+      std::cerr << options << std::endl;
+      return 1;
+    }
+
+    po::notify(vm);
+
+    // required() appeared in Boost 1.42.0.
+#if BOOST_VERSION < 104200
+    if (!vm.count("order")) {
+      std::cerr << "the option '--order' is required but missing" << std::endl;
+      return 1;
+    }
+#endif
+
+    if (pipeline.vocab_size_for_unk && !pipeline.initial_probs.interpolate_unigrams) {
+      std::cerr << "--vocab_pad requires --interpolate_unigrams be on" << std::endl;
+      return 1;
+    }
+
+    if (vm["skip_symbols"].as<bool>()) {
+      pipeline.disallowed_symbol_action = lm::COMPLAIN;
+    } else {
+      pipeline.disallowed_symbol_action = lm::THROW_UP;
+    }
+
+    if (vm.count("discount_fallback")) {
+      pipeline.discount.fallback = ParseDiscountFallback(discount_fallback);
+      pipeline.discount.bad_action = lm::COMPLAIN;
+    } else {
+      // Unused, just here to prevent the compiler from complaining about uninitialized.
+      pipeline.discount.fallback = lm::builder::Discount();
+      pipeline.discount.bad_action = lm::THROW_UP;
+    }
+
+    // Parse pruning thresholds. These depend on order, so it is not done as a notifier.
+    pipeline.prune_thresholds = ParsePruning(pruning, pipeline.order);
+
+    if (!vm["limit_vocab_file"].as<std::string>().empty()) {
+      pipeline.prune_vocab = true;
+    } else {
+      pipeline.prune_vocab = false;
+    }
+
+    util::NormalizeTempPrefix(pipeline.sort.temp_prefix);
+
+    lm::builder::InitialProbabilitiesConfig &initial = pipeline.initial_probs;
+    // TODO: evaluate options for these.
+    initial.adder_in.total_memory = 32768;
+    initial.adder_in.block_count = 2;
+    initial.adder_out.total_memory = 32768;
+    initial.adder_out.block_count = 2;
+    pipeline.read_backoffs = initial.adder_out;
+
+    // Read from stdin, write to stdout by default
+    util::scoped_fd in(0), out(1);
+    if (vm.count("text")) {
+      in.reset(util::OpenReadOrThrow(text.c_str()));
+    }
+    if (vm.count("arpa")) {
+      out.reset(util::CreateOrThrow(arpa.c_str()));
+    }
+
+    try {
+      bool writing_intermediate = vm.count("intermediate");
+      if (writing_intermediate) {
+        pipeline.renumber_vocabulary = true;
+      }
+      lm::builder::Output output(writing_intermediate ? intermediate : pipeline.sort.temp_prefix, writing_intermediate, pipeline.output_q);
+      if (!writing_intermediate || vm.count("arpa")) {
+        output.Add(new lm::builder::PrintHook(out.release(), verbose_header));
+      }
+      lm::builder::Pipeline(pipeline, in.release(), output);
+    } catch (const util::MallocException &e) {
+      std::cerr << e.what() << std::endl;
+      std::cerr << "Try rerunning with a more conservative -S setting than " << vm["memory"].as<std::string>() << std::endl;
+      return 1;
+    }
+    util::PrintUsage(std::cerr);
+  } catch (const std::exception &e) {
+    std::cerr << e.what() << std::endl;
+    return 1;
+  }
+}
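ParsePruning validates and then pads: giving fewer thresholds than orders is legal and the last value repeats, so --prune 0 0 1 on a 5-gram model becomes {0, 0, 1, 1, 1}. A minimal sketch of just that padding rule (validation omitted, values invented):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<uint64_t> thresholds;  // as if parsed from "--prune 0 0 1"
      thresholds.push_back(0);
      thresholds.push_back(0);
      thresholds.push_back(1);
      const std::size_t order = 5;
      // Pad to all orders using the last value: {0, 0, 1, 1, 1}.
      thresholds.resize(order, thresholds.back());
      for (std::size_t i = 0; i < thresholds.size(); ++i)
        std::printf("order %zu: prune counts <= %llu\n", i + 1, (unsigned long long)thresholds[i]);
      return 0;
    }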
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/output.cc ADDED
@@ -0,0 +1,52 @@
+#include "output.hh"
+
+#include "../common/model_buffer.hh"
+#include "../common/print.hh"
+#include "../../util/file_stream.hh"
+#include "../../util/stream/multi_stream.hh"
+
+#include <iostream>
+
+namespace lm { namespace builder {
+
+OutputHook::~OutputHook() {}
+
+Output::Output(StringPiece file_base, bool keep_buffer, bool output_q)
+  : buffer_(file_base, keep_buffer, output_q) {}
+
+void Output::SinkProbs(util::stream::Chains &chains) {
+  Apply(PROB_PARALLEL_HOOK, chains);
+  if (!buffer_.Keep() && !Have(PROB_SEQUENTIAL_HOOK)) {
+    chains >> util::stream::kRecycle;
+    chains.Wait(true);
+    return;
+  }
+  buffer_.Sink(chains, header_.counts_pruned);
+  chains >> util::stream::kRecycle;
+  chains.Wait(false);
+  if (Have(PROB_SEQUENTIAL_HOOK)) {
+    std::cerr << "=== 5/5 Writing ARPA model ===" << std::endl;
+    buffer_.Source(chains);
+    Apply(PROB_SEQUENTIAL_HOOK, chains);
+    chains >> util::stream::kRecycle;
+    chains.Wait(true);
+  }
+}
+
+void Output::Apply(HookType hook_type, util::stream::Chains &chains) {
+  for (boost::ptr_vector<OutputHook>::iterator entry = outputs_[hook_type].begin(); entry != outputs_[hook_type].end(); ++entry) {
+    entry->Sink(header_, VocabFile(), chains);
+  }
+}
+
+void PrintHook::Sink(const HeaderInfo &info, int vocab_file, util::stream::Chains &chains) {
+  if (verbose_header_) {
+    util::FileStream out(file_.get(), 50);
+    out << "# Input file: " << info.input_file << '\n';
+    out << "# Token count: " << info.token_count << '\n';
+    out << "# Smoothing: Modified Kneser-Ney" << '\n';
+  }
+  chains >> PrintARPA(vocab_file, file_.get(), info.counts_pruned);
+}
+
+}} // namespaces
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/output.hh ADDED
@@ -0,0 +1,85 @@
+#ifndef LM_BUILDER_OUTPUT_H
+#define LM_BUILDER_OUTPUT_H
+
+#include "header_info.hh"
+#include "../common/model_buffer.hh"
+#include "../../util/file.hh"
+
+#include <boost/ptr_container/ptr_vector.hpp>
+#include <boost/utility.hpp>
+
+namespace util { namespace stream { class Chains; class ChainPositions; } }
+
+/* Outputs from lmplz: ARPA, sharded files, etc */
+namespace lm { namespace builder {
+
+// These are different types of hooks. Values should be consecutive to enable a vector lookup.
+enum HookType {
+  // TODO: counts.
+  PROB_PARALLEL_HOOK, // Probability and backoff (or just q). Output must process the orders in parallel or there will be a deadlock.
+  PROB_SEQUENTIAL_HOOK, // Probability and backoff (or just q). Output can process orders any way it likes. This requires writing the data to disk then reading. Useful for ARPA files, which put unigrams first etc.
+  NUMBER_OF_HOOKS // Keep this last so we know how many values there are.
+};
+
+class OutputHook {
+  public:
+    explicit OutputHook(HookType hook_type) : type_(hook_type) {}
+
+    virtual ~OutputHook();
+
+    virtual void Sink(const HeaderInfo &info, int vocab_file, util::stream::Chains &chains) = 0;
+
+    HookType Type() const { return type_; }
+
+  private:
+    HookType type_;
+};
+
+class Output : boost::noncopyable {
+  public:
+    Output(StringPiece file_base, bool keep_buffer, bool output_q);
+
+    // Takes ownership.
+    void Add(OutputHook *hook) {
+      outputs_[hook->Type()].push_back(hook);
+    }
+
+    bool Have(HookType hook_type) const {
+      return !outputs_[hook_type].empty();
+    }
+
+    int VocabFile() const { return buffer_.VocabFile(); }
+
+    void SetHeader(const HeaderInfo &header) { header_ = header; }
+    const HeaderInfo &GetHeader() const { return header_; }
+
+    // This is called by the pipeline.
+    void SinkProbs(util::stream::Chains &chains);
+
+    unsigned int Steps() const { return Have(PROB_SEQUENTIAL_HOOK); }
+
+  private:
+    void Apply(HookType hook_type, util::stream::Chains &chains);
+
+    ModelBuffer buffer_;
+
+    boost::ptr_vector<OutputHook> outputs_[NUMBER_OF_HOOKS];
+    HeaderInfo header_;
+};
+
+class PrintHook : public OutputHook {
+  public:
+    // Takes ownership
+    PrintHook(int write_fd, bool verbose_header)
+      : OutputHook(PROB_SEQUENTIAL_HOOK), file_(write_fd), verbose_header_(verbose_header) {}
+
+    void Sink(const HeaderInfo &info, int vocab_file, util::stream::Chains &chains);
+
+  private:
+    util::scoped_fd file_;
+    bool verbose_header_;
+};
+
+}} // namespaces
+
+#endif // LM_BUILDER_OUTPUT_H
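Because Output keeps a ptr_vector of hooks per HookType, adding a new consumer is just subclassing OutputHook and registering it with Output::Add, which takes ownership. A hedged sketch of a near-trivial hook against the interface declared above (the class name is invented; a real hook would attach stream workers the way PrintHook does):

    #include "output.hh"  // assumed include path for this sketch

    // Sketch only: records that the parallel stage was reached, consumes nothing.
    class CountingHook : public lm::builder::OutputHook {
      public:
        CountingHook() : lm::builder::OutputHook(lm::builder::PROB_PARALLEL_HOOK), calls_(0) {}

        void Sink(const lm::builder::HeaderInfo &/*info*/, int /*vocab_file*/, util::stream::Chains &/*chains*/) {
          // A real hook would attach workers to the chains here, as PrintHook does.
          ++calls_;
        }

      private:
        unsigned calls_;
    };

    // Registration (Output takes ownership of the raw pointer):
    //   output.Add(new CountingHook());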
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/payload.hh ADDED
@@ -0,0 +1,48 @@
+#ifndef LM_BUILDER_PAYLOAD_H
+#define LM_BUILDER_PAYLOAD_H
+
+#include "../weights.hh"
+#include "../word_index.hh"
+#include <stdint.h>
+
+namespace lm { namespace builder {
+
+struct Uninterpolated {
+  float prob;  // Uninterpolated probability.
+  float gamma; // Interpolation weight for lower order.
+};
+
+union BuildingPayload {
+  uint64_t count;
+  Uninterpolated uninterp;
+  ProbBackoff complete;
+
+  /*mjd**********************************************************************/
+  bool IsMarked() const {
+    return count >> (sizeof(count) * 8 - 1);
+  }
+
+  void Mark() {
+    count |= (1ULL << (sizeof(count) * 8 - 1));
+  }
+
+  void Unmark() {
+    count &= ~(1ULL << (sizeof(count) * 8 - 1));
+  }
+
+  uint64_t UnmarkedCount() const {
+    return count & ~(1ULL << (sizeof(count) * 8 - 1));
+  }
+
+  uint64_t CutoffCount() const {
+    return IsMarked() ? 0 : UnmarkedCount();
+  }
+  /*mjd**********************************************************************/
+};
+
+const WordIndex kBOS = 1;
+const WordIndex kEOS = 2;
+
+}} // namespaces
+
+#endif // LM_BUILDER_PAYLOAD_H
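The union reuses the count's most significant bit as a pruning flag: Mark sets bit 63, UnmarkedCount masks it back off to recover the true count, and CutoffCount reports zero for marked (pruned) entries, so a count and its pruned status share one 64-bit word. A standalone sketch of the same bit trick:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t count = 42;
      const uint64_t kMark = 1ULL << 63;

      count |= kMark;                    // Mark(): flag the entry as pruned
      assert(count >> 63);               // IsMarked()
      assert((count & ~kMark) == 42);    // UnmarkedCount(): original value intact

      // CutoffCount(): pruned entries report zero.
      uint64_t cutoff = (count >> 63) ? 0 : (count & ~kMark);
      assert(cutoff == 0);
      return 0;
    }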
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/pipeline.cc ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include "pipeline.hh"
2
+
3
+ #include "adjust_counts.hh"
4
+ #include "combine_counts.hh"
5
+ #include "corpus_count.hh"
6
+ #include "hash_gamma.hh"
7
+ #include "initial_probabilities.hh"
8
+ #include "interpolate.hh"
9
+ #include "output.hh"
10
+ #include "../common/compare.hh"
11
+ #include "../common/renumber.hh"
12
+
13
+ #include "../sizes.hh"
14
+ #include "../vocab.hh"
15
+
16
+ #include "../../util/exception.hh"
17
+ #include "../../util/file.hh"
18
+ #include "../../util/stream/io.hh"
19
+
20
+ #include <algorithm>
21
+ #include <iostream>
22
+ #include <fstream>
23
+ #include <vector>
24
+
25
+ namespace lm { namespace builder {
26
+
27
+ using util::stream::Sorts;
28
+
29
+ namespace {
30
+
31
+ void PrintStatistics(const std::vector<uint64_t> &counts, const std::vector<uint64_t> &counts_pruned, const std::vector<Discount> &discounts) {
32
+ std::cerr << "Statistics:\n";
33
+ for (size_t i = 0; i < counts.size(); ++i) {
34
+ std::cerr << (i + 1) << ' ' << counts_pruned[i];
35
+ if(counts[i] != counts_pruned[i])
36
+ std::cerr << "/" << counts[i];
37
+
38
+ for (size_t d = 1; d <= 3; ++d)
39
+ std::cerr << " D" << d << (d == 3 ? "+=" : "=") << discounts[i].amount[d];
40
+ std::cerr << '\n';
41
+ }
42
+ }
43
+
44
+ class Master {
45
+ public:
46
+ explicit Master(PipelineConfig &config, unsigned output_steps)
47
+ : config_(config), chains_(config.order), unigrams_(util::MakeTemp(config_.TempPrefix())), steps_(output_steps + 4) {
48
+ config_.minimum_block = std::max(NGram<BuildingPayload>::TotalSize(config_.order), config_.minimum_block);
49
+ }
50
+
51
+ const PipelineConfig &Config() const { return config_; }
52
+
53
+ util::stream::Chains &MutableChains() { return chains_; }
54
+
55
+ template <class T> Master &operator>>(const T &worker) {
56
+ chains_ >> worker;
57
+ return *this;
58
+ }
59
+
60
+ // This takes the (partially) sorted ngrams and sets up for adjusted counts.
61
+ void InitForAdjust(util::stream::Sort<SuffixOrder, CombineCounts> &ngrams, WordIndex types, std::size_t subtract_for_numbering) {
62
+ const std::size_t each_order_min = config_.minimum_block * config_.block_count;
63
+ // We know how many unigrams there are. Don't allocate more than needed to them.
64
+ const std::size_t min_chains = (config_.order - 1) * each_order_min +
65
+ std::min(types * NGram<BuildingPayload>::TotalSize(1), each_order_min);
66
+ // Prevent overflow in subtracting.
67
+ const std::size_t total = std::max<std::size_t>(config_.TotalMemory(), min_chains + subtract_for_numbering + config_.minimum_block);
68
+ // Do merge sort with calculated laziness.
69
+ const std::size_t merge_using = ngrams.Merge(std::min(total - min_chains - subtract_for_numbering, ngrams.DefaultLazy()));
70
+
71
+ std::vector<uint64_t> count_bounds(1, types);
72
+ CreateChains(total - merge_using - subtract_for_numbering, count_bounds);
73
+ ngrams.Output(chains_.back(), merge_using);
74
+ }
75
+
76
+ // For initial probabilities, but this is generic.
77
+ void SortAndReadTwice(const std::vector<uint64_t> &counts, Sorts<ContextOrder> &sorts, util::stream::Chains &second, util::stream::ChainConfig second_config) {
78
+ bool unigrams_are_sorted = !config_.renumber_vocabulary;
79
+ // Do merge first before allocating chain memory.
80
+ for (std::size_t i = 0; i < config_.order - unigrams_are_sorted; ++i) {
81
+ sorts[i].Merge(0);
82
+ }
83
+ // There's no lazy merge, so just divide memory amongst the chains.
84
+ CreateChains(config_.TotalMemory(), counts);
85
+ chains_.back().ActivateProgress();
86
+ if (unigrams_are_sorted) {
87
+ chains_[0] >> unigrams_.Source();
88
+ second_config.entry_size = NGram<BuildingPayload>::TotalSize(1);
89
+ second.push_back(second_config);
90
+ second.back() >> unigrams_.Source();
91
+ }
92
+ for (std::size_t i = unigrams_are_sorted; i < config_.order; ++i) {
93
+ util::scoped_fd fd(sorts[i - unigrams_are_sorted].StealCompleted());
94
+ chains_[i].SetProgressTarget(util::SizeOrThrow(fd.get()));
95
+ chains_[i] >> util::stream::PRead(util::DupOrThrow(fd.get()), true);
96
+ second_config.entry_size = NGram<BuildingPayload>::TotalSize(i + 1);
97
+ second.push_back(second_config);
98
+ second.back() >> util::stream::PRead(fd.release(), true);
99
+ }
100
+ }
101
+
102
+ // There is no sort after this, so go for broke on lazy merging.
103
+ template <class Compare> void MaximumLazyInput(const std::vector<uint64_t> &counts, Sorts<Compare> &sorts) {
104
+ // Determine the minimum we can use for all the chains.
105
+ std::size_t min_chains = 0;
106
+ for (std::size_t i = 0; i < config_.order; ++i) {
107
+ min_chains += std::min(counts[i] * NGram<BuildingPayload>::TotalSize(i + 1), static_cast<uint64_t>(config_.minimum_block));
108
+ }
109
+ std::size_t for_merge = min_chains > config_.TotalMemory() ? 0 : (config_.TotalMemory() - min_chains);
110
+ std::vector<std::size_t> laziness;
111
+ // Prioritize longer n-grams.
112
+ for (util::stream::Sort<SuffixOrder> *i = sorts.end() - 1; i >= sorts.begin(); --i) {
113
+ laziness.push_back(i->Merge(for_merge));
114
+ assert(for_merge >= laziness.back());
115
+ for_merge -= laziness.back();
116
+ }
117
+ std::reverse(laziness.begin(), laziness.end());
118
+
119
+ CreateChains(for_merge + min_chains, counts);
120
+ chains_.back().ActivateProgress();
121
+ chains_[0] >> unigrams_.Source();
122
+ for (std::size_t i = 1; i < config_.order; ++i) {
123
+ sorts[i - 1].Output(chains_[i], laziness[i - 1]);
124
+ }
125
+ }
126
+
127
+ template <class Compare> void SetupSorts(Sorts<Compare> &sorts, bool exclude_unigrams) {
128
+ sorts.Init(config_.order - exclude_unigrams);
129
+ // Unigrams don't get sorted because their order is always the same.
130
+ if (exclude_unigrams) chains_[0] >> unigrams_.Sink() >> util::stream::kRecycle;
131
+ for (std::size_t i = exclude_unigrams; i < config_.order; ++i) {
132
+ sorts.push_back(chains_[i], config_.sort, Compare(i + 1));
133
+ }
134
+ chains_.Wait(true);
135
+ }
136
+
137
+ unsigned int Steps() const { return steps_; }
138
+
139
+ private:
140
+ // Create chains, allocating memory to them. Totally heuristic. Count
141
+ // bounds are upper bounds on the counts or not present.
142
+ void CreateChains(std::size_t remaining_mem, const std::vector<uint64_t> &count_bounds) {
143
+ std::vector<std::size_t> assignments;
144
+ assignments.reserve(config_.order);
145
+ // Start by assigning maximum memory usage (to be refined later).
146
+ for (std::size_t i = 0; i < count_bounds.size(); ++i) {
147
+ assignments.push_back(static_cast<std::size_t>(std::min(
148
+ static_cast<uint64_t>(remaining_mem),
149
+ count_bounds[i] * static_cast<uint64_t>(NGram<BuildingPayload>::TotalSize(i + 1)))));
150
+ }
151
+ assignments.resize(config_.order, remaining_mem);
152
+
153
+ // Now we know how much memory everybody wants. How much will they get?
154
+ // Proportional to this.
155
+ std::vector<float> portions;
156
+ // Indices of orders that have yet to be assigned.
157
+ std::vector<std::size_t> unassigned;
158
+ for (std::size_t i = 0; i < config_.order; ++i) {
159
+ portions.push_back(static_cast<float>((i+1) * NGram<BuildingPayload>::TotalSize(i+1)));
160
+ unassigned.push_back(i);
161
+ }
162
+ /*If somebody doesn't eat their full dinner, give it to the rest of the
163
+ * family. Then somebody else might not eat their full dinner etc. Ends
164
+ * when everybody unassigned is hungry.
165
+ */
166
+ float sum;
167
+ bool found_more;
168
+ std::vector<std::size_t> block_count(config_.order);
169
+ do {
170
+ sum = 0.0;
171
+ for (std::size_t i = 0; i < unassigned.size(); ++i) {
172
+ sum += portions[unassigned[i]];
173
+ }
174
+ found_more = false;
175
+ // If the proportional assignment is more than needed, give it just what it needs.
176
+ for (std::vector<std::size_t>::iterator i = unassigned.begin(); i != unassigned.end();) {
177
+ if (assignments[*i] <= remaining_mem * (portions[*i] / sum)) {
178
+ remaining_mem -= assignments[*i];
179
+ block_count[*i] = 1;
180
+ i = unassigned.erase(i);
181
+ found_more = true;
182
+ } else {
183
+ ++i;
184
+ }
185
+ }
186
+ } while (found_more);
187
+ for (std::vector<std::size_t>::iterator i = unassigned.begin(); i != unassigned.end(); ++i) {
188
+ assignments[*i] = remaining_mem * (portions[*i] / sum);
189
+ block_count[*i] = config_.block_count;
190
+ }
191
+ chains_.clear();
192
+ std::cerr << "Chain sizes:";
193
+ for (std::size_t i = 0; i < config_.order; ++i) {
194
+ // Always have enough for at least one record.
195
+ // This was crashing if e.g. there was no 5-gram.
196
+ assignments[i] = std::max(assignments[i], block_count[i] * NGram<BuildingPayload>::TotalSize(i + 1));
197
+ std::cerr << ' ' << (i+1) << ":" << assignments[i];
198
+ chains_.push_back(util::stream::ChainConfig(NGram<BuildingPayload>::TotalSize(i + 1), block_count[i], assignments[i]));
199
+ }
200
+ std::cerr << std::endl;
201
+ }
202
+
203
+ PipelineConfig &config_;
204
+
205
+ util::stream::Chains chains_;
206
+
207
+ util::stream::FileBuffer unigrams_;
208
+
209
+ const unsigned int steps_;
210
+ };
211
+
212
+ util::stream::Sort<SuffixOrder, CombineCounts> *CountText(int text_file /* input */, int vocab_file /* output */, Master &master, uint64_t &token_count, WordIndex &type_count, std::string &text_file_name, std::vector<bool> &prune_words) {
213
+ const PipelineConfig &config = master.Config();
214
+ std::cerr << "=== 1/" << master.Steps() << " Counting and sorting n-grams ===" << std::endl;
215
+
216
+ const std::size_t vocab_usage = CorpusCount::VocabUsage(config.vocab_estimate);
217
+ UTIL_THROW_IF(config.TotalMemory() < vocab_usage, util::Exception, "Vocab hash size estimate " << vocab_usage << " exceeds total memory " << config.TotalMemory());
218
+ std::size_t memory_for_chain =
219
+ // This much memory to work with after vocab hash table.
220
+ static_cast<float>(config.TotalMemory() - vocab_usage) /
221
+ // Solve for block size including the dedupe multiplier for one block.
222
+ (static_cast<float>(config.block_count) + CorpusCount::DedupeMultiplier(config.order)) *
223
+ // Chain likes memory expressed in terms of total memory.
224
+ static_cast<float>(config.block_count);
225
+ util::stream::Chain chain(util::stream::ChainConfig(NGram<BuildingPayload>::TotalSize(config.order), config.block_count, memory_for_chain));
226
+
227
+ type_count = config.vocab_estimate;
228
+ util::FilePiece text(text_file, NULL, &std::cerr);
229
+ text_file_name = text.FileName();
230
+ CorpusCount counter(text, vocab_file, true, token_count, type_count, prune_words, config.prune_vocab_file, chain.BlockSize() / chain.EntrySize(), config.disallowed_symbol_action);
231
+ chain >> boost::ref(counter);
232
+
233
+ util::scoped_ptr<util::stream::Sort<SuffixOrder, CombineCounts> > sorter(new util::stream::Sort<SuffixOrder, CombineCounts>(chain, config.sort, SuffixOrder(config.order), CombineCounts()));
234
+ chain.Wait(true);
235
+ return sorter.release();
236
+ }
237
+
238
+ void InitialProbabilities(const std::vector<uint64_t> &counts, const std::vector<uint64_t> &counts_pruned, const std::vector<Discount> &discounts, Master &master, Sorts<SuffixOrder> &primary, util::FixedArray<util::stream::FileBuffer> &gammas, const std::vector<uint64_t> &prune_thresholds, bool prune_vocab, const SpecialVocab &specials) {
239
+ const PipelineConfig &config = master.Config();
240
+ util::stream::Chains second(config.order);
241
+
242
+ {
243
+ Sorts<ContextOrder> sorts;
244
+ master.SetupSorts(sorts, !config.renumber_vocabulary);
245
+ PrintStatistics(counts, counts_pruned, discounts);
246
+ lm::ngram::ShowSizes(counts_pruned);
247
+ std::cerr << "=== 3/" << master.Steps() << " Calculating and sorting initial probabilities ===" << std::endl;
248
+ master.SortAndReadTwice(counts_pruned, sorts, second, config.initial_probs.adder_in);
249
+ }
250
+
251
+ util::stream::Chains gamma_chains(config.order);
252
+ InitialProbabilities(config.initial_probs, discounts, master.MutableChains(), second, gamma_chains, prune_thresholds, prune_vocab, specials);
253
+ // Don't care about gamma for 0.
254
+ gamma_chains[0] >> util::stream::kRecycle;
255
+ gammas.Init(config.order - 1);
256
+ for (std::size_t i = 1; i < config.order; ++i) {
257
+ gammas.push_back(util::MakeTemp(config.TempPrefix()));
258
+ gamma_chains[i] >> gammas[i - 1].Sink() >> util::stream::kRecycle;
259
+ }
260
+ // Has to be done here due to gamma_chains scope.
261
+ master.SetupSorts(primary, true);
262
+ }
263
+
+ void InterpolateProbabilities(const std::vector<uint64_t> &counts, Master &master, Sorts<SuffixOrder> &primary, util::FixedArray<util::stream::FileBuffer> &gammas, Output &output, const SpecialVocab &specials) {
+   std::cerr << "=== 4/" << master.Steps() << " Calculating and writing order-interpolated probabilities ===" << std::endl;
+   const PipelineConfig &config = master.Config();
+   master.MaximumLazyInput(counts, primary);
+
+   util::stream::Chains gamma_chains(config.order - 1);
+   for (std::size_t i = 0; i < config.order - 1; ++i) {
+     util::stream::ChainConfig read_backoffs(config.read_backoffs);
+
+     if (config.prune_vocab || config.prune_thresholds[i + 1] > 0)
+       read_backoffs.entry_size = sizeof(HashGamma);
+     else
+       read_backoffs.entry_size = sizeof(float);
+
+     gamma_chains.push_back(read_backoffs);
+     gamma_chains.back() >> gammas[i].Source(true);
+   }
+   master >> Interpolate(std::max(master.Config().vocab_size_for_unk, counts[0] - 1 /* <s> is not included */), util::stream::ChainPositions(gamma_chains), config.prune_thresholds, config.prune_vocab, config.output_q, specials);
+   gamma_chains >> util::stream::kRecycle;
+   output.SinkProbs(master.MutableChains());
+ }
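// Editor's note: the entry_size switch above reflects two record shapes. On
// unpruned orders only the float backoff weight flows through; with pruning,
// each gamma must travel with a context hash so it can still be matched after
// neighboring records drop out. A sketch of the assumed layout (the real
// definition lives in hash_gamma.hh; this name and its fields are
// illustrative):
struct HashGammaSketch {
  uint64_t hash;   // identifies the context once pruning removes neighbors
  float gamma;     // the backoff weight itself
};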
+
+ class VocabNumbering {
+   public:
+     VocabNumbering(int final_vocab, StringPiece temp_prefix, bool renumber)
+       : final_vocab_(final_vocab),
+         renumber_(renumber),
+         specials_(kBOS, kEOS) {
+       if (renumber) {
+         temporary_.reset(util::MakeTemp(temp_prefix));
+       }
+     }
+
+     int WriteOnTheFly() const { return renumber_ ? temporary_.get() : final_vocab_; }
+
+     // Compute the vocabulary mapping and return the memory used.
+     std::size_t ComputeMapping(WordIndex type_count) {
+       if (!renumber_) return 0;
+       ngram::SortedVocabulary::ComputeRenumbering(type_count, temporary_.get(), final_vocab_, vocab_mapping_);
+       temporary_.reset();
+       return sizeof(WordIndex) * vocab_mapping_.size();
+     }
+
+     void ApplyRenumber(util::stream::Chains &chains) {
+       if (!renumber_) return;
+       for (std::size_t i = 0; i < chains.size(); ++i) {
+         chains[i] >> Renumber(&*vocab_mapping_.begin(), i + 1);
+       }
+       specials_ = SpecialVocab(vocab_mapping_[specials_.BOS()], vocab_mapping_[specials_.EOS()]);
+     }
+
+     const SpecialVocab &Specials() const { return specials_; }
+
+   private:
+     int final_vocab_;
+     // Out of order vocab file created on the fly.
+     util::scoped_fd temporary_;
+
+     bool renumber_;
+
+     std::vector<WordIndex> vocab_mapping_;
+
+     SpecialVocab specials_;
+ };
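// Editor's note: the class above is driven in three steps by Pipeline below;
// condensed here with names taken directly from that function:
//   VocabNumbering numbering(output.VocabFile(), config.TempPrefix(), config.renumber_vocabulary);
//   int vocab_fd = numbering.WriteOnTheFly();          // 1. CorpusCount writes vocab here
//   numbering.ComputeMapping(type_count);              // 2. derive old->new IDs (no-op unless renumbering)
//   numbering.ApplyRenumber(master.MutableChains());   // 3. rewrite word IDs in every chain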
+
+ } // namespace
+
+ void Pipeline(PipelineConfig &config, int text_file, Output &output) {
+   // Some fail-fast sanity checks.
+   if (config.sort.buffer_size * 4 > config.TotalMemory()) {
+     config.sort.buffer_size = config.TotalMemory() / 4;
+     std::cerr << "Warning: changing sort block size to " << config.sort.buffer_size << " bytes due to low total memory." << std::endl;
+   }
+   if (config.minimum_block < NGram<BuildingPayload>::TotalSize(config.order)) {
+     config.minimum_block = NGram<BuildingPayload>::TotalSize(config.order);
+     std::cerr << "Warning: raising minimum block to " << config.minimum_block << " to fit an ngram in every block." << std::endl;
+   }
+   UTIL_THROW_IF(config.sort.buffer_size < config.minimum_block, util::Exception, "Sort block size " << config.sort.buffer_size << " is below the minimum block size " << config.minimum_block << ".");
+   UTIL_THROW_IF(config.TotalMemory() < config.minimum_block * config.order * config.block_count, util::Exception,
+                 "Not enough memory to fit " << (config.order * config.block_count) << " blocks with minimum size " << config.minimum_block << ". Increase memory to " << (config.minimum_block * config.order * config.block_count) << " bytes or decrease the minimum block size.");
+
+   Master master(config, output.Steps());
+   // master's destructor will wait for chains. But they might be deadlocked if
+   // this thread dies because e.g. it ran out of memory.
+   try {
+     VocabNumbering numbering(output.VocabFile(), config.TempPrefix(), config.renumber_vocabulary);
+     uint64_t token_count;
+     WordIndex type_count;
+     std::string text_file_name;
+     std::vector<bool> prune_words;
+     util::scoped_ptr<util::stream::Sort<SuffixOrder, CombineCounts> > sorted_counts(
+         CountText(text_file, numbering.WriteOnTheFly(), master, token_count, type_count, text_file_name, prune_words));
+     std::cerr << "Unigram tokens " << token_count << " types " << type_count << std::endl;
+
+     // Create vocab mapping, which uses temporary memory, while nothing else is happening.
+     std::size_t subtract_for_numbering = numbering.ComputeMapping(type_count);
+
+     std::cerr << "=== 2/" << master.Steps() << " Calculating and sorting adjusted counts ===" << std::endl;
+     master.InitForAdjust(*sorted_counts, type_count, subtract_for_numbering);
+     sorted_counts.reset();
+
+     std::vector<uint64_t> counts;
+     std::vector<uint64_t> counts_pruned;
+     std::vector<Discount> discounts;
+     master >> AdjustCounts(config.prune_thresholds, counts, counts_pruned, prune_words, config.discount, discounts);
+     numbering.ApplyRenumber(master.MutableChains());
+
+     {
+       util::FixedArray<util::stream::FileBuffer> gammas;
+       Sorts<SuffixOrder> primary;
+       InitialProbabilities(counts, counts_pruned, discounts, master, primary, gammas, config.prune_thresholds, config.prune_vocab, numbering.Specials());
+       output.SetHeader(HeaderInfo(text_file_name, token_count, counts_pruned));
+       // Also does output.
+       InterpolateProbabilities(counts_pruned, master, primary, gammas, output, numbering.Specials());
+     }
+   } catch (const util::Exception &e) {
+     std::cerr << e.what() << std::endl;
+     abort();
+   }
+ }
+
+ }} // namespaces
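As a reading aid for the second UTIL_THROW_IF in Pipeline: the pipeline needs room for order * block_count blocks of at least minimum_block bytes each. A self-contained sketch with illustrative values (not lmplz defaults):

#include <cstddef>
#include <iostream>

int main() {
  // Illustrative values, not defaults.
  const std::size_t order = 5, block_count = 2;
  const std::size_t minimum_block = 64 << 10;                        // 64 KiB
  const std::size_t required = minimum_block * order * block_count;  // 655360 bytes
  std::cout << "need >= " << required << " bytes for "
            << order * block_count << " blocks\n";
  return 0;
}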
cc-multilingual-main/cc_net/third_party/kenlm/lm/builder/pipeline.hh ADDED
@@ -0,0 +1,76 @@
+ #ifndef LM_BUILDER_PIPELINE_H
+ #define LM_BUILDER_PIPELINE_H
+
+ #include "adjust_counts.hh"
+ #include "initial_probabilities.hh"
+ #include "header_info.hh"
+ #include "../lm_exception.hh"
+ #include "../word_index.hh"
+ #include "../../util/stream/config.hh"
+ #include "../../util/file_piece.hh"
+
+ #include <string>
+ #include <cstddef>
+
+ namespace lm { namespace builder {
+
+ class Output;
+
+ struct PipelineConfig {
+   std::size_t order;
+   util::stream::SortConfig sort;
+   InitialProbabilitiesConfig initial_probs;
+   util::stream::ChainConfig read_backoffs;
+
+   // Estimated vocabulary size. Used to size CorpusCount's memory and its
+   // initial probing hash table.
+   lm::WordIndex vocab_estimate;
+
+   // Minimum block size to tolerate.
+   std::size_t minimum_block;
+
+   // Number of blocks to use. This will be overridden to 1 if everything fits.
+   std::size_t block_count;
+
+   // n-gram count thresholds for pruning. A threshold of 0 means no pruning
+   // for the corresponding n-gram order.
+   std::vector<uint64_t> prune_thresholds;
+   bool prune_vocab;
+   std::string prune_vocab_file;
+
+   /* Renumber the vocabulary the way the trie likes it? */
+   bool renumber_vocabulary;
+
+   // What to do with discount failures.
+   DiscountConfig discount;
+
+   // Compute collapsed q values instead of probability and backoff.
+   bool output_q;
+
+   /* Computing the perplexity of LMs with different vocabularies is hard. For
+    * example, the lowest perplexity is attained by a unigram model that
+    * predicts p(<unk>) = 1 and has no other vocabulary. Also, linearly
+    * interpolated models will sum to more than 1 because <unk> is duplicated
+    * (SRI just pretends p(<unk>) = 0 for these purposes, which makes it sum to
+    * 1 but comes with its own problems). This option will make the vocabulary
+    * a particular size by replicating <unk> multiple times for purposes of
+    * computing vocabulary size. It has no effect if the actual vocabulary is
+    * larger. This parameter serves the same purpose as IRSTLM's "dub".
+    */
+   uint64_t vocab_size_for_unk;
+
+   /* What to do the first time <s>, </s>, or <unk> appears in the input. If
+    * this is anything but THROW_UP, then the symbol will always be treated as
+    * whitespace.
+    */
+   WarningAction disallowed_symbol_action;
+
+   const std::string &TempPrefix() const { return sort.temp_prefix; }
+   std::size_t TotalMemory() const { return sort.total_memory; }
+ };
+
+ // Takes ownership of text_file. Results are delivered through output.
+ void Pipeline(PipelineConfig &config, int text_file, Output &output);
+
+ }} // namespaces
+ #endif // LM_BUILDER_PIPELINE_H
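For orientation, a hedged sketch of how a driver might fill this struct and invoke Pipeline. All field values are illustrative; the real entry point is lmplz_main.cc, Output's constructor arguments come from output.hh (not shown in this header), and the THROW_UP value is assumed from the disallowed_symbol_action comment above.

#include "pipeline.hh"
#include "output.hh"

int main() {
  lm::builder::PipelineConfig config = {};          // value-init, then set what we need
  config.order = 3;
  config.sort.temp_prefix = "/tmp/lm";              // where spill files go
  config.sort.total_memory = 1ull << 30;            // 1 GiB budget (illustrative)
  config.sort.buffer_size = 64ull << 20;            // 64 MiB sort blocks
  config.vocab_estimate = 1000000;                  // probing hash sizing hint
  config.minimum_block = 8192;
  config.block_count = 2;
  config.prune_thresholds.assign(config.order, 0);  // 0 = no pruning at any order
  config.vocab_size_for_unk = 0;
  config.disallowed_symbol_action = lm::THROW_UP;   // assumed enum value per the comment above
  // initial_probs and read_backoffs also need real values; lmplz_main.cc shows
  // the full setup, so the actual calls are left commented out here:
  //   lm::builder::Output output(/* see output.hh */);
  //   lm::builder::Pipeline(config, /* corpus fd; Pipeline takes ownership */ 0, output);
  return 0;
}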
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.1 ADDED
Binary file (60 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.2 ADDED
Binary file (112 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.3 ADDED
Binary file (140 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.kenlm_intermediate ADDED
@@ -0,0 +1,3 @@
+ KenLM intermediate binary file
+ Counts 5 7 7
+ Payload pb
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy0.vocab ADDED
Binary file (19 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.1 ADDED
Binary file (72 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.2 ADDED
Binary file (112 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.3 ADDED
Binary file (120 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.kenlm_intermediate ADDED
@@ -0,0 +1,3 @@
+ KenLM intermediate binary file
+ Counts 6 7 6
+ Payload pb
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/bigendian/toy1.vocab ADDED
Binary file (21 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/generate.sh ADDED
@@ -0,0 +1,9 @@
+ #!/bin/bash
+ ../../../../build/bin/lmplz --discount_fallback -o 3 -S 100M --intermediate toy0 --arpa ../toy0.arpa <<EOF
+ a a b a
+ b a a b
+ EOF
+ ../../../../build/bin/lmplz --discount_fallback -o 3 -S 100M --intermediate toy1 --arpa ../toy1.arpa <<EOF
+ a a b b b b b b b
+ c
+ EOF
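A quick consistency check connecting this script to the .kenlm_intermediate headers above: toy0's corpus ("a a b a" / "b a a b") has the five unigram types <unk>, <s>, </s>, a, b, and once <s>/</s> are attached its two sentences contain exactly 7 distinct bigrams and 7 distinct trigrams, matching the "Counts 5 7 7" header written for toy0. The toy1 corpus works out the same way against its "Counts 6 7 6" header.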
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.1 ADDED
Binary file (60 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.2 ADDED
Binary file (112 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.3 ADDED
Binary file (140 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.kenlm_intermediate ADDED
@@ -0,0 +1,3 @@
+ KenLM intermediate binary file
+ Counts 5 7 7
+ Payload pb
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy0.vocab ADDED
Binary file (19 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.1 ADDED
Binary file (72 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.2 ADDED
Binary file (112 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.3 ADDED
Binary file (120 Bytes).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/littleendian/toy1.kenlm_intermediate ADDED
@@ -0,0 +1,3 @@
+ KenLM intermediate binary file
+ Counts 6 7 6
+ Payload pb