applied-ai-018 committed
Commit 3573e61 · verified · 1 Parent(s): ec09e60

Add files using upload-large-folder tool
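For context, the commit message refers to the upload-large-folder tool. A minimal, hypothetical sketch of how such an upload is typically driven from `huggingface_hub` is shown below; the repo id, repo type, and local path are placeholders, not values taken from this commit, and it assumes a recent `huggingface_hub` release with `upload_large_folder` and an authenticated session.

```python
# Hypothetical sketch only -- not part of this commit.
# Assumes huggingface_hub with HfApi.upload_large_folder and prior authentication
# (HF_TOKEN or `huggingface-cli login`). Identifiers below are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="applied-ai-018/cc-multilingual",  # placeholder repo id
    repo_type="dataset",                       # assumption; could be "model"
    folder_path="./cc-multilingual-main",      # local folder mirrored into the repo
)
```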

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CMakeCXXCompiler.cmake +83 -0
  2. cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdC/CMakeCCompilerId.c +803 -0
  3. cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdC/a.out +0 -0
  4. cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdCXX/CMakeCXXCompilerId.cpp +791 -0
  5. cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdCXX/a.out +0 -0
  6. cc-multilingual-main/cc_net/third_party/kenlm/lm/CMakeLists.txt +81 -0
  7. cc-multilingual-main/cc_net/third_party/kenlm/lm/bhiksha.cc +94 -0
  8. cc-multilingual-main/cc_net/third_party/kenlm/lm/bhiksha.hh +122 -0
  9. cc-multilingual-main/cc_net/third_party/kenlm/lm/binary_format.cc +302 -0
  10. cc-multilingual-main/cc_net/third_party/kenlm/lm/binary_format.hh +106 -0
  11. cc-multilingual-main/cc_net/third_party/kenlm/lm/blank.hh +42 -0
  12. cc-multilingual-main/cc_net/third_party/kenlm/lm/build_binary_main.cc +237 -0
  13. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/CMakeLists.txt +25 -0
  14. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/compare.hh +185 -0
  15. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/joint_order.hh +71 -0
  16. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/model_buffer.cc +144 -0
  17. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/model_buffer.hh +74 -0
  18. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/model_buffer_test.cc +52 -0
  19. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/ngram.hh +77 -0
  20. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/ngram_stream.hh +65 -0
  21. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/print.cc +62 -0
  22. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/print.hh +58 -0
  23. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/renumber.cc +17 -0
  24. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/renumber.hh +30 -0
  25. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/size_option.cc +24 -0
  26. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/size_option.hh +11 -0
  27. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/special.hh +27 -0
  28. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/toy0.arpa +31 -0
  29. cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/toy1.arpa +31 -0
  30. cc-multilingual-main/cc_net/third_party/kenlm/lm/config.cc +30 -0
  31. cc-multilingual-main/cc_net/third_party/kenlm/lm/config.hh +124 -0
  32. cc-multilingual-main/cc_net/third_party/kenlm/lm/enumerate_vocab.hh +28 -0
  33. cc-multilingual-main/cc_net/third_party/kenlm/lm/facade.hh +73 -0
  34. cc-multilingual-main/cc_net/third_party/kenlm/lm/fragment_main.cc +37 -0
  35. cc-multilingual-main/cc_net/third_party/kenlm/lm/kenlm_benchmark_main.cc +232 -0
  36. cc-multilingual-main/cc_net/third_party/kenlm/lm/left.hh +216 -0
  37. cc-multilingual-main/cc_net/third_party/kenlm/lm/left_test.cc +397 -0
  38. cc-multilingual-main/cc_net/third_party/kenlm/lm/lm_exception.cc +23 -0
  39. cc-multilingual-main/cc_net/third_party/kenlm/lm/lm_exception.hh +50 -0
  40. cc-multilingual-main/cc_net/third_party/kenlm/lm/max_order.hh +13 -0
  41. cc-multilingual-main/cc_net/third_party/kenlm/lm/model.cc +349 -0
  42. cc-multilingual-main/cc_net/third_party/kenlm/lm/model.hh +155 -0
  43. cc-multilingual-main/cc_net/third_party/kenlm/lm/model_test.cc +448 -0
  44. cc-multilingual-main/cc_net/third_party/kenlm/lm/model_type.hh +23 -0
  45. cc-multilingual-main/cc_net/third_party/kenlm/lm/ngram_query.hh +113 -0
  46. cc-multilingual-main/cc_net/third_party/kenlm/lm/partial.hh +166 -0
  47. cc-multilingual-main/cc_net/third_party/kenlm/lm/partial_test.cc +199 -0
  48. cc-multilingual-main/cc_net/third_party/kenlm/lm/quantize.cc +93 -0
  49. cc-multilingual-main/cc_net/third_party/kenlm/lm/quantize.hh +240 -0
  50. cc-multilingual-main/cc_net/third_party/kenlm/lm/query_main.cc +142 -0
cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CMakeCXXCompiler.cmake ADDED
@@ -0,0 +1,83 @@
+ set(CMAKE_CXX_COMPILER "/usr/bin/c++")
+ set(CMAKE_CXX_COMPILER_ARG1 "")
+ set(CMAKE_CXX_COMPILER_ID "GNU")
+ set(CMAKE_CXX_COMPILER_VERSION "11.4.0")
+ set(CMAKE_CXX_COMPILER_VERSION_INTERNAL "")
+ set(CMAKE_CXX_COMPILER_WRAPPER "")
+ set(CMAKE_CXX_STANDARD_COMPUTED_DEFAULT "17")
+ set(CMAKE_CXX_EXTENSIONS_COMPUTED_DEFAULT "ON")
+ set(CMAKE_CXX_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17;cxx_std_20;cxx_std_23")
+ set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters")
+ set(CMAKE_CXX11_COMPILE_FEATURES "cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates")
+ set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates")
+ set(CMAKE_CXX17_COMPILE_FEATURES "cxx_std_17")
+ set(CMAKE_CXX20_COMPILE_FEATURES "cxx_std_20")
+ set(CMAKE_CXX23_COMPILE_FEATURES "cxx_std_23")
+
+ set(CMAKE_CXX_PLATFORM_ID "Linux")
+ set(CMAKE_CXX_SIMULATE_ID "")
+ set(CMAKE_CXX_COMPILER_FRONTEND_VARIANT "")
+ set(CMAKE_CXX_SIMULATE_VERSION "")
+
+
+
+
+ set(CMAKE_AR "/usr/bin/ar")
+ set(CMAKE_CXX_COMPILER_AR "/usr/bin/gcc-ar-11")
+ set(CMAKE_RANLIB "/usr/bin/ranlib")
+ set(CMAKE_CXX_COMPILER_RANLIB "/usr/bin/gcc-ranlib-11")
+ set(CMAKE_LINKER "/usr/bin/ld")
+ set(CMAKE_MT "")
+ set(CMAKE_COMPILER_IS_GNUCXX 1)
+ set(CMAKE_CXX_COMPILER_LOADED 1)
+ set(CMAKE_CXX_COMPILER_WORKS TRUE)
+ set(CMAKE_CXX_ABI_COMPILED TRUE)
+
+ set(CMAKE_CXX_COMPILER_ENV_VAR "CXX")
+
+ set(CMAKE_CXX_COMPILER_ID_RUN 1)
+ set(CMAKE_CXX_SOURCE_FILE_EXTENSIONS C;M;c++;cc;cpp;cxx;m;mm;mpp;CPP;ixx;cppm)
+ set(CMAKE_CXX_IGNORE_EXTENSIONS inl;h;hpp;HPP;H;o;O;obj;OBJ;def;DEF;rc;RC)
+
+ foreach (lang C OBJC OBJCXX)
+ if (CMAKE_${lang}_COMPILER_ID_RUN)
+ foreach(extension IN LISTS CMAKE_${lang}_SOURCE_FILE_EXTENSIONS)
+ list(REMOVE_ITEM CMAKE_CXX_SOURCE_FILE_EXTENSIONS ${extension})
+ endforeach()
+ endif()
+ endforeach()
+
+ set(CMAKE_CXX_LINKER_PREFERENCE 30)
+ set(CMAKE_CXX_LINKER_PREFERENCE_PROPAGATES 1)
+
+ # Save compiler ABI information.
+ set(CMAKE_CXX_SIZEOF_DATA_PTR "8")
+ set(CMAKE_CXX_COMPILER_ABI "ELF")
+ set(CMAKE_CXX_BYTE_ORDER "LITTLE_ENDIAN")
+ set(CMAKE_CXX_LIBRARY_ARCHITECTURE "x86_64-linux-gnu")
+
+ if(CMAKE_CXX_SIZEOF_DATA_PTR)
+ set(CMAKE_SIZEOF_VOID_P "${CMAKE_CXX_SIZEOF_DATA_PTR}")
+ endif()
+
+ if(CMAKE_CXX_COMPILER_ABI)
+ set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_CXX_COMPILER_ABI}")
+ endif()
+
+ if(CMAKE_CXX_LIBRARY_ARCHITECTURE)
+ set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu")
+ endif()
+
+ set(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX "")
+ if(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX)
+ set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_CXX_CL_SHOWINCLUDES_PREFIX}")
+ endif()
+
+
+
+
+
+ set(CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES "/usr/include/c++/11;/usr/include/x86_64-linux-gnu/c++/11;/usr/include/c++/11/backward;/usr/lib/gcc/x86_64-linux-gnu/11/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include")
+ set(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "stdc++;m;gcc_s;gcc;c;gcc_s;gcc")
+ set(CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib")
+ set(CMAKE_CXX_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "")
cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdC/CMakeCCompilerId.c ADDED
@@ -0,0 +1,803 @@
1
+ #ifdef __cplusplus
2
+ # error "A C++ compiler has been selected for C."
3
+ #endif
4
+
5
+ #if defined(__18CXX)
6
+ # define ID_VOID_MAIN
7
+ #endif
8
+ #if defined(__CLASSIC_C__)
9
+ /* cv-qualifiers did not exist in K&R C */
10
+ # define const
11
+ # define volatile
12
+ #endif
13
+
14
+ #if !defined(__has_include)
15
+ /* If the compiler does not have __has_include, pretend the answer is
16
+ always no. */
17
+ # define __has_include(x) 0
18
+ #endif
19
+
20
+
21
+ /* Version number components: V=Version, R=Revision, P=Patch
22
+ Version date components: YYYY=Year, MM=Month, DD=Day */
23
+
24
+ #if defined(__INTEL_COMPILER) || defined(__ICC)
25
+ # define COMPILER_ID "Intel"
26
+ # if defined(_MSC_VER)
27
+ # define SIMULATE_ID "MSVC"
28
+ # endif
29
+ # if defined(__GNUC__)
30
+ # define SIMULATE_ID "GNU"
31
+ # endif
32
+ /* __INTEL_COMPILER = VRP prior to 2021, and then VVVV for 2021 and later,
33
+ except that a few beta releases use the old format with V=2021. */
34
+ # if __INTEL_COMPILER < 2021 || __INTEL_COMPILER == 202110 || __INTEL_COMPILER == 202111
35
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100)
36
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10)
37
+ # if defined(__INTEL_COMPILER_UPDATE)
38
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE)
39
+ # else
40
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10)
41
+ # endif
42
+ # else
43
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER)
44
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER_UPDATE)
45
+ /* The third version component from --version is an update index,
46
+ but no macro is provided for it. */
47
+ # define COMPILER_VERSION_PATCH DEC(0)
48
+ # endif
49
+ # if defined(__INTEL_COMPILER_BUILD_DATE)
50
+ /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */
51
+ # define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE)
52
+ # endif
53
+ # if defined(_MSC_VER)
54
+ /* _MSC_VER = VVRR */
55
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
56
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
57
+ # endif
58
+ # if defined(__GNUC__)
59
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUC__)
60
+ # elif defined(__GNUG__)
61
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUG__)
62
+ # endif
63
+ # if defined(__GNUC_MINOR__)
64
+ # define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__)
65
+ # endif
66
+ # if defined(__GNUC_PATCHLEVEL__)
67
+ # define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__)
68
+ # endif
69
+
70
+ #elif (defined(__clang__) && defined(__INTEL_CLANG_COMPILER)) || defined(__INTEL_LLVM_COMPILER)
71
+ # define COMPILER_ID "IntelLLVM"
72
+ #if defined(_MSC_VER)
73
+ # define SIMULATE_ID "MSVC"
74
+ #endif
75
+ #if defined(__GNUC__)
76
+ # define SIMULATE_ID "GNU"
77
+ #endif
78
+ /* __INTEL_LLVM_COMPILER = VVVVRP prior to 2021.2.0, VVVVRRPP for 2021.2.0 and
79
+ * later. Look for 6 digit vs. 8 digit version number to decide encoding.
80
+ * VVVV is no smaller than the current year when a version is released.
81
+ */
82
+ #if __INTEL_LLVM_COMPILER < 1000000L
83
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/100)
84
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/10 % 10)
85
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 10)
86
+ #else
87
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/10000)
88
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/100 % 100)
89
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 100)
90
+ #endif
91
+ #if defined(_MSC_VER)
92
+ /* _MSC_VER = VVRR */
93
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
94
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
95
+ #endif
96
+ #if defined(__GNUC__)
97
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUC__)
98
+ #elif defined(__GNUG__)
99
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUG__)
100
+ #endif
101
+ #if defined(__GNUC_MINOR__)
102
+ # define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__)
103
+ #endif
104
+ #if defined(__GNUC_PATCHLEVEL__)
105
+ # define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__)
106
+ #endif
107
+
108
+ #elif defined(__PATHCC__)
109
+ # define COMPILER_ID "PathScale"
110
+ # define COMPILER_VERSION_MAJOR DEC(__PATHCC__)
111
+ # define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__)
112
+ # if defined(__PATHCC_PATCHLEVEL__)
113
+ # define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__)
114
+ # endif
115
+
116
+ #elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__)
117
+ # define COMPILER_ID "Embarcadero"
118
+ # define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF)
119
+ # define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF)
120
+ # define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF)
121
+
122
+ #elif defined(__BORLANDC__)
123
+ # define COMPILER_ID "Borland"
124
+ /* __BORLANDC__ = 0xVRR */
125
+ # define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8)
126
+ # define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF)
127
+
128
+ #elif defined(__WATCOMC__) && __WATCOMC__ < 1200
129
+ # define COMPILER_ID "Watcom"
130
+ /* __WATCOMC__ = VVRR */
131
+ # define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100)
132
+ # define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10)
133
+ # if (__WATCOMC__ % 10) > 0
134
+ # define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10)
135
+ # endif
136
+
137
+ #elif defined(__WATCOMC__)
138
+ # define COMPILER_ID "OpenWatcom"
139
+ /* __WATCOMC__ = VVRP + 1100 */
140
+ # define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100)
141
+ # define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10)
142
+ # if (__WATCOMC__ % 10) > 0
143
+ # define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10)
144
+ # endif
145
+
146
+ #elif defined(__SUNPRO_C)
147
+ # define COMPILER_ID "SunPro"
148
+ # if __SUNPRO_C >= 0x5100
149
+ /* __SUNPRO_C = 0xVRRP */
150
+ # define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>12)
151
+ # define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xFF)
152
+ # define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF)
153
+ # else
154
+ /* __SUNPRO_CC = 0xVRP */
155
+ # define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>8)
156
+ # define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xF)
157
+ # define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF)
158
+ # endif
159
+
160
+ #elif defined(__HP_cc)
161
+ # define COMPILER_ID "HP"
162
+ /* __HP_cc = VVRRPP */
163
+ # define COMPILER_VERSION_MAJOR DEC(__HP_cc/10000)
164
+ # define COMPILER_VERSION_MINOR DEC(__HP_cc/100 % 100)
165
+ # define COMPILER_VERSION_PATCH DEC(__HP_cc % 100)
166
+
167
+ #elif defined(__DECC)
168
+ # define COMPILER_ID "Compaq"
169
+ /* __DECC_VER = VVRRTPPPP */
170
+ # define COMPILER_VERSION_MAJOR DEC(__DECC_VER/10000000)
171
+ # define COMPILER_VERSION_MINOR DEC(__DECC_VER/100000 % 100)
172
+ # define COMPILER_VERSION_PATCH DEC(__DECC_VER % 10000)
173
+
174
+ #elif defined(__IBMC__) && defined(__COMPILER_VER__)
175
+ # define COMPILER_ID "zOS"
176
+ /* __IBMC__ = VRP */
177
+ # define COMPILER_VERSION_MAJOR DEC(__IBMC__/100)
178
+ # define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10)
179
+ # define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10)
180
+
181
+ #elif defined(__ibmxl__) && defined(__clang__)
182
+ # define COMPILER_ID "XLClang"
183
+ # define COMPILER_VERSION_MAJOR DEC(__ibmxl_version__)
184
+ # define COMPILER_VERSION_MINOR DEC(__ibmxl_release__)
185
+ # define COMPILER_VERSION_PATCH DEC(__ibmxl_modification__)
186
+ # define COMPILER_VERSION_TWEAK DEC(__ibmxl_ptf_fix_level__)
187
+
188
+
189
+ #elif defined(__IBMC__) && !defined(__COMPILER_VER__) && __IBMC__ >= 800
190
+ # define COMPILER_ID "XL"
191
+ /* __IBMC__ = VRP */
192
+ # define COMPILER_VERSION_MAJOR DEC(__IBMC__/100)
193
+ # define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10)
194
+ # define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10)
195
+
196
+ #elif defined(__IBMC__) && !defined(__COMPILER_VER__) && __IBMC__ < 800
197
+ # define COMPILER_ID "VisualAge"
198
+ /* __IBMC__ = VRP */
199
+ # define COMPILER_VERSION_MAJOR DEC(__IBMC__/100)
200
+ # define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10)
201
+ # define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10)
202
+
203
+ #elif defined(__NVCOMPILER)
204
+ # define COMPILER_ID "NVHPC"
205
+ # define COMPILER_VERSION_MAJOR DEC(__NVCOMPILER_MAJOR__)
206
+ # define COMPILER_VERSION_MINOR DEC(__NVCOMPILER_MINOR__)
207
+ # if defined(__NVCOMPILER_PATCHLEVEL__)
208
+ # define COMPILER_VERSION_PATCH DEC(__NVCOMPILER_PATCHLEVEL__)
209
+ # endif
210
+
211
+ #elif defined(__PGI)
212
+ # define COMPILER_ID "PGI"
213
+ # define COMPILER_VERSION_MAJOR DEC(__PGIC__)
214
+ # define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__)
215
+ # if defined(__PGIC_PATCHLEVEL__)
216
+ # define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__)
217
+ # endif
218
+
219
+ #elif defined(_CRAYC)
220
+ # define COMPILER_ID "Cray"
221
+ # define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR)
222
+ # define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR)
223
+
224
+ #elif defined(__TI_COMPILER_VERSION__)
225
+ # define COMPILER_ID "TI"
226
+ /* __TI_COMPILER_VERSION__ = VVVRRRPPP */
227
+ # define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000)
228
+ # define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 1000)
229
+ # define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000)
230
+
231
+ #elif defined(__CLANG_FUJITSU)
232
+ # define COMPILER_ID "FujitsuClang"
233
+ # define COMPILER_VERSION_MAJOR DEC(__FCC_major__)
234
+ # define COMPILER_VERSION_MINOR DEC(__FCC_minor__)
235
+ # define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__)
236
+ # define COMPILER_VERSION_INTERNAL_STR __clang_version__
237
+
238
+
239
+ #elif defined(__FUJITSU)
240
+ # define COMPILER_ID "Fujitsu"
241
+ # if defined(__FCC_version__)
242
+ # define COMPILER_VERSION __FCC_version__
243
+ # elif defined(__FCC_major__)
244
+ # define COMPILER_VERSION_MAJOR DEC(__FCC_major__)
245
+ # define COMPILER_VERSION_MINOR DEC(__FCC_minor__)
246
+ # define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__)
247
+ # endif
248
+ # if defined(__fcc_version)
249
+ # define COMPILER_VERSION_INTERNAL DEC(__fcc_version)
250
+ # elif defined(__FCC_VERSION)
251
+ # define COMPILER_VERSION_INTERNAL DEC(__FCC_VERSION)
252
+ # endif
253
+
254
+
255
+ #elif defined(__ghs__)
256
+ # define COMPILER_ID "GHS"
257
+ /* __GHS_VERSION_NUMBER = VVVVRP */
258
+ # ifdef __GHS_VERSION_NUMBER
259
+ # define COMPILER_VERSION_MAJOR DEC(__GHS_VERSION_NUMBER / 100)
260
+ # define COMPILER_VERSION_MINOR DEC(__GHS_VERSION_NUMBER / 10 % 10)
261
+ # define COMPILER_VERSION_PATCH DEC(__GHS_VERSION_NUMBER % 10)
262
+ # endif
263
+
264
+ #elif defined(__TINYC__)
265
+ # define COMPILER_ID "TinyCC"
266
+
267
+ #elif defined(__BCC__)
268
+ # define COMPILER_ID "Bruce"
269
+
270
+ #elif defined(__SCO_VERSION__)
271
+ # define COMPILER_ID "SCO"
272
+
273
+ #elif defined(__ARMCC_VERSION) && !defined(__clang__)
274
+ # define COMPILER_ID "ARMCC"
275
+ #if __ARMCC_VERSION >= 1000000
276
+ /* __ARMCC_VERSION = VRRPPPP */
277
+ # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000)
278
+ # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 100)
279
+ # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000)
280
+ #else
281
+ /* __ARMCC_VERSION = VRPPPP */
282
+ # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000)
283
+ # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10)
284
+ # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000)
285
+ #endif
286
+
287
+
288
+ #elif defined(__clang__) && defined(__apple_build_version__)
289
+ # define COMPILER_ID "AppleClang"
290
+ # if defined(_MSC_VER)
291
+ # define SIMULATE_ID "MSVC"
292
+ # endif
293
+ # define COMPILER_VERSION_MAJOR DEC(__clang_major__)
294
+ # define COMPILER_VERSION_MINOR DEC(__clang_minor__)
295
+ # define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__)
296
+ # if defined(_MSC_VER)
297
+ /* _MSC_VER = VVRR */
298
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
299
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
300
+ # endif
301
+ # define COMPILER_VERSION_TWEAK DEC(__apple_build_version__)
302
+
303
+ #elif defined(__clang__) && defined(__ARMCOMPILER_VERSION)
304
+ # define COMPILER_ID "ARMClang"
305
+ # define COMPILER_VERSION_MAJOR DEC(__ARMCOMPILER_VERSION/1000000)
306
+ # define COMPILER_VERSION_MINOR DEC(__ARMCOMPILER_VERSION/10000 % 100)
307
+ # define COMPILER_VERSION_PATCH DEC(__ARMCOMPILER_VERSION % 10000)
308
+ # define COMPILER_VERSION_INTERNAL DEC(__ARMCOMPILER_VERSION)
309
+
310
+ #elif defined(__clang__)
311
+ # define COMPILER_ID "Clang"
312
+ # if defined(_MSC_VER)
313
+ # define SIMULATE_ID "MSVC"
314
+ # endif
315
+ # define COMPILER_VERSION_MAJOR DEC(__clang_major__)
316
+ # define COMPILER_VERSION_MINOR DEC(__clang_minor__)
317
+ # define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__)
318
+ # if defined(_MSC_VER)
319
+ /* _MSC_VER = VVRR */
320
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
321
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
322
+ # endif
323
+
324
+ #elif defined(__GNUC__)
325
+ # define COMPILER_ID "GNU"
326
+ # define COMPILER_VERSION_MAJOR DEC(__GNUC__)
327
+ # if defined(__GNUC_MINOR__)
328
+ # define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__)
329
+ # endif
330
+ # if defined(__GNUC_PATCHLEVEL__)
331
+ # define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__)
332
+ # endif
333
+
334
+ #elif defined(_MSC_VER)
335
+ # define COMPILER_ID "MSVC"
336
+ /* _MSC_VER = VVRR */
337
+ # define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100)
338
+ # define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100)
339
+ # if defined(_MSC_FULL_VER)
340
+ # if _MSC_VER >= 1400
341
+ /* _MSC_FULL_VER = VVRRPPPPP */
342
+ # define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000)
343
+ # else
344
+ /* _MSC_FULL_VER = VVRRPPPP */
345
+ # define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000)
346
+ # endif
347
+ # endif
348
+ # if defined(_MSC_BUILD)
349
+ # define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD)
350
+ # endif
351
+
352
+ #elif defined(__VISUALDSPVERSION__) || defined(__ADSPBLACKFIN__) || defined(__ADSPTS__) || defined(__ADSP21000__)
353
+ # define COMPILER_ID "ADSP"
354
+ #if defined(__VISUALDSPVERSION__)
355
+ /* __VISUALDSPVERSION__ = 0xVVRRPP00 */
356
+ # define COMPILER_VERSION_MAJOR HEX(__VISUALDSPVERSION__>>24)
357
+ # define COMPILER_VERSION_MINOR HEX(__VISUALDSPVERSION__>>16 & 0xFF)
358
+ # define COMPILER_VERSION_PATCH HEX(__VISUALDSPVERSION__>>8 & 0xFF)
359
+ #endif
360
+
361
+ #elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC)
362
+ # define COMPILER_ID "IAR"
363
+ # if defined(__VER__) && defined(__ICCARM__)
364
+ # define COMPILER_VERSION_MAJOR DEC((__VER__) / 1000000)
365
+ # define COMPILER_VERSION_MINOR DEC(((__VER__) / 1000) % 1000)
366
+ # define COMPILER_VERSION_PATCH DEC((__VER__) % 1000)
367
+ # define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__)
368
+ # elif defined(__VER__) && (defined(__ICCAVR__) || defined(__ICCRX__) || defined(__ICCRH850__) || defined(__ICCRL78__) || defined(__ICC430__) || defined(__ICCRISCV__) || defined(__ICCV850__) || defined(__ICC8051__) || defined(__ICCSTM8__))
369
+ # define COMPILER_VERSION_MAJOR DEC((__VER__) / 100)
370
+ # define COMPILER_VERSION_MINOR DEC((__VER__) - (((__VER__) / 100)*100))
371
+ # define COMPILER_VERSION_PATCH DEC(__SUBVERSION__)
372
+ # define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__)
373
+ # endif
374
+
375
+ #elif defined(__SDCC_VERSION_MAJOR) || defined(SDCC)
376
+ # define COMPILER_ID "SDCC"
377
+ # if defined(__SDCC_VERSION_MAJOR)
378
+ # define COMPILER_VERSION_MAJOR DEC(__SDCC_VERSION_MAJOR)
379
+ # define COMPILER_VERSION_MINOR DEC(__SDCC_VERSION_MINOR)
380
+ # define COMPILER_VERSION_PATCH DEC(__SDCC_VERSION_PATCH)
381
+ # else
382
+ /* SDCC = VRP */
383
+ # define COMPILER_VERSION_MAJOR DEC(SDCC/100)
384
+ # define COMPILER_VERSION_MINOR DEC(SDCC/10 % 10)
385
+ # define COMPILER_VERSION_PATCH DEC(SDCC % 10)
386
+ # endif
387
+
388
+
389
+ /* These compilers are either not known or too old to define an
390
+ identification macro. Try to identify the platform and guess that
391
+ it is the native compiler. */
392
+ #elif defined(__hpux) || defined(__hpua)
393
+ # define COMPILER_ID "HP"
394
+
395
+ #else /* unknown compiler */
396
+ # define COMPILER_ID ""
397
+ #endif
398
+
399
+ /* Construct the string literal in pieces to prevent the source from
400
+ getting matched. Store it in a pointer rather than an array
401
+ because some compilers will just produce instructions to fill the
402
+ array rather than assigning a pointer to a static array. */
403
+ char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]";
404
+ #ifdef SIMULATE_ID
405
+ char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]";
406
+ #endif
407
+
408
+ #ifdef __QNXNTO__
409
+ char const* qnxnto = "INFO" ":" "qnxnto[]";
410
+ #endif
411
+
412
+ #if defined(__CRAYXT_COMPUTE_LINUX_TARGET)
413
+ char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]";
414
+ #endif
415
+
416
+ #define STRINGIFY_HELPER(X) #X
417
+ #define STRINGIFY(X) STRINGIFY_HELPER(X)
418
+
419
+ /* Identify known platforms by name. */
420
+ #if defined(__linux) || defined(__linux__) || defined(linux)
421
+ # define PLATFORM_ID "Linux"
422
+
423
+ #elif defined(__MSYS__)
424
+ # define PLATFORM_ID "MSYS"
425
+
426
+ #elif defined(__CYGWIN__)
427
+ # define PLATFORM_ID "Cygwin"
428
+
429
+ #elif defined(__MINGW32__)
430
+ # define PLATFORM_ID "MinGW"
431
+
432
+ #elif defined(__APPLE__)
433
+ # define PLATFORM_ID "Darwin"
434
+
435
+ #elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
436
+ # define PLATFORM_ID "Windows"
437
+
438
+ #elif defined(__FreeBSD__) || defined(__FreeBSD)
439
+ # define PLATFORM_ID "FreeBSD"
440
+
441
+ #elif defined(__NetBSD__) || defined(__NetBSD)
442
+ # define PLATFORM_ID "NetBSD"
443
+
444
+ #elif defined(__OpenBSD__) || defined(__OPENBSD)
445
+ # define PLATFORM_ID "OpenBSD"
446
+
447
+ #elif defined(__sun) || defined(sun)
448
+ # define PLATFORM_ID "SunOS"
449
+
450
+ #elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__)
451
+ # define PLATFORM_ID "AIX"
452
+
453
+ #elif defined(__hpux) || defined(__hpux__)
454
+ # define PLATFORM_ID "HP-UX"
455
+
456
+ #elif defined(__HAIKU__)
457
+ # define PLATFORM_ID "Haiku"
458
+
459
+ #elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS)
460
+ # define PLATFORM_ID "BeOS"
461
+
462
+ #elif defined(__QNX__) || defined(__QNXNTO__)
463
+ # define PLATFORM_ID "QNX"
464
+
465
+ #elif defined(__tru64) || defined(_tru64) || defined(__TRU64__)
466
+ # define PLATFORM_ID "Tru64"
467
+
468
+ #elif defined(__riscos) || defined(__riscos__)
469
+ # define PLATFORM_ID "RISCos"
470
+
471
+ #elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__)
472
+ # define PLATFORM_ID "SINIX"
473
+
474
+ #elif defined(__UNIX_SV__)
475
+ # define PLATFORM_ID "UNIX_SV"
476
+
477
+ #elif defined(__bsdos__)
478
+ # define PLATFORM_ID "BSDOS"
479
+
480
+ #elif defined(_MPRAS) || defined(MPRAS)
481
+ # define PLATFORM_ID "MP-RAS"
482
+
483
+ #elif defined(__osf) || defined(__osf__)
484
+ # define PLATFORM_ID "OSF1"
485
+
486
+ #elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv)
487
+ # define PLATFORM_ID "SCO_SV"
488
+
489
+ #elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX)
490
+ # define PLATFORM_ID "ULTRIX"
491
+
492
+ #elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX)
493
+ # define PLATFORM_ID "Xenix"
494
+
495
+ #elif defined(__WATCOMC__)
496
+ # if defined(__LINUX__)
497
+ # define PLATFORM_ID "Linux"
498
+
499
+ # elif defined(__DOS__)
500
+ # define PLATFORM_ID "DOS"
501
+
502
+ # elif defined(__OS2__)
503
+ # define PLATFORM_ID "OS2"
504
+
505
+ # elif defined(__WINDOWS__)
506
+ # define PLATFORM_ID "Windows3x"
507
+
508
+ # elif defined(__VXWORKS__)
509
+ # define PLATFORM_ID "VxWorks"
510
+
511
+ # else /* unknown platform */
512
+ # define PLATFORM_ID
513
+ # endif
514
+
515
+ #elif defined(__INTEGRITY)
516
+ # if defined(INT_178B)
517
+ # define PLATFORM_ID "Integrity178"
518
+
519
+ # else /* regular Integrity */
520
+ # define PLATFORM_ID "Integrity"
521
+ # endif
522
+
523
+ #else /* unknown platform */
524
+ # define PLATFORM_ID
525
+
526
+ #endif
527
+
528
+ /* For windows compilers MSVC and Intel we can determine
529
+ the architecture of the compiler being used. This is because
530
+ the compilers do not have flags that can change the architecture,
531
+ but rather depend on which compiler is being used
532
+ */
533
+ #if defined(_WIN32) && defined(_MSC_VER)
534
+ # if defined(_M_IA64)
535
+ # define ARCHITECTURE_ID "IA64"
536
+
537
+ # elif defined(_M_ARM64EC)
538
+ # define ARCHITECTURE_ID "ARM64EC"
539
+
540
+ # elif defined(_M_X64) || defined(_M_AMD64)
541
+ # define ARCHITECTURE_ID "x64"
542
+
543
+ # elif defined(_M_IX86)
544
+ # define ARCHITECTURE_ID "X86"
545
+
546
+ # elif defined(_M_ARM64)
547
+ # define ARCHITECTURE_ID "ARM64"
548
+
549
+ # elif defined(_M_ARM)
550
+ # if _M_ARM == 4
551
+ # define ARCHITECTURE_ID "ARMV4I"
552
+ # elif _M_ARM == 5
553
+ # define ARCHITECTURE_ID "ARMV5I"
554
+ # else
555
+ # define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM)
556
+ # endif
557
+
558
+ # elif defined(_M_MIPS)
559
+ # define ARCHITECTURE_ID "MIPS"
560
+
561
+ # elif defined(_M_SH)
562
+ # define ARCHITECTURE_ID "SHx"
563
+
564
+ # else /* unknown architecture */
565
+ # define ARCHITECTURE_ID ""
566
+ # endif
567
+
568
+ #elif defined(__WATCOMC__)
569
+ # if defined(_M_I86)
570
+ # define ARCHITECTURE_ID "I86"
571
+
572
+ # elif defined(_M_IX86)
573
+ # define ARCHITECTURE_ID "X86"
574
+
575
+ # else /* unknown architecture */
576
+ # define ARCHITECTURE_ID ""
577
+ # endif
578
+
579
+ #elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC)
580
+ # if defined(__ICCARM__)
581
+ # define ARCHITECTURE_ID "ARM"
582
+
583
+ # elif defined(__ICCRX__)
584
+ # define ARCHITECTURE_ID "RX"
585
+
586
+ # elif defined(__ICCRH850__)
587
+ # define ARCHITECTURE_ID "RH850"
588
+
589
+ # elif defined(__ICCRL78__)
590
+ # define ARCHITECTURE_ID "RL78"
591
+
592
+ # elif defined(__ICCRISCV__)
593
+ # define ARCHITECTURE_ID "RISCV"
594
+
595
+ # elif defined(__ICCAVR__)
596
+ # define ARCHITECTURE_ID "AVR"
597
+
598
+ # elif defined(__ICC430__)
599
+ # define ARCHITECTURE_ID "MSP430"
600
+
601
+ # elif defined(__ICCV850__)
602
+ # define ARCHITECTURE_ID "V850"
603
+
604
+ # elif defined(__ICC8051__)
605
+ # define ARCHITECTURE_ID "8051"
606
+
607
+ # elif defined(__ICCSTM8__)
608
+ # define ARCHITECTURE_ID "STM8"
609
+
610
+ # else /* unknown architecture */
611
+ # define ARCHITECTURE_ID ""
612
+ # endif
613
+
614
+ #elif defined(__ghs__)
615
+ # if defined(__PPC64__)
616
+ # define ARCHITECTURE_ID "PPC64"
617
+
618
+ # elif defined(__ppc__)
619
+ # define ARCHITECTURE_ID "PPC"
620
+
621
+ # elif defined(__ARM__)
622
+ # define ARCHITECTURE_ID "ARM"
623
+
624
+ # elif defined(__x86_64__)
625
+ # define ARCHITECTURE_ID "x64"
626
+
627
+ # elif defined(__i386__)
628
+ # define ARCHITECTURE_ID "X86"
629
+
630
+ # else /* unknown architecture */
631
+ # define ARCHITECTURE_ID ""
632
+ # endif
633
+
634
+ #elif defined(__TI_COMPILER_VERSION__)
635
+ # if defined(__TI_ARM__)
636
+ # define ARCHITECTURE_ID "ARM"
637
+
638
+ # elif defined(__MSP430__)
639
+ # define ARCHITECTURE_ID "MSP430"
640
+
641
+ # elif defined(__TMS320C28XX__)
642
+ # define ARCHITECTURE_ID "TMS320C28x"
643
+
644
+ # elif defined(__TMS320C6X__) || defined(_TMS320C6X)
645
+ # define ARCHITECTURE_ID "TMS320C6x"
646
+
647
+ # else /* unknown architecture */
648
+ # define ARCHITECTURE_ID ""
649
+ # endif
650
+
651
+ #else
652
+ # define ARCHITECTURE_ID
653
+ #endif
654
+
655
+ /* Convert integer to decimal digit literals. */
656
+ #define DEC(n) \
657
+ ('0' + (((n) / 10000000)%10)), \
658
+ ('0' + (((n) / 1000000)%10)), \
659
+ ('0' + (((n) / 100000)%10)), \
660
+ ('0' + (((n) / 10000)%10)), \
661
+ ('0' + (((n) / 1000)%10)), \
662
+ ('0' + (((n) / 100)%10)), \
663
+ ('0' + (((n) / 10)%10)), \
664
+ ('0' + ((n) % 10))
665
+
666
+ /* Convert integer to hex digit literals. */
667
+ #define HEX(n) \
668
+ ('0' + ((n)>>28 & 0xF)), \
669
+ ('0' + ((n)>>24 & 0xF)), \
670
+ ('0' + ((n)>>20 & 0xF)), \
671
+ ('0' + ((n)>>16 & 0xF)), \
672
+ ('0' + ((n)>>12 & 0xF)), \
673
+ ('0' + ((n)>>8 & 0xF)), \
674
+ ('0' + ((n)>>4 & 0xF)), \
675
+ ('0' + ((n) & 0xF))
676
+
677
+ /* Construct a string literal encoding the version number. */
678
+ #ifdef COMPILER_VERSION
679
+ char const* info_version = "INFO" ":" "compiler_version[" COMPILER_VERSION "]";
680
+
681
+ /* Construct a string literal encoding the version number components. */
682
+ #elif defined(COMPILER_VERSION_MAJOR)
683
+ char const info_version[] = {
684
+ 'I', 'N', 'F', 'O', ':',
685
+ 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[',
686
+ COMPILER_VERSION_MAJOR,
687
+ # ifdef COMPILER_VERSION_MINOR
688
+ '.', COMPILER_VERSION_MINOR,
689
+ # ifdef COMPILER_VERSION_PATCH
690
+ '.', COMPILER_VERSION_PATCH,
691
+ # ifdef COMPILER_VERSION_TWEAK
692
+ '.', COMPILER_VERSION_TWEAK,
693
+ # endif
694
+ # endif
695
+ # endif
696
+ ']','\0'};
697
+ #endif
698
+
699
+ /* Construct a string literal encoding the internal version number. */
700
+ #ifdef COMPILER_VERSION_INTERNAL
701
+ char const info_version_internal[] = {
702
+ 'I', 'N', 'F', 'O', ':',
703
+ 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_',
704
+ 'i','n','t','e','r','n','a','l','[',
705
+ COMPILER_VERSION_INTERNAL,']','\0'};
706
+ #elif defined(COMPILER_VERSION_INTERNAL_STR)
707
+ char const* info_version_internal = "INFO" ":" "compiler_version_internal[" COMPILER_VERSION_INTERNAL_STR "]";
708
+ #endif
709
+
710
+ /* Construct a string literal encoding the version number components. */
711
+ #ifdef SIMULATE_VERSION_MAJOR
712
+ char const info_simulate_version[] = {
713
+ 'I', 'N', 'F', 'O', ':',
714
+ 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[',
715
+ SIMULATE_VERSION_MAJOR,
716
+ # ifdef SIMULATE_VERSION_MINOR
717
+ '.', SIMULATE_VERSION_MINOR,
718
+ # ifdef SIMULATE_VERSION_PATCH
719
+ '.', SIMULATE_VERSION_PATCH,
720
+ # ifdef SIMULATE_VERSION_TWEAK
721
+ '.', SIMULATE_VERSION_TWEAK,
722
+ # endif
723
+ # endif
724
+ # endif
725
+ ']','\0'};
726
+ #endif
727
+
728
+ /* Construct the string literal in pieces to prevent the source from
729
+ getting matched. Store it in a pointer rather than an array
730
+ because some compilers will just produce instructions to fill the
731
+ array rather than assigning a pointer to a static array. */
732
+ char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]";
733
+ char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]";
734
+
735
+
736
+
737
+ #if !defined(__STDC__) && !defined(__clang__)
738
+ # if defined(_MSC_VER) || defined(__ibmxl__) || defined(__IBMC__)
739
+ # define C_VERSION "90"
740
+ # else
741
+ # define C_VERSION
742
+ # endif
743
+ #elif __STDC_VERSION__ > 201710L
744
+ # define C_VERSION "23"
745
+ #elif __STDC_VERSION__ >= 201710L
746
+ # define C_VERSION "17"
747
+ #elif __STDC_VERSION__ >= 201000L
748
+ # define C_VERSION "11"
749
+ #elif __STDC_VERSION__ >= 199901L
750
+ # define C_VERSION "99"
751
+ #else
752
+ # define C_VERSION "90"
753
+ #endif
754
+ const char* info_language_standard_default =
755
+ "INFO" ":" "standard_default[" C_VERSION "]";
756
+
757
+ const char* info_language_extensions_default = "INFO" ":" "extensions_default["
758
+ /* !defined(_MSC_VER) to exclude Clang's MSVC compatibility mode. */
759
+ #if (defined(__clang__) || defined(__GNUC__) || \
760
+ defined(__TI_COMPILER_VERSION__)) && \
761
+ !defined(__STRICT_ANSI__) && !defined(_MSC_VER)
762
+ "ON"
763
+ #else
764
+ "OFF"
765
+ #endif
766
+ "]";
767
+
768
+ /*--------------------------------------------------------------------------*/
769
+
770
+ #ifdef ID_VOID_MAIN
771
+ void main() {}
772
+ #else
773
+ # if defined(__CLASSIC_C__)
774
+ int main(argc, argv) int argc; char *argv[];
775
+ # else
776
+ int main(int argc, char* argv[])
777
+ # endif
778
+ {
779
+ int require = 0;
780
+ require += info_compiler[argc];
781
+ require += info_platform[argc];
782
+ require += info_arch[argc];
783
+ #ifdef COMPILER_VERSION_MAJOR
784
+ require += info_version[argc];
785
+ #endif
786
+ #ifdef COMPILER_VERSION_INTERNAL
787
+ require += info_version_internal[argc];
788
+ #endif
789
+ #ifdef SIMULATE_ID
790
+ require += info_simulate[argc];
791
+ #endif
792
+ #ifdef SIMULATE_VERSION_MAJOR
793
+ require += info_simulate_version[argc];
794
+ #endif
795
+ #if defined(__CRAYXT_COMPUTE_LINUX_TARGET)
796
+ require += info_cray[argc];
797
+ #endif
798
+ require += info_language_standard_default[argc];
799
+ require += info_language_extensions_default[argc];
800
+ (void)argv;
801
+ return require;
802
+ }
803
+ #endif
cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdC/a.out ADDED
Binary file (16.1 kB).
 
cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdCXX/CMakeCXXCompilerId.cpp ADDED
@@ -0,0 +1,791 @@
1
+ /* This source file must have a .cpp extension so that all C++ compilers
2
+ recognize the extension without flags. Borland does not know .cxx for
3
+ example. */
4
+ #ifndef __cplusplus
5
+ # error "A C compiler has been selected for C++."
6
+ #endif
7
+
8
+ #if !defined(__has_include)
9
+ /* If the compiler does not have __has_include, pretend the answer is
10
+ always no. */
11
+ # define __has_include(x) 0
12
+ #endif
13
+
14
+
15
+ /* Version number components: V=Version, R=Revision, P=Patch
16
+ Version date components: YYYY=Year, MM=Month, DD=Day */
17
+
18
+ #if defined(__COMO__)
19
+ # define COMPILER_ID "Comeau"
20
+ /* __COMO_VERSION__ = VRR */
21
+ # define COMPILER_VERSION_MAJOR DEC(__COMO_VERSION__ / 100)
22
+ # define COMPILER_VERSION_MINOR DEC(__COMO_VERSION__ % 100)
23
+
24
+ #elif defined(__INTEL_COMPILER) || defined(__ICC)
25
+ # define COMPILER_ID "Intel"
26
+ # if defined(_MSC_VER)
27
+ # define SIMULATE_ID "MSVC"
28
+ # endif
29
+ # if defined(__GNUC__)
30
+ # define SIMULATE_ID "GNU"
31
+ # endif
32
+ /* __INTEL_COMPILER = VRP prior to 2021, and then VVVV for 2021 and later,
33
+ except that a few beta releases use the old format with V=2021. */
34
+ # if __INTEL_COMPILER < 2021 || __INTEL_COMPILER == 202110 || __INTEL_COMPILER == 202111
35
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100)
36
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10)
37
+ # if defined(__INTEL_COMPILER_UPDATE)
38
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE)
39
+ # else
40
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10)
41
+ # endif
42
+ # else
43
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER)
44
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER_UPDATE)
45
+ /* The third version component from --version is an update index,
46
+ but no macro is provided for it. */
47
+ # define COMPILER_VERSION_PATCH DEC(0)
48
+ # endif
49
+ # if defined(__INTEL_COMPILER_BUILD_DATE)
50
+ /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */
51
+ # define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE)
52
+ # endif
53
+ # if defined(_MSC_VER)
54
+ /* _MSC_VER = VVRR */
55
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
56
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
57
+ # endif
58
+ # if defined(__GNUC__)
59
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUC__)
60
+ # elif defined(__GNUG__)
61
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUG__)
62
+ # endif
63
+ # if defined(__GNUC_MINOR__)
64
+ # define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__)
65
+ # endif
66
+ # if defined(__GNUC_PATCHLEVEL__)
67
+ # define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__)
68
+ # endif
69
+
70
+ #elif (defined(__clang__) && defined(__INTEL_CLANG_COMPILER)) || defined(__INTEL_LLVM_COMPILER)
71
+ # define COMPILER_ID "IntelLLVM"
72
+ #if defined(_MSC_VER)
73
+ # define SIMULATE_ID "MSVC"
74
+ #endif
75
+ #if defined(__GNUC__)
76
+ # define SIMULATE_ID "GNU"
77
+ #endif
78
+ /* __INTEL_LLVM_COMPILER = VVVVRP prior to 2021.2.0, VVVVRRPP for 2021.2.0 and
79
+ * later. Look for 6 digit vs. 8 digit version number to decide encoding.
80
+ * VVVV is no smaller than the current year when a version is released.
81
+ */
82
+ #if __INTEL_LLVM_COMPILER < 1000000L
83
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/100)
84
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/10 % 10)
85
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 10)
86
+ #else
87
+ # define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/10000)
88
+ # define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/100 % 100)
89
+ # define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 100)
90
+ #endif
91
+ #if defined(_MSC_VER)
92
+ /* _MSC_VER = VVRR */
93
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
94
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
95
+ #endif
96
+ #if defined(__GNUC__)
97
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUC__)
98
+ #elif defined(__GNUG__)
99
+ # define SIMULATE_VERSION_MAJOR DEC(__GNUG__)
100
+ #endif
101
+ #if defined(__GNUC_MINOR__)
102
+ # define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__)
103
+ #endif
104
+ #if defined(__GNUC_PATCHLEVEL__)
105
+ # define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__)
106
+ #endif
107
+
108
+ #elif defined(__PATHCC__)
109
+ # define COMPILER_ID "PathScale"
110
+ # define COMPILER_VERSION_MAJOR DEC(__PATHCC__)
111
+ # define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__)
112
+ # if defined(__PATHCC_PATCHLEVEL__)
113
+ # define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__)
114
+ # endif
115
+
116
+ #elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__)
117
+ # define COMPILER_ID "Embarcadero"
118
+ # define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF)
119
+ # define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF)
120
+ # define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF)
121
+
122
+ #elif defined(__BORLANDC__)
123
+ # define COMPILER_ID "Borland"
124
+ /* __BORLANDC__ = 0xVRR */
125
+ # define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8)
126
+ # define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF)
127
+
128
+ #elif defined(__WATCOMC__) && __WATCOMC__ < 1200
129
+ # define COMPILER_ID "Watcom"
130
+ /* __WATCOMC__ = VVRR */
131
+ # define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100)
132
+ # define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10)
133
+ # if (__WATCOMC__ % 10) > 0
134
+ # define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10)
135
+ # endif
136
+
137
+ #elif defined(__WATCOMC__)
138
+ # define COMPILER_ID "OpenWatcom"
139
+ /* __WATCOMC__ = VVRP + 1100 */
140
+ # define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100)
141
+ # define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10)
142
+ # if (__WATCOMC__ % 10) > 0
143
+ # define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10)
144
+ # endif
145
+
146
+ #elif defined(__SUNPRO_CC)
147
+ # define COMPILER_ID "SunPro"
148
+ # if __SUNPRO_CC >= 0x5100
149
+ /* __SUNPRO_CC = 0xVRRP */
150
+ # define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>12)
151
+ # define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xFF)
152
+ # define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF)
153
+ # else
154
+ /* __SUNPRO_CC = 0xVRP */
155
+ # define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>8)
156
+ # define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xF)
157
+ # define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF)
158
+ # endif
159
+
160
+ #elif defined(__HP_aCC)
161
+ # define COMPILER_ID "HP"
162
+ /* __HP_aCC = VVRRPP */
163
+ # define COMPILER_VERSION_MAJOR DEC(__HP_aCC/10000)
164
+ # define COMPILER_VERSION_MINOR DEC(__HP_aCC/100 % 100)
165
+ # define COMPILER_VERSION_PATCH DEC(__HP_aCC % 100)
166
+
167
+ #elif defined(__DECCXX)
168
+ # define COMPILER_ID "Compaq"
169
+ /* __DECCXX_VER = VVRRTPPPP */
170
+ # define COMPILER_VERSION_MAJOR DEC(__DECCXX_VER/10000000)
171
+ # define COMPILER_VERSION_MINOR DEC(__DECCXX_VER/100000 % 100)
172
+ # define COMPILER_VERSION_PATCH DEC(__DECCXX_VER % 10000)
173
+
174
+ #elif defined(__IBMCPP__) && defined(__COMPILER_VER__)
175
+ # define COMPILER_ID "zOS"
176
+ /* __IBMCPP__ = VRP */
177
+ # define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100)
178
+ # define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10)
179
+ # define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10)
180
+
181
+ #elif defined(__ibmxl__) && defined(__clang__)
182
+ # define COMPILER_ID "XLClang"
183
+ # define COMPILER_VERSION_MAJOR DEC(__ibmxl_version__)
184
+ # define COMPILER_VERSION_MINOR DEC(__ibmxl_release__)
185
+ # define COMPILER_VERSION_PATCH DEC(__ibmxl_modification__)
186
+ # define COMPILER_VERSION_TWEAK DEC(__ibmxl_ptf_fix_level__)
187
+
188
+
189
+ #elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ >= 800
190
+ # define COMPILER_ID "XL"
191
+ /* __IBMCPP__ = VRP */
192
+ # define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100)
193
+ # define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10)
194
+ # define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10)
195
+
196
+ #elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ < 800
197
+ # define COMPILER_ID "VisualAge"
198
+ /* __IBMCPP__ = VRP */
199
+ # define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100)
200
+ # define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10)
201
+ # define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10)
202
+
203
+ #elif defined(__NVCOMPILER)
204
+ # define COMPILER_ID "NVHPC"
205
+ # define COMPILER_VERSION_MAJOR DEC(__NVCOMPILER_MAJOR__)
206
+ # define COMPILER_VERSION_MINOR DEC(__NVCOMPILER_MINOR__)
207
+ # if defined(__NVCOMPILER_PATCHLEVEL__)
208
+ # define COMPILER_VERSION_PATCH DEC(__NVCOMPILER_PATCHLEVEL__)
209
+ # endif
210
+
211
+ #elif defined(__PGI)
212
+ # define COMPILER_ID "PGI"
213
+ # define COMPILER_VERSION_MAJOR DEC(__PGIC__)
214
+ # define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__)
215
+ # if defined(__PGIC_PATCHLEVEL__)
216
+ # define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__)
217
+ # endif
218
+
219
+ #elif defined(_CRAYC)
220
+ # define COMPILER_ID "Cray"
221
+ # define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR)
222
+ # define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR)
223
+
224
+ #elif defined(__TI_COMPILER_VERSION__)
225
+ # define COMPILER_ID "TI"
226
+ /* __TI_COMPILER_VERSION__ = VVVRRRPPP */
227
+ # define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000)
228
+ # define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 1000)
229
+ # define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000)
230
+
231
+ #elif defined(__CLANG_FUJITSU)
232
+ # define COMPILER_ID "FujitsuClang"
233
+ # define COMPILER_VERSION_MAJOR DEC(__FCC_major__)
234
+ # define COMPILER_VERSION_MINOR DEC(__FCC_minor__)
235
+ # define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__)
236
+ # define COMPILER_VERSION_INTERNAL_STR __clang_version__
237
+
238
+
239
+ #elif defined(__FUJITSU)
240
+ # define COMPILER_ID "Fujitsu"
241
+ # if defined(__FCC_version__)
242
+ # define COMPILER_VERSION __FCC_version__
243
+ # elif defined(__FCC_major__)
244
+ # define COMPILER_VERSION_MAJOR DEC(__FCC_major__)
245
+ # define COMPILER_VERSION_MINOR DEC(__FCC_minor__)
246
+ # define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__)
247
+ # endif
248
+ # if defined(__fcc_version)
249
+ # define COMPILER_VERSION_INTERNAL DEC(__fcc_version)
250
+ # elif defined(__FCC_VERSION)
251
+ # define COMPILER_VERSION_INTERNAL DEC(__FCC_VERSION)
252
+ # endif
253
+
254
+
255
+ #elif defined(__ghs__)
256
+ # define COMPILER_ID "GHS"
257
+ /* __GHS_VERSION_NUMBER = VVVVRP */
258
+ # ifdef __GHS_VERSION_NUMBER
259
+ # define COMPILER_VERSION_MAJOR DEC(__GHS_VERSION_NUMBER / 100)
260
+ # define COMPILER_VERSION_MINOR DEC(__GHS_VERSION_NUMBER / 10 % 10)
261
+ # define COMPILER_VERSION_PATCH DEC(__GHS_VERSION_NUMBER % 10)
262
+ # endif
263
+
264
+ #elif defined(__SCO_VERSION__)
265
+ # define COMPILER_ID "SCO"
266
+
267
+ #elif defined(__ARMCC_VERSION) && !defined(__clang__)
268
+ # define COMPILER_ID "ARMCC"
269
+ #if __ARMCC_VERSION >= 1000000
270
+ /* __ARMCC_VERSION = VRRPPPP */
271
+ # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000)
272
+ # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 100)
273
+ # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000)
274
+ #else
275
+ /* __ARMCC_VERSION = VRPPPP */
276
+ # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000)
277
+ # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10)
278
+ # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000)
279
+ #endif
280
+
281
+
282
+ #elif defined(__clang__) && defined(__apple_build_version__)
283
+ # define COMPILER_ID "AppleClang"
284
+ # if defined(_MSC_VER)
285
+ # define SIMULATE_ID "MSVC"
286
+ # endif
287
+ # define COMPILER_VERSION_MAJOR DEC(__clang_major__)
288
+ # define COMPILER_VERSION_MINOR DEC(__clang_minor__)
289
+ # define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__)
290
+ # if defined(_MSC_VER)
291
+ /* _MSC_VER = VVRR */
292
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
293
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
294
+ # endif
295
+ # define COMPILER_VERSION_TWEAK DEC(__apple_build_version__)
296
+
297
+ #elif defined(__clang__) && defined(__ARMCOMPILER_VERSION)
298
+ # define COMPILER_ID "ARMClang"
299
+ # define COMPILER_VERSION_MAJOR DEC(__ARMCOMPILER_VERSION/1000000)
300
+ # define COMPILER_VERSION_MINOR DEC(__ARMCOMPILER_VERSION/10000 % 100)
301
+ # define COMPILER_VERSION_PATCH DEC(__ARMCOMPILER_VERSION % 10000)
302
+ # define COMPILER_VERSION_INTERNAL DEC(__ARMCOMPILER_VERSION)
303
+
304
+ #elif defined(__clang__)
305
+ # define COMPILER_ID "Clang"
306
+ # if defined(_MSC_VER)
307
+ # define SIMULATE_ID "MSVC"
308
+ # endif
309
+ # define COMPILER_VERSION_MAJOR DEC(__clang_major__)
310
+ # define COMPILER_VERSION_MINOR DEC(__clang_minor__)
311
+ # define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__)
312
+ # if defined(_MSC_VER)
313
+ /* _MSC_VER = VVRR */
314
+ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100)
315
+ # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100)
316
+ # endif
317
+
318
+ #elif defined(__GNUC__) || defined(__GNUG__)
319
+ # define COMPILER_ID "GNU"
320
+ # if defined(__GNUC__)
321
+ # define COMPILER_VERSION_MAJOR DEC(__GNUC__)
322
+ # else
323
+ # define COMPILER_VERSION_MAJOR DEC(__GNUG__)
324
+ # endif
325
+ # if defined(__GNUC_MINOR__)
326
+ # define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__)
327
+ # endif
328
+ # if defined(__GNUC_PATCHLEVEL__)
329
+ # define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__)
330
+ # endif
331
+
332
+ #elif defined(_MSC_VER)
333
+ # define COMPILER_ID "MSVC"
334
+ /* _MSC_VER = VVRR */
335
+ # define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100)
336
+ # define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100)
337
+ # if defined(_MSC_FULL_VER)
338
+ # if _MSC_VER >= 1400
339
+ /* _MSC_FULL_VER = VVRRPPPPP */
340
+ # define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000)
341
+ # else
342
+ /* _MSC_FULL_VER = VVRRPPPP */
343
+ # define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000)
344
+ # endif
345
+ # endif
346
+ # if defined(_MSC_BUILD)
347
+ # define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD)
348
+ # endif
349
+
350
+ #elif defined(__VISUALDSPVERSION__) || defined(__ADSPBLACKFIN__) || defined(__ADSPTS__) || defined(__ADSP21000__)
351
+ # define COMPILER_ID "ADSP"
352
+ #if defined(__VISUALDSPVERSION__)
353
+ /* __VISUALDSPVERSION__ = 0xVVRRPP00 */
354
+ # define COMPILER_VERSION_MAJOR HEX(__VISUALDSPVERSION__>>24)
355
+ # define COMPILER_VERSION_MINOR HEX(__VISUALDSPVERSION__>>16 & 0xFF)
356
+ # define COMPILER_VERSION_PATCH HEX(__VISUALDSPVERSION__>>8 & 0xFF)
357
+ #endif
358
+
359
+ #elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC)
360
+ # define COMPILER_ID "IAR"
361
+ # if defined(__VER__) && defined(__ICCARM__)
362
+ # define COMPILER_VERSION_MAJOR DEC((__VER__) / 1000000)
363
+ # define COMPILER_VERSION_MINOR DEC(((__VER__) / 1000) % 1000)
364
+ # define COMPILER_VERSION_PATCH DEC((__VER__) % 1000)
365
+ # define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__)
366
+ # elif defined(__VER__) && (defined(__ICCAVR__) || defined(__ICCRX__) || defined(__ICCRH850__) || defined(__ICCRL78__) || defined(__ICC430__) || defined(__ICCRISCV__) || defined(__ICCV850__) || defined(__ICC8051__) || defined(__ICCSTM8__))
367
+ # define COMPILER_VERSION_MAJOR DEC((__VER__) / 100)
368
+ # define COMPILER_VERSION_MINOR DEC((__VER__) - (((__VER__) / 100)*100))
369
+ # define COMPILER_VERSION_PATCH DEC(__SUBVERSION__)
370
+ # define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__)
371
+ # endif
372
+
373
+
374
+ /* These compilers are either not known or too old to define an
375
+ identification macro. Try to identify the platform and guess that
376
+ it is the native compiler. */
377
+ #elif defined(__hpux) || defined(__hpua)
378
+ # define COMPILER_ID "HP"
379
+
380
+ #else /* unknown compiler */
381
+ # define COMPILER_ID ""
382
+ #endif
383
+
384
+ /* Construct the string literal in pieces to prevent the source from
385
+ getting matched. Store it in a pointer rather than an array
386
+ because some compilers will just produce instructions to fill the
387
+ array rather than assigning a pointer to a static array. */
388
+ char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]";
389
+ #ifdef SIMULATE_ID
390
+ char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]";
391
+ #endif
392
+
393
+ #ifdef __QNXNTO__
394
+ char const* qnxnto = "INFO" ":" "qnxnto[]";
395
+ #endif
396
+
397
+ #if defined(__CRAYXT_COMPUTE_LINUX_TARGET)
398
+ char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]";
399
+ #endif
400
+
401
+ #define STRINGIFY_HELPER(X) #X
402
+ #define STRINGIFY(X) STRINGIFY_HELPER(X)
403
+
404
+ /* Identify known platforms by name. */
405
+ #if defined(__linux) || defined(__linux__) || defined(linux)
406
+ # define PLATFORM_ID "Linux"
407
+
408
+ #elif defined(__MSYS__)
409
+ # define PLATFORM_ID "MSYS"
410
+
411
+ #elif defined(__CYGWIN__)
412
+ # define PLATFORM_ID "Cygwin"
413
+
414
+ #elif defined(__MINGW32__)
415
+ # define PLATFORM_ID "MinGW"
416
+
417
+ #elif defined(__APPLE__)
418
+ # define PLATFORM_ID "Darwin"
419
+
420
+ #elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
421
+ # define PLATFORM_ID "Windows"
422
+
423
+ #elif defined(__FreeBSD__) || defined(__FreeBSD)
424
+ # define PLATFORM_ID "FreeBSD"
425
+
426
+ #elif defined(__NetBSD__) || defined(__NetBSD)
427
+ # define PLATFORM_ID "NetBSD"
428
+
429
+ #elif defined(__OpenBSD__) || defined(__OPENBSD)
430
+ # define PLATFORM_ID "OpenBSD"
431
+
432
+ #elif defined(__sun) || defined(sun)
433
+ # define PLATFORM_ID "SunOS"
434
+
435
+ #elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__)
436
+ # define PLATFORM_ID "AIX"
437
+
438
+ #elif defined(__hpux) || defined(__hpux__)
439
+ # define PLATFORM_ID "HP-UX"
440
+
441
+ #elif defined(__HAIKU__)
442
+ # define PLATFORM_ID "Haiku"
443
+
444
+ #elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS)
445
+ # define PLATFORM_ID "BeOS"
446
+
447
+ #elif defined(__QNX__) || defined(__QNXNTO__)
448
+ # define PLATFORM_ID "QNX"
449
+
450
+ #elif defined(__tru64) || defined(_tru64) || defined(__TRU64__)
451
+ # define PLATFORM_ID "Tru64"
452
+
453
+ #elif defined(__riscos) || defined(__riscos__)
454
+ # define PLATFORM_ID "RISCos"
455
+
456
+ #elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__)
457
+ # define PLATFORM_ID "SINIX"
458
+
459
+ #elif defined(__UNIX_SV__)
460
+ # define PLATFORM_ID "UNIX_SV"
461
+
462
+ #elif defined(__bsdos__)
463
+ # define PLATFORM_ID "BSDOS"
464
+
465
+ #elif defined(_MPRAS) || defined(MPRAS)
466
+ # define PLATFORM_ID "MP-RAS"
467
+
468
+ #elif defined(__osf) || defined(__osf__)
469
+ # define PLATFORM_ID "OSF1"
470
+
471
+ #elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv)
472
+ # define PLATFORM_ID "SCO_SV"
473
+
474
+ #elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX)
475
+ # define PLATFORM_ID "ULTRIX"
476
+
477
+ #elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX)
478
+ # define PLATFORM_ID "Xenix"
479
+
480
+ #elif defined(__WATCOMC__)
481
+ # if defined(__LINUX__)
482
+ # define PLATFORM_ID "Linux"
483
+
484
+ # elif defined(__DOS__)
485
+ # define PLATFORM_ID "DOS"
486
+
487
+ # elif defined(__OS2__)
488
+ # define PLATFORM_ID "OS2"
489
+
490
+ # elif defined(__WINDOWS__)
491
+ # define PLATFORM_ID "Windows3x"
492
+
493
+ # elif defined(__VXWORKS__)
494
+ # define PLATFORM_ID "VxWorks"
495
+
496
+ # else /* unknown platform */
497
+ # define PLATFORM_ID
498
+ # endif
499
+
500
+ #elif defined(__INTEGRITY)
501
+ # if defined(INT_178B)
502
+ # define PLATFORM_ID "Integrity178"
503
+
504
+ # else /* regular Integrity */
505
+ # define PLATFORM_ID "Integrity"
506
+ # endif
507
+
508
+ #else /* unknown platform */
509
+ # define PLATFORM_ID
510
+
511
+ #endif
512
+
513
+ /* For windows compilers MSVC and Intel we can determine
514
+ the architecture of the compiler being used. This is because
515
+ the compilers do not have flags that can change the architecture,
516
+ but rather depend on which compiler is being used
517
+ */
518
+ #if defined(_WIN32) && defined(_MSC_VER)
519
+ # if defined(_M_IA64)
520
+ # define ARCHITECTURE_ID "IA64"
521
+
522
+ # elif defined(_M_ARM64EC)
523
+ # define ARCHITECTURE_ID "ARM64EC"
524
+
525
+ # elif defined(_M_X64) || defined(_M_AMD64)
526
+ # define ARCHITECTURE_ID "x64"
527
+
528
+ # elif defined(_M_IX86)
529
+ # define ARCHITECTURE_ID "X86"
530
+
531
+ # elif defined(_M_ARM64)
532
+ # define ARCHITECTURE_ID "ARM64"
533
+
534
+ # elif defined(_M_ARM)
535
+ # if _M_ARM == 4
536
+ # define ARCHITECTURE_ID "ARMV4I"
537
+ # elif _M_ARM == 5
538
+ # define ARCHITECTURE_ID "ARMV5I"
539
+ # else
540
+ # define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM)
541
+ # endif
542
+
543
+ # elif defined(_M_MIPS)
544
+ # define ARCHITECTURE_ID "MIPS"
545
+
546
+ # elif defined(_M_SH)
547
+ # define ARCHITECTURE_ID "SHx"
548
+
549
+ # else /* unknown architecture */
550
+ # define ARCHITECTURE_ID ""
551
+ # endif
552
+
553
+ #elif defined(__WATCOMC__)
554
+ # if defined(_M_I86)
555
+ # define ARCHITECTURE_ID "I86"
556
+
557
+ # elif defined(_M_IX86)
558
+ # define ARCHITECTURE_ID "X86"
559
+
560
+ # else /* unknown architecture */
561
+ # define ARCHITECTURE_ID ""
562
+ # endif
563
+
564
+ #elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC)
565
+ # if defined(__ICCARM__)
566
+ # define ARCHITECTURE_ID "ARM"
567
+
568
+ # elif defined(__ICCRX__)
569
+ # define ARCHITECTURE_ID "RX"
570
+
571
+ # elif defined(__ICCRH850__)
572
+ # define ARCHITECTURE_ID "RH850"
573
+
574
+ # elif defined(__ICCRL78__)
575
+ # define ARCHITECTURE_ID "RL78"
576
+
577
+ # elif defined(__ICCRISCV__)
578
+ # define ARCHITECTURE_ID "RISCV"
579
+
580
+ # elif defined(__ICCAVR__)
581
+ # define ARCHITECTURE_ID "AVR"
582
+
583
+ # elif defined(__ICC430__)
584
+ # define ARCHITECTURE_ID "MSP430"
585
+
586
+ # elif defined(__ICCV850__)
587
+ # define ARCHITECTURE_ID "V850"
588
+
589
+ # elif defined(__ICC8051__)
590
+ # define ARCHITECTURE_ID "8051"
591
+
592
+ # elif defined(__ICCSTM8__)
593
+ # define ARCHITECTURE_ID "STM8"
594
+
595
+ # else /* unknown architecture */
596
+ # define ARCHITECTURE_ID ""
597
+ # endif
598
+
599
+ #elif defined(__ghs__)
600
+ # if defined(__PPC64__)
601
+ # define ARCHITECTURE_ID "PPC64"
602
+
603
+ # elif defined(__ppc__)
604
+ # define ARCHITECTURE_ID "PPC"
605
+
606
+ # elif defined(__ARM__)
607
+ # define ARCHITECTURE_ID "ARM"
608
+
609
+ # elif defined(__x86_64__)
610
+ # define ARCHITECTURE_ID "x64"
611
+
612
+ # elif defined(__i386__)
613
+ # define ARCHITECTURE_ID "X86"
614
+
615
+ # else /* unknown architecture */
616
+ # define ARCHITECTURE_ID ""
617
+ # endif
618
+
619
+ #elif defined(__TI_COMPILER_VERSION__)
620
+ # if defined(__TI_ARM__)
621
+ # define ARCHITECTURE_ID "ARM"
622
+
623
+ # elif defined(__MSP430__)
624
+ # define ARCHITECTURE_ID "MSP430"
625
+
626
+ # elif defined(__TMS320C28XX__)
627
+ # define ARCHITECTURE_ID "TMS320C28x"
628
+
629
+ # elif defined(__TMS320C6X__) || defined(_TMS320C6X)
630
+ # define ARCHITECTURE_ID "TMS320C6x"
631
+
632
+ # else /* unknown architecture */
633
+ # define ARCHITECTURE_ID ""
634
+ # endif
635
+
636
+ #else
637
+ # define ARCHITECTURE_ID
638
+ #endif
639
+
640
+ /* Convert integer to decimal digit literals. */
641
+ #define DEC(n) \
642
+ ('0' + (((n) / 10000000)%10)), \
643
+ ('0' + (((n) / 1000000)%10)), \
644
+ ('0' + (((n) / 100000)%10)), \
645
+ ('0' + (((n) / 10000)%10)), \
646
+ ('0' + (((n) / 1000)%10)), \
647
+ ('0' + (((n) / 100)%10)), \
648
+ ('0' + (((n) / 10)%10)), \
649
+ ('0' + ((n) % 10))
650
+
651
+ /* Convert integer to hex digit literals. */
652
+ #define HEX(n) \
653
+ ('0' + ((n)>>28 & 0xF)), \
654
+ ('0' + ((n)>>24 & 0xF)), \
655
+ ('0' + ((n)>>20 & 0xF)), \
656
+ ('0' + ((n)>>16 & 0xF)), \
657
+ ('0' + ((n)>>12 & 0xF)), \
658
+ ('0' + ((n)>>8 & 0xF)), \
659
+ ('0' + ((n)>>4 & 0xF)), \
660
+ ('0' + ((n) & 0xF))
661
+
662
+ /* Construct a string literal encoding the version number. */
663
+ #ifdef COMPILER_VERSION
664
+ char const* info_version = "INFO" ":" "compiler_version[" COMPILER_VERSION "]";
665
+
666
+ /* Construct a string literal encoding the version number components. */
667
+ #elif defined(COMPILER_VERSION_MAJOR)
668
+ char const info_version[] = {
669
+ 'I', 'N', 'F', 'O', ':',
670
+ 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[',
671
+ COMPILER_VERSION_MAJOR,
672
+ # ifdef COMPILER_VERSION_MINOR
673
+ '.', COMPILER_VERSION_MINOR,
674
+ # ifdef COMPILER_VERSION_PATCH
675
+ '.', COMPILER_VERSION_PATCH,
676
+ # ifdef COMPILER_VERSION_TWEAK
677
+ '.', COMPILER_VERSION_TWEAK,
678
+ # endif
679
+ # endif
680
+ # endif
681
+ ']','\0'};
682
+ #endif
683
+
684
+ /* Construct a string literal encoding the internal version number. */
685
+ #ifdef COMPILER_VERSION_INTERNAL
686
+ char const info_version_internal[] = {
687
+ 'I', 'N', 'F', 'O', ':',
688
+ 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_',
689
+ 'i','n','t','e','r','n','a','l','[',
690
+ COMPILER_VERSION_INTERNAL,']','\0'};
691
+ #elif defined(COMPILER_VERSION_INTERNAL_STR)
692
+ char const* info_version_internal = "INFO" ":" "compiler_version_internal[" COMPILER_VERSION_INTERNAL_STR "]";
693
+ #endif
694
+
695
+ /* Construct a string literal encoding the version number components. */
696
+ #ifdef SIMULATE_VERSION_MAJOR
697
+ char const info_simulate_version[] = {
698
+ 'I', 'N', 'F', 'O', ':',
699
+ 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[',
700
+ SIMULATE_VERSION_MAJOR,
701
+ # ifdef SIMULATE_VERSION_MINOR
702
+ '.', SIMULATE_VERSION_MINOR,
703
+ # ifdef SIMULATE_VERSION_PATCH
704
+ '.', SIMULATE_VERSION_PATCH,
705
+ # ifdef SIMULATE_VERSION_TWEAK
706
+ '.', SIMULATE_VERSION_TWEAK,
707
+ # endif
708
+ # endif
709
+ # endif
710
+ ']','\0'};
711
+ #endif
712
+
713
+ /* Construct the string literal in pieces to prevent the source from
714
+ getting matched. Store it in a pointer rather than an array
715
+ because some compilers will just produce instructions to fill the
716
+ array rather than assigning a pointer to a static array. */
717
+ char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]";
718
+ char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]";
719
+
720
+
721
+
722
+ #if defined(__INTEL_COMPILER) && defined(_MSVC_LANG) && _MSVC_LANG < 201403L
723
+ # if defined(__INTEL_CXX11_MODE__)
724
+ # if defined(__cpp_aggregate_nsdmi)
725
+ # define CXX_STD 201402L
726
+ # else
727
+ # define CXX_STD 201103L
728
+ # endif
729
+ # else
730
+ # define CXX_STD 199711L
731
+ # endif
732
+ #elif defined(_MSC_VER) && defined(_MSVC_LANG)
733
+ # define CXX_STD _MSVC_LANG
734
+ #else
735
+ # define CXX_STD __cplusplus
736
+ #endif
737
+
738
+ const char* info_language_standard_default = "INFO" ":" "standard_default["
739
+ #if CXX_STD > 202002L
740
+ "23"
741
+ #elif CXX_STD > 201703L
742
+ "20"
743
+ #elif CXX_STD >= 201703L
744
+ "17"
745
+ #elif CXX_STD >= 201402L
746
+ "14"
747
+ #elif CXX_STD >= 201103L
748
+ "11"
749
+ #else
750
+ "98"
751
+ #endif
752
+ "]";
753
+
754
+ const char* info_language_extensions_default = "INFO" ":" "extensions_default["
755
+ /* !defined(_MSC_VER) to exclude Clang's MSVC compatibility mode. */
756
+ #if (defined(__clang__) || defined(__GNUC__) || \
757
+ defined(__TI_COMPILER_VERSION__)) && \
758
+ !defined(__STRICT_ANSI__) && !defined(_MSC_VER)
759
+ "ON"
760
+ #else
761
+ "OFF"
762
+ #endif
763
+ "]";
764
+
765
+ /*--------------------------------------------------------------------------*/
766
+
767
+ int main(int argc, char* argv[])
768
+ {
769
+ int require = 0;
770
+ require += info_compiler[argc];
771
+ require += info_platform[argc];
772
+ #ifdef COMPILER_VERSION_MAJOR
773
+ require += info_version[argc];
774
+ #endif
775
+ #ifdef COMPILER_VERSION_INTERNAL
776
+ require += info_version_internal[argc];
777
+ #endif
778
+ #ifdef SIMULATE_ID
779
+ require += info_simulate[argc];
780
+ #endif
781
+ #ifdef SIMULATE_VERSION_MAJOR
782
+ require += info_simulate_version[argc];
783
+ #endif
784
+ #if defined(__CRAYXT_COMPUTE_LINUX_TARGET)
785
+ require += info_cray[argc];
786
+ #endif
787
+ require += info_language_standard_default[argc];
788
+ require += info_language_extensions_default[argc];
789
+ (void)argv;
790
+ return require;
791
+ }
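
(Note: the file above is CMake's generated compiler-identification source. CMake compiles it and then scans the resulting binary for the embedded "INFO:" strings; the DEC and HEX macros exist to turn version integers into the digit characters of those strings. Below is a minimal standalone sketch of that expansion, not part of the generated file; the value 1929 is an arbitrary stand-in for an _MSC_VER-style number.)

    // Illustrative only: how the DEC macro expands an integer into character
    // literals that end up as a searchable string in the compiled binary.
    #include <cstdio>

    #define DEC(n)                     \
      ('0' + (((n) / 10000000) % 10)), \
      ('0' + (((n) / 1000000) % 10)),  \
      ('0' + (((n) / 100000) % 10)),   \
      ('0' + (((n) / 10000) % 10)),    \
      ('0' + (((n) / 1000) % 10)),     \
      ('0' + (((n) / 100) % 10)),      \
      ('0' + (((n) / 10) % 10)),       \
      ('0' + ((n) % 10))

    static const char version_chars[] = {'I','N','F','O',':','v','[', DEC(1929 / 100), ']', '\0'};

    int main() {
      std::puts(version_chars);  // prints INFO:v[00000019]
      return 0;
    }
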
cc-multilingual-main/cc_net/third_party/kenlm/build/CMakeFiles/3.22.1/CompilerIdCXX/a.out ADDED
Binary file (16.1 kB).
 
cc-multilingual-main/cc_net/third_party/kenlm/lm/CMakeLists.txt ADDED
@@ -0,0 +1,81 @@
1
+ # Explicitly list the source files for this subdirectory
2
+ #
3
+ # If you add any source files to this subdirectory
4
+ # that should be included in the kenlm library,
5
+ # (this excludes any unit test files)
6
+ # you should add them to the following list:
7
+ set(KENLM_LM_SOURCE
8
+ bhiksha.cc
9
+ binary_format.cc
10
+ config.cc
11
+ lm_exception.cc
12
+ model.cc
13
+ quantize.cc
14
+ read_arpa.cc
15
+ search_hashed.cc
16
+ search_trie.cc
17
+ sizes.cc
18
+ trie.cc
19
+ trie_sort.cc
20
+ value_build.cc
21
+ virtual_interface.cc
22
+ vocab.cc
23
+ )
24
+
25
+
26
+ # Group these objects together for later use.
27
+ #
28
+ # Given add_library(foo OBJECT ${my_foo_sources}),
29
+ # refer to these objects as $<TARGET_OBJECTS:foo>
30
+ #
31
+ add_subdirectory(common)
32
+
33
+ add_library(kenlm ${KENLM_LM_SOURCE} ${KENLM_LM_COMMON_SOURCE})
34
+ set_target_properties(kenlm PROPERTIES POSITION_INDEPENDENT_CODE ON)
35
+ target_link_libraries(kenlm PUBLIC kenlm_util Threads::Threads)
36
+ # Since headers are relative to `include/kenlm` at install time, not just `include`
37
+ target_include_directories(kenlm PUBLIC $<INSTALL_INTERFACE:include/kenlm>)
38
+
39
+ set(KENLM_MAX_ORDER 6 CACHE STRING "Maximum supported ngram order")
40
+ target_compile_definitions(kenlm PUBLIC -DKENLM_MAX_ORDER=${KENLM_MAX_ORDER})
41
+
42
+ # This directory has children that need to be processed
43
+ add_subdirectory(builder)
44
+ add_subdirectory(filter)
45
+ add_subdirectory(interpolate)
46
+
47
+ # Explicitly list the executable files to be compiled
48
+ set(EXE_LIST
49
+ query
50
+ fragment
51
+ build_binary
52
+ kenlm_benchmark
53
+ )
54
+
55
+ set(LM_LIBS kenlm kenlm_util Threads::Threads)
56
+
57
+ install(
58
+ TARGETS kenlm
59
+ EXPORT kenlmTargets
60
+ RUNTIME DESTINATION bin
61
+ LIBRARY DESTINATION lib
62
+ ARCHIVE DESTINATION lib
63
+ INCLUDES DESTINATION include
64
+ )
65
+
66
+ AddExes(EXES ${EXE_LIST}
67
+ LIBRARIES ${LM_LIBS})
68
+
69
+ if(BUILD_TESTING)
70
+
71
+ set(KENLM_BOOST_TESTS_LIST left_test partial_test)
72
+ AddTests(TESTS ${KENLM_BOOST_TESTS_LIST}
73
+ LIBRARIES ${LM_LIBS}
74
+ TEST_ARGS ${CMAKE_CURRENT_SOURCE_DIR}/test.arpa)
75
+
76
+ # model_test requires an extra command line parameter
77
+ KenLMAddTest(TEST model_test
78
+ LIBRARIES ${LM_LIBS}
79
+ TEST_ARGS ${CMAKE_CURRENT_SOURCE_DIR}/test.arpa
80
+ ${CMAKE_CURRENT_SOURCE_DIR}/test_nounk.arpa)
81
+ endif()
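
(Note: the kenlm library target defined above is what a downstream decoder links against. As a rough illustration of the query pattern KenLM documents for that library, assuming the headers are reachable as lm/model.hh and that test.arpa is a placeholder model path:)

    // Sketch only; names follow KenLM's documented query example.
    #include "lm/model.hh"

    #include <initializer_list>
    #include <iostream>

    int main() {
      lm::ngram::Model model("test.arpa");          // placeholder model file
      lm::ngram::State state(model.BeginSentenceState()), out_state;
      const auto &vocab = model.GetVocabulary();
      float total = 0.0f;                           // accumulated log10 probability
      for (const char *word : {"this", "is", "a", "sentence"}) {
        total += model.Score(state, vocab.Index(word), out_state);
        state = out_state;
      }
      std::cout << "log10 p = " << total << std::endl;
      return 0;
    }
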
cc-multilingual-main/cc_net/third_party/kenlm/lm/bhiksha.cc ADDED
@@ -0,0 +1,94 @@
1
+ #include "bhiksha.hh"
2
+
3
+ #include "binary_format.hh"
4
+ #include "config.hh"
5
+ #include "../util/file.hh"
6
+ #include "../util/exception.hh"
7
+
8
+ #include <limits>
9
+
10
+ namespace lm {
11
+ namespace ngram {
12
+ namespace trie {
13
+
14
+ DontBhiksha::DontBhiksha(const void * /*base*/, uint64_t /*max_offset*/, uint64_t max_next, const Config &/*config*/) :
15
+ next_(util::BitsMask::ByMax(max_next)) {}
16
+
17
+ const uint8_t kArrayBhikshaVersion = 0;
18
+
19
+ // TODO: put this in binary file header instead when I change the binary file format again.
20
+ void ArrayBhiksha::UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config) {
21
+ uint8_t buffer[2];
22
+ file.ReadForConfig(buffer, 2, offset);
23
+ uint8_t version = buffer[0];
24
+ uint8_t configured_bits = buffer[1];
25
+ if (version != kArrayBhikshaVersion) UTIL_THROW(FormatLoadException, "This file has sorted array compression version " << (unsigned) version << " but the code expects version " << (unsigned)kArrayBhikshaVersion);
26
+ config.pointer_bhiksha_bits = configured_bits;
27
+ }
28
+
29
+ namespace {
30
+
31
+ // Find argmin_{chopped \in [0, RequiredBits(max_next)]} ChoppedDelta(max_offset)
32
+ uint8_t ChopBits(uint64_t max_offset, uint64_t max_next, const Config &config) {
33
+ uint8_t required = util::RequiredBits(max_next);
34
+ uint8_t best_chop = 0;
35
+ int64_t lowest_change = std::numeric_limits<int64_t>::max();
36
+ // There are probably faster ways but I don't care because this is only done once per order at construction time.
37
+ for (uint8_t chop = 0; chop <= std::min(required, config.pointer_bhiksha_bits); ++chop) {
38
+ int64_t change = (max_next >> (required - chop)) * 64 /* table cost in bits */
39
+ - max_offset * static_cast<int64_t>(chop); /* savings in bits*/
40
+ if (change < lowest_change) {
41
+ lowest_change = change;
42
+ best_chop = chop;
43
+ }
44
+ }
45
+ return best_chop;
46
+ }
47
+
48
+ std::size_t ArrayCount(uint64_t max_offset, uint64_t max_next, const Config &config) {
49
+ uint8_t required = util::RequiredBits(max_next);
50
+ uint8_t chopping = ChopBits(max_offset, max_next, config);
51
+ return (max_next >> (required - chopping)) + 1 /* we store 0 too */;
52
+ }
53
+ } // namespace
54
+
55
+ uint64_t ArrayBhiksha::Size(uint64_t max_offset, uint64_t max_next, const Config &config) {
56
+ return sizeof(uint64_t) * (1 /* header */ + ArrayCount(max_offset, max_next, config)) + 7 /* 8-byte alignment */;
57
+ }
58
+
59
+ uint8_t ArrayBhiksha::InlineBits(uint64_t max_offset, uint64_t max_next, const Config &config) {
60
+ return util::RequiredBits(max_next) - ChopBits(max_offset, max_next, config);
61
+ }
62
+
63
+ namespace {
64
+
65
+ void *AlignTo8(void *from) {
66
+ uint8_t *val = reinterpret_cast<uint8_t*>(from);
67
+ std::size_t remainder = reinterpret_cast<std::size_t>(val) & 7;
68
+ if (!remainder) return val;
69
+ return val + 8 - remainder;
70
+ }
71
+
72
+ } // namespace
73
+
74
+ ArrayBhiksha::ArrayBhiksha(void *base, uint64_t max_offset, uint64_t max_next, const Config &config)
75
+ : next_inline_(util::BitsMask::ByBits(InlineBits(max_offset, max_next, config))),
76
+ offset_begin_(reinterpret_cast<const uint64_t*>(AlignTo8(base)) + 1 /* 8-byte header */),
77
+ offset_end_(offset_begin_ + ArrayCount(max_offset, max_next, config)),
78
+ write_to_(reinterpret_cast<uint64_t*>(AlignTo8(base)) + 1 /* 8-byte header */ + 1 /* first entry is 0 */),
79
+ original_base_(base) {}
80
+
81
+ void ArrayBhiksha::FinishedLoading(const Config &config) {
82
+ // *offset_begin_ = 0 but without a const_cast.
83
+ *(write_to_ - (write_to_ - offset_begin_)) = 0;
84
+
85
+ if (write_to_ != offset_end_) UTIL_THROW(util::Exception, "Did not get all the array entries that were expected.");
86
+
87
+ uint8_t *head_write = reinterpret_cast<uint8_t*>(original_base_);
88
+ *(head_write++) = kArrayBhikshaVersion;
89
+ *(head_write++) = config.pointer_bhiksha_bits;
90
+ }
91
+
92
+ } // namespace trie
93
+ } // namespace ngram
94
+ } // namespace lm
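
(Note: ChopBits above picks how many high bits to strip from each stored pointer by weighing the cost of a larger 64-bit offset table against the bits saved per entry. The following standalone snippet, with made-up sizes, just re-evaluates that same cost expression so the trade-off is visible; it is not KenLM code.)

    // Illustrative re-computation of the cost minimized by ChopBits.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t max_offset = 1000000;  // hypothetical number of entries
      const uint64_t max_next = 5000000;    // hypothetical largest pointer value
      const int required = 23;              // bits needed for max_next (2^23 > 5000000)
      for (int chop = 0; chop <= 8; ++chop) {
        int64_t table_bits = static_cast<int64_t>(max_next >> (required - chop)) * 64;
        int64_t saved_bits = static_cast<int64_t>(max_offset) * chop;
        std::printf("chop=%d net change=%lld bits\n", chop,
                    static_cast<long long>(table_bits - saved_bits));
      }
      return 0;
    }
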
cc-multilingual-main/cc_net/third_party/kenlm/lm/bhiksha.hh ADDED
@@ -0,0 +1,122 @@
1
+ /* Simple implementation of
2
+ * @inproceedings{bhikshacompression,
3
+ * author={Bhiksha Raj and Ed Whittaker},
4
+ * year={2003},
5
+ * title={Lossless Compression of Language Model Structure and Word Identifiers},
6
+ * booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing},
7
+ * pages={388--391},
8
+ * }
9
+ *
10
+ * Currently only used for next pointers.
11
+ */
12
+
13
+ #ifndef LM_BHIKSHA_H
14
+ #define LM_BHIKSHA_H
15
+
16
+ #include "model_type.hh"
17
+ #include "trie.hh"
18
+ #include "../util/bit_packing.hh"
19
+ #include "../util/sorted_uniform.hh"
20
+
21
+ #include <algorithm>
22
+ #include <stdint.h>
23
+ #include <cassert>
24
+
25
+ namespace lm {
26
+ namespace ngram {
27
+ struct Config;
28
+ class BinaryFormat;
29
+
30
+ namespace trie {
31
+
32
+ class DontBhiksha {
33
+ public:
34
+ static const ModelType kModelTypeAdd = static_cast<ModelType>(0);
35
+
36
+ static void UpdateConfigFromBinary(const BinaryFormat &, uint64_t, Config &/*config*/) {}
37
+
38
+ static uint64_t Size(uint64_t /*max_offset*/, uint64_t /*max_next*/, const Config &/*config*/) { return 0; }
39
+
40
+ static uint8_t InlineBits(uint64_t /*max_offset*/, uint64_t max_next, const Config &/*config*/) {
41
+ return util::RequiredBits(max_next);
42
+ }
43
+
44
+ DontBhiksha(const void *base, uint64_t max_offset, uint64_t max_next, const Config &config);
45
+
46
+ void ReadNext(const void *base, uint64_t bit_offset, uint64_t /*index*/, uint8_t total_bits, NodeRange &out) const {
47
+ out.begin = util::ReadInt57(base, bit_offset, next_.bits, next_.mask);
48
+ out.end = util::ReadInt57(base, bit_offset + total_bits, next_.bits, next_.mask);
49
+ //assert(out.end >= out.begin);
50
+ }
51
+
52
+ void WriteNext(void *base, uint64_t bit_offset, uint64_t /*index*/, uint64_t value) {
53
+ util::WriteInt57(base, bit_offset, next_.bits, value);
54
+ }
55
+
56
+ void FinishedLoading(const Config &/*config*/) {}
57
+
58
+ uint8_t InlineBits() const { return next_.bits; }
59
+
60
+ private:
61
+ util::BitsMask next_;
62
+ };
63
+
64
+ class ArrayBhiksha {
65
+ public:
66
+ static const ModelType kModelTypeAdd = kArrayAdd;
67
+
68
+ static void UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config);
69
+
70
+ static uint64_t Size(uint64_t max_offset, uint64_t max_next, const Config &config);
71
+
72
+ static uint8_t InlineBits(uint64_t max_offset, uint64_t max_next, const Config &config);
73
+
74
+ ArrayBhiksha(void *base, uint64_t max_offset, uint64_t max_value, const Config &config);
75
+
76
+ void ReadNext(const void *base, uint64_t bit_offset, uint64_t index, uint8_t total_bits, NodeRange &out) const {
77
+ // Some assertions are commented out because they are expensive.
78
+ // assert(*offset_begin_ == 0);
79
+ // std::upper_bound returns the first element that is greater. Want the
80
+ // last element that is <= to the index.
81
+ const uint64_t *begin_it = std::upper_bound(offset_begin_, offset_end_, index) - 1;
82
+ // Since *offset_begin_ == 0, the position should be in range.
83
+ // assert(begin_it >= offset_begin_);
84
+ const uint64_t *end_it;
85
+ for (end_it = begin_it + 1; (end_it < offset_end_) && (*end_it <= index + 1); ++end_it) {}
86
+ // assert(end_it == std::upper_bound(offset_begin_, offset_end_, index + 1));
87
+ --end_it;
88
+ // assert(end_it >= begin_it);
89
+ out.begin = ((begin_it - offset_begin_) << next_inline_.bits) |
90
+ util::ReadInt57(base, bit_offset, next_inline_.bits, next_inline_.mask);
91
+ out.end = ((end_it - offset_begin_) << next_inline_.bits) |
92
+ util::ReadInt57(base, bit_offset + total_bits, next_inline_.bits, next_inline_.mask);
93
+ // If this fails, consider rebuilding your model using KenLM after 1e333d786b748555e8f368d2bbba29a016c98052
94
+ assert(out.end >= out.begin);
95
+ }
96
+
97
+ void WriteNext(void *base, uint64_t bit_offset, uint64_t index, uint64_t value) {
98
+ uint64_t encode = value >> next_inline_.bits;
99
+ for (; write_to_ <= offset_begin_ + encode; ++write_to_) *write_to_ = index;
100
+ util::WriteInt57(base, bit_offset, next_inline_.bits, value & next_inline_.mask);
101
+ }
102
+
103
+ void FinishedLoading(const Config &config);
104
+
105
+ uint8_t InlineBits() const { return next_inline_.bits; }
106
+
107
+ private:
108
+ const util::BitsMask next_inline_;
109
+
110
+ const uint64_t *const offset_begin_;
111
+ const uint64_t *const offset_end_;
112
+
113
+ uint64_t *write_to_;
114
+
115
+ void *original_base_;
116
+ };
117
+
118
+ } // namespace trie
119
+ } // namespace ngram
120
+ } // namespace lm
121
+
122
+ #endif // LM_BHIKSHA_H
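
(Note: the split performed by WriteNext and ReadNext above can be summarized as: the low next_inline_.bits of each pointer are bit-packed inline, while the high bits are recovered from the position in the offset array. A toy decomposition, not KenLM code:)

    // Toy round-trip of the inline/array split used by ArrayBhiksha.
    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned inline_bits = 10;               // arbitrary for illustration
      const uint64_t mask = (1ULL << inline_bits) - 1;

      const uint64_t value = 123456;                 // hypothetical "next" pointer
      const uint64_t high = value >> inline_bits;    // found via the offset array
      const uint64_t low  = value & mask;            // stored inline, bit-packed

      assert(((high << inline_bits) | low) == value);
      return 0;
    }
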
cc-multilingual-main/cc_net/third_party/kenlm/lm/binary_format.cc ADDED
@@ -0,0 +1,302 @@
1
+ #include "binary_format.hh"
2
+
3
+ #include "lm_exception.hh"
4
+ #include "../util/file.hh"
5
+ #include "../util/file_piece.hh"
6
+
7
+ #include <cstddef>
8
+ #include <cstring>
9
+ #include <limits>
10
+ #include <string>
11
+ #include <cstdlib>
12
+
13
+ #include <stdint.h>
14
+
15
+ namespace lm {
16
+ namespace ngram {
17
+
18
+ const char *kModelNames[6] = {"probing hash tables", "probing hash tables with rest costs", "trie", "trie with quantization", "trie with array-compressed pointers", "trie with quantization and array-compressed pointers"};
19
+
20
+ namespace {
21
+ const char kMagicBeforeVersion[] = "mmap lm http://kheafield.com/code format version";
22
+ const char kMagicBytes[] = "mmap lm http://kheafield.com/code format version 5\n\0";
23
+ // This must be shorter than kMagicBytes and indicates an incomplete binary file (i.e. build failed).
24
+ const char kMagicIncomplete[] = "mmap lm http://kheafield.com/code incomplete\n";
25
+ const long int kMagicVersion = 5;
26
+
27
+ // Old binary files built on 32-bit machines have this header.
28
+ // TODO: eliminate with next binary release.
29
+ struct OldSanity {
30
+ char magic[sizeof(kMagicBytes)];
31
+ float zero_f, one_f, minus_half_f;
32
+ WordIndex one_word_index, max_word_index;
33
+ uint64_t one_uint64;
34
+
35
+ void SetToReference() {
36
+ std::memset(this, 0, sizeof(OldSanity));
37
+ std::memcpy(magic, kMagicBytes, sizeof(magic));
38
+ zero_f = 0.0; one_f = 1.0; minus_half_f = -0.5;
39
+ one_word_index = 1;
40
+ max_word_index = std::numeric_limits<WordIndex>::max();
41
+ one_uint64 = 1;
42
+ }
43
+ };
44
+
45
+
46
+ // Test values aligned to 8 bytes.
47
+ struct Sanity {
48
+ char magic[ALIGN8(sizeof(kMagicBytes))];
49
+ float zero_f, one_f, minus_half_f;
50
+ WordIndex one_word_index, max_word_index, padding_to_8;
51
+ uint64_t one_uint64;
52
+
53
+ void SetToReference() {
54
+ std::memset(this, 0, sizeof(Sanity));
55
+ std::memcpy(magic, kMagicBytes, sizeof(kMagicBytes));
56
+ zero_f = 0.0; one_f = 1.0; minus_half_f = -0.5;
57
+ one_word_index = 1;
58
+ max_word_index = std::numeric_limits<WordIndex>::max();
59
+ padding_to_8 = 0;
60
+ one_uint64 = 1;
61
+ }
62
+ };
63
+
64
+ std::size_t TotalHeaderSize(unsigned char order) {
65
+ return ALIGN8(sizeof(Sanity) + sizeof(FixedWidthParameters) + sizeof(uint64_t) * order);
66
+ }
67
+
68
+ void WriteHeader(void *to, const Parameters &params) {
69
+ Sanity header = Sanity();
70
+ header.SetToReference();
71
+ std::memcpy(to, &header, sizeof(Sanity));
72
+ char *out = reinterpret_cast<char*>(to) + sizeof(Sanity);
73
+
74
+ *reinterpret_cast<FixedWidthParameters*>(out) = params.fixed;
75
+ out += sizeof(FixedWidthParameters);
76
+
77
+ uint64_t *counts = reinterpret_cast<uint64_t*>(out);
78
+ for (std::size_t i = 0; i < params.counts.size(); ++i) {
79
+ counts[i] = params.counts[i];
80
+ }
81
+ }
82
+
83
+ } // namespace
84
+
85
+ bool IsBinaryFormat(int fd) {
86
+ const uint64_t size = util::SizeFile(fd);
87
+ if (size == util::kBadSize || (size <= static_cast<uint64_t>(sizeof(Sanity)))) return false;
88
+ // Try reading the header.
89
+ util::scoped_memory memory;
90
+ try {
91
+ util::MapRead(util::LAZY, fd, 0, sizeof(Sanity), memory);
92
+ } catch (const util::Exception &e) {
93
+ return false;
94
+ }
95
+ Sanity reference_header = Sanity();
96
+ reference_header.SetToReference();
97
+ if (!std::memcmp(memory.get(), &reference_header, sizeof(Sanity))) return true;
98
+ if (!std::memcmp(memory.get(), kMagicIncomplete, strlen(kMagicIncomplete))) {
99
+ UTIL_THROW(FormatLoadException, "This binary file did not finish building");
100
+ }
101
+ if (!std::memcmp(memory.get(), kMagicBeforeVersion, strlen(kMagicBeforeVersion))) {
102
+ char *end_ptr;
103
+ const char *begin_version = static_cast<const char*>(memory.get()) + strlen(kMagicBeforeVersion);
104
+ long int version = std::strtol(begin_version, &end_ptr, 10);
105
+ if ((end_ptr != begin_version) && version != kMagicVersion) {
106
+ UTIL_THROW(FormatLoadException, "Binary file has version " << version << " but this implementation expects version " << kMagicVersion << " so you'll have to use the ARPA to rebuild your binary");
107
+ }
108
+
109
+ OldSanity old_sanity = OldSanity();
110
+ old_sanity.SetToReference();
111
+ UTIL_THROW_IF(!std::memcmp(memory.get(), &old_sanity, sizeof(OldSanity)), FormatLoadException, "Looks like this is an old 32-bit format. The old 32-bit format has been removed so that 64-bit and 32-bit files are exchangeable.");
112
+ UTIL_THROW(FormatLoadException, "File looks like it should be loaded with mmap, but the test values don't match. Try rebuilding the binary format LM using the same code revision, compiler, and architecture");
113
+ }
114
+ return false;
115
+ }
116
+
117
+ void ReadHeader(int fd, Parameters &out) {
118
+ util::SeekOrThrow(fd, sizeof(Sanity));
119
+ util::ReadOrThrow(fd, &out.fixed, sizeof(out.fixed));
120
+ if (out.fixed.probing_multiplier < 1.0)
121
+ UTIL_THROW(FormatLoadException, "Binary format claims to have a probing multiplier of " << out.fixed.probing_multiplier << " which is < 1.0.");
122
+
123
+ out.counts.resize(static_cast<std::size_t>(out.fixed.order));
124
+ if (out.fixed.order) util::ReadOrThrow(fd, &*out.counts.begin(), sizeof(uint64_t) * out.fixed.order);
125
+ }
126
+
127
+ void MatchCheck(ModelType model_type, unsigned int search_version, const Parameters &params) {
128
+ if (params.fixed.model_type != model_type) {
129
+ if (static_cast<unsigned int>(params.fixed.model_type) >= (sizeof(kModelNames) / sizeof(const char *)))
130
+ UTIL_THROW(FormatLoadException, "The binary file claims to be model type " << static_cast<unsigned int>(params.fixed.model_type) << " but this is not implemented in this inference code.");
131
+ UTIL_THROW(FormatLoadException, "The binary file was built for " << kModelNames[params.fixed.model_type] << " but the inference code is trying to load " << kModelNames[model_type]);
132
+ }
133
+ UTIL_THROW_IF(search_version != params.fixed.search_version, FormatLoadException, "The binary file has " << kModelNames[params.fixed.model_type] << " version " << params.fixed.search_version << " but this code expects " << kModelNames[params.fixed.model_type] << " version " << search_version);
134
+ }
135
+
136
+ const std::size_t kInvalidSize = static_cast<std::size_t>(-1);
137
+
138
+ BinaryFormat::BinaryFormat(const Config &config)
139
+ : write_method_(config.write_method), write_mmap_(config.write_mmap), load_method_(config.load_method),
140
+ header_size_(kInvalidSize), vocab_size_(kInvalidSize), vocab_string_offset_(kInvalidOffset) {}
141
+
142
+ void BinaryFormat::InitializeBinary(int fd, ModelType model_type, unsigned int search_version, Parameters &params) {
143
+ file_.reset(fd);
144
+ write_mmap_ = NULL; // Ignore write requests; this is already in binary format.
145
+ ReadHeader(fd, params);
146
+ MatchCheck(model_type, search_version, params);
147
+ header_size_ = TotalHeaderSize(params.counts.size());
148
+ }
149
+
150
+ void BinaryFormat::ReadForConfig(void *to, std::size_t amount, uint64_t offset_excluding_header) const {
151
+ assert(header_size_ != kInvalidSize);
152
+ util::ErsatzPRead(file_.get(), to, amount, offset_excluding_header + header_size_);
153
+ }
154
+
155
+ void *BinaryFormat::LoadBinary(std::size_t size) {
156
+ assert(header_size_ != kInvalidSize);
157
+ const uint64_t file_size = util::SizeFile(file_.get());
158
+ // The header is smaller than a page, so we have to map the whole header as well.
159
+ uint64_t total_map = static_cast<uint64_t>(header_size_) + static_cast<uint64_t>(size);
160
+ UTIL_THROW_IF(file_size != util::kBadSize && file_size < total_map, FormatLoadException, "Binary file has size " << file_size << " but the headers say it should be at least " << total_map);
161
+
162
+ util::MapRead(load_method_, file_.get(), 0, util::CheckOverflow(total_map), mapping_);
163
+
164
+ vocab_string_offset_ = total_map;
165
+ return reinterpret_cast<uint8_t*>(mapping_.get()) + header_size_;
166
+ }
167
+
168
+ void *BinaryFormat::SetupJustVocab(std::size_t memory_size, uint8_t order) {
169
+ vocab_size_ = memory_size;
170
+ if (!write_mmap_) {
171
+ header_size_ = 0;
172
+ util::HugeMalloc(memory_size, true, memory_vocab_);
173
+ return reinterpret_cast<uint8_t*>(memory_vocab_.get());
174
+ }
175
+ header_size_ = TotalHeaderSize(order);
176
+ std::size_t total = util::CheckOverflow(static_cast<uint64_t>(header_size_) + static_cast<uint64_t>(memory_size));
177
+ file_.reset(util::CreateOrThrow(write_mmap_));
178
+ // some gccs complain about uninitialized variables even though all enum values are covered.
179
+ void *vocab_base = NULL;
180
+ switch (write_method_) {
181
+ case Config::WRITE_MMAP:
182
+ mapping_.reset(util::MapZeroedWrite(file_.get(), total), total, util::scoped_memory::MMAP_ALLOCATED);
183
+ util::AdviseHugePages(vocab_base, total);
184
+ vocab_base = mapping_.get();
185
+ break;
186
+ case Config::WRITE_AFTER:
187
+ util::ResizeOrThrow(file_.get(), 0);
188
+ util::HugeMalloc(total, true, memory_vocab_);
189
+ vocab_base = memory_vocab_.get();
190
+ break;
191
+ }
192
+ strncpy(reinterpret_cast<char*>(vocab_base), kMagicIncomplete, header_size_);
193
+ return reinterpret_cast<uint8_t*>(vocab_base) + header_size_;
194
+ }
195
+
196
+ void *BinaryFormat::GrowForSearch(std::size_t memory_size, std::size_t vocab_pad, void *&vocab_base) {
197
+ assert(vocab_size_ != kInvalidSize);
198
+ vocab_pad_ = vocab_pad;
199
+ std::size_t new_size = header_size_ + vocab_size_ + vocab_pad_ + memory_size;
200
+ vocab_string_offset_ = new_size;
201
+ if (!write_mmap_ || write_method_ == Config::WRITE_AFTER) {
202
+ util::HugeMalloc(memory_size, true, memory_search_);
203
+ assert(header_size_ == 0 || write_mmap_);
204
+ vocab_base = reinterpret_cast<uint8_t*>(memory_vocab_.get()) + header_size_;
205
+ util::AdviseHugePages(memory_search_.get(), memory_size);
206
+ return reinterpret_cast<uint8_t*>(memory_search_.get());
207
+ }
208
+
209
+ assert(write_method_ == Config::WRITE_MMAP);
210
+ // Also known as total size without vocab words.
211
+ // Grow the file to accommodate the search, using zeros.
212
+ // According to man mmap, behavior is undefined when the file is resized
213
+ // underneath a mmap that is not a multiple of the page size. So to be
214
+ // safe, we'll unmap it and map it again.
215
+ mapping_.reset();
216
+ util::ResizeOrThrow(file_.get(), new_size);
217
+ void *ret;
218
+ MapFile(vocab_base, ret);
219
+ util::AdviseHugePages(ret, new_size);
220
+ return ret;
221
+ }
222
+
223
+ void BinaryFormat::WriteVocabWords(const std::string &buffer, void *&vocab_base, void *&search_base) {
224
+ // Checking Config's include_vocab is the responsibility of the caller.
225
+ assert(header_size_ != kInvalidSize && vocab_size_ != kInvalidSize);
226
+ if (!write_mmap_) {
227
+ // Unchanged base.
228
+ vocab_base = reinterpret_cast<uint8_t*>(memory_vocab_.get());
229
+ search_base = reinterpret_cast<uint8_t*>(memory_search_.get());
230
+ return;
231
+ }
232
+ if (write_method_ == Config::WRITE_MMAP) {
233
+ mapping_.reset();
234
+ }
235
+ util::SeekOrThrow(file_.get(), VocabStringReadingOffset());
236
+ util::WriteOrThrow(file_.get(), &buffer[0], buffer.size());
237
+ if (write_method_ == Config::WRITE_MMAP) {
238
+ MapFile(vocab_base, search_base);
239
+ } else {
240
+ vocab_base = reinterpret_cast<uint8_t*>(memory_vocab_.get()) + header_size_;
241
+ search_base = reinterpret_cast<uint8_t*>(memory_search_.get());
242
+ }
243
+ }
244
+
245
+ void BinaryFormat::FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts) {
246
+ if (!write_mmap_) return;
247
+ switch (write_method_) {
248
+ case Config::WRITE_MMAP:
249
+ util::SyncOrThrow(mapping_.get(), mapping_.size());
250
+ break;
251
+ case Config::WRITE_AFTER:
252
+ util::SeekOrThrow(file_.get(), 0);
253
+ util::WriteOrThrow(file_.get(), memory_vocab_.get(), memory_vocab_.size());
254
+ util::SeekOrThrow(file_.get(), header_size_ + vocab_size_ + vocab_pad_);
255
+ util::WriteOrThrow(file_.get(), memory_search_.get(), memory_search_.size());
256
+ util::FSyncOrThrow(file_.get());
257
+ break;
258
+ }
259
+ // header and vocab share the same mmap.
260
+ Parameters params = Parameters();
261
+ memset(&params, 0, sizeof(Parameters));
262
+ params.counts = counts;
263
+ params.fixed.order = counts.size();
264
+ params.fixed.probing_multiplier = config.probing_multiplier;
265
+ params.fixed.model_type = model_type;
266
+ params.fixed.has_vocabulary = config.include_vocab;
267
+ params.fixed.search_version = search_version;
268
+ switch (write_method_) {
269
+ case Config::WRITE_MMAP:
270
+ WriteHeader(mapping_.get(), params);
271
+ util::SyncOrThrow(mapping_.get(), mapping_.size());
272
+ break;
273
+ case Config::WRITE_AFTER:
274
+ {
275
+ std::vector<uint8_t> buffer(TotalHeaderSize(counts.size()));
276
+ WriteHeader(&buffer[0], params);
277
+ util::SeekOrThrow(file_.get(), 0);
278
+ util::WriteOrThrow(file_.get(), &buffer[0], buffer.size());
279
+ }
280
+ break;
281
+ }
282
+ }
283
+
284
+ void BinaryFormat::MapFile(void *&vocab_base, void *&search_base) {
285
+ mapping_.reset(util::MapOrThrow(vocab_string_offset_, true, util::kFileFlags, false, file_.get()), vocab_string_offset_, util::scoped_memory::MMAP_ALLOCATED);
286
+ vocab_base = reinterpret_cast<uint8_t*>(mapping_.get()) + header_size_;
287
+ search_base = reinterpret_cast<uint8_t*>(mapping_.get()) + header_size_ + vocab_size_ + vocab_pad_;
288
+ }
289
+
290
+ bool RecognizeBinary(const char *file, ModelType &recognized) {
291
+ util::scoped_fd fd(util::OpenReadOrThrow(file));
292
+ if (!IsBinaryFormat(fd.get())) {
293
+ return false;
294
+ }
295
+ Parameters params;
296
+ ReadHeader(fd.get(), params);
297
+ recognized = params.fixed.model_type;
298
+ return true;
299
+ }
300
+
301
+ } // namespace ngram
302
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/binary_format.hh ADDED
@@ -0,0 +1,106 @@
1
+ #ifndef LM_BINARY_FORMAT_H
2
+ #define LM_BINARY_FORMAT_H
3
+
4
+ #include "config.hh"
5
+ #include "model_type.hh"
6
+ #include "read_arpa.hh"
7
+
8
+ #include "../util/file_piece.hh"
9
+ #include "../util/mmap.hh"
10
+ #include "../util/scoped.hh"
11
+
12
+ #include <cstddef>
13
+ #include <vector>
14
+
15
+ #include <stdint.h>
16
+
17
+ namespace lm {
18
+ namespace ngram {
19
+
20
+ extern const char *kModelNames[6];
21
+
22
+ /*Inspect a file to determine if it is a binary lm. If not, return false.
23
+ * If so, return true and set recognized to the type. This is the only API in
24
+ * this header designed for use by decoder authors.
25
+ */
26
+ bool RecognizeBinary(const char *file, ModelType &recognized);
27
+
28
+ struct FixedWidthParameters {
29
+ unsigned char order;
30
+ float probing_multiplier;
31
+ // What type of model is this?
32
+ ModelType model_type;
33
+ // Does the end of the file have the actual strings in the vocabulary?
34
+ bool has_vocabulary;
35
+ unsigned int search_version;
36
+ };
37
+
38
+ // This is a macro instead of an inline function so constants can be assigned using it.
39
+ #define ALIGN8(a) ((std::ptrdiff_t(((a)-1)/8)+1)*8)
40
+
41
+ // Parameters stored in the header of a binary file.
42
+ struct Parameters {
43
+ FixedWidthParameters fixed;
44
+ std::vector<uint64_t> counts;
45
+ };
46
+
47
+ class BinaryFormat {
48
+ public:
49
+ explicit BinaryFormat(const Config &config);
50
+
51
+ // Reading a binary file:
52
+ // Takes ownership of fd
53
+ void InitializeBinary(int fd, ModelType model_type, unsigned int search_version, Parameters &params);
54
+ // Used to read parts of the file to update the config object before figuring out full size.
55
+ void ReadForConfig(void *to, std::size_t amount, uint64_t offset_excluding_header) const;
56
+ // Actually load the binary file and return a pointer to the beginning of the search area.
57
+ void *LoadBinary(std::size_t size);
58
+
59
+ uint64_t VocabStringReadingOffset() const {
60
+ assert(vocab_string_offset_ != kInvalidOffset);
61
+ return vocab_string_offset_;
62
+ }
63
+
64
+ // Writing a binary file or initializing in RAM from ARPA:
65
+ // Size for vocabulary.
66
+ void *SetupJustVocab(std::size_t memory_size, uint8_t order);
67
+ // Warning: can change the vocabulary base pointer.
68
+ void *GrowForSearch(std::size_t memory_size, std::size_t vocab_pad, void *&vocab_base);
69
+ // Warning: can change vocabulary and search base addresses.
70
+ void WriteVocabWords(const std::string &buffer, void *&vocab_base, void *&search_base);
71
+ // Write the header at the beginning of the file.
72
+ void FinishFile(const Config &config, ModelType model_type, unsigned int search_version, const std::vector<uint64_t> &counts);
73
+
74
+ private:
75
+ void MapFile(void *&vocab_base, void *&search_base);
76
+
77
+ // Copied from configuration.
78
+ const Config::WriteMethod write_method_;
79
+ const char *write_mmap_;
80
+ util::LoadMethod load_method_;
81
+
82
+ // File behind memory, if any.
83
+ util::scoped_fd file_;
84
+
85
+ // If there is a file involved, a single mapping.
86
+ util::scoped_memory mapping_;
87
+
88
+ // If the data is only in memory, separately allocate each because the trie
89
+ // knows vocab's size before it knows search's size (because SRILM might
90
+ // have pruned).
91
+ util::scoped_memory memory_vocab_, memory_search_;
92
+
93
+ // Memory ranges. Note that these may not be contiguous and may not all
94
+ // exist.
95
+ std::size_t header_size_, vocab_size_, vocab_pad_;
96
+ // aka end of search.
97
+ uint64_t vocab_string_offset_;
98
+
99
+ static const uint64_t kInvalidOffset = (uint64_t)-1;
100
+ };
101
+
102
+ bool IsBinaryFormat(int fd);
103
+
104
+ } // namespace ngram
105
+ } // namespace lm
106
+ #endif // LM_BINARY_FORMAT_H
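
(Note: as the comment above says, RecognizeBinary is the one function in this header intended for decoder authors. A small usage sketch, assuming the header is reachable as lm/binary_format.hh; the file name is a placeholder and error handling is reduced to messages:)

    // Sketch of probing a file before deciding how to load it.
    #include "lm/binary_format.hh"

    #include <iostream>

    int main() {
      lm::ngram::ModelType type;
      if (lm::ngram::RecognizeBinary("model.binary", type)) {
        std::cout << "Binary KenLM file, model type "
                  << static_cast<unsigned>(type) << std::endl;
      } else {
        std::cout << "Not a binary KenLM file (perhaps an ARPA file)." << std::endl;
      }
      return 0;
    }
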
cc-multilingual-main/cc_net/third_party/kenlm/lm/blank.hh ADDED
@@ -0,0 +1,42 @@
1
+ #ifndef LM_BLANK_H
2
+ #define LM_BLANK_H
3
+
4
+ #include <limits>
5
+ #include <stdint.h>
6
+ #include <cmath>
7
+
8
+ namespace lm {
9
+ namespace ngram {
10
+
11
+ /* Suppose "foo bar" appears with zero backoff but there is no trigram
12
+ * beginning with these words. Then, when scoring "foo bar", the model could
13
+ * return out_state containing "bar" or even null context if "bar" also has no
14
+ * backoff and is never followed by another word. Then the backoff is set to
15
+ * kNoExtensionBackoff. If the n-gram might be extended, then out_state must
16
+ * contain the full n-gram, in which case kExtensionBackoff is set. In any
17
+ * case, if an n-gram has non-zero backoff, the full state is returned so
18
+ * backoff can be properly charged.
19
+ * These differ only in sign bit because the backoff is in fact zero in either
20
+ * case.
21
+ */
22
+ const float kNoExtensionBackoff = -0.0;
23
+ const float kExtensionBackoff = 0.0;
24
+ const uint64_t kNoExtensionQuant = 0;
25
+ const uint64_t kExtensionQuant = 1;
26
+
27
+ inline void SetExtension(float &backoff) {
28
+ if (backoff == kNoExtensionBackoff) backoff = kExtensionBackoff;
29
+ }
30
+
31
+ // This compiles down nicely.
32
+ inline bool HasExtension(const float &backoff) {
33
+ typedef union { float f; uint32_t i; } UnionValue;
34
+ UnionValue compare, interpret;
35
+ compare.f = kNoExtensionBackoff;
36
+ interpret.f = backoff;
37
+ return compare.i != interpret.i;
38
+ }
39
+
40
+ } // namespace ngram
41
+ } // namespace lm
42
+ #endif // LM_BLANK_H
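
(Note: HasExtension above works because kNoExtensionBackoff (-0.0) and kExtensionBackoff (0.0) compare equal as floats, so the distinction has to be made on the bit pattern. A small standalone check of that property, illustrative only:)

    // 0.0f and -0.0f are equal as values but differ in the sign bit.
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const float positive_zero = 0.0f, negative_zero = -0.0f;
      assert(positive_zero == negative_zero);   // IEEE 754 value comparison

      uint32_t pos_bits, neg_bits;
      std::memcpy(&pos_bits, &positive_zero, sizeof(pos_bits));
      std::memcpy(&neg_bits, &negative_zero, sizeof(neg_bits));
      assert(pos_bits != neg_bits);             // bit patterns differ
      return 0;
    }
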
cc-multilingual-main/cc_net/third_party/kenlm/lm/build_binary_main.cc ADDED
@@ -0,0 +1,237 @@
1
+ #include "model.hh"
2
+ #include "sizes.hh"
3
+ #include "../util/file_piece.hh"
4
+ #include "../util/usage.hh"
5
+
6
+ #include <algorithm>
7
+ #include <cstdlib>
8
+ #include <exception>
9
+ #include <iostream>
10
+ #include <iomanip>
11
+ #include <limits>
12
+ #include <cmath>
13
+
14
+ #ifdef WIN32
15
+ #include "../util/getopt.hh"
16
+ #else
17
+ #include <unistd.h>
18
+ #endif
19
+
20
+ namespace lm {
21
+ namespace ngram {
22
+ namespace {
23
+
24
+ void Usage(const char *name, const char *default_mem) {
25
+ std::cerr << "Usage: " << name << " [-u log10_unknown_probability] [-s] [-i] [-v] [-w mmap|after] [-p probing_multiplier] [-T trie_temporary] [-S trie_building_mem] [-q bits] [-b bits] [-a bits] [type] input.arpa [output.mmap]\n\n"
26
+ "-u sets the log10 probability for <unk> if the ARPA file does not have one.\n"
27
+ " Default is -100. The ARPA file will always take precedence.\n"
28
+ "-s allows models to be built even if they do not have <s> and </s>.\n"
29
+ "-i allows buggy models from IRSTLM by mapping positive log probability to 0.\n"
30
+ "-v disables inclusion of the vocabulary in the binary file.\n"
31
+ "-w mmap|after determines how writing is done.\n"
32
+ " mmap maps the binary file and writes to it. Default for trie.\n"
33
+ " after allocates anonymous memory, builds, and writes. Default for probing.\n"
34
+ "-r \"order1.arpa order2 order3 order4\" adds lower-order rest costs from these\n"
35
+ " model files. order1.arpa must be an ARPA file. All others may be ARPA or\n"
36
+ " the same data structure as being built. All files must have the same\n"
37
+ " vocabulary. For probing, the unigrams must be in the same order.\n\n"
38
+ "type is either probing or trie. Default is probing.\n\n"
39
+ "probing uses a probing hash table. It is the fastest but uses the most memory.\n"
40
+ "-p sets the space multiplier and must be >1.0. The default is 1.5.\n\n"
41
+ "trie is a straightforward trie with bit-level packing. It uses the least\n"
42
+ "memory and is still faster than SRI or IRST. Building the trie format uses an\n"
43
+ "on-disk sort to save memory.\n"
44
+ "-T is the temporary directory prefix. Default is the output file name.\n"
45
+ "-S determines memory use for sorting. Default is " << default_mem << ". This is compatible\n"
46
+ " with GNU sort. The number is followed by a unit: \% for percent of physical\n"
47
+ " memory, b for bytes, K for Kilobytes, M for megabytes, then G,T,P,E,Z,Y. \n"
48
+ " Default unit is K for Kilobytes.\n"
49
+ "-q turns quantization on and sets the number of bits (e.g. -q 8).\n"
50
+ "-b sets backoff quantization bits. Requires -q and defaults to that value.\n"
51
+ "-a compresses pointers using an array of offsets. The parameter is the\n"
52
+ " maximum number of bits encoded by the array. Memory is minimized subject\n"
53
+ " to the maximum, so pick 255 to minimize memory.\n\n"
54
+ "-h print this help message.\n\n"
55
+ "Get a memory estimate by passing an ARPA file without an output file name.\n";
56
+ exit(1);
57
+ }
58
+
59
+ // I could really use boost::lexical_cast right about now.
60
+ float ParseFloat(const char *from) {
61
+ char *end;
62
+ float ret = strtod(from, &end);
63
+ if (*end) throw util::ParseNumberException(from);
64
+ return ret;
65
+ }
66
+ unsigned long int ParseUInt(const char *from) {
67
+ char *end;
68
+ unsigned long int ret = strtoul(from, &end, 10);
69
+ if (*end) throw util::ParseNumberException(from);
70
+ return ret;
71
+ }
72
+
73
+ uint8_t ParseBitCount(const char *from) {
74
+ unsigned long val = ParseUInt(from);
75
+ if (val > 25) {
76
+ util::ParseNumberException e(from);
77
+ e << " bit counts are limited to 25.";
+ throw e;
78
+ }
79
+ return val;
80
+ }
81
+
82
+ void ParseFileList(const char *from, std::vector<std::string> &to) {
83
+ to.clear();
84
+ while (true) {
85
+ const char *i;
86
+ for (i = from; *i && *i != ' '; ++i) {}
87
+ to.push_back(std::string(from, i - from));
88
+ if (!*i) break;
89
+ from = i + 1;
90
+ }
91
+ }
92
+
93
+ void ProbingQuantizationUnsupported() {
94
+ std::cerr << "Quantization is only implemented in the trie data structure." << std::endl;
95
+ exit(1);
96
+ }
97
+
98
+ } // namespace
99
+ } // namespace ngram
100
+ } // namespace lm
101
+
102
+ int main(int argc, char *argv[]) {
103
+ using namespace lm::ngram;
104
+
105
+ const char *default_mem = util::GuessPhysicalMemory() ? "80%" : "1G";
106
+
107
+ if (argc == 2 && !strcmp(argv[1], "--help"))
108
+ Usage(argv[0], default_mem);
109
+
110
+ try {
111
+ bool quantize = false, set_backoff_bits = false, bhiksha = false, set_write_method = false, rest = false;
112
+ lm::ngram::Config config;
113
+ config.building_memory = util::ParseSize(default_mem);
114
+ int opt;
115
+ while ((opt = getopt(argc, argv, "q:b:a:u:p:t:T:m:S:w:sir:vh")) != -1) {
116
+ switch(opt) {
117
+ case 'q':
118
+ config.prob_bits = ParseBitCount(optarg);
119
+ if (!set_backoff_bits) config.backoff_bits = config.prob_bits;
120
+ quantize = true;
121
+ break;
122
+ case 'b':
123
+ config.backoff_bits = ParseBitCount(optarg);
124
+ set_backoff_bits = true;
125
+ break;
126
+ case 'a':
127
+ config.pointer_bhiksha_bits = ParseBitCount(optarg);
128
+ bhiksha = true;
129
+ break;
130
+ case 'u':
131
+ config.unknown_missing_logprob = ParseFloat(optarg);
132
+ break;
133
+ case 'p':
134
+ config.probing_multiplier = ParseFloat(optarg);
135
+ break;
136
+ case 't': // legacy
137
+ case 'T':
138
+ config.temporary_directory_prefix = optarg;
139
+ util::NormalizeTempPrefix(config.temporary_directory_prefix);
140
+ break;
141
+ case 'm': // legacy
142
+ config.building_memory = ParseUInt(optarg) * 1048576;
143
+ break;
144
+ case 'S':
145
+ config.building_memory = std::min(static_cast<uint64_t>(std::numeric_limits<std::size_t>::max()), util::ParseSize(optarg));
146
+ break;
147
+ case 'w':
148
+ set_write_method = true;
149
+ if (!strcmp(optarg, "mmap")) {
150
+ config.write_method = Config::WRITE_MMAP;
151
+ } else if (!strcmp(optarg, "after")) {
152
+ config.write_method = Config::WRITE_AFTER;
153
+ } else {
154
+ Usage(argv[0], default_mem);
155
+ }
156
+ break;
157
+ case 's':
158
+ config.sentence_marker_missing = lm::SILENT;
159
+ break;
160
+ case 'i':
161
+ config.positive_log_probability = lm::SILENT;
162
+ break;
163
+ case 'r':
164
+ rest = true;
165
+ ParseFileList(optarg, config.rest_lower_files);
166
+ config.rest_function = Config::REST_LOWER;
167
+ break;
168
+ case 'v':
169
+ config.include_vocab = false;
170
+ break;
171
+ case 'h': // help
172
+ default:
173
+ Usage(argv[0], default_mem);
174
+ }
175
+ }
176
+ if (!quantize && set_backoff_bits) {
177
+ std::cerr << "You specified backoff quantization (-b) but not probability quantization (-q)" << std::endl;
178
+ abort();
179
+ }
180
+ if (optind + 1 == argc) {
181
+ ShowSizes(argv[optind], config);
182
+ return 0;
183
+ }
184
+ const char *model_type;
185
+ const char *from_file;
186
+
187
+ if (optind + 2 == argc) {
188
+ model_type = "probing";
189
+ from_file = argv[optind];
190
+ config.write_mmap = argv[optind + 1];
191
+ } else if (optind + 3 == argc) {
192
+ model_type = argv[optind];
193
+ from_file = argv[optind + 1];
194
+ config.write_mmap = argv[optind + 2];
195
+ } else {
196
+ Usage(argv[0], default_mem);
197
+ return 1;
198
+ }
199
+ if (!strcmp(model_type, "probing")) {
200
+ if (!set_write_method) config.write_method = Config::WRITE_AFTER;
201
+ if (quantize || set_backoff_bits) ProbingQuantizationUnsupported();
202
+ if (rest) {
203
+ RestProbingModel(from_file, config);
204
+ } else {
205
+ ProbingModel(from_file, config);
206
+ }
207
+ } else if (!strcmp(model_type, "trie")) {
208
+ if (rest) {
209
+ std::cerr << "Rest + trie is not supported yet." << std::endl;
210
+ return 1;
211
+ }
212
+ if (!set_write_method) config.write_method = Config::WRITE_MMAP;
213
+ if (quantize) {
214
+ if (bhiksha) {
215
+ QuantArrayTrieModel(from_file, config);
216
+ } else {
217
+ QuantTrieModel(from_file, config);
218
+ }
219
+ } else {
220
+ if (bhiksha) {
221
+ ArrayTrieModel(from_file, config);
222
+ } else {
223
+ TrieModel(from_file, config);
224
+ }
225
+ }
226
+ } else {
227
+ Usage(argv[0], default_mem);
228
+ }
229
+ }
230
+ catch (const std::exception &e) {
231
+ std::cerr << e.what() << std::endl;
232
+ std::cerr << "ERROR" << std::endl;
233
+ return 1;
234
+ }
235
+ std::cerr << "SUCCESS" << std::endl;
236
+ return 0;
237
+ }
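
(Note: following the Usage text above, a typical build is of the form "build_binary trie input.arpa output.binary", with probing as the default type when none is given; passing only an ARPA file, as in "build_binary input.arpa", prints a memory estimate instead of building anything.)
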
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/CMakeLists.txt ADDED
@@ -0,0 +1,25 @@
1
+ # This CMake file was created by Lane Schwartz <[email protected]>
2
+
3
+ # Explicitly list the source files for this subdirectory
4
+ #
5
+ # If you add any source files to this subdirectory
6
+ # that should be included in the kenlm library,
7
+ # (this excludes any unit test files)
8
+ # you should add them to the following list:
9
+ #
10
+ # In order to set correct paths to these files
11
+ # in case this variable is referenced by CMake files in the parent directory,
12
+ # we prefix all files with ${CMAKE_CURRENT_SOURCE_DIR}.
13
+ #
14
+ set(KENLM_LM_COMMON_SOURCE
15
+ ${CMAKE_CURRENT_SOURCE_DIR}/model_buffer.cc
16
+ ${CMAKE_CURRENT_SOURCE_DIR}/print.cc
17
+ ${CMAKE_CURRENT_SOURCE_DIR}/renumber.cc
18
+ ${CMAKE_CURRENT_SOURCE_DIR}/size_option.cc
19
+ PARENT_SCOPE)
20
+
21
+ if(BUILD_TESTING)
22
+ KenLMAddTest(TEST model_buffer_test
23
+ LIBRARIES kenlm
24
+ TEST_ARGS ${CMAKE_CURRENT_SOURCE_DIR}/test_data)
25
+ endif()
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/compare.hh ADDED
@@ -0,0 +1,185 @@
1
+ #ifndef LM_COMMON_COMPARE_H
2
+ #define LM_COMMON_COMPARE_H
3
+
4
+ #include "ngram.hh"
5
+ #include "../word_index.hh"
6
+
7
+ #include <functional>
8
+ #include <string>
9
+
10
+ namespace lm {
11
+
12
+ /**
13
+ * Abstract parent class for defining custom n-gram comparators.
14
+ */
15
+ template <class Child> class Comparator : public std::binary_function<const void *, const void *, bool> {
16
+ public:
17
+
18
+ /**
19
+ * Constructs a comparator capable of comparing two n-grams.
20
+ *
21
+ * @param order Number of words in each n-gram
22
+ */
23
+ explicit Comparator(std::size_t order) : order_(order) {}
24
+
25
+ /**
26
+ * Applies the comparator using the Compare method that must be defined in any class that inherits from this class.
27
+ *
28
+ * @param lhs A pointer to the n-gram on the left-hand side of the comparison
29
+ * @param rhs A pointer to the n-gram on the right-hand side of the comparison
30
+ *
31
+ * @see ContextOrder::Compare
32
+ * @see PrefixOrder::Compare
33
+ * @see SuffixOrder::Compare
34
+ */
35
+ inline bool operator()(const void *lhs, const void *rhs) const {
36
+ return static_cast<const Child*>(this)->Compare(static_cast<const WordIndex*>(lhs), static_cast<const WordIndex*>(rhs));
37
+ }
38
+
39
+ /** Gets the n-gram order defined for this comparator. */
40
+ std::size_t Order() const { return order_; }
41
+
42
+ protected:
43
+ std::size_t order_;
44
+ };
45
+
46
+ /**
47
+ * N-gram comparator that compares n-grams according to their reverse (suffix) order.
48
+ *
49
+ * This comparator compares n-grams lexicographically, one word at a time,
50
+ * beginning with the last word of each n-gram and ending with the first word of each n-gram.
51
+ *
52
+ * Some examples of n-gram comparisons as defined by this comparator:
53
+ * - a b c == a b c
54
+ * - a b c < a b d
55
+ * - a b c > a d b
56
+ * - a b c > a b b
57
+ * - a b c > x a c
58
+ * - a b c < x y z
59
+ */
60
+ class SuffixOrder : public Comparator<SuffixOrder> {
61
+ public:
62
+
63
+ /**
64
+ * Constructs a comparator capable of comparing two n-grams.
65
+ *
66
+ * @param order Number of words in each n-gram
67
+ */
68
+ explicit SuffixOrder(std::size_t order) : Comparator<SuffixOrder>(order) {}
69
+
70
+ /**
71
+ * Compares two n-grams lexicographically, one word at a time,
72
+ * beginning with the last word of each n-gram and ending with the first word of each n-gram.
73
+ *
74
+ * @param lhs A pointer to the n-gram on the left-hand side of the comparison
75
+ * @param rhs A pointer to the n-gram on the right-hand side of the comparison
76
+ */
77
+ inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
78
+ for (std::size_t i = order_ - 1; i != 0; --i) {
79
+ if (lhs[i] != rhs[i])
80
+ return lhs[i] < rhs[i];
81
+ }
82
+ return lhs[0] < rhs[0];
83
+ }
84
+
85
+ static const unsigned kMatchOffset = 1;
86
+ };
87
+
88
+
89
+ /**
90
+ * N-gram comparator that compares n-grams according to the reverse (suffix) order of the n-gram context.
91
+ *
92
+ * This comparator compares n-grams lexicographically, one word at a time,
93
+ * beginning with the penultimate word of each n-gram and ending with the first word of each n-gram;
94
+ * finally, this comparator compares the last word of each n-gram.
95
+ *
96
+ * Some examples of n-gram comparisons as defined by this comparator:
97
+ * - a b c == a b c
98
+ * - a b c < a b d
99
+ * - a b c < a d b
100
+ * - a b c > a b b
101
+ * - a b c > x a c
102
+ * - a b c < x y z
103
+ */
104
+ class ContextOrder : public Comparator<ContextOrder> {
105
+ public:
106
+
107
+ /**
108
+ * Constructs a comparator capable of comparing two n-grams.
109
+ *
110
+ * @param order Number of words in each n-gram
111
+ */
112
+ explicit ContextOrder(std::size_t order) : Comparator<ContextOrder>(order) {}
113
+
114
+ /**
115
+ * Compares two n-grams lexicographically, one word at a time,
116
+ * beginning with the penultimate word of each n-gram and ending with the first word of each n-gram;
117
+ * finally, this comparator compares the last word of each n-gram.
118
+ *
119
+ * @param lhs A pointer to the n-gram on the left-hand side of the comparison
120
+ * @param rhs A pointer to the n-gram on the right-hand side of the comparison
121
+ */
122
+ inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
123
+ for (int i = order_ - 2; i >= 0; --i) {
124
+ if (lhs[i] != rhs[i])
125
+ return lhs[i] < rhs[i];
126
+ }
127
+ return lhs[order_ - 1] < rhs[order_ - 1];
128
+ }
129
+ };
130
+
131
+ /**
132
+ * N-gram comparator that compares n-grams according to their natural (prefix) order.
133
+ *
134
+ * This comparator compares n-grams lexicographically, one word at a time,
135
+ * beginning with the first word of each n-gram and ending with the last word of each n-gram.
136
+ *
137
+ * Some examples of n-gram comparisons as defined by this comparator:
138
+ * - a b c == a b c
139
+ * - a b c < a b d
140
+ * - a b c < a d b
141
+ * - a b c > a b b
142
+ * - a b c < x a c
143
+ * - a b c < x y z
144
+ */
145
+ class PrefixOrder : public Comparator<PrefixOrder> {
146
+ public:
147
+
148
+ /**
149
+ * Constructs a comparator capable of comparing two n-grams.
150
+ *
151
+ * @param order Number of words in each n-gram
152
+ */
153
+ explicit PrefixOrder(std::size_t order) : Comparator<PrefixOrder>(order) {}
154
+
155
+ /**
156
+ * Compares two n-grams lexicographically, one word at a time,
157
+ * beginning with the first word of each n-gram and ending with the last word of each n-gram.
158
+ *
159
+ * @param lhs A pointer to the n-gram on the left-hand side of the comparison
160
+ * @param rhs A pointer to the n-gram on the right-hand side of the comparison
161
+ */
162
+ inline bool Compare(const WordIndex *lhs, const WordIndex *rhs) const {
163
+ for (std::size_t i = 0; i < order_; ++i) {
164
+ if (lhs[i] != rhs[i])
165
+ return lhs[i] < rhs[i];
166
+ }
167
+ return false;
168
+ }
169
+
170
+ static const unsigned kMatchOffset = 0;
171
+ };
172
+
173
+ template <class Range> struct SuffixLexicographicLess : public std::binary_function<const Range, const Range, bool> {
174
+ bool operator()(const Range first, const Range second) const {
175
+ for (const WordIndex *f = first.end() - 1, *s = second.end() - 1; f >= first.begin() && s >= second.begin(); --f, --s) {
176
+ if (*f < *s) return true;
177
+ if (*f > *s) return false;
178
+ }
179
+ return first.size() < second.size();
180
+ }
181
+ };
182
+
183
+ } // namespace lm
184
+
185
+ #endif // LM_COMMON_COMPARE_H
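
The suffix/context/prefix ordering rules documented in compare.hh are easiest to see on concrete keys. Below is a small self-contained sketch (not part of the upload) that re-implements SuffixOrder's Compare loop on two flat trigram keys; the integer ids are made-up stand-ins for vocabulary indices.

#include <cstddef>
#include <cstdint>
#include <iostream>

typedef uint32_t WordIndex;  // stand-in for lm/word_index.hh

struct SuffixOrder {
  explicit SuffixOrder(std::size_t order) : order_(order) {}
  bool operator()(const WordIndex *lhs, const WordIndex *rhs) const {
    // Compare from the last word backwards, exactly as SuffixOrder::Compare does.
    for (std::size_t i = order_ - 1; i != 0; --i) {
      if (lhs[i] != rhs[i]) return lhs[i] < rhs[i];
    }
    return lhs[0] < rhs[0];
  }
  std::size_t order_;
};

int main() {
  const WordIndex abc[] = {1, 2, 3};  // "a b c"
  const WordIndex xac[] = {9, 1, 3};  // "x a c"
  SuffixOrder less(3);
  // Last words tie, the middle word decides: "a b c" sorts after "x a c".
  std::cout << less(abc, xac) << ' ' << less(xac, abc) << '\n';  // prints "0 1"
  return 0;
}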
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/joint_order.hh ADDED
@@ -0,0 +1,71 @@
1
+ #ifndef LM_COMMON_JOINT_ORDER_H
2
+ #define LM_COMMON_JOINT_ORDER_H
3
+
4
+ #include "ngram_stream.hh"
5
+ #include "../lm_exception.hh"
6
+
7
+ #ifdef DEBUG
8
+ #include "../../util/fixed_array.hh"
9
+ #include <iostream>
10
+ #endif
11
+
12
+ #include <cstring>
13
+
14
+ namespace lm {
15
+
16
+ template <class Callback, class Compare> void JointOrder(const util::stream::ChainPositions &positions, Callback &callback) {
17
+ // Allow matching to reference streams[-1].
18
+ util::FixedArray<ProxyStream<NGramHeader> > streams_with_dummy(positions.size() + 1);
19
+ // A bogus stream for [-1].
20
+ streams_with_dummy.push_back();
21
+ for (std::size_t i = 0; i < positions.size(); ++i) {
22
+ streams_with_dummy.push_back(positions[i], NGramHeader(NULL, i + 1));
23
+ }
24
+ ProxyStream<NGramHeader> *streams = streams_with_dummy.begin() + 1;
25
+
26
+ std::size_t order;
27
+ for (order = 0; order < positions.size() && streams[order]; ++order) {}
28
+ assert(order); // should always have <unk>.
29
+
30
+ // Debugging only: call comparison function to sanity check order.
31
+ #ifdef DEBUG
32
+ util::FixedArray<Compare> less_compare(order);
33
+ for (unsigned i = 0; i < order; ++i)
34
+ less_compare.push_back(i + 1);
35
+ #endif // DEBUG
36
+
37
+ std::size_t current = 0;
38
+ while (true) {
39
+ // Does the context match the lower one?
40
+ if (!memcmp(streams[static_cast<int>(current) - 1]->begin(), streams[current]->begin() + Compare::kMatchOffset, sizeof(WordIndex) * current)) {
41
+ callback.Enter(current, streams[current].Get());
42
+ // Transition to looking for extensions.
43
+ if (++current < order) continue;
44
+ }
45
+ #ifdef DEBUG
46
+ // match_check[current - 1] matches current-grams
47
+ // The lower-order stream (which skips fewer current-grams) should always be <= the higher order-stream (which can skip current-grams).
48
+ else if (!less_compare[current - 1](streams[static_cast<int>(current) - 1]->begin(), streams[current]->begin() + Compare::kMatchOffset)) {
49
+ std::cerr << "Stream out of order detected" << std::endl;
50
+ abort();
51
+ }
52
+ #endif // DEBUG
53
+ // No extension left.
54
+ while(true) {
55
+ assert(current > 0);
56
+ --current;
57
+ callback.Exit(current, streams[current].Get());
58
+
59
+ if (++streams[current]) break;
60
+
61
+ UTIL_THROW_IF(order != current + 1, FormatLoadException, "Detected n-gram without matching suffix");
62
+
63
+ order = current;
64
+ if (!order) return;
65
+ }
66
+ }
67
+ }
68
+
69
+ } // namespaces
70
+
71
+ #endif // LM_COMMON_JOINT_ORDER_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/model_buffer.cc ADDED
@@ -0,0 +1,144 @@
1
+ #include "model_buffer.hh"
2
+
3
+ #include "compare.hh"
4
+ #include "../state.hh"
5
+ #include "../weights.hh"
6
+ #include "../../util/exception.hh"
7
+ #include "../../util/file_stream.hh"
8
+ #include "../../util/file.hh"
9
+ #include "../../util/file_piece.hh"
10
+ #include "../../util/stream/io.hh"
11
+ #include "../../util/stream/multi_stream.hh"
12
+
13
+ #include <boost/lexical_cast.hpp>
14
+
15
+ #include <numeric>
16
+
17
+ namespace lm {
18
+
19
+ namespace {
20
+ const char kMetadataHeader[] = "KenLM intermediate binary file";
21
+ } // namespace
22
+
23
+ ModelBuffer::ModelBuffer(StringPiece file_base, bool keep_buffer, bool output_q)
24
+ : file_base_(file_base.data(), file_base.size()), keep_buffer_(keep_buffer), output_q_(output_q),
25
+ vocab_file_(keep_buffer ? util::CreateOrThrow((file_base_ + ".vocab").c_str()) : util::MakeTemp(file_base_)) {}
26
+
27
+ ModelBuffer::ModelBuffer(StringPiece file_base)
28
+ : file_base_(file_base.data(), file_base.size()), keep_buffer_(false) {
29
+ const std::string full_name = file_base_ + ".kenlm_intermediate";
30
+ util::FilePiece in(full_name.c_str());
31
+ StringPiece token = in.ReadLine();
32
+ UTIL_THROW_IF2(token != kMetadataHeader, "File " << full_name << " begins with \"" << token << "\" not " << kMetadataHeader);
33
+
34
+ token = in.ReadDelimited();
35
+ UTIL_THROW_IF2(token != "Counts", "Expected Counts, got \"" << token << "\" in " << full_name);
36
+ char got;
37
+ while ((got = in.get()) == ' ') {
38
+ counts_.push_back(in.ReadULong());
39
+ }
40
+ UTIL_THROW_IF2(got != '\n', "Expected newline at end of counts.");
41
+
42
+ token = in.ReadDelimited();
43
+ UTIL_THROW_IF2(token != "Payload", "Expected Payload, got \"" << token << "\" in " << full_name);
44
+ token = in.ReadDelimited();
45
+ if (token == "q") {
46
+ output_q_ = true;
47
+ } else if (token == "pb") {
48
+ output_q_ = false;
49
+ } else {
50
+ UTIL_THROW(util::Exception, "Unknown payload " << token);
51
+ }
52
+
53
+ vocab_file_.reset(util::OpenReadOrThrow((file_base_ + ".vocab").c_str()));
54
+
55
+ files_.Init(counts_.size());
56
+ for (unsigned long i = 0; i < counts_.size(); ++i) {
57
+ files_.push_back(util::OpenReadOrThrow((file_base_ + '.' + boost::lexical_cast<std::string>(i + 1)).c_str()));
58
+ }
59
+ }
60
+
61
+ void ModelBuffer::Sink(util::stream::Chains &chains, const std::vector<uint64_t> &counts) {
62
+ counts_ = counts;
63
+ // Open files.
64
+ files_.Init(chains.size());
65
+ for (std::size_t i = 0; i < chains.size(); ++i) {
66
+ if (keep_buffer_) {
67
+ files_.push_back(util::CreateOrThrow(
68
+ (file_base_ + '.' + boost::lexical_cast<std::string>(i + 1)).c_str()
69
+ ));
70
+ } else {
71
+ files_.push_back(util::MakeTemp(file_base_));
72
+ }
73
+ chains[i] >> util::stream::Write(files_.back().get());
74
+ }
75
+ if (keep_buffer_) {
76
+ util::scoped_fd metadata(util::CreateOrThrow((file_base_ + ".kenlm_intermediate").c_str()));
77
+ util::FileStream meta(metadata.get(), 200);
78
+ meta << kMetadataHeader << "\nCounts";
79
+ for (std::vector<uint64_t>::const_iterator i = counts_.begin(); i != counts_.end(); ++i) {
80
+ meta << ' ' << *i;
81
+ }
82
+ meta << "\nPayload " << (output_q_ ? "q" : "pb") << '\n';
83
+ }
84
+ }
85
+
86
+ void ModelBuffer::Source(util::stream::Chains &chains) {
87
+ assert(chains.size() <= files_.size());
88
+ for (unsigned int i = 0; i < chains.size(); ++i) {
89
+ chains[i].SetProgressTarget(util::SizeOrThrow(files_[i].get()));
90
+ chains[i] >> util::stream::PRead(files_[i].get());
91
+ }
92
+ }
93
+
94
+ void ModelBuffer::Source(std::size_t order_minus_1, util::stream::Chain &chain) {
95
+ chain >> util::stream::PRead(files_[order_minus_1].get());
96
+ }
97
+
98
+ float ModelBuffer::SlowQuery(const ngram::State &context, WordIndex word, ngram::State &out) const {
99
+ // Lookup unigram.
100
+ ProbBackoff value;
101
+ util::ErsatzPRead(RawFile(0), &value, sizeof(value), word * (sizeof(WordIndex) + sizeof(value)) + sizeof(WordIndex));
102
+ out.backoff[0] = value.backoff;
103
+ out.words[0] = word;
104
+ out.length = 1;
105
+
106
+ std::vector<WordIndex> buffer(context.length + 1), query(context.length + 1);
107
+ std::reverse_copy(context.words, context.words + context.length, query.begin());
108
+ query[context.length] = word;
109
+
110
+ for (std::size_t order = 2; order <= query.size() && order <= context.length + 1; ++order) {
111
+ SuffixOrder less(order);
112
+ const WordIndex *key = &*query.end() - order;
113
+ int file = RawFile(order - 1);
114
+ std::size_t length = order * sizeof(WordIndex) + sizeof(ProbBackoff);
115
+ // TODO: cache file size?
116
+ uint64_t begin = 0, end = util::SizeOrThrow(file) / length;
117
+ while (true) {
118
+ if (end <= begin) {
119
+ // Did not find for order.
120
+ return std::accumulate(context.backoff + out.length - 1, context.backoff + context.length, value.prob);
121
+ }
122
+ uint64_t test = begin + (end - begin) / 2;
123
+ util::ErsatzPRead(file, &*buffer.begin(), sizeof(WordIndex) * order, test * length);
124
+
125
+ if (less(&*buffer.begin(), key)) {
126
+ begin = test + 1;
127
+ } else if (less(key, &*buffer.begin())) {
128
+ end = test;
129
+ } else {
130
+ // Found it.
131
+ util::ErsatzPRead(file, &value, sizeof(value), test * length + sizeof(WordIndex) * order);
132
+ if (order != Order()) {
133
+ out.length = order;
134
+ out.backoff[order - 1] = value.backoff;
135
+ out.words[order - 1] = *key;
136
+ }
137
+ break;
138
+ }
139
+ }
140
+ }
141
+ return value.prob;
142
+ }
143
+
144
+ } // namespace
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/model_buffer.hh ADDED
@@ -0,0 +1,74 @@
1
+ #ifndef LM_COMMON_MODEL_BUFFER_H
2
+ #define LM_COMMON_MODEL_BUFFER_H
3
+
4
+ /* Format with separate files in suffix order. Each file contains
5
+ * n-grams of the same order.
6
+ */
7
+ #include "../word_index.hh"
8
+ #include "../../util/file.hh"
9
+ #include "../../util/fixed_array.hh"
10
+ #include "../../util/string_piece.hh"
11
+
12
+ #include <string>
13
+ #include <vector>
14
+
15
+ namespace util { namespace stream {
16
+ class Chains;
17
+ class Chain;
18
+ }} // namespaces
19
+
20
+ namespace lm {
21
+
22
+ namespace ngram { class State; }
23
+
24
+ class ModelBuffer {
25
+ public:
26
+ // Construct for writing. Must call VocabFile() and fill it with null-delimited vocab words.
27
+ ModelBuffer(StringPiece file_base, bool keep_buffer, bool output_q);
28
+
29
+ // Load from file.
30
+ explicit ModelBuffer(StringPiece file_base);
31
+
32
+ // Must call VocabFile and populate before calling this function.
33
+ void Sink(util::stream::Chains &chains, const std::vector<uint64_t> &counts);
34
+
35
+ // Read files and write to the given chains. If fewer chains are provided,
36
+ // only do the lower orders.
37
+ void Source(util::stream::Chains &chains);
38
+
39
+ void Source(std::size_t order_minus_1, util::stream::Chain &chain);
40
+
41
+ // The order of the n-gram model that is associated with the model buffer.
42
+ std::size_t Order() const { return counts_.size(); }
43
+ // Requires Sink or load from file.
44
+ const std::vector<uint64_t> &Counts() const {
45
+ assert(!counts_.empty());
46
+ return counts_;
47
+ }
48
+
49
+ int VocabFile() const { return vocab_file_.get(); }
50
+
51
+ int RawFile(std::size_t order_minus_1) const {
52
+ return files_[order_minus_1].get();
53
+ }
54
+
55
+ bool Keep() const { return keep_buffer_; }
56
+
57
+ // Slowly execute a language model query with binary search.
58
+ // This is used by interpolation to gather tuning probabilities rather than
59
+ // scanning the files.
60
+ float SlowQuery(const ngram::State &context, WordIndex word, ngram::State &out) const;
61
+
62
+ private:
63
+ const std::string file_base_;
64
+ const bool keep_buffer_;
65
+ bool output_q_;
66
+ std::vector<uint64_t> counts_;
67
+
68
+ util::scoped_fd vocab_file_;
69
+ util::FixedArray<util::scoped_fd> files_;
70
+ };
71
+
72
+ } // namespace lm
73
+
74
+ #endif // LM_COMMON_MODEL_BUFFER_H
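
As a rough sketch of the read-side API above, the load-from-file constructor plus Order() and Counts() is enough to inspect an intermediate set. The base name "lm_base" and the include path (compiling from the kenlm source root) are assumptions; the files lm_base.kenlm_intermediate, lm_base.vocab and lm_base.1 … lm_base.N must already exist.

#include "lm/common/model_buffer.hh"

#include <iostream>

int main() {
  // Read-only constructor: parses lm_base.kenlm_intermediate and opens the per-order files.
  lm::ModelBuffer buffer("lm_base");
  std::cout << "order " << buffer.Order() << '\n';
  for (std::size_t i = 0; i < buffer.Counts().size(); ++i) {
    std::cout << (i + 1) << "-grams: " << buffer.Counts()[i] << '\n';
  }
  return 0;
}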
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/model_buffer_test.cc ADDED
@@ -0,0 +1,52 @@
1
+ #include "model_buffer.hh"
2
+ #include "../model.hh"
3
+ #include "../state.hh"
4
+
5
+ #define BOOST_TEST_MODULE ModelBufferTest
6
+ #include <boost/test/unit_test.hpp>
7
+
8
+ namespace lm { namespace {
9
+
10
+ BOOST_AUTO_TEST_CASE(Query) {
11
+ std::string dir("test_data");
12
+ if (boost::unit_test::framework::master_test_suite().argc == 2) {
13
+ dir = boost::unit_test::framework::master_test_suite().argv[1];
14
+ }
15
+ ngram::Model ref((dir + "/toy0.arpa").c_str());
16
+ #if BYTE_ORDER == LITTLE_ENDIAN
17
+ std::string endian = "little";
18
+ #elif BYTE_ORDER == BIG_ENDIAN
19
+ std::string endian = "big";
20
+ #else
21
+ #error "Unsupported byte order."
22
+ #endif
23
+
24
+ ModelBuffer test(dir + "/" + endian + "endian/toy0");
25
+ ngram::State ref_state, test_state;
26
+ WordIndex a = ref.GetVocabulary().Index("a");
27
+ BOOST_CHECK_CLOSE(
28
+ ref.FullScore(ref.BeginSentenceState(), a, ref_state).prob,
29
+ test.SlowQuery(ref.BeginSentenceState(), a, test_state),
30
+ 0.001);
31
+ BOOST_CHECK_EQUAL((unsigned)ref_state.length, (unsigned)test_state.length);
32
+ BOOST_CHECK_EQUAL(ref_state.words[0], test_state.words[0]);
33
+ BOOST_CHECK_EQUAL(ref_state.backoff[0], test_state.backoff[0]);
34
+ BOOST_CHECK(ref_state == test_state);
35
+
36
+ ngram::State ref_state2, test_state2;
37
+ WordIndex b = ref.GetVocabulary().Index("b");
38
+ BOOST_CHECK_CLOSE(
39
+ ref.FullScore(ref_state, b, ref_state2).prob,
40
+ test.SlowQuery(test_state, b, test_state2),
41
+ 0.001);
42
+ BOOST_CHECK(ref_state2 == test_state2);
43
+ BOOST_CHECK_EQUAL(ref_state2.backoff[0], test_state2.backoff[0]);
44
+
45
+ BOOST_CHECK_CLOSE(
46
+ ref.FullScore(ref_state2, 0, ref_state).prob,
47
+ test.SlowQuery(test_state2, 0, test_state),
48
+ 0.001);
49
+ // The reference does state minimization but this doesn't.
50
+ }
51
+
52
+ }} // namespaces
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/ngram.hh ADDED
@@ -0,0 +1,77 @@
1
+ #ifndef LM_COMMON_NGRAM_H
2
+ #define LM_COMMON_NGRAM_H
3
+
4
+ #include "../weights.hh"
5
+ #include "../word_index.hh"
6
+
7
+ #include <cstddef>
8
+ #include <cassert>
9
+ #include <stdint.h>
10
+ #include <cstring>
11
+
12
+ namespace lm {
13
+
14
+ class NGramHeader {
15
+ public:
16
+ NGramHeader(void *begin, std::size_t order)
17
+ : begin_(static_cast<WordIndex*>(begin)), end_(begin_ + order) {}
18
+
19
+ NGramHeader() : begin_(NULL), end_(NULL) {}
20
+
21
+ const uint8_t *Base() const { return reinterpret_cast<const uint8_t*>(begin_); }
22
+ uint8_t *Base() { return reinterpret_cast<uint8_t*>(begin_); }
23
+
24
+ void ReBase(void *to) {
25
+ std::size_t difference = end_ - begin_;
26
+ begin_ = reinterpret_cast<WordIndex*>(to);
27
+ end_ = begin_ + difference;
28
+ }
29
+
30
+ // These are for the vocab index.
31
+ // Lower-case in deference to STL.
32
+ const WordIndex *begin() const { return begin_; }
33
+ WordIndex *begin() { return begin_; }
34
+ const WordIndex *end() const { return end_; }
35
+ WordIndex *end() { return end_; }
36
+
37
+ std::size_t size() const { return end_ - begin_; }
38
+ std::size_t Order() const { return end_ - begin_; }
39
+
40
+ private:
41
+ WordIndex *begin_, *end_;
42
+ };
43
+
44
+ template <class PayloadT> class NGram : public NGramHeader {
45
+ public:
46
+ typedef PayloadT Payload;
47
+
48
+ NGram() : NGramHeader(NULL, 0) {}
49
+
50
+ NGram(void *begin, std::size_t order) : NGramHeader(begin, order) {}
51
+
52
+ // Would do operator++ but that can get confusing for a stream.
53
+ void NextInMemory() {
54
+ ReBase(&Value() + 1);
55
+ }
56
+
57
+ static std::size_t TotalSize(std::size_t order) {
58
+ return order * sizeof(WordIndex) + sizeof(Payload);
59
+ }
60
+ std::size_t TotalSize() const {
61
+ // Compiler should optimize this.
62
+ return TotalSize(Order());
63
+ }
64
+
65
+ static std::size_t OrderFromSize(std::size_t size) {
66
+ std::size_t ret = (size - sizeof(Payload)) / sizeof(WordIndex);
67
+ assert(size == TotalSize(ret));
68
+ return ret;
69
+ }
70
+
71
+ const Payload &Value() const { return *reinterpret_cast<const Payload *>(end()); }
72
+ Payload &Value() { return *reinterpret_cast<Payload *>(end()); }
73
+ };
74
+
75
+ } // namespace lm
76
+
77
+ #endif // LM_COMMON_NGRAM_H
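
A quick sketch of the record layout NGram assumes: `order` WordIndex ids followed immediately by the payload, so TotalSize(order) bytes per record. The WordIndex and ProbBackoff definitions below are stand-ins mirroring lm/word_index.hh and lm/weights.hh.

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uint32_t WordIndex;                         // stand-in for lm/word_index.hh
struct ProbBackoff { float prob; float backoff; };  // mirrors lm/weights.hh

int main() {
  const std::size_t order = 3;
  // Same arithmetic as NGram::TotalSize(order).
  const std::size_t record = order * sizeof(WordIndex) + sizeof(ProbBackoff);
  std::printf("trigram record: %zu bytes (%zu for words, %zu for payload)\n",
              record, order * sizeof(WordIndex), sizeof(ProbBackoff));
  return 0;
}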
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/ngram_stream.hh ADDED
@@ -0,0 +1,65 @@
1
+ #ifndef LM_BUILDER_NGRAM_STREAM_H
2
+ #define LM_BUILDER_NGRAM_STREAM_H
3
+
4
+ #include "ngram.hh"
5
+ #include "../../util/stream/chain.hh"
6
+ #include "../../util/stream/multi_stream.hh"
7
+ #include "../../util/stream/stream.hh"
8
+
9
+ #include <cstddef>
10
+
11
+ namespace lm {
12
+
13
+ template <class Proxy> class ProxyStream {
14
+ public:
15
+ // Make an invalid stream.
16
+ ProxyStream() {}
17
+
18
+ explicit ProxyStream(const util::stream::ChainPosition &position, const Proxy &proxy = Proxy())
19
+ : proxy_(proxy), stream_(position) {
20
+ proxy_.ReBase(stream_.Get());
21
+ }
22
+
23
+ Proxy &operator*() { return proxy_; }
24
+ const Proxy &operator*() const { return proxy_; }
25
+
26
+ Proxy *operator->() { return &proxy_; }
27
+ const Proxy *operator->() const { return &proxy_; }
28
+
29
+ void *Get() { return stream_.Get(); }
30
+ const void *Get() const { return stream_.Get(); }
31
+
32
+ operator bool() const { return stream_; }
33
+ bool operator!() const { return !stream_; }
34
+ void Poison() { stream_.Poison(); }
35
+
36
+ ProxyStream<Proxy> &operator++() {
37
+ ++stream_;
38
+ proxy_.ReBase(stream_.Get());
39
+ return *this;
40
+ }
41
+
42
+ private:
43
+ Proxy proxy_;
44
+ util::stream::Stream stream_;
45
+ };
46
+
47
+ template <class Payload> class NGramStream : public ProxyStream<NGram<Payload> > {
48
+ public:
49
+ // Make an invalid stream.
50
+ NGramStream() {}
51
+
52
+ explicit NGramStream(const util::stream::ChainPosition &position) :
53
+ ProxyStream<NGram<Payload> >(position, NGram<Payload>(NULL, NGram<Payload>::OrderFromSize(position.GetChain().EntrySize()))) {}
54
+ };
55
+
56
+ template <class Payload> class NGramStreams : public util::stream::GenericStreams<NGramStream<Payload> > {
57
+ private:
58
+ typedef util::stream::GenericStreams<NGramStream<Payload> > P;
59
+ public:
60
+ NGramStreams() : P() {}
61
+ NGramStreams(const util::stream::ChainPositions &positions) : P(positions) {}
62
+ };
63
+
64
+ } // namespace
65
+ #endif // LM_BUILDER_NGRAM_STREAM_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/print.cc ADDED
@@ -0,0 +1,62 @@
1
+ #include "print.hh"
2
+
3
+ #include "ngram_stream.hh"
4
+ #include "../../util/file_stream.hh"
5
+ #include "../../util/file.hh"
6
+ #include "../../util/mmap.hh"
7
+ #include "../../util/scoped.hh"
8
+
9
+ #include <sstream>
10
+ #include <cstring>
11
+
12
+ namespace lm {
13
+
14
+ VocabReconstitute::VocabReconstitute(int fd) {
15
+ uint64_t size = util::SizeOrThrow(fd);
16
+ util::MapRead(util::POPULATE_OR_READ, fd, 0, size, memory_);
17
+ const char *const start = static_cast<const char*>(memory_.get());
18
+ const char *i;
19
+ for (i = start; i != start + size; i += strlen(i) + 1) {
20
+ map_.push_back(i);
21
+ }
22
+ // Last one for LookupPiece.
23
+ map_.push_back(i);
24
+ }
25
+
26
+ namespace {
27
+ template <class Payload> void PrintLead(const VocabReconstitute &vocab, ProxyStream<Payload> &stream, util::FileStream &out) {
28
+ out << stream->Value().prob << '\t' << vocab.Lookup(*stream->begin());
29
+ for (const WordIndex *i = stream->begin() + 1; i != stream->end(); ++i) {
30
+ out << ' ' << vocab.Lookup(*i);
31
+ }
32
+ }
33
+ } // namespace
34
+
35
+ void PrintARPA::Run(const util::stream::ChainPositions &positions) {
36
+ VocabReconstitute vocab(vocab_fd_);
37
+ util::FileStream out(out_fd_);
38
+ out << "\\data\\\n";
39
+ for (size_t i = 0; i < positions.size(); ++i) {
40
+ out << "ngram " << (i+1) << '=' << counts_[i] << '\n';
41
+ }
42
+ out << '\n';
43
+
44
+ for (unsigned order = 1; order < positions.size(); ++order) {
45
+ out << "\\" << order << "-grams:" << '\n';
46
+ for (ProxyStream<NGram<ProbBackoff> > stream(positions[order - 1], NGram<ProbBackoff>(NULL, order)); stream; ++stream) {
47
+ PrintLead(vocab, stream, out);
48
+ out << '\t' << stream->Value().backoff << '\n';
49
+ }
50
+ out << '\n';
51
+ }
52
+
53
+ out << "\\" << positions.size() << "-grams:" << '\n';
54
+ for (ProxyStream<NGram<Prob> > stream(positions.back(), NGram<Prob>(NULL, positions.size())); stream; ++stream) {
55
+ PrintLead(vocab, stream, out);
56
+ out << '\n';
57
+ }
58
+ out << '\n';
59
+ out << "\\end\\\n";
60
+ }
61
+
62
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/print.hh ADDED
@@ -0,0 +1,58 @@
1
+ #ifndef LM_COMMON_PRINT_H
2
+ #define LM_COMMON_PRINT_H
3
+
4
+ #include "../word_index.hh"
5
+ #include "../../util/mmap.hh"
6
+ #include "../../util/string_piece.hh"
7
+
8
+ #include <cassert>
9
+ #include <vector>
10
+
11
+ namespace util { namespace stream { class ChainPositions; }}
12
+
13
+ // Warning: PrintARPA routines read all unigrams before all bigrams before all
14
+ // trigrams etc. So if other parts of the chain move jointly, you'll have to
15
+ // buffer.
16
+
17
+ namespace lm {
18
+
19
+ class VocabReconstitute {
20
+ public:
21
+ // fd must be alive for life of this object; does not take ownership.
22
+ explicit VocabReconstitute(int fd);
23
+
24
+ const char *Lookup(WordIndex index) const {
25
+ assert(index < map_.size() - 1);
26
+ return map_[index];
27
+ }
28
+
29
+ StringPiece LookupPiece(WordIndex index) const {
30
+ return StringPiece(map_[index], map_[index + 1] - 1 - map_[index]);
31
+ }
32
+
33
+ std::size_t Size() const {
34
+ // There's an extra entry to support StringPiece lengths.
35
+ return map_.size() - 1;
36
+ }
37
+
38
+ private:
39
+ util::scoped_memory memory_;
40
+ std::vector<const char*> map_;
41
+ };
42
+
43
+ class PrintARPA {
44
+ public:
45
+ // Does not take ownership of vocab_fd or out_fd.
46
+ explicit PrintARPA(int vocab_fd, int out_fd, const std::vector<uint64_t> &counts)
47
+ : vocab_fd_(vocab_fd), out_fd_(out_fd), counts_(counts) {}
48
+
49
+ void Run(const util::stream::ChainPositions &positions);
50
+
51
+ private:
52
+ int vocab_fd_;
53
+ int out_fd_;
54
+ std::vector<uint64_t> counts_;
55
+ };
56
+
57
+ } // namespace lm
58
+ #endif // LM_COMMON_PRINT_H
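
VocabReconstitute maps a word id to its string by scanning a null-delimited vocab file once, keeping one pointer per word plus a sentinel so LookupPiece can compute lengths. A standalone sketch of that layout on a hypothetical two-word vocab (no kenlm headers needed):

#include <cstring>
#include <iostream>
#include <vector>

int main() {
  // Two words stored back to back, each terminated by '\0', ids assigned in file order.
  const char memory[] = "<unk>\0<s>\0";
  std::vector<const char*> map;
  for (const char *i = memory; i != memory + sizeof(memory) - 1; i += std::strlen(i) + 1) {
    map.push_back(i);
  }
  map.push_back(memory + sizeof(memory) - 1);  // sentinel entry so piece lengths can be computed
  std::cout << map[1] << " has length " << (map[2] - 1 - map[1]) << '\n';  // "<s> has length 3"
  return 0;
}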
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/renumber.cc ADDED
@@ -0,0 +1,17 @@
1
+ #include "renumber.hh"
2
+ #include "ngram.hh"
3
+
4
+ #include "../../util/stream/stream.hh"
5
+
6
+ namespace lm {
7
+
8
+ void Renumber::Run(const util::stream::ChainPosition &position) {
9
+ for (util::stream::Stream stream(position); stream; ++stream) {
10
+ NGramHeader gram(stream.Get(), order_);
11
+ for (WordIndex *w = gram.begin(); w != gram.end(); ++w) {
12
+ *w = new_numbers_[*w];
13
+ }
14
+ }
15
+ }
16
+
17
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/renumber.hh ADDED
@@ -0,0 +1,30 @@
1
+ /* Map vocab ids. This is useful to merge independently collected counts or
2
+ * change the vocab ids to the order used by the trie.
3
+ */
4
+ #ifndef LM_COMMON_RENUMBER_H
5
+ #define LM_COMMON_RENUMBER_H
6
+
7
+ #include "../word_index.hh"
8
+
9
+ #include <cstddef>
10
+
11
+ namespace util { namespace stream { class ChainPosition; }}
12
+
13
+ namespace lm {
14
+
15
+ class Renumber {
16
+ public:
17
+ // Assumes the array is large enough to map all words and stays alive while
18
+ // the thread is active.
19
+ Renumber(const WordIndex *new_numbers, std::size_t order)
20
+ : new_numbers_(new_numbers), order_(order) {}
21
+
22
+ void Run(const util::stream::ChainPosition &position);
23
+
24
+ private:
25
+ const WordIndex *new_numbers_;
26
+ std::size_t order_;
27
+ };
28
+
29
+ } // namespace lm
30
+ #endif // LM_COMMON_RENUMBER_H
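
Renumber::Run walks every n-gram in a stream and rewrites each word id through the supplied array. The effect on a single record reduces to the loop below (a toy sketch with made-up ids and a stand-in WordIndex typedef):

#include <iostream>

typedef unsigned int WordIndex;  // stand-in for lm/word_index.hh

int main() {
  const WordIndex new_numbers[] = {0, 3, 1, 2};  // old id -> new id (made-up mapping)
  WordIndex gram[] = {1, 2, 3};                  // word ids of one trigram record
  for (WordIndex *w = gram; w != gram + 3; ++w) *w = new_numbers[*w];
  std::cout << gram[0] << ' ' << gram[1] << ' ' << gram[2] << '\n';  // prints "3 1 2"
  return 0;
}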
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/size_option.cc ADDED
@@ -0,0 +1,24 @@
1
+ #include <boost/program_options.hpp>
2
+ #include "../../util/usage.hh"
3
+
4
+ namespace lm {
5
+
6
+ namespace {
7
+ class SizeNotify {
8
+ public:
9
+ explicit SizeNotify(std::size_t &out) : behind_(out) {}
10
+
11
+ void operator()(const std::string &from) {
12
+ behind_ = util::ParseSize(from);
13
+ }
14
+
15
+ private:
16
+ std::size_t &behind_;
17
+ };
18
+ }
19
+
20
+ boost::program_options::typed_value<std::string> *SizeOption(std::size_t &to, const char *default_value) {
21
+ return boost::program_options::value<std::string>()->notifier(SizeNotify(to))->default_value(default_value);
22
+ }
23
+
24
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/size_option.hh ADDED
@@ -0,0 +1,11 @@
1
+ #include <boost/program_options.hpp>
2
+
3
+ #include <cstddef>
4
+ #include <string>
5
+
6
+ namespace lm {
7
+
8
+ // Create a boost program option for data sizes. This parses sizes like 1T and 10k.
9
+ boost::program_options::typed_value<std::string> *SizeOption(std::size_t &to, const char *default_value);
10
+
11
+ } // namespace lm
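
A hedged usage sketch for SizeOption: it plugs straight into boost::program_options as the value semantic, and the attached notifier fills the std::size_t via util::ParseSize once notify() runs (suffixes such as k, M, G, T). The option name "memory,S" and the "1G" default are illustrative only.

#include "lm/common/size_option.hh"

#include <boost/program_options.hpp>
#include <iostream>

int main(int argc, char *argv[]) {
  std::size_t memory = 0;
  namespace po = boost::program_options;
  po::options_description options("Example");
  options.add_options()
    ("memory,S", lm::SizeOption(memory, "1G"), "Sorting memory");  // illustrative option
  po::variables_map vm;
  po::store(po::parse_command_line(argc, argv, options), vm);
  po::notify(vm);  // the notifier runs here and fills `memory`
  std::cout << memory << " bytes\n";
  return 0;
}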
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/special.hh ADDED
@@ -0,0 +1,27 @@
1
+ #ifndef LM_COMMON_SPECIAL_H
2
+ #define LM_COMMON_SPECIAL_H
3
+
4
+ #include "../word_index.hh"
5
+
6
+ namespace lm {
7
+
8
+ class SpecialVocab {
9
+ public:
10
+ SpecialVocab(WordIndex bos, WordIndex eos) : bos_(bos), eos_(eos) {}
11
+
12
+ bool IsSpecial(WordIndex word) const {
13
+ return word == kUNK || word == bos_ || word == eos_;
14
+ }
15
+
16
+ WordIndex UNK() const { return kUNK; }
17
+ WordIndex BOS() const { return bos_; }
18
+ WordIndex EOS() const { return eos_; }
19
+
20
+ private:
21
+ WordIndex bos_;
22
+ WordIndex eos_;
23
+ };
24
+
25
+ } // namespace lm
26
+
27
+ #endif // LM_COMMON_SPECIAL_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/toy0.arpa ADDED
@@ -0,0 +1,31 @@
1
+ \data\
2
+ ngram 1=5
3
+ ngram 2=7
4
+ ngram 3=7
5
+
6
+ \1-grams:
7
+ -0.90309 <unk> 0
8
+ 0 <s> -0.30103
9
+ -0.46943438 a -0.30103
10
+ -0.5720968 </s> 0
11
+ -0.5720968 b -0.30103
12
+
13
+ \2-grams:
14
+ -0.37712017 <s> a -0.30103
15
+ -0.37712017 a a -0.30103
16
+ -0.2984526 b a -0.30103
17
+ -0.58682007 a </s> 0
18
+ -0.5220179 b </s> 0
19
+ -0.41574955 <s> b -0.30103
20
+ -0.58682007 a b -0.30103
21
+
22
+ \3-grams:
23
+ -0.14885087 <s> a a
24
+ -0.33741078 b a a
25
+ -0.124077894 <s> b a
26
+ -0.2997394 a b a
27
+ -0.42082912 b a </s>
28
+ -0.397617 a b </s>
29
+ -0.20102891 a a b
30
+
31
+ \end\
cc-multilingual-main/cc_net/third_party/kenlm/lm/common/test_data/toy1.arpa ADDED
@@ -0,0 +1,31 @@
1
+ \data\
2
+ ngram 1=6
3
+ ngram 2=7
4
+ ngram 3=6
5
+
6
+ \1-grams:
7
+ -1 <unk> 0
8
+ 0 <s> -0.30103
9
+ -0.6146491 a -0.30103
10
+ -0.6146491 </s> 0
11
+ -0.7659168 c -0.30103
12
+ -0.6146491 b -0.30103
13
+
14
+ \2-grams:
15
+ -0.4301247 <s> a -0.30103
16
+ -0.4301247 a a -0.30103
17
+ -0.20660876 c </s> 0
18
+ -0.5404639 b </s> 0
19
+ -0.4740302 <s> c -0.30103
20
+ -0.4301247 a b -0.30103
21
+ -0.3422159 b b -0.47712123
22
+
23
+ \3-grams:
24
+ -0.1638568 <s> a a
25
+ -0.09113217 <s> c </s>
26
+ -0.7462621 b b </s>
27
+ -0.1638568 a a b
28
+ -0.13823806 a b b
29
+ -0.13375957 b b b
30
+
31
+ \end\
cc-multilingual-main/cc_net/third_party/kenlm/lm/config.cc ADDED
@@ -0,0 +1,30 @@
1
+ #include "config.hh"
2
+
3
+ #include <iostream>
4
+
5
+ namespace lm {
6
+ namespace ngram {
7
+
8
+ Config::Config() :
9
+ show_progress(true),
10
+ messages(&std::cerr),
11
+ enumerate_vocab(NULL),
12
+ unknown_missing(COMPLAIN),
13
+ sentence_marker_missing(THROW_UP),
14
+ positive_log_probability(THROW_UP),
15
+ unknown_missing_logprob(-100.0),
16
+ probing_multiplier(1.5),
17
+ building_memory(1073741824ULL), // 1 GB
18
+ temporary_directory_prefix(""),
19
+ arpa_complain(ALL),
20
+ write_mmap(NULL),
21
+ write_method(WRITE_AFTER),
22
+ include_vocab(true),
23
+ rest_function(REST_MAX),
24
+ prob_bits(8),
25
+ backoff_bits(8),
26
+ pointer_bhiksha_bits(22),
27
+ load_method(util::POPULATE_OR_READ) {}
28
+
29
+ } // namespace ngram
30
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/config.hh ADDED
@@ -0,0 +1,124 @@
1
+ #ifndef LM_CONFIG_H
2
+ #define LM_CONFIG_H
3
+
4
+ #include "lm_exception.hh"
5
+ #include "../util/mmap.hh"
6
+
7
+ #include <iosfwd>
8
+ #include <string>
9
+ #include <vector>
10
+
11
+ /* Configuration for ngram model. Separate header to reduce pollution. */
12
+
13
+ namespace lm {
14
+
15
+ class EnumerateVocab;
16
+
17
+ namespace ngram {
18
+
19
+ struct Config {
20
+ // EFFECTIVE FOR BOTH ARPA AND BINARY READS
21
+
22
+ // (default true) print progress bar to messages
23
+ bool show_progress;
24
+
25
+ // Where to log messages including the progress bar. Set to NULL for
26
+ // silence.
27
+ std::ostream *messages;
28
+
29
+ std::ostream *ProgressMessages() const {
30
+ return show_progress ? messages : 0;
31
+ }
32
+
33
+ // This will be called with every string in the vocabulary by the
34
+ // constructor; it need only exist for the lifetime of the constructor.
35
+ // See enumerate_vocab.hh for more detail. Config does not take ownership;
36
+ // just delete/let it go out of scope after the constructor exits.
37
+ EnumerateVocab *enumerate_vocab;
38
+
39
+
40
+ // ONLY EFFECTIVE WHEN READING ARPA
41
+
42
+ // What to do when <unk> isn't in the provided model.
43
+ WarningAction unknown_missing;
44
+ // What to do when <s> or </s> is missing from the model.
45
+ // If THROW_UP, the exception will be of type util::SpecialWordMissingException.
46
+ WarningAction sentence_marker_missing;
47
+
48
+ // What to do with a positive log probability. For COMPLAIN and SILENT, map
49
+ // to 0.
50
+ WarningAction positive_log_probability;
51
+
52
+ // The probability to substitute for <unk> if it's missing from the model.
53
+ // No effect if the model has <unk> or unknown_missing == THROW_UP.
54
+ float unknown_missing_logprob;
55
+
56
+ // Size multiplier for probing hash table. Must be > 1. Space is linear in
57
+ // this. Time is probing_multiplier / (probing_multiplier - 1). No effect
58
+ // for sorted variant.
59
+ // If you find yourself setting this to a low number, consider using the
60
+ // TrieModel which has lower memory consumption.
61
+ float probing_multiplier;
62
+
63
+ // Amount of memory to use for building. The actual memory usage will be
64
+ // higher since this just sets sort buffer size. Only applies to trie
65
+ // models.
66
+ std::size_t building_memory;
67
+
68
+ // Template for temporary directory appropriate for passing to mkdtemp.
69
+ // The characters XXXXXX are appended before passing to mkdtemp. Only
70
+ // applies to trie. If empty, defaults to write_mmap. If that's NULL,
71
+ // defaults to input file name.
72
+ std::string temporary_directory_prefix;
73
+
74
+ // Level of complaining to do when loading from ARPA instead of binary format.
75
+ enum ARPALoadComplain {ALL, EXPENSIVE, NONE};
76
+ ARPALoadComplain arpa_complain;
77
+
78
+ // While loading an ARPA file, also write out this binary format file. Set
79
+ // to NULL to disable.
80
+ const char *write_mmap;
81
+
82
+ enum WriteMethod {
83
+ WRITE_MMAP, // Map the file directly.
84
+ WRITE_AFTER // Write after we're done.
85
+ };
86
+ WriteMethod write_method;
87
+
88
+ // Include the vocab in the binary file? Only effective if write_mmap != NULL.
89
+ bool include_vocab;
90
+
91
+
92
+ // Left rest options. Only used when the model includes rest costs.
93
+ enum RestFunction {
94
+ REST_MAX, // Maximum of any score to the left
95
+ REST_LOWER, // Use lower-order files given below.
96
+ };
97
+ RestFunction rest_function;
98
+ // Only used for REST_LOWER.
99
+ std::vector<std::string> rest_lower_files;
100
+
101
+
102
+ // Quantization options. Only effective for QuantTrieModel. One value is
103
+ // reserved for each of prob and backoff, so 2^bits - 1 buckets will be used
104
+ // to quantize (and one of the remaining backoffs will be 0).
105
+ uint8_t prob_bits, backoff_bits;
106
+
107
+ // Bhiksha compression (simple form). Only works with trie.
108
+ uint8_t pointer_bhiksha_bits;
109
+
110
+
111
+ // ONLY EFFECTIVE WHEN READING BINARY
112
+
113
+ // How to get the giant array into memory: lazy mmap, populate, read etc.
114
+ // See util/mmap.hh for details of MapMethod.
115
+ util::LoadMethod load_method;
116
+
117
+
118
+ // Set defaults.
119
+ Config();
120
+ };
121
+
122
+ } /* namespace ngram */ } /* namespace lm */
123
+
124
+ #endif // LM_CONFIG_H
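
A minimal sketch of the ARPA-to-binary path these fields steer. TrieModel comes from lm/model.hh; the file names are placeholders. Setting write_mmap makes the constructor also dump the binary file, and building_memory sizes the sort buffer used while building the trie.

#include "lm/model.hh"

int main() {
  lm::ngram::Config config;
  config.write_mmap = "out.binary";                     // also write a binary file while reading the ARPA
  config.write_method = lm::ngram::Config::WRITE_MMAP;  // map the output file directly
  config.building_memory = 1ULL << 30;                  // 1 GB sort buffer; trie building only
  lm::ngram::TrieModel model("in.arpa", config);        // placeholder input file
  return 0;
}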
cc-multilingual-main/cc_net/third_party/kenlm/lm/enumerate_vocab.hh ADDED
@@ -0,0 +1,28 @@
1
+ #ifndef LM_ENUMERATE_VOCAB_H
2
+ #define LM_ENUMERATE_VOCAB_H
3
+
4
+ #include "word_index.hh"
5
+ #include "../util/string_piece.hh"
6
+
7
+ namespace lm {
8
+
9
+ /* If you need the actual strings in the vocabulary, inherit from this class
10
+ * and implement Add. Then put a pointer in Config.enumerate_vocab; it does
11
+ * not take ownership. Add is called once per vocab word. index starts at 0
12
+ * and increases by 1 each time. This is only used by the Model constructor;
13
+ * the pointer is not retained by the class.
14
+ */
15
+ class EnumerateVocab {
16
+ public:
17
+ virtual ~EnumerateVocab() {}
18
+
19
+ virtual void Add(WordIndex index, const StringPiece &str) = 0;
20
+
21
+ protected:
22
+ EnumerateVocab() {}
23
+ };
24
+
25
+ } // namespace lm
26
+
27
+ #endif // LM_ENUMERATE_VOCAB_H
28
+
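
A sketch of the callback described above: derive from EnumerateVocab, implement Add, and point Config::enumerate_vocab at the instance before constructing a model. The class name and ARPA file name are placeholders.

#include "lm/enumerate_vocab.hh"
#include "lm/model.hh"

#include <iostream>

class PrintVocab : public lm::EnumerateVocab {
 public:
  void Add(lm::WordIndex index, const StringPiece &str) {
    std::cout << index << ' ' << str << '\n';  // called once per word; index counts up from 0
  }
};

int main() {
  PrintVocab enumerate;
  lm::ngram::Config config;
  config.enumerate_vocab = &enumerate;                 // Config does not take ownership
  lm::ngram::ProbingModel model("file.arpa", config);  // placeholder model file
  return 0;
}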
cc-multilingual-main/cc_net/third_party/kenlm/lm/facade.hh ADDED
@@ -0,0 +1,73 @@
1
+ #ifndef LM_FACADE_H
2
+ #define LM_FACADE_H
3
+
4
+ #include "virtual_interface.hh"
5
+ #include "../util/string_piece.hh"
6
+
7
+ #include <string>
8
+
9
+ namespace lm {
10
+ namespace base {
11
+
12
+ // Common model interface that depends on knowing the specific classes.
13
+ // Curiously recurring template pattern.
14
+ template <class Child, class StateT, class VocabularyT> class ModelFacade : public Model {
15
+ public:
16
+ typedef StateT State;
17
+ typedef VocabularyT Vocabulary;
18
+
19
+ /* Translate from void* to State */
20
+ FullScoreReturn BaseFullScore(const void *in_state, const WordIndex new_word, void *out_state) const {
21
+ return static_cast<const Child*>(this)->FullScore(
22
+ *reinterpret_cast<const State*>(in_state),
23
+ new_word,
24
+ *reinterpret_cast<State*>(out_state));
25
+ }
26
+
27
+ FullScoreReturn BaseFullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, void *out_state) const {
28
+ return static_cast<const Child*>(this)->FullScoreForgotState(
29
+ context_rbegin,
30
+ context_rend,
31
+ new_word,
32
+ *reinterpret_cast<State*>(out_state));
33
+ }
34
+
35
+ // Default Score function calls FullScore. Model can override this.
36
+ float Score(const State &in_state, const WordIndex new_word, State &out_state) const {
37
+ return static_cast<const Child*>(this)->FullScore(in_state, new_word, out_state).prob;
38
+ }
39
+
40
+ float BaseScore(const void *in_state, const WordIndex new_word, void *out_state) const {
41
+ return static_cast<const Child*>(this)->Score(
42
+ *reinterpret_cast<const State*>(in_state),
43
+ new_word,
44
+ *reinterpret_cast<State*>(out_state));
45
+ }
46
+
47
+ const State &BeginSentenceState() const { return begin_sentence_; }
48
+ const State &NullContextState() const { return null_context_; }
49
+ const Vocabulary &GetVocabulary() const { return *static_cast<const Vocabulary*>(&BaseVocabulary()); }
50
+
51
+ protected:
52
+ ModelFacade() : Model(sizeof(State)) {}
53
+
54
+ virtual ~ModelFacade() {}
55
+
56
+ // begin_sentence and null_context can disappear after. vocab should stay.
57
+ void Init(const State &begin_sentence, const State &null_context, const Vocabulary &vocab, unsigned char order) {
58
+ begin_sentence_ = begin_sentence;
59
+ null_context_ = null_context;
60
+ begin_sentence_memory_ = &begin_sentence_;
61
+ null_context_memory_ = &null_context_;
62
+ base_vocab_ = &vocab;
63
+ order_ = order;
64
+ }
65
+
66
+ private:
67
+ State begin_sentence_, null_context_;
68
+ };
69
+
70
+ } // namespace base
71
+ } // namespace lm
72
+
73
+ #endif // LM_FACADE_H
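
ModelFacade relies on the curiously recurring template pattern: the Base* entry points cast `this` to the derived model type and call its methods, so the hot path avoids virtual dispatch. A generic, self-contained illustration of that pattern (not the actual kenlm classes):

#include <iostream>

// Toy facade: the base knows the derived type at compile time.
template <class Child> class Facade {
 public:
  float BaseScore(int word) const {
    return static_cast<const Child*>(this)->Score(word);  // no virtual call needed
  }
};

class ToyModel : public Facade<ToyModel> {
 public:
  float Score(int word) const { return -0.5f * static_cast<float>(word); }
};

int main() {
  ToyModel model;
  std::cout << model.BaseScore(3) << '\n';  // prints -1.5
  return 0;
}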
cc-multilingual-main/cc_net/third_party/kenlm/lm/fragment_main.cc ADDED
@@ -0,0 +1,37 @@
1
+ #include "binary_format.hh"
2
+ #include "model.hh"
3
+ #include "left.hh"
4
+ #include "../util/tokenize_piece.hh"
5
+
6
+ template <class Model> void Query(const char *name) {
7
+ Model model(name);
8
+ std::string line;
9
+ lm::ngram::ChartState ignored;
10
+ while (getline(std::cin, line)) {
11
+ lm::ngram::RuleScore<Model> scorer(model, ignored);
12
+ for (util::TokenIter<util::SingleCharacter, true> i(line, ' '); i; ++i) {
13
+ scorer.Terminal(model.GetVocabulary().Index(*i));
14
+ }
15
+ std::cout << scorer.Finish() << '\n';
16
+ }
17
+ }
18
+
19
+ int main(int argc, char *argv[]) {
20
+ if (argc != 2) {
21
+ std::cerr << "Expected model file name." << std::endl;
22
+ return 1;
23
+ }
24
+ const char *name = argv[1];
25
+ lm::ngram::ModelType model_type = lm::ngram::PROBING;
26
+ lm::ngram::RecognizeBinary(name, model_type);
27
+ switch (model_type) {
28
+ case lm::ngram::PROBING:
29
+ Query<lm::ngram::ProbingModel>(name);
30
+ break;
31
+ case lm::ngram::REST_PROBING:
32
+ Query<lm::ngram::RestProbingModel>(name);
33
+ break;
34
+ default:
35
+ std::cerr << "Model type not supported yet." << std::endl;
36
+ }
37
+ }
cc-multilingual-main/cc_net/third_party/kenlm/lm/kenlm_benchmark_main.cc ADDED
@@ -0,0 +1,232 @@
1
+ #include "model.hh"
2
+ #include "../util/file_stream.hh"
3
+ #include "../util/file.hh"
4
+ #include "../util/file_piece.hh"
5
+ #include "../util/usage.hh"
6
+ #include "../util/thread_pool.hh"
7
+
8
+ #include <boost/range/iterator_range.hpp>
9
+ #include <boost/program_options.hpp>
10
+
11
+ #include <iostream>
12
+
13
+ #include <stdint.h>
14
+
15
+ namespace {
16
+
17
+ template <class Model, class Width> void ConvertToBytes(const Model &model, int fd_in) {
18
+ util::FilePiece in(fd_in);
19
+ util::FileStream out(1);
20
+ Width width;
21
+ StringPiece word;
22
+ const Width end_sentence = (Width)model.GetVocabulary().EndSentence();
23
+ while (true) {
24
+ while (in.ReadWordSameLine(word)) {
25
+ width = (Width)model.GetVocabulary().Index(word);
26
+ out.write(&width, sizeof(Width));
27
+ }
28
+ if (!in.ReadLineOrEOF(word)) break;
29
+ out.write(&end_sentence, sizeof(Width));
30
+ }
31
+ }
32
+
33
+ template <class Model, class Width> class Worker {
34
+ public:
35
+ explicit Worker(const Model &model, double &add_total) : model_(model), total_(0.0), add_total_(add_total) {}
36
+
37
+ // Destructors happen in the main thread, so there's no race for add_total_.
38
+ ~Worker() { add_total_ += total_; }
39
+
40
+ typedef boost::iterator_range<Width *> Request;
41
+
42
+ void operator()(Request request) {
43
+ const lm::ngram::State *const begin_state = &model_.BeginSentenceState();
44
+ const lm::ngram::State *next_state = begin_state;
45
+ const Width kEOS = model_.GetVocabulary().EndSentence();
46
+ float sum = 0.0;
47
+ // Do even stuff first.
48
+ const Width *even_end = request.begin() + (request.size() & ~1);
49
+ // Alternating states
50
+ const Width *i;
51
+ for (i = request.begin(); i != even_end;) {
52
+ sum += model_.FullScore(*next_state, *i, state_[1]).prob;
53
+ next_state = (*i++ == kEOS) ? begin_state : &state_[1];
54
+ sum += model_.FullScore(*next_state, *i, state_[0]).prob;
55
+ next_state = (*i++ == kEOS) ? begin_state : &state_[0];
56
+ }
57
+ // Odd corner case.
58
+ if (request.size() & 1) {
59
+ sum += model_.FullScore(*next_state, *i, state_[2]).prob;
60
+ next_state = (*i++ == kEOS) ? begin_state : &state_[2];
61
+ }
62
+ total_ += sum;
63
+ }
64
+
65
+ private:
66
+ const Model &model_;
67
+ double total_;
68
+ double &add_total_;
69
+
70
+ lm::ngram::State state_[3];
71
+ };
72
+
73
+ struct Config {
74
+ int fd_in;
75
+ std::size_t threads;
76
+ std::size_t buf_per_thread;
77
+ bool query;
78
+ };
79
+
80
+ template <class Model, class Width> void QueryFromBytes(const Model &model, const Config &config) {
81
+ util::FileStream out(1);
82
+ out << "Threads: " << config.threads << '\n';
83
+ const Width kEOS = model.GetVocabulary().EndSentence();
84
+ double total = 0.0;
85
+ // Number of items to have in queue in addition to everything in flight.
86
+ const std::size_t kInQueue = 3;
87
+ std::size_t total_queue = config.threads + kInQueue;
88
+ std::vector<Width> backing(config.buf_per_thread * total_queue);
89
+ double loaded_cpu;
90
+ double loaded_wall;
91
+ uint64_t queries = 0;
92
+ {
93
+ util::RecyclingThreadPool<Worker<Model, Width> > pool(total_queue, config.threads, Worker<Model, Width>(model, total), boost::iterator_range<Width *>((Width*)0, (Width*)0));
94
+
95
+ for (std::size_t i = 0; i < total_queue; ++i) {
96
+ pool.PopulateRecycling(boost::iterator_range<Width *>(&backing[i * config.buf_per_thread], &backing[i * config.buf_per_thread]));
97
+ }
98
+
99
+ loaded_cpu = util::CPUTime();
100
+ loaded_wall = util::WallTime();
101
+ out << "To Load, CPU: " << loaded_cpu << " Wall: " << loaded_wall << '\n';
102
+ boost::iterator_range<Width *> overhang((Width*)0, (Width*)0);
103
+ while (true) {
104
+ boost::iterator_range<Width *> buf = pool.Consume();
105
+ std::memmove(buf.begin(), overhang.begin(), overhang.size() * sizeof(Width));
106
+ std::size_t got = util::ReadOrEOF(config.fd_in, buf.begin() + overhang.size(), (config.buf_per_thread - overhang.size()) * sizeof(Width));
107
+ if (!got && overhang.empty()) break;
108
+ UTIL_THROW_IF2(got % sizeof(Width), "File size not a multiple of vocab id size " << sizeof(Width));
109
+ Width *read_end = buf.begin() + overhang.size() + got / sizeof(Width);
110
+ Width *last_eos;
111
+ for (last_eos = read_end - 1; ; --last_eos) {
112
+ UTIL_THROW_IF2(last_eos <= buf.begin(), "Encountered a sentence longer than the buffer size of " << config.buf_per_thread << " words. Rerun with increased buffer size. TODO: adaptable buffer");
113
+ if (*last_eos == kEOS) break;
114
+ }
115
+ buf = boost::iterator_range<Width*>(buf.begin(), last_eos + 1);
116
+ overhang = boost::iterator_range<Width*>(last_eos + 1, read_end);
117
+ queries += buf.size();
118
+ pool.Produce(buf);
119
+ }
120
+ } // Drain pool.
121
+
122
+ double after_cpu = util::CPUTime();
123
+ double after_wall = util::WallTime();
124
+ util::FileStream(2, 70) << "Probability sum: " << total << '\n';
125
+ out << "Queries: " << queries << '\n';
126
+ out << "Excluding load, CPU: " << (after_cpu - loaded_cpu) << " Wall: " << (after_wall - loaded_wall) << '\n';
127
+ double cpu_per_entry = ((after_cpu - loaded_cpu) / static_cast<double>(queries));
128
+ double wall_per_entry = ((after_wall - loaded_wall) / static_cast<double>(queries));
129
+ out << "Seconds per query excluding load, CPU: " << cpu_per_entry << " Wall: " << wall_per_entry << '\n';
130
+ out << "Queries per second excluding load, CPU: " << (1.0/cpu_per_entry) << " Wall: " << (1.0/wall_per_entry) << '\n';
131
+ out << "RSSMax: " << util::RSSMax() << '\n';
132
+ }
133
+
134
+ template <class Model, class Width> void DispatchFunction(const Model &model, const Config &config) {
135
+ if (config.query) {
136
+ QueryFromBytes<Model, Width>(model, config);
137
+ } else {
138
+ ConvertToBytes<Model, Width>(model, config.fd_in);
139
+ }
140
+ }
141
+
142
+ template <class Model> void DispatchWidth(const char *file, const Config &config) {
143
+ lm::ngram::Config model_config;
144
+ model_config.load_method = util::READ;
145
+ Model model(file, model_config);
146
+ uint64_t bound = model.GetVocabulary().Bound();
147
+ if (bound <= 256) {
148
+ DispatchFunction<Model, uint8_t>(model, config);
149
+ } else if (bound <= 65536) {
150
+ DispatchFunction<Model, uint16_t>(model, config);
151
+ } else if (bound <= (1ULL << 32)) {
152
+ DispatchFunction<Model, uint32_t>(model, config);
153
+ } else {
154
+ DispatchFunction<Model, uint64_t>(model, config);
155
+ }
156
+ }
157
+
158
+ void Dispatch(const char *file, const Config &config) {
159
+ using namespace lm::ngram;
160
+ lm::ngram::ModelType model_type;
161
+ if (lm::ngram::RecognizeBinary(file, model_type)) {
162
+ switch(model_type) {
163
+ case PROBING:
164
+ DispatchWidth<lm::ngram::ProbingModel>(file, config);
165
+ break;
166
+ case REST_PROBING:
167
+ DispatchWidth<lm::ngram::RestProbingModel>(file, config);
168
+ break;
169
+ case TRIE:
170
+ DispatchWidth<lm::ngram::TrieModel>(file, config);
171
+ break;
172
+ case QUANT_TRIE:
173
+ DispatchWidth<lm::ngram::QuantTrieModel>(file, config);
174
+ break;
175
+ case ARRAY_TRIE:
176
+ DispatchWidth<lm::ngram::ArrayTrieModel>(file, config);
177
+ break;
178
+ case QUANT_ARRAY_TRIE:
179
+ DispatchWidth<lm::ngram::QuantArrayTrieModel>(file, config);
180
+ break;
181
+ default:
182
+ UTIL_THROW(util::Exception, "Unrecognized kenlm model type " << model_type);
183
+ }
184
+ } else {
185
+ UTIL_THROW(util::Exception, "Binarize before running benchmarks.");
186
+ }
187
+ }
188
+
189
+ } // namespace
190
+
191
+ int main(int argc, char *argv[]) {
192
+ try {
193
+ Config config;
194
+ config.fd_in = 0;
195
+ std::string model;
196
+ namespace po = boost::program_options;
197
+ po::options_description options("Benchmark options");
198
+ options.add_options()
199
+ ("help,h", po::bool_switch(), "Show help message")
200
+ ("model,m", po::value<std::string>(&model)->required(), "Model to query or convert vocab ids")
201
+ ("threads,t", po::value<std::size_t>(&config.threads)->default_value(boost::thread::hardware_concurrency()), "Threads to use (querying only; TODO vocab conversion)")
202
+ ("buffer,b", po::value<std::size_t>(&config.buf_per_thread)->default_value(4096), "Number of words to buffer per task.")
203
+ ("vocab,v", po::bool_switch(), "Convert strings to vocab ids")
204
+ ("query,q", po::bool_switch(), "Query from vocab ids");
205
+ po::variables_map vm;
206
+ po::store(po::parse_command_line(argc, argv, options), vm);
207
+ if (argc == 1 || vm["help"].as<bool>()) {
208
+ std::cerr << "Benchmark program for KenLM. Intended usage:\n"
209
+ << "#Convert text to vocabulary ids offline. These ids are tied to a model.\n"
210
+ << argv[0] << " -v -m $model <$text >$text.vocab\n"
211
+ << "#Ensure files are in RAM.\n"
212
+ << "cat $text.vocab $model >/dev/null\n"
213
+ << "#Timed query against the model.\n"
214
+ << argv[0] << " -q -m $model <$text.vocab\n";
215
+ return 0;
216
+ }
217
+ po::notify(vm);
218
+ if (!(vm["vocab"].as<bool>() ^ vm["query"].as<bool>())) {
219
+ std::cerr << "Specify exactly one of -v (vocab conversion) or -q (query)." << std::endl;
220
+ return 0;
221
+ }
222
+ config.query = vm["query"].as<bool>();
223
+ if (!config.threads) {
224
+ std::cerr << "Specify a non-zero number of threads with -t." << std::endl;
225
+ }
226
+ Dispatch(model.c_str(), config);
227
+ } catch (const std::exception &e) {
228
+ std::cerr << e.what() << std::endl;
229
+ return 1;
230
+ }
231
+ return 0;
232
+ }
cc-multilingual-main/cc_net/third_party/kenlm/lm/left.hh ADDED
@@ -0,0 +1,216 @@
1
+ /* Efficient left and right language model state for sentence fragments.
2
+ * Intended usage:
3
+ * Store ChartState with every chart entry.
4
+ * To do a rule application:
5
+ * 1. Make a ChartState object for your new entry.
6
+ * 2. Construct RuleScore.
7
+ * 3. Going from left to right, call Terminal or NonTerminal.
8
+ * For terminals, just pass the vocab id.
9
+ * For non-terminals, pass that non-terminal's ChartState.
10
+ * If your decoder expects scores inclusive of subtree scores (i.e. you
11
+ * label entries with the highest-scoring path), pass the non-terminal's
12
+ * score as prob.
13
+ * If your decoder expects relative scores and will walk the chart later,
14
+ * pass prob = 0.0.
15
+ * In other words, the only effect of prob is that it gets added to the
16
+ * returned log probability.
17
+ * 4. Call Finish. It returns the log probability.
18
+ *
19
+ * There's a couple more details:
20
+ * Do not pass <s> to Terminal as it is formally not a word in the sentence,
21
+ * only context. Instead, call BeginSentence. If called, it should be the
22
+ * first call after RuleScore is constructed (since <s> is always the
23
+ * leftmost).
24
+ *
25
+ * If the leftmost RHS is a non-terminal, it's faster to call BeginNonTerminal.
26
+ *
27
+ * Hashing and sorting comparison operators are provided. All state objects
28
+ * are POD. If you intend to use memcmp on raw state objects, you must call
29
+ * ZeroRemaining first, as the value of array entries beyond length is
30
+ * otherwise undefined.
31
+ *
32
+ * Usage is of course not limited to chart decoding. Anything that generates
33
+ * sentence fragments missing left context could benefit. For example, a
34
+ * phrase-based decoder could pre-score phrases, storing ChartState with each
35
+ * phrase, even if hypotheses are generated left-to-right.
36
+ */
37
+
38
+ #ifndef LM_LEFT_H
39
+ #define LM_LEFT_H
40
+
41
+ #include "max_order.hh"
42
+ #include "state.hh"
43
+ #include "return.hh"
44
+
45
+ #include "../util/murmur_hash.hh"
46
+
47
+ #include <algorithm>
48
+
49
+ namespace lm {
50
+ namespace ngram {
51
+
52
+ template <class M> class RuleScore {
53
+ public:
54
+ explicit RuleScore(const M &model, ChartState &out) : model_(model), out_(&out), left_done_(false), prob_(0.0) {
55
+ out.left.length = 0;
56
+ out.right.length = 0;
57
+ }
58
+
59
+ void BeginSentence() {
60
+ out_->right = model_.BeginSentenceState();
61
+ // out_->left is empty.
62
+ left_done_ = true;
63
+ }
64
+
65
+ void Terminal(WordIndex word) {
66
+ State copy(out_->right);
67
+ FullScoreReturn ret(model_.FullScore(copy, word, out_->right));
68
+ if (left_done_) { prob_ += ret.prob; return; }
69
+ if (ret.independent_left) {
70
+ prob_ += ret.prob;
71
+ left_done_ = true;
72
+ return;
73
+ }
74
+ out_->left.pointers[out_->left.length++] = ret.extend_left;
75
+ prob_ += ret.rest;
76
+ if (out_->right.length != copy.length + 1)
77
+ left_done_ = true;
78
+ }
79
+
80
+ // Faster version of NonTerminal for the case where the rule begins with a non-terminal.
81
+ void BeginNonTerminal(const ChartState &in, float prob = 0.0) {
82
+ prob_ = prob;
83
+ *out_ = in;
84
+ left_done_ = in.left.full;
85
+ }
86
+
87
+ void NonTerminal(const ChartState &in, float prob = 0.0) {
88
+ prob_ += prob;
89
+
90
+ if (!in.left.length) {
91
+ if (in.left.full) {
92
+ for (const float *i = out_->right.backoff; i < out_->right.backoff + out_->right.length; ++i) prob_ += *i;
93
+ left_done_ = true;
94
+ out_->right = in.right;
95
+ }
96
+ return;
97
+ }
98
+
99
+ if (!out_->right.length) {
100
+ out_->right = in.right;
101
+ if (left_done_) {
102
+ prob_ += model_.UnRest(in.left.pointers, in.left.pointers + in.left.length, 1);
103
+ return;
104
+ }
105
+ if (out_->left.length) {
106
+ left_done_ = true;
107
+ } else {
108
+ out_->left = in.left;
109
+ left_done_ = in.left.full;
110
+ }
111
+ return;
112
+ }
113
+
114
+ float backoffs[KENLM_MAX_ORDER - 1], backoffs2[KENLM_MAX_ORDER - 1];
115
+ float *back = backoffs, *back2 = backoffs2;
116
+ unsigned char next_use = out_->right.length;
117
+
118
+ // First word
119
+ if (ExtendLeft(in, next_use, 1, out_->right.backoff, back)) return;
120
+
121
+ // Words after the first, so extending a bigram to begin with
122
+ for (unsigned char extend_length = 2; extend_length <= in.left.length; ++extend_length) {
123
+ if (ExtendLeft(in, next_use, extend_length, back, back2)) return;
124
+ std::swap(back, back2);
125
+ }
126
+
127
+ if (in.left.full) {
128
+ for (const float *i = back; i != back + next_use; ++i) prob_ += *i;
129
+ left_done_ = true;
130
+ out_->right = in.right;
131
+ return;
132
+ }
133
+
134
+ // Right state was minimized, so it's already independent of the new words to the left.
135
+ if (in.right.length < in.left.length) {
136
+ out_->right = in.right;
137
+ return;
138
+ }
139
+
140
+ // Shift existing words down.
141
+ for (WordIndex *i = out_->right.words + next_use - 1; i >= out_->right.words; --i) {
142
+ *(i + in.right.length) = *i;
143
+ }
144
+ // Add words from in.right.
145
+ std::copy(in.right.words, in.right.words + in.right.length, out_->right.words);
146
+ // Assemble backoff composed of the existing state's backoff followed by the new state's backoff.
147
+ std::copy(in.right.backoff, in.right.backoff + in.right.length, out_->right.backoff);
148
+ std::copy(back, back + next_use, out_->right.backoff + in.right.length);
149
+ out_->right.length = in.right.length + next_use;
150
+ }
151
+
152
+ float Finish() {
153
+ // An N-1-gram might extend left and right, but we should still set full to true because it's an N-1-gram.
154
+ out_->left.full = left_done_ || (out_->left.length == model_.Order() - 1);
155
+ return prob_;
156
+ }
157
+
158
+ void Reset() {
159
+ prob_ = 0.0;
160
+ left_done_ = false;
161
+ out_->left.length = 0;
162
+ out_->right.length = 0;
163
+ }
164
+ void Reset(ChartState &replacement) {
165
+ out_ = &replacement;
166
+ Reset();
167
+ }
168
+
169
+ private:
170
+ bool ExtendLeft(const ChartState &in, unsigned char &next_use, unsigned char extend_length, const float *back_in, float *back_out) {
171
+ ProcessRet(model_.ExtendLeft(
172
+ out_->right.words, out_->right.words + next_use, // Words to extend into
173
+ back_in, // Backoffs to use
174
+ in.left.pointers[extend_length - 1], extend_length, // Words to be extended
175
+ back_out, // Backoffs for the next score
176
+ next_use)); // Length of n-gram to use in next scoring.
177
+ if (next_use != out_->right.length) {
178
+ left_done_ = true;
179
+ if (!next_use) {
180
+ // Early exit.
181
+ out_->right = in.right;
182
+ prob_ += model_.UnRest(in.left.pointers + extend_length, in.left.pointers + in.left.length, extend_length + 1);
183
+ return true;
184
+ }
185
+ }
186
+ // Continue scoring.
187
+ return false;
188
+ }
189
+
190
+ void ProcessRet(const FullScoreReturn &ret) {
191
+ if (left_done_) {
192
+ prob_ += ret.prob;
193
+ return;
194
+ }
195
+ if (ret.independent_left) {
196
+ prob_ += ret.prob;
197
+ left_done_ = true;
198
+ return;
199
+ }
200
+ out_->left.pointers[out_->left.length++] = ret.extend_left;
201
+ prob_ += ret.rest;
202
+ }
203
+
204
+ const M &model_;
205
+
206
+ ChartState *out_;
207
+
208
+ bool left_done_;
209
+
210
+ float prob_;
211
+ };
212
+
213
+ } // namespace ngram
214
+ } // namespace lm
215
+
216
+ #endif // LM_LEFT_H
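The usage steps in the header comment of left.hh can be made concrete with a short sketch. This is illustrative only and not part of the commit: it assumes an external program that includes the headers as lm/left.hh and lm/model.hh, and a small ARPA file named test.arpa (the same placeholder name the tests below fall back to).

// Sketch: score the fragment "more loin" as a chart entry, then apply a rule
// "little X" whose right-hand side ends with that entry, following the steps
// listed in the header comment of left.hh. File name and include paths are
// assumptions for illustration.
#include "lm/left.hh"
#include "lm/model.hh"

#include <iostream>

int main() {
  lm::ngram::Model model("test.arpa");

  lm::ngram::ChartState more_loin;
  {
    lm::ngram::RuleScore<lm::ngram::Model> score(model, more_loin);
    score.Terminal(model.GetVocabulary().Index("more"));
    score.Terminal(model.GetVocabulary().Index("loin"));
    std::cout << "log10 p(more loin) = " << score.Finish() << '\n';
  }

  lm::ngram::ChartState little_more_loin;
  {
    lm::ngram::RuleScore<lm::ngram::Model> score(model, little_more_loin);
    score.Terminal(model.GetVocabulary().Index("little"));
    // Passing prob = 0.0 yields a relative score; pass the subtree's score
    // instead if chart entries should carry inclusive scores (see the header comment).
    score.NonTerminal(more_loin, 0.0);
    std::cout << "relative score for attaching little = " << score.Finish() << '\n';
  }
  return 0;
}

The left_test.cc file that follows exercises exactly this pattern (Short, Charge, and the Grow* functions) and checks it against plain left-to-right scoring.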
cc-multilingual-main/cc_net/third_party/kenlm/lm/left_test.cc ADDED
@@ -0,0 +1,397 @@
1
+ #include "left.hh"
2
+ #include "model.hh"
3
+
4
+ #include "../util/tokenize_piece.hh"
5
+
6
+ #include <vector>
7
+
8
+ #define BOOST_TEST_MODULE LeftTest
9
+ #include <boost/test/unit_test.hpp>
10
+ #include <boost/test/floating_point_comparison.hpp>
11
+
12
+ namespace lm {
13
+ namespace ngram {
14
+ namespace {
15
+
16
+ #define Term(word) score.Terminal(m.GetVocabulary().Index(word));
17
+ #define VCheck(word, value) BOOST_CHECK_EQUAL(m.GetVocabulary().Index(word), value);
18
+
19
+ // Apparently some Boost versions use templates and are pretty strict about types matching.
20
+ #define SLOPPY_CHECK_CLOSE(ref, value, tol) BOOST_CHECK_CLOSE(static_cast<double>(ref), static_cast<double>(value), static_cast<double>(tol));
21
+
22
+ template <class M> void Short(const M &m) {
23
+ ChartState base;
24
+ {
25
+ RuleScore<M> score(m, base);
26
+ Term("more");
27
+ Term("loin");
28
+ SLOPPY_CHECK_CLOSE(-1.206319 - 0.3561665, score.Finish(), 0.001);
29
+ }
30
+ BOOST_CHECK(base.left.full);
31
+ BOOST_CHECK_EQUAL(2, base.left.length);
32
+ BOOST_CHECK_EQUAL(1, base.right.length);
33
+ VCheck("loin", base.right.words[0]);
34
+
35
+ ChartState more_left;
36
+ {
37
+ RuleScore<M> score(m, more_left);
38
+ Term("little");
39
+ score.NonTerminal(base, -1.206319 - 0.3561665);
40
+ // p(little more loin | null context)
41
+ SLOPPY_CHECK_CLOSE(-1.56538, score.Finish(), 0.001);
42
+ }
43
+ BOOST_CHECK_EQUAL(3, more_left.left.length);
44
+ BOOST_CHECK_EQUAL(1, more_left.right.length);
45
+ VCheck("loin", more_left.right.words[0]);
46
+ BOOST_CHECK(more_left.left.full);
47
+
48
+ ChartState shorter;
49
+ {
50
+ RuleScore<M> score(m, shorter);
51
+ Term("to");
52
+ score.NonTerminal(base, -1.206319 - 0.3561665);
53
+ SLOPPY_CHECK_CLOSE(-0.30103 - 1.687872 - 1.206319 - 0.3561665, score.Finish(), 0.01);
54
+ }
55
+ BOOST_CHECK_EQUAL(1, shorter.left.length);
56
+ BOOST_CHECK_EQUAL(1, shorter.right.length);
57
+ VCheck("loin", shorter.right.words[0]);
58
+ BOOST_CHECK(shorter.left.full);
59
+ }
60
+
61
+ template <class M> void Charge(const M &m) {
62
+ ChartState base;
63
+ {
64
+ RuleScore<M> score(m, base);
65
+ Term("on");
66
+ Term("more");
67
+ SLOPPY_CHECK_CLOSE(-1.509559 -0.4771212 -1.206319, score.Finish(), 0.001);
68
+ }
69
+ BOOST_CHECK_EQUAL(1, base.left.length);
70
+ BOOST_CHECK_EQUAL(1, base.right.length);
71
+ VCheck("more", base.right.words[0]);
72
+ BOOST_CHECK(base.left.full);
73
+
74
+ ChartState extend;
75
+ {
76
+ RuleScore<M> score(m, extend);
77
+ Term("looking");
78
+ score.NonTerminal(base, -1.509559 -0.4771212 -1.206319);
79
+ SLOPPY_CHECK_CLOSE(-3.91039, score.Finish(), 0.001);
80
+ }
81
+ BOOST_CHECK_EQUAL(2, extend.left.length);
82
+ BOOST_CHECK_EQUAL(1, extend.right.length);
83
+ VCheck("more", extend.right.words[0]);
84
+ BOOST_CHECK(extend.left.full);
85
+
86
+ ChartState tobos;
87
+ {
88
+ RuleScore<M> score(m, tobos);
89
+ score.BeginSentence();
90
+ score.NonTerminal(extend, -3.91039);
91
+ SLOPPY_CHECK_CLOSE(-3.471169, score.Finish(), 0.001);
92
+ }
93
+ BOOST_CHECK_EQUAL(0, tobos.left.length);
94
+ BOOST_CHECK_EQUAL(1, tobos.right.length);
95
+ }
96
+
97
+ template <class M> float LeftToRight(const M &m, const std::vector<WordIndex> &words, bool begin_sentence = false) {
98
+ float ret = 0.0;
99
+ State right = begin_sentence ? m.BeginSentenceState() : m.NullContextState();
100
+ for (std::vector<WordIndex>::const_iterator i = words.begin(); i != words.end(); ++i) {
101
+ State copy(right);
102
+ ret += m.Score(copy, *i, right);
103
+ }
104
+ return ret;
105
+ }
106
+
107
+ template <class M> float RightToLeft(const M &m, const std::vector<WordIndex> &words, bool begin_sentence = false) {
108
+ float ret = 0.0;
109
+ ChartState state;
110
+ state.left.length = 0;
111
+ state.right.length = 0;
112
+ state.left.full = false;
113
+ for (std::vector<WordIndex>::const_reverse_iterator i = words.rbegin(); i != words.rend(); ++i) {
114
+ ChartState copy(state);
115
+ RuleScore<M> score(m, state);
116
+ score.Terminal(*i);
117
+ score.NonTerminal(copy, ret);
118
+ ret = score.Finish();
119
+ }
120
+ if (begin_sentence) {
121
+ ChartState copy(state);
122
+ RuleScore<M> score(m, state);
123
+ score.BeginSentence();
124
+ score.NonTerminal(copy, ret);
125
+ ret = score.Finish();
126
+ }
127
+ return ret;
128
+ }
129
+
130
+ template <class M> float TreeMiddle(const M &m, const std::vector<WordIndex> &words, bool begin_sentence = false) {
131
+ std::vector<std::pair<ChartState, float> > states(words.size());
132
+ for (unsigned int i = 0; i < words.size(); ++i) {
133
+ RuleScore<M> score(m, states[i].first);
134
+ score.Terminal(words[i]);
135
+ states[i].second = score.Finish();
136
+ }
137
+ while (states.size() > 1) {
138
+ std::vector<std::pair<ChartState, float> > upper((states.size() + 1) / 2);
139
+ for (unsigned int i = 0; i < states.size() / 2; ++i) {
140
+ RuleScore<M> score(m, upper[i].first);
141
+ score.NonTerminal(states[i*2].first, states[i*2].second);
142
+ score.NonTerminal(states[i*2+1].first, states[i*2+1].second);
143
+ upper[i].second = score.Finish();
144
+ }
145
+ if (states.size() % 2) {
146
+ upper.back() = states.back();
147
+ }
148
+ std::swap(states, upper);
149
+ }
150
+
151
+ if (states.empty()) return 0.0;
152
+
153
+ if (begin_sentence) {
154
+ ChartState ignored;
155
+ RuleScore<M> score(m, ignored);
156
+ score.BeginSentence();
157
+ score.NonTerminal(states.front().first, states.front().second);
158
+ return score.Finish();
159
+ } else {
160
+ return states.front().second;
161
+ }
162
+
163
+ }
164
+
165
+ template <class M> void LookupVocab(const M &m, const StringPiece &str, std::vector<WordIndex> &out) {
166
+ out.clear();
167
+ for (util::TokenIter<util::SingleCharacter, true> i(str, ' '); i; ++i) {
168
+ out.push_back(m.GetVocabulary().Index(*i));
169
+ }
170
+ }
171
+
172
+ #define TEXT_TEST(str) \
173
+ LookupVocab(m, str, words); \
174
+ expect = LeftToRight(m, words, rest); \
175
+ SLOPPY_CHECK_CLOSE(expect, RightToLeft(m, words, rest), 0.001); \
176
+ SLOPPY_CHECK_CLOSE(expect, TreeMiddle(m, words, rest), 0.001); \
177
+
178
+ // Build sentences, or parts thereof, from right to left.
179
+ template <class M> void GrowBig(const M &m, bool rest = false) {
180
+ std::vector<WordIndex> words;
181
+ float expect;
182
+ TEXT_TEST("in biarritz watching considering looking . on a little more loin also would consider higher to look good unknown the screening foo bar , unknown however unknown </s>");
183
+ TEXT_TEST("on a little more loin also would consider higher to look good unknown the screening foo bar , unknown however unknown </s>");
184
+ TEXT_TEST("on a little more loin also would consider higher to look good");
185
+ TEXT_TEST("more loin also would consider higher to look good");
186
+ TEXT_TEST("more loin also would consider higher to look");
187
+ TEXT_TEST("also would consider higher to look");
188
+ TEXT_TEST("also would consider higher");
189
+ TEXT_TEST("would consider higher to look");
190
+ TEXT_TEST("consider higher to look");
191
+ TEXT_TEST("consider higher to");
192
+ TEXT_TEST("consider higher");
193
+ }
194
+
195
+ template <class M> void GrowSmall(const M &m, bool rest = false) {
196
+ std::vector<WordIndex> words;
197
+ float expect;
198
+ TEXT_TEST("in biarritz watching considering looking . </s>");
199
+ TEXT_TEST("in biarritz watching considering looking .");
200
+ TEXT_TEST("in biarritz");
201
+ }
202
+
203
+ template <class M> void AlsoWouldConsiderHigher(const M &m) {
204
+ ChartState also;
205
+ {
206
+ RuleScore<M> score(m, also);
207
+ score.Terminal(m.GetVocabulary().Index("also"));
208
+ SLOPPY_CHECK_CLOSE(-1.687872, score.Finish(), 0.001);
209
+ }
210
+ ChartState would;
211
+ {
212
+ RuleScore<M> score(m, would);
213
+ score.Terminal(m.GetVocabulary().Index("would"));
214
+ SLOPPY_CHECK_CLOSE(-1.687872, score.Finish(), 0.001);
215
+ }
216
+ ChartState combine_also_would;
217
+ {
218
+ RuleScore<M> score(m, combine_also_would);
219
+ score.NonTerminal(also, -1.687872);
220
+ score.NonTerminal(would, -1.687872);
221
+ SLOPPY_CHECK_CLOSE(-1.687872 - 2.0, score.Finish(), 0.001);
222
+ }
223
+ BOOST_CHECK_EQUAL(2, combine_also_would.right.length);
224
+
225
+ ChartState also_would;
226
+ {
227
+ RuleScore<M> score(m, also_would);
228
+ score.Terminal(m.GetVocabulary().Index("also"));
229
+ score.Terminal(m.GetVocabulary().Index("would"));
230
+ SLOPPY_CHECK_CLOSE(-1.687872 - 2.0, score.Finish(), 0.001);
231
+ }
232
+ BOOST_CHECK_EQUAL(2, also_would.right.length);
233
+
234
+ ChartState consider;
235
+ {
236
+ RuleScore<M> score(m, consider);
237
+ score.Terminal(m.GetVocabulary().Index("consider"));
238
+ SLOPPY_CHECK_CLOSE(-1.687872, score.Finish(), 0.001);
239
+ }
240
+ BOOST_CHECK_EQUAL(1, consider.left.length);
241
+ BOOST_CHECK_EQUAL(1, consider.right.length);
242
+ BOOST_CHECK(!consider.left.full);
243
+
244
+ ChartState higher;
245
+ float higher_score;
246
+ {
247
+ RuleScore<M> score(m, higher);
248
+ score.Terminal(m.GetVocabulary().Index("higher"));
249
+ higher_score = score.Finish();
250
+ }
251
+ SLOPPY_CHECK_CLOSE(-1.509559, higher_score, 0.001);
252
+ BOOST_CHECK_EQUAL(1, higher.left.length);
253
+ BOOST_CHECK_EQUAL(1, higher.right.length);
254
+ BOOST_CHECK(!higher.left.full);
255
+ VCheck("higher", higher.right.words[0]);
256
+ SLOPPY_CHECK_CLOSE(-0.30103, higher.right.backoff[0], 0.001);
257
+
258
+ ChartState consider_higher;
259
+ {
260
+ RuleScore<M> score(m, consider_higher);
261
+ score.NonTerminal(consider, -1.687872);
262
+ score.NonTerminal(higher, higher_score);
263
+ SLOPPY_CHECK_CLOSE(-1.509559 - 1.687872 - 0.30103, score.Finish(), 0.001);
264
+ }
265
+ BOOST_CHECK_EQUAL(2, consider_higher.left.length);
266
+ BOOST_CHECK(!consider_higher.left.full);
267
+
268
+ ChartState full;
269
+ {
270
+ RuleScore<M> score(m, full);
271
+ score.NonTerminal(combine_also_would, -1.687872 - 2.0);
272
+ score.NonTerminal(consider_higher, -1.509559 - 1.687872 - 0.30103);
273
+ SLOPPY_CHECK_CLOSE(-10.6879, score.Finish(), 0.001);
274
+ }
275
+ BOOST_CHECK_EQUAL(4, full.right.length);
276
+ }
277
+
278
+ #define CHECK_SCORE(str, val) \
279
+ { \
280
+ float got = val; \
281
+ std::vector<WordIndex> indices; \
282
+ LookupVocab(m, str, indices); \
283
+ SLOPPY_CHECK_CLOSE(LeftToRight(m, indices), got, 0.001); \
284
+ }
285
+
286
+ template <class M> void FullGrow(const M &m) {
287
+ std::vector<WordIndex> words;
288
+ LookupVocab(m, "in biarritz watching considering looking . </s>", words);
289
+
290
+ ChartState lexical[7];
291
+ float lexical_scores[7];
292
+ for (unsigned int i = 0; i < 7; ++i) {
293
+ RuleScore<M> score(m, lexical[i]);
294
+ score.Terminal(words[i]);
295
+ lexical_scores[i] = score.Finish();
296
+ }
297
+ CHECK_SCORE("in", lexical_scores[0]);
298
+ CHECK_SCORE("biarritz", lexical_scores[1]);
299
+ CHECK_SCORE("watching", lexical_scores[2]);
300
+ CHECK_SCORE("</s>", lexical_scores[6]);
301
+
302
+ ChartState l1[4];
303
+ float l1_scores[4];
304
+ {
305
+ RuleScore<M> score(m, l1[0]);
306
+ score.NonTerminal(lexical[0], lexical_scores[0]);
307
+ score.NonTerminal(lexical[1], lexical_scores[1]);
308
+ CHECK_SCORE("in biarritz", l1_scores[0] = score.Finish());
309
+ }
310
+ {
311
+ RuleScore<M> score(m, l1[1]);
312
+ score.NonTerminal(lexical[2], lexical_scores[2]);
313
+ score.NonTerminal(lexical[3], lexical_scores[3]);
314
+ CHECK_SCORE("watching considering", l1_scores[1] = score.Finish());
315
+ }
316
+ {
317
+ RuleScore<M> score(m, l1[2]);
318
+ score.NonTerminal(lexical[4], lexical_scores[4]);
319
+ score.NonTerminal(lexical[5], lexical_scores[5]);
320
+ CHECK_SCORE("looking .", l1_scores[2] = score.Finish());
321
+ }
322
+ BOOST_CHECK_EQUAL(l1[2].left.length, 1);
323
+ l1[3] = lexical[6];
324
+ l1_scores[3] = lexical_scores[6];
325
+
326
+ ChartState l2[2];
327
+ float l2_scores[2];
328
+ {
329
+ RuleScore<M> score(m, l2[0]);
330
+ score.NonTerminal(l1[0], l1_scores[0]);
331
+ score.NonTerminal(l1[1], l1_scores[1]);
332
+ CHECK_SCORE("in biarritz watching considering", l2_scores[0] = score.Finish());
333
+ }
334
+ {
335
+ RuleScore<M> score(m, l2[1]);
336
+ score.NonTerminal(l1[2], l1_scores[2]);
337
+ score.NonTerminal(l1[3], l1_scores[3]);
338
+ CHECK_SCORE("looking . </s>", l2_scores[1] = score.Finish());
339
+ }
340
+ BOOST_CHECK_EQUAL(l2[1].left.length, 1);
341
+ BOOST_CHECK(l2[1].left.full);
342
+
343
+ ChartState top;
344
+ {
345
+ RuleScore<M> score(m, top);
346
+ score.NonTerminal(l2[0], l2_scores[0]);
347
+ score.NonTerminal(l2[1], l2_scores[1]);
348
+ CHECK_SCORE("in biarritz watching considering looking . </s>", score.Finish());
349
+ }
350
+ }
351
+
352
+ const char *FileLocation() {
353
+ if (boost::unit_test::framework::master_test_suite().argc < 2) {
354
+ return "test.arpa";
355
+ }
356
+ return boost::unit_test::framework::master_test_suite().argv[1];
357
+ }
358
+
359
+ template <class M> void Everything() {
360
+ Config config;
361
+ config.messages = NULL;
362
+ M m(FileLocation(), config);
363
+
364
+ Short(m);
365
+ Charge(m);
366
+ GrowBig(m);
367
+ AlsoWouldConsiderHigher(m);
368
+ GrowSmall(m);
369
+ FullGrow(m);
370
+ }
371
+
372
+ BOOST_AUTO_TEST_CASE(ProbingAll) {
373
+ Everything<Model>();
374
+ }
375
+ BOOST_AUTO_TEST_CASE(TrieAll) {
376
+ Everything<TrieModel>();
377
+ }
378
+ BOOST_AUTO_TEST_CASE(QuantTrieAll) {
379
+ Everything<QuantTrieModel>();
380
+ }
381
+ BOOST_AUTO_TEST_CASE(ArrayQuantTrieAll) {
382
+ Everything<QuantArrayTrieModel>();
383
+ }
384
+ BOOST_AUTO_TEST_CASE(ArrayTrieAll) {
385
+ Everything<ArrayTrieModel>();
386
+ }
387
+
388
+ BOOST_AUTO_TEST_CASE(RestProbing) {
389
+ Config config;
390
+ config.messages = NULL;
391
+ RestProbingModel m(FileLocation(), config);
392
+ GrowBig(m, true);
393
+ }
394
+
395
+ } // namespace
396
+ } // namespace ngram
397
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/lm_exception.cc ADDED
@@ -0,0 +1,23 @@
1
+ #include "lm_exception.hh"
2
+
3
+ #include <cerrno>
4
+ #include <cstdio>
5
+
6
+ namespace lm {
7
+
8
+ ConfigException::ConfigException() throw() {}
9
+ ConfigException::~ConfigException() throw() {}
10
+
11
+ LoadException::LoadException() throw() {}
12
+ LoadException::~LoadException() throw() {}
13
+
14
+ FormatLoadException::FormatLoadException() throw() {}
15
+ FormatLoadException::~FormatLoadException() throw() {}
16
+
17
+ VocabLoadException::VocabLoadException() throw() {}
18
+ VocabLoadException::~VocabLoadException() throw() {}
19
+
20
+ SpecialWordMissingException::SpecialWordMissingException() throw() {}
21
+ SpecialWordMissingException::~SpecialWordMissingException() throw() {}
22
+
23
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/lm_exception.hh ADDED
@@ -0,0 +1,50 @@
1
+ #ifndef LM_LM_EXCEPTION_H
2
+ #define LM_LM_EXCEPTION_H
3
+
4
+ // Named to avoid conflict with util/exception.hh.
5
+
6
+ #include "../util/exception.hh"
7
+ #include "../util/string_piece.hh"
8
+
9
+ #include <exception>
10
+ #include <string>
11
+
12
+ namespace lm {
13
+
14
+ typedef enum {THROW_UP, COMPLAIN, SILENT} WarningAction;
15
+
16
+ class ConfigException : public util::Exception {
17
+ public:
18
+ ConfigException() throw();
19
+ ~ConfigException() throw();
20
+ };
21
+
22
+ class LoadException : public util::Exception {
23
+ public:
24
+ virtual ~LoadException() throw();
25
+
26
+ protected:
27
+ LoadException() throw();
28
+ };
29
+
30
+ class FormatLoadException : public LoadException {
31
+ public:
32
+ FormatLoadException() throw();
33
+ ~FormatLoadException() throw();
34
+ };
35
+
36
+ class VocabLoadException : public LoadException {
37
+ public:
38
+ virtual ~VocabLoadException() throw();
39
+ VocabLoadException() throw();
40
+ };
41
+
42
+ class SpecialWordMissingException : public VocabLoadException {
43
+ public:
44
+ explicit SpecialWordMissingException() throw();
45
+ ~SpecialWordMissingException() throw();
46
+ };
47
+
48
+ } // namespace lm
49
+
50
+ #endif // LM_LM_EXCEPTION_H
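Since every class declared above ultimately derives from util::Exception, callers can catch a specific load error or the common base. A minimal sketch, not part of the commit; the file name is a deliberately bogus placeholder.

// Sketch: distinguish a format problem from other KenLM errors when loading.
#include "lm/lm_exception.hh"
#include "lm/model.hh"

#include <iostream>

int main() {
  try {
    lm::ngram::Model model("does_not_exist.arpa");  // hypothetical path
  } catch (const lm::FormatLoadException &e) {
    std::cerr << "Malformed ARPA or binary file: " << e.what() << std::endl;
    return 1;
  } catch (const util::Exception &e) {  // base class of everything declared above
    std::cerr << "Other KenLM error: " << e.what() << std::endl;
    return 1;
  }
  return 0;
}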
cc-multilingual-main/cc_net/third_party/kenlm/lm/max_order.hh ADDED
@@ -0,0 +1,13 @@
1
+ #ifndef LM_MAX_ORDER_H
2
+ #define LM_MAX_ORDER_H
3
+ /* IF YOUR BUILD SYSTEM PASSES -DKENLM_MAX_ORDER, THEN CHANGE THE BUILD SYSTEM.
4
+ * If not, this is the default maximum order.
5
+ * Having this limit means that State can be
6
+ * (kMaxOrder - 1) * sizeof(float) bytes instead of
7
+ * sizeof(float*) + (kMaxOrder - 1) * sizeof(float) + malloc overhead
8
+ */
9
+ #ifndef KENLM_ORDER_MESSAGE
10
+ #define KENLM_ORDER_MESSAGE "If your build system supports changing KENLM_MAX_ORDER, change it there and recompile. With cmake:\n cmake -DKENLM_MAX_ORDER=10 ..\nWith Moses:\n bjam --max-kenlm-order=10 -a\nOtherwise, edit lm/max_order.hh."
11
+ #endif
12
+
13
+ #endif // LM_MAX_ORDER_H
cc-multilingual-main/cc_net/third_party/kenlm/lm/model.cc ADDED
@@ -0,0 +1,349 @@
1
+ #include "model.hh"
2
+
3
+ #include "blank.hh"
4
+ #include "lm_exception.hh"
5
+ #include "search_hashed.hh"
6
+ #include "search_trie.hh"
7
+ #include "read_arpa.hh"
8
+ #include "../util/have.hh"
9
+ #include "../util/murmur_hash.hh"
10
+
11
+ #include <algorithm>
12
+ #include <functional>
13
+ #include <numeric>
14
+ #include <cmath>
15
+ #include <limits>
16
+
17
+ namespace lm {
18
+ namespace ngram {
19
+ namespace detail {
20
+
21
+ template <class Search, class VocabularyT> const ModelType GenericModel<Search, VocabularyT>::kModelType = Search::kModelType;
22
+
23
+ template <class Search, class VocabularyT> uint64_t GenericModel<Search, VocabularyT>::Size(const std::vector<uint64_t> &counts, const Config &config) {
24
+ return VocabularyT::Size(counts[0], config) + Search::Size(counts, config);
25
+ }
26
+
27
+ template <class Search, class VocabularyT> void GenericModel<Search, VocabularyT>::SetupMemory(void *base, const std::vector<uint64_t> &counts, const Config &config) {
28
+ size_t goal_size = util::CheckOverflow(Size(counts, config));
29
+ uint8_t *start = static_cast<uint8_t*>(base);
30
+ size_t allocated = VocabularyT::Size(counts[0], config);
31
+ vocab_.SetupMemory(start, allocated, counts[0], config);
32
+ start += allocated;
33
+ start = search_.SetupMemory(start, counts, config);
34
+ if (static_cast<std::size_t>(start - static_cast<uint8_t*>(base)) != goal_size) UTIL_THROW(FormatLoadException, "The data structures took " << (start - static_cast<uint8_t*>(base)) << " but Size says they should take " << goal_size);
35
+ }
36
+
37
+ namespace {
38
+ void ComplainAboutARPA(const Config &config, ModelType model_type) {
39
+ if (config.write_mmap || !config.messages) return;
40
+ if (config.arpa_complain == Config::ALL) {
41
+ *config.messages << "Loading the LM will be faster if you build a binary file." << std::endl;
42
+ } else if (config.arpa_complain == Config::EXPENSIVE &&
43
+ (model_type == TRIE || model_type == QUANT_TRIE || model_type == ARRAY_TRIE || model_type == QUANT_ARRAY_TRIE)) {
44
+ *config.messages << "Building " << kModelNames[model_type] << " from ARPA is expensive. Save time by building a binary format." << std::endl;
45
+ }
46
+ }
47
+
48
+ void CheckCounts(const std::vector<uint64_t> &counts) {
49
+ UTIL_THROW_IF(counts.size() > KENLM_MAX_ORDER, FormatLoadException, "This model has order " << counts.size() << " but KenLM was compiled to support up to " << KENLM_MAX_ORDER << ". " << KENLM_ORDER_MESSAGE);
50
+ if (sizeof(uint64_t) > sizeof(std::size_t)) {
51
+ for (std::vector<uint64_t>::const_iterator i = counts.begin(); i != counts.end(); ++i) {
52
+ UTIL_THROW_IF(*i > static_cast<uint64_t>(std::numeric_limits<size_t>::max()), util::OverflowException, "This model has " << *i << " " << (i - counts.begin() + 1) << "-grams which is too many for 32-bit machines.");
53
+ }
54
+ }
55
+ }
56
+
57
+ } // namespace
58
+
59
+ template <class Search, class VocabularyT> GenericModel<Search, VocabularyT>::GenericModel(const char *file, const Config &init_config) : backing_(init_config) {
60
+ util::scoped_fd fd(util::OpenReadOrThrow(file));
61
+ if (IsBinaryFormat(fd.get())) {
62
+ Parameters parameters;
63
+ int fd_shallow = fd.release();
64
+ backing_.InitializeBinary(fd_shallow, kModelType, kVersion, parameters);
65
+ CheckCounts(parameters.counts);
66
+
67
+ Config new_config(init_config);
68
+ new_config.probing_multiplier = parameters.fixed.probing_multiplier;
69
+ Search::UpdateConfigFromBinary(backing_, parameters.counts, VocabularyT::Size(parameters.counts[0], new_config), new_config);
70
+ UTIL_THROW_IF(new_config.enumerate_vocab && !parameters.fixed.has_vocabulary, FormatLoadException, "The decoder requested all the vocabulary strings, but this binary file does not have them. You may need to rebuild the binary file with an updated version of build_binary.");
71
+
72
+ SetupMemory(backing_.LoadBinary(Size(parameters.counts, new_config)), parameters.counts, new_config);
73
+ vocab_.LoadedBinary(parameters.fixed.has_vocabulary, fd_shallow, new_config.enumerate_vocab, backing_.VocabStringReadingOffset());
74
+ } else {
75
+ ComplainAboutARPA(init_config, kModelType);
76
+ InitializeFromARPA(fd.release(), file, init_config);
77
+ }
78
+
79
+ // g++ prints warnings unless these are fully initialized.
80
+ State begin_sentence = State();
81
+ begin_sentence.length = 1;
82
+ begin_sentence.words[0] = vocab_.BeginSentence();
83
+ typename Search::Node ignored_node;
84
+ bool ignored_independent_left;
85
+ uint64_t ignored_extend_left;
86
+ begin_sentence.backoff[0] = search_.LookupUnigram(begin_sentence.words[0], ignored_node, ignored_independent_left, ignored_extend_left).Backoff();
87
+ State null_context = State();
88
+ null_context.length = 0;
89
+ P::Init(begin_sentence, null_context, vocab_, search_.Order());
90
+ }
91
+
92
+ template <class Search, class VocabularyT> void GenericModel<Search, VocabularyT>::InitializeFromARPA(int fd, const char *file, const Config &config) {
93
+ // Backing file is the ARPA.
94
+ util::FilePiece f(fd, file, config.ProgressMessages());
95
+ try {
96
+ std::vector<uint64_t> counts;
97
+ // File counts do not include pruned trigrams that extend to quadgrams etc. These will be fixed by search_.
98
+ ReadARPACounts(f, counts);
99
+ CheckCounts(counts);
100
+ if (counts.size() < 2) UTIL_THROW(FormatLoadException, "This ngram implementation assumes at least a bigram model.");
101
+ if (config.probing_multiplier <= 1.0) UTIL_THROW(ConfigException, "probing multiplier must be > 1.0");
102
+
103
+ std::size_t vocab_size = util::CheckOverflow(VocabularyT::Size(counts[0], config));
104
+ // Setup the binary file for writing the vocab lookup table. The search_ is responsible for growing the binary file to its needs.
105
+ vocab_.SetupMemory(backing_.SetupJustVocab(vocab_size, counts.size()), vocab_size, counts[0], config);
106
+
107
+ if (config.write_mmap && config.include_vocab) {
108
+ WriteWordsWrapper wrap(config.enumerate_vocab);
109
+ vocab_.ConfigureEnumerate(&wrap, counts[0]);
110
+ search_.InitializeFromARPA(file, f, counts, config, vocab_, backing_);
111
+ void *vocab_rebase, *search_rebase;
112
+ backing_.WriteVocabWords(wrap.Buffer(), vocab_rebase, search_rebase);
113
+ // Due to writing at the end of file, mmap may have relocated data. So remap.
114
+ vocab_.Relocate(vocab_rebase);
115
+ search_.SetupMemory(reinterpret_cast<uint8_t*>(search_rebase), counts, config);
116
+ } else {
117
+ vocab_.ConfigureEnumerate(config.enumerate_vocab, counts[0]);
118
+ search_.InitializeFromARPA(file, f, counts, config, vocab_, backing_);
119
+ }
120
+
121
+ if (!vocab_.SawUnk()) {
122
+ assert(config.unknown_missing != THROW_UP);
123
+ // Default probabilities for unknown.
124
+ search_.UnknownUnigram().backoff = 0.0;
125
+ search_.UnknownUnigram().prob = config.unknown_missing_logprob;
126
+ }
127
+ backing_.FinishFile(config, kModelType, kVersion, counts);
128
+ } catch (util::Exception &e) {
129
+ e << " Byte: " << f.Offset();
130
+ throw;
131
+ }
132
+ }
133
+
134
+ template <class Search, class VocabularyT> FullScoreReturn GenericModel<Search, VocabularyT>::FullScore(const State &in_state, const WordIndex new_word, State &out_state) const {
135
+ FullScoreReturn ret = ScoreExceptBackoff(in_state.words, in_state.words + in_state.length, new_word, out_state);
136
+ for (const float *i = in_state.backoff + ret.ngram_length - 1; i < in_state.backoff + in_state.length; ++i) {
137
+ ret.prob += *i;
138
+ }
139
+ return ret;
140
+ }
141
+
142
+ template <class Search, class VocabularyT> FullScoreReturn GenericModel<Search, VocabularyT>::FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const {
143
+ context_rend = std::min(context_rend, context_rbegin + P::Order() - 1);
144
+ FullScoreReturn ret = ScoreExceptBackoff(context_rbegin, context_rend, new_word, out_state);
145
+
146
+ // Add the backoff weights for n-grams of order start to (context_rend - context_rbegin).
147
+ unsigned char start = ret.ngram_length;
148
+ if (context_rend - context_rbegin < static_cast<std::ptrdiff_t>(start)) return ret;
149
+
150
+ bool independent_left;
151
+ uint64_t extend_left;
152
+ typename Search::Node node;
153
+ if (start <= 1) {
154
+ ret.prob += search_.LookupUnigram(*context_rbegin, node, independent_left, extend_left).Backoff();
155
+ start = 2;
156
+ } else if (!search_.FastMakeNode(context_rbegin, context_rbegin + start - 1, node)) {
157
+ return ret;
158
+ }
159
+ // i is the order of the backoff we're looking for.
160
+ unsigned char order_minus_2 = start - 2;
161
+ for (const WordIndex *i = context_rbegin + start - 1; i < context_rend; ++i, ++order_minus_2) {
162
+ typename Search::MiddlePointer p(search_.LookupMiddle(order_minus_2, *i, node, independent_left, extend_left));
163
+ if (!p.Found()) break;
164
+ ret.prob += p.Backoff();
165
+ }
166
+ return ret;
167
+ }
168
+
169
+ template <class Search, class VocabularyT> void GenericModel<Search, VocabularyT>::GetState(const WordIndex *context_rbegin, const WordIndex *context_rend, State &out_state) const {
170
+ // Generate a state from context.
171
+ context_rend = std::min(context_rend, context_rbegin + P::Order() - 1);
172
+ if (context_rend == context_rbegin) {
173
+ out_state.length = 0;
174
+ return;
175
+ }
176
+ typename Search::Node node;
177
+ bool independent_left;
178
+ uint64_t extend_left;
179
+ out_state.backoff[0] = search_.LookupUnigram(*context_rbegin, node, independent_left, extend_left).Backoff();
180
+ out_state.length = HasExtension(out_state.backoff[0]) ? 1 : 0;
181
+ float *backoff_out = out_state.backoff + 1;
182
+ unsigned char order_minus_2 = 0;
183
+ for (const WordIndex *i = context_rbegin + 1; i < context_rend; ++i, ++backoff_out, ++order_minus_2) {
184
+ typename Search::MiddlePointer p(search_.LookupMiddle(order_minus_2, *i, node, independent_left, extend_left));
185
+ if (!p.Found()) {
186
+ std::copy(context_rbegin, context_rbegin + out_state.length, out_state.words);
187
+ return;
188
+ }
189
+ *backoff_out = p.Backoff();
190
+ if (HasExtension(*backoff_out)) out_state.length = i - context_rbegin + 1;
191
+ }
192
+ std::copy(context_rbegin, context_rbegin + out_state.length, out_state.words);
193
+ }
194
+
195
+ template <class Search, class VocabularyT> FullScoreReturn GenericModel<Search, VocabularyT>::ExtendLeft(
196
+ const WordIndex *add_rbegin, const WordIndex *add_rend,
197
+ const float *backoff_in,
198
+ uint64_t extend_pointer,
199
+ unsigned char extend_length,
200
+ float *backoff_out,
201
+ unsigned char &next_use) const {
202
+ FullScoreReturn ret;
203
+ typename Search::Node node;
204
+ if (extend_length == 1) {
205
+ typename Search::UnigramPointer ptr(search_.LookupUnigram(static_cast<WordIndex>(extend_pointer), node, ret.independent_left, ret.extend_left));
206
+ ret.rest = ptr.Rest();
207
+ ret.prob = ptr.Prob();
208
+ assert(!ret.independent_left);
209
+ } else {
210
+ typename Search::MiddlePointer ptr(search_.Unpack(extend_pointer, extend_length, node));
211
+ ret.rest = ptr.Rest();
212
+ ret.prob = ptr.Prob();
213
+ ret.extend_left = extend_pointer;
214
+ // If this function is called, then it does depend on left words.
215
+ ret.independent_left = false;
216
+ }
217
+ float subtract_me = ret.rest;
218
+ ret.ngram_length = extend_length;
219
+ next_use = extend_length;
220
+ ResumeScore(add_rbegin, add_rend, extend_length - 1, node, backoff_out, next_use, ret);
221
+ next_use -= extend_length;
222
+ // Charge backoffs.
223
+ for (const float *b = backoff_in + ret.ngram_length - extend_length; b < backoff_in + (add_rend - add_rbegin); ++b) ret.prob += *b;
224
+ ret.prob -= subtract_me;
225
+ ret.rest -= subtract_me;
226
+ return ret;
227
+ }
228
+
229
+ namespace {
230
+ // Do a paranoid copy of history, assuming new_word has already been copied
231
+ // (hence the -1). out_state.length could be zero so I avoided using
232
+ // std::copy.
233
+ void CopyRemainingHistory(const WordIndex *from, State &out_state) {
234
+ WordIndex *out = out_state.words + 1;
235
+ const WordIndex *in_end = from + static_cast<ptrdiff_t>(out_state.length) - 1;
236
+ for (const WordIndex *in = from; in < in_end; ++in, ++out) *out = *in;
237
+ }
238
+ } // namespace
239
+
240
+ /* Ugly optimized function. Produce a score excluding backoff.
241
+ * The search goes in increasing order of ngram length.
242
+ * Context goes backward, so context_begin is the word immediately preceding
243
+ * new_word.
244
+ */
245
+ template <class Search, class VocabularyT> FullScoreReturn GenericModel<Search, VocabularyT>::ScoreExceptBackoff(
246
+ const WordIndex *const context_rbegin,
247
+ const WordIndex *const context_rend,
248
+ const WordIndex new_word,
249
+ State &out_state) const {
250
+ assert(new_word < vocab_.Bound());
251
+ FullScoreReturn ret;
252
+ // ret.ngram_length contains the last known non-blank ngram length.
253
+ ret.ngram_length = 1;
254
+
255
+ typename Search::Node node;
256
+ typename Search::UnigramPointer uni(search_.LookupUnigram(new_word, node, ret.independent_left, ret.extend_left));
257
+ out_state.backoff[0] = uni.Backoff();
258
+ ret.prob = uni.Prob();
259
+ ret.rest = uni.Rest();
260
+
261
+ // This is the length of the context that should be used for continuation to the right.
262
+ out_state.length = HasExtension(out_state.backoff[0]) ? 1 : 0;
263
+ // We'll write the word anyway since it will probably be used and does no harm being there.
264
+ out_state.words[0] = new_word;
265
+ if (context_rbegin == context_rend) return ret;
266
+
267
+ ResumeScore(context_rbegin, context_rend, 0, node, out_state.backoff + 1, out_state.length, ret);
268
+ CopyRemainingHistory(context_rbegin, out_state);
269
+ return ret;
270
+ }
271
+
272
+ template <class Search, class VocabularyT> void GenericModel<Search, VocabularyT>::ResumeScore(const WordIndex *hist_iter, const WordIndex *const context_rend, unsigned char order_minus_2, typename Search::Node &node, float *backoff_out, unsigned char &next_use, FullScoreReturn &ret) const {
273
+ for (; ; ++order_minus_2, ++hist_iter, ++backoff_out) {
274
+ if (hist_iter == context_rend) return;
275
+ if (ret.independent_left) return;
276
+ if (order_minus_2 == P::Order() - 2) break;
277
+
278
+ typename Search::MiddlePointer pointer(search_.LookupMiddle(order_minus_2, *hist_iter, node, ret.independent_left, ret.extend_left));
279
+ if (!pointer.Found()) return;
280
+ *backoff_out = pointer.Backoff();
281
+ ret.prob = pointer.Prob();
282
+ ret.rest = pointer.Rest();
283
+ ret.ngram_length = order_minus_2 + 2;
284
+ if (HasExtension(*backoff_out)) {
285
+ next_use = ret.ngram_length;
286
+ }
287
+ }
288
+ ret.independent_left = true;
289
+ typename Search::LongestPointer longest(search_.LookupLongest(*hist_iter, node));
290
+ if (longest.Found()) {
291
+ ret.prob = longest.Prob();
292
+ ret.rest = ret.prob;
293
+ // There is no blank in longest_.
294
+ ret.ngram_length = P::Order();
295
+ }
296
+ }
297
+
298
+ template <class Search, class VocabularyT> float GenericModel<Search, VocabularyT>::InternalUnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const {
299
+ float ret;
300
+ typename Search::Node node;
301
+ if (first_length == 1) {
302
+ if (pointers_begin >= pointers_end) return 0.0;
303
+ bool independent_left;
304
+ uint64_t extend_left;
305
+ typename Search::UnigramPointer ptr(search_.LookupUnigram(static_cast<WordIndex>(*pointers_begin), node, independent_left, extend_left));
306
+ ret = ptr.Prob() - ptr.Rest();
307
+ ++first_length;
308
+ ++pointers_begin;
309
+ } else {
310
+ ret = 0.0;
311
+ }
312
+ for (const uint64_t *i = pointers_begin; i < pointers_end; ++i, ++first_length) {
313
+ typename Search::MiddlePointer ptr(search_.Unpack(*i, first_length, node));
314
+ ret += ptr.Prob() - ptr.Rest();
315
+ }
316
+ return ret;
317
+ }
318
+
319
+ template class GenericModel<HashedSearch<BackoffValue>, ProbingVocabulary>;
320
+ template class GenericModel<HashedSearch<RestValue>, ProbingVocabulary>;
321
+ template class GenericModel<trie::TrieSearch<DontQuantize, trie::DontBhiksha>, SortedVocabulary>;
322
+ template class GenericModel<trie::TrieSearch<DontQuantize, trie::ArrayBhiksha>, SortedVocabulary>;
323
+ template class GenericModel<trie::TrieSearch<SeparatelyQuantize, trie::DontBhiksha>, SortedVocabulary>;
324
+ template class GenericModel<trie::TrieSearch<SeparatelyQuantize, trie::ArrayBhiksha>, SortedVocabulary>;
325
+
326
+ } // namespace detail
327
+
328
+ base::Model *LoadVirtual(const char *file_name, const Config &config, ModelType model_type) {
329
+ RecognizeBinary(file_name, model_type);
330
+ switch (model_type) {
331
+ case PROBING:
332
+ return new ProbingModel(file_name, config);
333
+ case REST_PROBING:
334
+ return new RestProbingModel(file_name, config);
335
+ case TRIE:
336
+ return new TrieModel(file_name, config);
337
+ case QUANT_TRIE:
338
+ return new QuantTrieModel(file_name, config);
339
+ case ARRAY_TRIE:
340
+ return new ArrayTrieModel(file_name, config);
341
+ case QUANT_ARRAY_TRIE:
342
+ return new QuantArrayTrieModel(file_name, config);
343
+ default:
344
+ UTIL_THROW(FormatLoadException, "Confused by model type " << model_type);
345
+ }
346
+ }
347
+
348
+ } // namespace ngram
349
+ } // namespace lm
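The FullScore path above, which charges the backoff weights left over from in_state, is what an ordinary left-to-right query loop exercises. A minimal sketch of that loop follows, assuming a model file named test.arpa; it mirrors the LeftToRight helper in left_test.cc above and is not part of the commit.

// Sketch: score a token sequence left to right, chaining out_state back into
// in_state between calls, since FullScore requires distinct state references.
#include "lm/model.hh"

#include <cstddef>
#include <iostream>

int main() {
  lm::ngram::Model model("test.arpa");  // assumed model file
  const char *words[] = {"looking", "on", "a", "little", "</s>"};

  lm::ngram::State in_state(model.BeginSentenceState()), out_state;
  float total = 0.0;
  for (std::size_t i = 0; i < sizeof(words) / sizeof(*words); ++i) {
    lm::FullScoreReturn ret = model.FullScore(in_state, model.GetVocabulary().Index(words[i]), out_state);
    total += ret.prob;   // log10 probability with backoffs already charged
    in_state = out_state;
  }
  std::cout << "log10 p(sentence) = " << total << std::endl;
  return 0;
}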
cc-multilingual-main/cc_net/third_party/kenlm/lm/model.hh ADDED
@@ -0,0 +1,155 @@
1
+ #ifndef LM_MODEL_H
2
+ #define LM_MODEL_H
3
+
4
+ #include "bhiksha.hh"
5
+ #include "binary_format.hh"
6
+ #include "config.hh"
7
+ #include "facade.hh"
8
+ #include "quantize.hh"
9
+ #include "search_hashed.hh"
10
+ #include "search_trie.hh"
11
+ #include "state.hh"
12
+ #include "value.hh"
13
+ #include "vocab.hh"
14
+ #include "weights.hh"
15
+
16
+ #include "../util/murmur_hash.hh"
17
+
18
+ #include <algorithm>
19
+ #include <vector>
20
+ #include <cstring>
21
+
22
+ namespace util { class FilePiece; }
23
+
24
+ namespace lm {
25
+ namespace ngram {
26
+ namespace detail {
27
+
28
+ // Should return the same results as SRI.
29
+ // ModelFacade typedefs Vocabulary so we use VocabularyT to avoid naming conflicts.
30
+ template <class Search, class VocabularyT> class GenericModel : public base::ModelFacade<GenericModel<Search, VocabularyT>, State, VocabularyT> {
31
+ private:
32
+ typedef base::ModelFacade<GenericModel<Search, VocabularyT>, State, VocabularyT> P;
33
+ public:
34
+ // This is the model type returned by RecognizeBinary.
35
+ static const ModelType kModelType;
36
+
37
+ static const unsigned int kVersion = Search::kVersion;
38
+
39
+ /* Get the size of memory that will be mapped given ngram counts. This
40
+ * does not include small non-mapped control structures, such as this class
41
+ * itself.
42
+ */
43
+ static uint64_t Size(const std::vector<uint64_t> &counts, const Config &config = Config());
44
+
45
+ /* Load the model from a file. It may be an ARPA or binary file. Binary
46
+ * files must have the format expected by this class or you'll get an
47
+ * exception. So TrieModel can only load ARPA or binary created by
48
+ * TrieModel. To classify binary files, call RecognizeBinary in
49
+ * lm/binary_format.hh.
50
+ */
51
+ explicit GenericModel(const char *file, const Config &config = Config());
52
+
53
+ /* Score p(new_word | in_state) and incorporate new_word into out_state.
54
+ * Note that in_state and out_state must be different references:
55
+ * &in_state != &out_state.
56
+ */
57
+ FullScoreReturn FullScore(const State &in_state, const WordIndex new_word, State &out_state) const;
58
+
59
+ /* Slower call without in_state. Try to remember state, but sometimes it
60
+ * would cost too much memory or your decoder isn't setup properly.
61
+ * To use this function, make an array of WordIndex containing the context
62
+ * vocabulary ids in reverse order. Then, pass the bounds of the array:
63
+ * [context_rbegin, context_rend). The new_word is not part of the context
64
+ * array unless you intend to repeat words.
65
+ */
66
+ FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const;
67
+
68
+ /* Get the state for a context. Don't use this if you can avoid it. Use
69
+ * BeginSentenceState or NullContextState and extend from those. If
70
+ * you're only going to use this state to call FullScore once, use
71
+ * FullScoreForgotState.
72
+ * To use this function, make an array of WordIndex containing the context
73
+ * vocabulary ids in reverse order. Then, pass the bounds of the array:
74
+ * [context_rbegin, context_rend).
75
+ */
76
+ void GetState(const WordIndex *context_rbegin, const WordIndex *context_rend, State &out_state) const;
77
+
78
+ /* More efficient version of FullScore where a partial n-gram has already
79
+ * been scored.
80
+ * NOTE: THE RETURNED .rest AND .prob ARE RELATIVE TO THE .rest RETURNED BEFORE.
81
+ */
82
+ FullScoreReturn ExtendLeft(
83
+ // Additional context in reverse order. This will update add_rend to
84
+ const WordIndex *add_rbegin, const WordIndex *add_rend,
85
+ // Backoff weights to use.
86
+ const float *backoff_in,
87
+ // extend_left returned by a previous query.
88
+ uint64_t extend_pointer,
89
+ // Length of n-gram that the pointer corresponds to.
90
+ unsigned char extend_length,
91
+ // Where to write additional backoffs for [extend_length + 1, min(Order() - 1, return.ngram_length)]
92
+ float *backoff_out,
93
+ // Amount of additional content that should be considered by the next call.
94
+ unsigned char &next_use) const;
95
+
96
+ /* Return probabilities minus rest costs for an array of pointers. The
97
+ * first length should be the length of the n-gram to which pointers_begin
98
+ * points.
99
+ */
100
+ float UnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const {
101
+ // Compiler should optimize this if away.
102
+ return Search::kDifferentRest ? InternalUnRest(pointers_begin, pointers_end, first_length) : 0.0;
103
+ }
104
+
105
+ private:
106
+ FullScoreReturn ScoreExceptBackoff(const WordIndex *const context_rbegin, const WordIndex *const context_rend, const WordIndex new_word, State &out_state) const;
107
+
108
+ // Score bigrams and above. Do not include backoff.
109
+ void ResumeScore(const WordIndex *context_rbegin, const WordIndex *const context_rend, unsigned char starting_order_minus_2, typename Search::Node &node, float *backoff_out, unsigned char &next_use, FullScoreReturn &ret) const;
110
+
111
+ // Appears after Size in the cc file.
112
+ void SetupMemory(void *start, const std::vector<uint64_t> &counts, const Config &config);
113
+
114
+ void InitializeFromARPA(int fd, const char *file, const Config &config);
115
+
116
+ float InternalUnRest(const uint64_t *pointers_begin, const uint64_t *pointers_end, unsigned char first_length) const;
117
+
118
+ BinaryFormat backing_;
119
+
120
+ VocabularyT vocab_;
121
+
122
+ Search search_;
123
+ };
124
+
125
+ } // namespace detail
126
+
127
+ // Instead of typedef, inherit. This allows the Model etc to be forward declared.
128
+ // Oh the joys of C and C++.
129
+ #define LM_COMMA() ,
130
+ #define LM_NAME_MODEL(name, from)\
131
+ class name : public from {\
132
+ public:\
133
+ name(const char *file, const Config &config = Config()) : from(file, config) {}\
134
+ };
135
+
136
+ LM_NAME_MODEL(ProbingModel, detail::GenericModel<detail::HashedSearch<BackoffValue> LM_COMMA() ProbingVocabulary>);
137
+ LM_NAME_MODEL(RestProbingModel, detail::GenericModel<detail::HashedSearch<RestValue> LM_COMMA() ProbingVocabulary>);
138
+ LM_NAME_MODEL(TrieModel, detail::GenericModel<trie::TrieSearch<DontQuantize LM_COMMA() trie::DontBhiksha> LM_COMMA() SortedVocabulary>);
139
+ LM_NAME_MODEL(ArrayTrieModel, detail::GenericModel<trie::TrieSearch<DontQuantize LM_COMMA() trie::ArrayBhiksha> LM_COMMA() SortedVocabulary>);
140
+ LM_NAME_MODEL(QuantTrieModel, detail::GenericModel<trie::TrieSearch<SeparatelyQuantize LM_COMMA() trie::DontBhiksha> LM_COMMA() SortedVocabulary>);
141
+ LM_NAME_MODEL(QuantArrayTrieModel, detail::GenericModel<trie::TrieSearch<SeparatelyQuantize LM_COMMA() trie::ArrayBhiksha> LM_COMMA() SortedVocabulary>);
142
+
143
+ // Default implementation. No real reason for it to be the default.
144
+ typedef ::lm::ngram::ProbingVocabulary Vocabulary;
145
+ typedef ProbingModel Model;
146
+
147
+ /* Autorecognize the file type, load, and return the virtual base class. Don't
148
+ * use the virtual base class if you can avoid it. Instead, use the above
149
+ * classes as template arguments to your own virtual feature function.*/
150
+ base::Model *LoadVirtual(const char *file_name, const Config &config = Config(), ModelType if_arpa = PROBING);
151
+
152
+ } // namespace ngram
153
+ } // namespace lm
154
+
155
+ #endif // LM_MODEL_H
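The reversed-context calls documented in the comments above (FullScoreForgotState and GetState) take vocabulary ids with the most recent word first. A minimal sketch under the same test.arpa assumption as before; the words are placeholders and the sketch is not part of the commit.

// Sketch: score p(more | on a little) without a caller-maintained State, then
// recover a reusable State for the same context.
#include "lm/model.hh"

#include <cstddef>
#include <iostream>

int main() {
  lm::ngram::Model model("test.arpa");  // assumed model file
  const lm::ngram::Vocabulary &vocab = model.GetVocabulary();

  // Context "on a little", reversed: [context_rbegin, context_rend).
  lm::WordIndex context[] = {vocab.Index("little"), vocab.Index("a"), vocab.Index("on")};
  const std::size_t len = sizeof(context) / sizeof(*context);

  lm::ngram::State out;
  lm::FullScoreReturn ret = model.FullScoreForgotState(context, context + len, vocab.Index("more"), out);
  std::cout << "prob " << ret.prob << " from a " << static_cast<unsigned>(ret.ngram_length) << "-gram\n";

  lm::ngram::State recovered;
  model.GetState(context, context + len, recovered);
  return 0;
}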
cc-multilingual-main/cc_net/third_party/kenlm/lm/model_test.cc ADDED
@@ -0,0 +1,448 @@
1
+ #include "model.hh"
2
+
3
+ #include <cstdlib>
4
+ #include <cstring>
5
+
6
+ #define BOOST_TEST_MODULE ModelTest
7
+ #include <boost/test/unit_test.hpp>
8
+ #include <boost/test/floating_point_comparison.hpp>
9
+
10
+ // Apparently some Boost versions use templates and are pretty strict about types matching.
11
+ #define SLOPPY_CHECK_CLOSE(ref, value, tol) BOOST_CHECK_CLOSE(static_cast<double>(ref), static_cast<double>(value), static_cast<double>(tol));
12
+
13
+ namespace lm {
14
+ namespace ngram {
15
+
16
+ std::ostream &operator<<(std::ostream &o, const State &state) {
17
+ o << "State length " << static_cast<unsigned int>(state.length) << ':';
18
+ for (const WordIndex *i = state.words; i < state.words + state.length; ++i) {
19
+ o << ' ' << *i;
20
+ }
21
+ return o;
22
+ }
23
+
24
+ namespace {
25
+
26
+ // Stupid bjam reverses the command line arguments randomly.
27
+ const char *TestLocation() {
28
+ if (boost::unit_test::framework::master_test_suite().argc < 3) {
29
+ return "test.arpa";
30
+ }
31
+ char **argv = boost::unit_test::framework::master_test_suite().argv;
32
+ return argv[strstr(argv[1], "nounk") ? 2 : 1];
33
+ }
34
+ const char *TestNoUnkLocation() {
35
+ if (boost::unit_test::framework::master_test_suite().argc < 3) {
36
+ return "test_nounk.arpa";
37
+ }
38
+ char **argv = boost::unit_test::framework::master_test_suite().argv;
39
+ return argv[strstr(argv[1], "nounk") ? 1 : 2];
40
+ }
41
+
42
+ template <class Model> State GetState(const Model &model, const char *word, const State &in) {
43
+ WordIndex context[in.length + 1];
44
+ context[0] = model.GetVocabulary().Index(word);
45
+ std::copy(in.words, in.words + in.length, context + 1);
46
+ State ret;
47
+ model.GetState(context, context + in.length + 1, ret);
48
+ return ret;
49
+ }
50
+
51
+ #define StartTest(word, ngram, score, indep_left) \
52
+ ret = model.FullScore( \
53
+ state, \
54
+ model.GetVocabulary().Index(word), \
55
+ out);\
56
+ SLOPPY_CHECK_CLOSE(score, ret.prob, 0.001); \
57
+ BOOST_CHECK_EQUAL(static_cast<unsigned int>(ngram), ret.ngram_length); \
58
+ BOOST_CHECK_GE(std::min<unsigned char>(ngram, 5 - 1), out.length); \
59
+ BOOST_CHECK_EQUAL(indep_left, ret.independent_left); \
60
+ BOOST_CHECK_EQUAL(out, GetState(model, word, state));
61
+
62
+ #define AppendTest(word, ngram, score, indep_left) \
63
+ StartTest(word, ngram, score, indep_left) \
64
+ state = out;
65
+
66
+ template <class M> void Starters(const M &model) {
67
+ FullScoreReturn ret;
68
+ Model::State state(model.BeginSentenceState());
69
+ Model::State out;
70
+
71
+ StartTest("looking", 2, -0.4846522, true);
72
+
73
+ // , probability plus <s> backoff
74
+ StartTest(",", 1, -1.383514 + -0.4149733, true);
75
+ // <unk> probability plus <s> backoff
76
+ StartTest("this_is_not_found", 1, -1.995635 + -0.4149733, true);
77
+ }
78
+
79
+ template <class M> void Continuation(const M &model) {
80
+ FullScoreReturn ret;
81
+ Model::State state(model.BeginSentenceState());
82
+ Model::State out;
83
+
84
+ AppendTest("looking", 2, -0.484652, true);
85
+ AppendTest("on", 3, -0.348837, true);
86
+ AppendTest("a", 4, -0.0155266, true);
87
+ AppendTest("little", 5, -0.00306122, true);
88
+ State preserve = state;
89
+ AppendTest("the", 1, -4.04005, true);
90
+ AppendTest("biarritz", 1, -1.9889, true);
91
+ AppendTest("not_found", 1, -2.29666, true);
92
+ AppendTest("more", 1, -1.20632 - 20.0, true);
93
+ AppendTest(".", 2, -0.51363, true);
94
+ AppendTest("</s>", 3, -0.0191651, true);
95
+ BOOST_CHECK_EQUAL(0, state.length);
96
+
97
+ state = preserve;
98
+ AppendTest("more", 5, -0.00181395, true);
99
+ BOOST_CHECK_EQUAL(4, state.length);
100
+ AppendTest("loin", 5, -0.0432557, true);
101
+ BOOST_CHECK_EQUAL(1, state.length);
102
+ }
103
+
104
+ template <class M> void Blanks(const M &model) {
105
+ FullScoreReturn ret;
106
+ State state(model.NullContextState());
107
+ State out;
108
+ AppendTest("also", 1, -1.687872, false);
109
+ AppendTest("would", 2, -2, true);
110
+ AppendTest("consider", 3, -3, true);
111
+ State preserve = state;
112
+ AppendTest("higher", 4, -4, true);
113
+ AppendTest("looking", 5, -5, true);
114
+ BOOST_CHECK_EQUAL(1, state.length);
115
+
116
+ state = preserve;
117
+ // also would consider not_found
118
+ AppendTest("not_found", 1, -1.995635 - 7.0 - 0.30103, true);
119
+
120
+ state = model.NullContextState();
121
+ // higher looking is a blank.
122
+ AppendTest("higher", 1, -1.509559, false);
123
+ AppendTest("looking", 2, -1.285941 - 0.30103, false);
124
+
125
+ State higher_looking = state;
126
+
127
+ BOOST_CHECK_EQUAL(1, state.length);
128
+ AppendTest("not_found", 1, -1.995635 - 0.4771212, true);
129
+
130
+ state = higher_looking;
131
+ // higher looking consider
132
+ AppendTest("consider", 1, -1.687872 - 0.4771212, true);
133
+
134
+ state = model.NullContextState();
135
+ AppendTest("would", 1, -1.687872, false);
136
+ BOOST_CHECK_EQUAL(1, state.length);
137
+ AppendTest("consider", 2, -1.687872 -0.30103, false);
138
+ BOOST_CHECK_EQUAL(2, state.length);
139
+ AppendTest("higher", 3, -1.509559 - 0.30103, false);
140
+ BOOST_CHECK_EQUAL(3, state.length);
141
+ AppendTest("looking", 4, -1.285941 - 0.30103, false);
142
+ }
143
+
144
+ template <class M> void Unknowns(const M &model) {
145
+ FullScoreReturn ret;
146
+ State state(model.NullContextState());
147
+ State out;
148
+
149
+ AppendTest("not_found", 1, -1.995635, false);
150
+ State preserve = state;
151
+ AppendTest("not_found2", 2, -15.0, true);
152
+ AppendTest("not_found3", 2, -15.0 - 2.0, true);
153
+
154
+ state = preserve;
155
+ AppendTest("however", 2, -4, true);
156
+ AppendTest("not_found3", 3, -6, true);
157
+ }
158
+
159
+ template <class M> void MinimalState(const M &model) {
160
+ FullScoreReturn ret;
161
+ State state(model.NullContextState());
162
+ State out;
163
+
164
+ AppendTest("baz", 1, -6.535897, true);
165
+ BOOST_CHECK_EQUAL(0, state.length);
166
+ state = model.NullContextState();
167
+ AppendTest("foo", 1, -3.141592, true);
168
+ BOOST_CHECK_EQUAL(1, state.length);
169
+ AppendTest("bar", 2, -6.0, true);
170
+ // Has to include the backoff weight.
171
+ BOOST_CHECK_EQUAL(1, state.length);
172
+ AppendTest("bar", 1, -2.718281 + 3.0, true);
173
+ BOOST_CHECK_EQUAL(1, state.length);
174
+
175
+ state = model.NullContextState();
176
+ AppendTest("to", 1, -1.687872, false);
177
+ AppendTest("look", 2, -0.2922095, true);
178
+ BOOST_CHECK_EQUAL(2, state.length);
179
+ AppendTest("a", 3, -7, true);
180
+ }
181
+
182
+ template <class M> void ExtendLeftTest(const M &model) {
183
+ State right;
184
+ FullScoreReturn little(model.FullScore(model.NullContextState(), model.GetVocabulary().Index("little"), right));
185
+ const float kLittleProb = -1.285941;
186
+ SLOPPY_CHECK_CLOSE(kLittleProb, little.prob, 0.001);
187
+ unsigned char next_use;
188
+ float backoff_out[4];
189
+
190
+ FullScoreReturn extend_none(model.ExtendLeft(NULL, NULL, NULL, little.extend_left, 1, NULL, next_use));
191
+ BOOST_CHECK_EQUAL(0, next_use);
192
+ BOOST_CHECK_EQUAL(little.extend_left, extend_none.extend_left);
193
+ SLOPPY_CHECK_CLOSE(little.prob - little.rest, extend_none.prob, 0.001);
194
+ BOOST_CHECK_EQUAL(1, extend_none.ngram_length);
195
+
196
+ const WordIndex a = model.GetVocabulary().Index("a");
197
+ float backoff_in = 3.14;
198
+ // a little
199
+ FullScoreReturn extend_a(model.ExtendLeft(&a, &a + 1, &backoff_in, little.extend_left, 1, backoff_out, next_use));
200
+ BOOST_CHECK_EQUAL(1, next_use);
201
+ SLOPPY_CHECK_CLOSE(-0.69897, backoff_out[0], 0.001);
202
+ SLOPPY_CHECK_CLOSE(-0.09132547 - little.rest, extend_a.prob, 0.001);
203
+ BOOST_CHECK_EQUAL(2, extend_a.ngram_length);
204
+ BOOST_CHECK(!extend_a.independent_left);
205
+
206
+ const WordIndex on = model.GetVocabulary().Index("on");
207
+ FullScoreReturn extend_on(model.ExtendLeft(&on, &on + 1, &backoff_in, extend_a.extend_left, 2, backoff_out, next_use));
208
+ BOOST_CHECK_EQUAL(1, next_use);
209
+ SLOPPY_CHECK_CLOSE(-0.4771212, backoff_out[0], 0.001);
210
+ SLOPPY_CHECK_CLOSE(-0.0283603 - (extend_a.rest + little.rest), extend_on.prob, 0.001);
211
+ BOOST_CHECK_EQUAL(3, extend_on.ngram_length);
212
+ BOOST_CHECK(!extend_on.independent_left);
213
+
214
+ const WordIndex both[2] = {a, on};
215
+ float backoff_in_arr[4];
216
+ FullScoreReturn extend_both(model.ExtendLeft(both, both + 2, backoff_in_arr, little.extend_left, 1, backoff_out, next_use));
217
+ BOOST_CHECK_EQUAL(2, next_use);
218
+ SLOPPY_CHECK_CLOSE(-0.69897, backoff_out[0], 0.001);
219
+ SLOPPY_CHECK_CLOSE(-0.4771212, backoff_out[1], 0.001);
220
+ SLOPPY_CHECK_CLOSE(-0.0283603 - little.rest, extend_both.prob, 0.001);
221
+ BOOST_CHECK_EQUAL(3, extend_both.ngram_length);
222
+ BOOST_CHECK(!extend_both.independent_left);
223
+ BOOST_CHECK_EQUAL(extend_on.extend_left, extend_both.extend_left);
224
+ }
225
+
226
+ #define StatelessTest(word, provide, ngram, score) \
227
+ ret = model.FullScoreForgotState(indices + num_words - word, indices + num_words - word + provide, indices[num_words - word - 1], state); \
228
+ SLOPPY_CHECK_CLOSE(score, ret.prob, 0.001); \
229
+ BOOST_CHECK_EQUAL(static_cast<unsigned int>(ngram), ret.ngram_length); \
230
+ model.GetState(indices + num_words - word, indices + num_words - word + provide, before); \
231
+ ret = model.FullScore(before, indices[num_words - word - 1], out); \
232
+ BOOST_CHECK(state == out); \
233
+ SLOPPY_CHECK_CLOSE(score, ret.prob, 0.001); \
234
+ BOOST_CHECK_EQUAL(static_cast<unsigned int>(ngram), ret.ngram_length);
235
+
236
+ template <class M> void Stateless(const M &model) {
237
+ const char *words[] = {"<s>", "looking", "on", "a", "little", "the", "biarritz", "not_found", "more", ".", "</s>"};
238
+ const size_t num_words = sizeof(words) / sizeof(const char*);
239
+ // Silence "array subscript is above array bounds" when extracting end pointer.
240
+ WordIndex indices[num_words + 1];
241
+ for (unsigned int i = 0; i < num_words; ++i) {
242
+ indices[num_words - 1 - i] = model.GetVocabulary().Index(words[i]);
243
+ }
244
+ FullScoreReturn ret;
245
+ State state, out, before;
246
+
247
+ ret = model.FullScoreForgotState(indices + num_words - 1, indices + num_words, indices[num_words - 2], state);
248
+ SLOPPY_CHECK_CLOSE(-0.484652, ret.prob, 0.001);
249
+ StatelessTest(1, 1, 2, -0.484652);
250
+
251
+ // looking
252
+ StatelessTest(1, 2, 2, -0.484652);
253
+ // on
254
+ AppendTest("on", 3, -0.348837, true);
255
+ StatelessTest(2, 3, 3, -0.348837);
256
+ StatelessTest(2, 2, 3, -0.348837);
257
+ StatelessTest(2, 1, 2, -0.4638903);
258
+ // a
259
+ StatelessTest(3, 4, 4, -0.0155266);
260
+ // little
261
+ AppendTest("little", 5, -0.00306122, true);
262
+ StatelessTest(4, 5, 5, -0.00306122);
263
+ // the
264
+ AppendTest("the", 1, -4.04005, true);
265
+ StatelessTest(5, 5, 1, -4.04005);
266
+ // No context of the.
267
+ StatelessTest(5, 0, 1, -1.687872);
268
+ // biarritz
269
+ StatelessTest(6, 1, 1, -1.9889);
270
+ // not found
271
+ StatelessTest(7, 1, 1, -2.29666);
272
+ StatelessTest(7, 0, 1, -1.995635);
273
+
274
+ WordIndex unk[1];
275
+ unk[0] = 0;
276
+ model.GetState(unk, unk + 1, state);
277
+ BOOST_CHECK_EQUAL(1, state.length);
278
+ BOOST_CHECK_EQUAL(static_cast<WordIndex>(0), state.words[0]);
279
+ }
280
+
281
+ template <class M> void NoUnkCheck(const M &model) {
282
+ WordIndex unk_index = 0;
283
+ State state;
284
+
285
+ FullScoreReturn ret = model.FullScoreForgotState(&unk_index, &unk_index + 1, unk_index, state);
286
+ SLOPPY_CHECK_CLOSE(-100.0, ret.prob, 0.001);
287
+ }
288
+
289
+ template <class M> void Everything(const M &m) {
290
+ Starters(m);
291
+ Continuation(m);
292
+ Blanks(m);
293
+ Unknowns(m);
294
+ MinimalState(m);
295
+ ExtendLeftTest(m);
296
+ Stateless(m);
297
+ }
298
+
299
+ class ExpectEnumerateVocab : public EnumerateVocab {
300
+ public:
301
+ ExpectEnumerateVocab() {}
302
+
303
+ void Add(WordIndex index, const StringPiece &str) {
304
+ BOOST_CHECK_EQUAL(seen.size(), index);
305
+ seen.push_back(std::string(str.data(), str.length()));
306
+ }
307
+
308
+ void Check(const base::Vocabulary &vocab) {
309
+ BOOST_CHECK_EQUAL(37ULL, seen.size());
310
+ BOOST_REQUIRE(!seen.empty());
311
+ BOOST_CHECK_EQUAL("<unk>", seen[0]);
312
+ for (WordIndex i = 0; i < seen.size(); ++i) {
313
+ BOOST_CHECK_EQUAL(i, vocab.Index(seen[i]));
314
+ }
315
+ }
316
+
317
+ void Clear() {
318
+ seen.clear();
319
+ }
320
+
321
+ std::vector<std::string> seen;
322
+ };
323
+
324
+ template <class ModelT> void LoadingTest() {
325
+ Config config;
326
+ config.arpa_complain = Config::NONE;
327
+ config.messages = NULL;
328
+ config.probing_multiplier = 2.0;
329
+ {
330
+ ExpectEnumerateVocab enumerate;
331
+ config.enumerate_vocab = &enumerate;
332
+ ModelT m(TestLocation(), config);
333
+ enumerate.Check(m.GetVocabulary());
334
+ BOOST_CHECK_EQUAL((WordIndex)37, m.GetVocabulary().Bound());
335
+ Everything(m);
336
+ }
337
+ {
338
+ ExpectEnumerateVocab enumerate;
339
+ config.enumerate_vocab = &enumerate;
340
+ ModelT m(TestNoUnkLocation(), config);
341
+ enumerate.Check(m.GetVocabulary());
342
+ BOOST_CHECK_EQUAL((WordIndex)37, m.GetVocabulary().Bound());
343
+ NoUnkCheck(m);
344
+ }
345
+ }
346
+
347
+ BOOST_AUTO_TEST_CASE(probing) {
348
+ LoadingTest<Model>();
349
+ }
350
+ BOOST_AUTO_TEST_CASE(trie) {
351
+ LoadingTest<TrieModel>();
352
+ }
353
+ BOOST_AUTO_TEST_CASE(quant_trie) {
354
+ LoadingTest<QuantTrieModel>();
355
+ }
356
+ BOOST_AUTO_TEST_CASE(bhiksha_trie) {
357
+ LoadingTest<ArrayTrieModel>();
358
+ }
359
+ BOOST_AUTO_TEST_CASE(quant_bhiksha_trie) {
360
+ LoadingTest<QuantArrayTrieModel>();
361
+ }
362
+
363
+ template <class ModelT> void BinaryTest(Config::WriteMethod write_method) {
364
+ Config config;
365
+ config.write_mmap = "test.binary";
366
+ config.messages = NULL;
367
+ config.write_method = write_method;
368
+ ExpectEnumerateVocab enumerate;
369
+ config.enumerate_vocab = &enumerate;
370
+
371
+ {
372
+ ModelT copy_model(TestLocation(), config);
373
+ enumerate.Check(copy_model.GetVocabulary());
374
+ enumerate.Clear();
375
+ Everything(copy_model);
376
+ }
377
+
378
+ config.write_mmap = NULL;
379
+
380
+ ModelType type;
381
+ BOOST_REQUIRE(RecognizeBinary("test.binary", type));
382
+ BOOST_CHECK_EQUAL(ModelT::kModelType, type);
383
+
384
+ {
385
+ ModelT binary("test.binary", config);
386
+ enumerate.Check(binary.GetVocabulary());
387
+ Everything(binary);
388
+ }
389
+ unlink("test.binary");
390
+
391
+ // Now test without <unk>.
392
+ config.write_mmap = "test_nounk.binary";
393
+ config.messages = NULL;
394
+ enumerate.Clear();
395
+ {
396
+ ModelT copy_model(TestNoUnkLocation(), config);
397
+ enumerate.Check(copy_model.GetVocabulary());
398
+ enumerate.Clear();
399
+ NoUnkCheck(copy_model);
400
+ }
401
+ config.write_mmap = NULL;
402
+ {
403
+ ModelT binary(TestNoUnkLocation(), config);
404
+ enumerate.Check(binary.GetVocabulary());
405
+ NoUnkCheck(binary);
406
+ }
407
+ unlink("test_nounk.binary");
408
+ }
409
+
410
+ template <class ModelT> void BinaryTest() {
411
+ BinaryTest<ModelT>(Config::WRITE_MMAP);
412
+ BinaryTest<ModelT>(Config::WRITE_AFTER);
413
+ }
414
+
415
+ BOOST_AUTO_TEST_CASE(write_and_read_probing) {
416
+ BinaryTest<ProbingModel>();
417
+ }
418
+ BOOST_AUTO_TEST_CASE(write_and_read_rest_probing) {
419
+ BinaryTest<RestProbingModel>();
420
+ }
421
+ BOOST_AUTO_TEST_CASE(write_and_read_trie) {
422
+ BinaryTest<TrieModel>();
423
+ }
424
+ BOOST_AUTO_TEST_CASE(write_and_read_quant_trie) {
425
+ BinaryTest<QuantTrieModel>();
426
+ }
427
+ BOOST_AUTO_TEST_CASE(write_and_read_array_trie) {
428
+ BinaryTest<ArrayTrieModel>();
429
+ }
430
+ BOOST_AUTO_TEST_CASE(write_and_read_quant_array_trie) {
431
+ BinaryTest<QuantArrayTrieModel>();
432
+ }
433
+
434
+ BOOST_AUTO_TEST_CASE(rest_max) {
435
+ Config config;
436
+ config.arpa_complain = Config::NONE;
437
+ config.messages = NULL;
438
+
439
+ RestProbingModel model(TestLocation(), config);
440
+ State state, out;
441
+ FullScoreReturn ret(model.FullScore(model.NullContextState(), model.GetVocabulary().Index("."), state));
442
+ SLOPPY_CHECK_CLOSE(-0.2705918, ret.rest, 0.001);
443
+ SLOPPY_CHECK_CLOSE(-0.01916512, model.FullScore(state, model.GetVocabulary().EndSentence(), out).rest, 0.001);
444
+ }
445
+
446
+ } // namespace
447
+ } // namespace ngram
448
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/model_type.hh ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef LM_MODEL_TYPE_H
2
+ #define LM_MODEL_TYPE_H
3
+
4
+ namespace lm {
5
+ namespace ngram {
6
+
7
+ /* Not the best numbering system, but it grew this way for historical reasons
8
+ * and I want to preserve existing binary files. */
9
+ typedef enum {PROBING=0, REST_PROBING=1, TRIE=2, QUANT_TRIE=3, ARRAY_TRIE=4, QUANT_ARRAY_TRIE=5} ModelType;
10
+
11
+ // Historical names.
12
+ const ModelType HASH_PROBING = PROBING;
13
+ const ModelType TRIE_SORTED = TRIE;
14
+ const ModelType QUANT_TRIE_SORTED = QUANT_TRIE;
15
+ const ModelType ARRAY_TRIE_SORTED = ARRAY_TRIE;
16
+ const ModelType QUANT_ARRAY_TRIE_SORTED = QUANT_ARRAY_TRIE;
17
+
18
+ const static ModelType kQuantAdd = static_cast<ModelType>(QUANT_TRIE - TRIE);
19
+ const static ModelType kArrayAdd = static_cast<ModelType>(ARRAY_TRIE - TRIE);
20
+
21
+ } // namespace ngram
22
+ } // namespace lm
23
+ #endif // LM_MODEL_TYPE_H
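
A reader-side sketch, not part of the uploaded files: the additive constants above are meant to compose with TRIE as the base, which is the same idea quantize.hh later in this commit exposes as kModelTypeAdd. The main() wrapper and include path here are illustrative assumptions.

  #include <cassert>
  #include "model_type.hh"  // hypothetical include path for this sketch

  int main() {
    using namespace lm::ngram;
    // QUANT_TRIE - TRIE == 1 and ARRAY_TRIE - TRIE == 2, so the variants are reachable by addition.
    assert(static_cast<ModelType>(TRIE + kQuantAdd) == QUANT_TRIE);
    assert(static_cast<ModelType>(TRIE + kArrayAdd) == ARRAY_TRIE);
    assert(static_cast<ModelType>(TRIE + kQuantAdd + kArrayAdd) == QUANT_ARRAY_TRIE);
    return 0;
  }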
cc-multilingual-main/cc_net/third_party/kenlm/lm/ngram_query.hh ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef LM_NGRAM_QUERY_H
2
+ #define LM_NGRAM_QUERY_H
3
+
4
+ #include "enumerate_vocab.hh"
5
+ #include "model.hh"
6
+ #include "../util/file_stream.hh"
7
+ #include "../util/file_piece.hh"
8
+ #include "../util/usage.hh"
9
+
10
+ #include <cstdlib>
11
+ #include <string>
12
+ #include <cmath>
13
+
14
+ namespace lm {
15
+ namespace ngram {
16
+
17
+ class QueryPrinter {
18
+ public:
19
+ QueryPrinter(int fd, bool print_word, bool print_line, bool print_summary, bool flush)
20
+ : out_(fd), print_word_(print_word), print_line_(print_line), print_summary_(print_summary), flush_(flush) {}
21
+
22
+ void Word(StringPiece surface, WordIndex vocab, const FullScoreReturn &ret) {
23
+ if (!print_word_) return;
24
+ out_ << surface << '=' << vocab << ' ' << static_cast<unsigned int>(ret.ngram_length) << ' ' << ret.prob << '\t';
25
+ if (flush_) out_.flush();
26
+ }
27
+
28
+ void Line(uint64_t oov, float total) {
29
+ if (!print_line_) return;
30
+ out_ << "Total: " << total << " OOV: " << oov << '\n';
31
+ if (flush_) out_.flush();
32
+ }
33
+
34
+ void Summary(double ppl_including_oov, double ppl_excluding_oov, uint64_t corpus_oov, uint64_t corpus_tokens) {
35
+ if (!print_summary_) return;
36
+ out_ <<
37
+ "Perplexity including OOVs:\t" << ppl_including_oov << "\n"
38
+ "Perplexity excluding OOVs:\t" << ppl_excluding_oov << "\n"
39
+ "OOVs:\t" << corpus_oov << "\n"
40
+ "Tokens:\t" << corpus_tokens << '\n';
41
+ out_.flush();
42
+ }
43
+
44
+ private:
45
+ util::FileStream out_;
46
+ bool print_word_;
47
+ bool print_line_;
48
+ bool print_summary_;
49
+ bool flush_;
50
+ };
51
+
52
+ template <class Model, class Printer> void Query(const Model &model, bool sentence_context, Printer &printer) {
53
+ typename Model::State state, out;
54
+ lm::FullScoreReturn ret;
55
+ StringPiece word;
56
+
57
+ util::FilePiece in(0);
58
+
59
+ double corpus_total = 0.0;
60
+ double corpus_total_oov_only = 0.0;
61
+ uint64_t corpus_oov = 0;
62
+ uint64_t corpus_tokens = 0;
63
+
64
+ while (true) {
65
+ state = sentence_context ? model.BeginSentenceState() : model.NullContextState();
66
+ float total = 0.0;
67
+ uint64_t oov = 0;
68
+
69
+ while (in.ReadWordSameLine(word)) {
70
+ lm::WordIndex vocab = model.GetVocabulary().Index(word);
71
+ ret = model.FullScore(state, vocab, out);
72
+ if (vocab == model.GetVocabulary().NotFound()) {
73
+ ++oov;
74
+ corpus_total_oov_only += ret.prob;
75
+ }
76
+ total += ret.prob;
77
+ printer.Word(word, vocab, ret);
78
+ ++corpus_tokens;
79
+ state = out;
80
+ }
81
+ // If people don't have a newline after their last query, this won't add a </s>.
82
+ // Sue me.
83
+ try {
84
+ UTIL_THROW_IF('\n' != in.get(), util::Exception, "FilePiece is confused.");
85
+ } catch (const util::EndOfFileException &e) { break; }
86
+ if (sentence_context) {
87
+ ret = model.FullScore(state, model.GetVocabulary().EndSentence(), out);
88
+ total += ret.prob;
89
+ ++corpus_tokens;
90
+ printer.Word("</s>", model.GetVocabulary().EndSentence(), ret);
91
+ }
92
+ printer.Line(oov, total);
93
+ corpus_total += total;
94
+ corpus_oov += oov;
95
+ }
96
+ printer.Summary(
97
+ pow(10.0, -(corpus_total / static_cast<double>(corpus_tokens))), // PPL including OOVs
98
+ pow(10.0, -((corpus_total - corpus_total_oov_only) / static_cast<double>(corpus_tokens - corpus_oov))), // PPL excluding OOVs
99
+ corpus_oov,
100
+ corpus_tokens);
101
+ }
102
+
103
+ template <class Model> void Query(const char *file, const Config &config, bool sentence_context, QueryPrinter &printer) {
104
+ Model model(file, config);
105
+ Query<Model, QueryPrinter>(model, sentence_context, printer);
106
+ }
107
+
108
+ } // namespace ngram
109
+ } // namespace lm
110
+
111
+ #endif // LM_NGRAM_QUERY_H
112
+
113
+
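
A quick sanity check of the Summary() arithmetic in Query() above, as an illustration rather than part of the file: perplexity is 10 raised to the negative average log10 probability per token. The numbers below are made up.

  #include <cmath>
  #include <cstdio>

  int main() {
    double corpus_total = -6.0;   // hypothetical sum of log10 probabilities
    double corpus_tokens = 3.0;   // hypothetical token count
    double ppl = std::pow(10.0, -(corpus_total / corpus_tokens));
    std::printf("%.1f\n", ppl);   // prints 100.0: an average log10 probability of -2 per token
    return 0;
  }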
cc-multilingual-main/cc_net/third_party/kenlm/lm/partial.hh ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef LM_PARTIAL_H
2
+ #define LM_PARTIAL_H
3
+
4
+ #include "return.hh"
5
+ #include "state.hh"
6
+
7
+ #include <algorithm>
8
+ #include <cassert>
9
+
10
+ namespace lm {
11
+ namespace ngram {
12
+
13
+ struct ExtendReturn {
14
+ float adjust;
15
+ bool make_full;
16
+ unsigned char next_use;
17
+ };
18
+
19
+ template <class Model> ExtendReturn ExtendLoop(
20
+ const Model &model,
21
+ unsigned char seen, const WordIndex *add_rbegin, const WordIndex *add_rend, const float *backoff_start,
22
+ const uint64_t *pointers, const uint64_t *pointers_end,
23
+ uint64_t *&pointers_write,
24
+ float *backoff_write) {
25
+ unsigned char add_length = add_rend - add_rbegin;
26
+
27
+ float backoff_buf[2][KENLM_MAX_ORDER - 1];
28
+ float *backoff_in = backoff_buf[0], *backoff_out = backoff_buf[1];
29
+ std::copy(backoff_start, backoff_start + add_length, backoff_in);
30
+
31
+ ExtendReturn value;
32
+ value.make_full = false;
33
+ value.adjust = 0.0;
34
+ value.next_use = add_length;
35
+
36
+ unsigned char i = 0;
37
+ unsigned char length = pointers_end - pointers;
38
+ // pointers_write == NULL means the existing left state is full, so we should use completed probabilities.
39
+ if (pointers_write) {
40
+ // Using full context, writing to new left state.
41
+ for (; i < length; ++i) {
42
+ FullScoreReturn ret(model.ExtendLeft(
43
+ add_rbegin, add_rbegin + value.next_use,
44
+ backoff_in,
45
+ pointers[i], i + seen + 1,
46
+ backoff_out,
47
+ value.next_use));
48
+ std::swap(backoff_in, backoff_out);
49
+ if (ret.independent_left) {
50
+ value.adjust += ret.prob;
51
+ value.make_full = true;
52
+ ++i;
53
+ break;
54
+ }
55
+ value.adjust += ret.rest;
56
+ *pointers_write++ = ret.extend_left;
57
+ if (value.next_use != add_length) {
58
+ value.make_full = true;
59
+ ++i;
60
+ break;
61
+ }
62
+ }
63
+ }
64
+ // Using some of the new context.
65
+ for (; i < length && value.next_use; ++i) {
66
+ FullScoreReturn ret(model.ExtendLeft(
67
+ add_rbegin, add_rbegin + value.next_use,
68
+ backoff_in,
69
+ pointers[i], i + seen + 1,
70
+ backoff_out,
71
+ value.next_use));
72
+ std::swap(backoff_in, backoff_out);
73
+ value.adjust += ret.prob;
74
+ }
75
+ float unrest = model.UnRest(pointers + i, pointers_end, i + seen + 1);
76
+ // Using none of the new context.
77
+ value.adjust += unrest;
78
+
79
+ std::copy(backoff_in, backoff_in + value.next_use, backoff_write);
80
+ return value;
81
+ }
82
+
83
+ template <class Model> float RevealBefore(const Model &model, const Right &reveal, const unsigned char seen, bool reveal_full, Left &left, Right &right) {
84
+ assert(seen < reveal.length || reveal_full);
85
+ uint64_t *pointers_write = reveal_full ? NULL : left.pointers;
86
+ float backoff_buffer[KENLM_MAX_ORDER - 1];
87
+ ExtendReturn value(ExtendLoop(
88
+ model,
89
+ seen, reveal.words + seen, reveal.words + reveal.length, reveal.backoff + seen,
90
+ left.pointers, left.pointers + left.length,
91
+ pointers_write,
92
+ left.full ? backoff_buffer : (right.backoff + right.length)));
93
+ if (reveal_full) {
94
+ left.length = 0;
95
+ value.make_full = true;
96
+ } else {
97
+ left.length = pointers_write - left.pointers;
98
+ value.make_full |= (left.length == model.Order() - 1);
99
+ }
100
+ if (left.full) {
101
+ for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i];
102
+ } else {
103
+ // If left wasn't full when it came in, put words into right state.
104
+ std::copy(reveal.words + seen, reveal.words + seen + value.next_use, right.words + right.length);
105
+ right.length += value.next_use;
106
+ left.full = value.make_full || (right.length == model.Order() - 1);
107
+ }
108
+ return value.adjust;
109
+ }
110
+
111
+ template <class Model> float RevealAfter(const Model &model, Left &left, Right &right, const Left &reveal, unsigned char seen) {
112
+ assert(seen < reveal.length || reveal.full);
113
+ uint64_t *pointers_write = left.full ? NULL : (left.pointers + left.length);
114
+ ExtendReturn value(ExtendLoop(
115
+ model,
116
+ seen, right.words, right.words + right.length, right.backoff,
117
+ reveal.pointers + seen, reveal.pointers + reveal.length,
118
+ pointers_write,
119
+ right.backoff));
120
+ if (reveal.full) {
121
+ for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += right.backoff[i];
122
+ right.length = 0;
123
+ value.make_full = true;
124
+ } else {
125
+ right.length = value.next_use;
126
+ value.make_full |= (right.length == model.Order() - 1);
127
+ }
128
+ if (!left.full) {
129
+ left.length = pointers_write - left.pointers;
130
+ left.full = value.make_full || (left.length == model.Order() - 1);
131
+ }
132
+ return value.adjust;
133
+ }
134
+
135
+ template <class Model> float Subsume(const Model &model, Left &first_left, const Right &first_right, const Left &second_left, Right &second_right, const unsigned int between_length) {
136
+ assert(first_right.length < KENLM_MAX_ORDER);
137
+ assert(second_left.length < KENLM_MAX_ORDER);
138
+ assert(between_length < KENLM_MAX_ORDER - 1);
139
+ uint64_t *pointers_write = first_left.full ? NULL : (first_left.pointers + first_left.length);
140
+ float backoff_buffer[KENLM_MAX_ORDER - 1];
141
+ ExtendReturn value(ExtendLoop(
142
+ model,
143
+ between_length, first_right.words, first_right.words + first_right.length, first_right.backoff,
144
+ second_left.pointers, second_left.pointers + second_left.length,
145
+ pointers_write,
146
+ second_left.full ? backoff_buffer : (second_right.backoff + second_right.length)));
147
+ if (second_left.full) {
148
+ for (unsigned char i = 0; i < value.next_use; ++i) value.adjust += backoff_buffer[i];
149
+ } else {
150
+ std::copy(first_right.words, first_right.words + value.next_use, second_right.words + second_right.length);
151
+ second_right.length += value.next_use;
152
+ value.make_full |= (second_right.length == model.Order() - 1);
153
+ }
154
+ if (!first_left.full) {
155
+ first_left.length = pointers_write - first_left.pointers;
156
+ first_left.full = value.make_full || second_left.full || (first_left.length == model.Order() - 1);
157
+ }
158
+ assert(first_left.length < KENLM_MAX_ORDER);
159
+ assert(second_right.length < KENLM_MAX_ORDER);
160
+ return value.adjust;
161
+ }
162
+
163
+ } // namespace ngram
164
+ } // namespace lm
165
+
166
+ #endif // LM_PARTIAL_H
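
Put another way, for any split of a fragment into before, between, and after pieces, the adjustments returned by RevealBefore and RevealAfter are designed to sum to score(full) - score(before) - score(between) - score(after); the CheckAdjustment and FullDivide helpers in partial_test.cc below check exactly that identity over many splits.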
cc-multilingual-main/cc_net/third_party/kenlm/lm/partial_test.cc ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include "partial.hh"
2
+
3
+ #include "left.hh"
4
+ #include "model.hh"
5
+ #include "../util/tokenize_piece.hh"
6
+
7
+ #define BOOST_TEST_MODULE PartialTest
8
+ #include <boost/test/unit_test.hpp>
9
+ #include <boost/test/floating_point_comparison.hpp>
10
+
11
+ namespace lm {
12
+ namespace ngram {
13
+ namespace {
14
+
15
+ const char *TestLocation() {
16
+ if (boost::unit_test::framework::master_test_suite().argc < 2) {
17
+ return "test.arpa";
18
+ }
19
+ return boost::unit_test::framework::master_test_suite().argv[1];
20
+ }
21
+
22
+ Config SilentConfig() {
23
+ Config config;
24
+ config.arpa_complain = Config::NONE;
25
+ config.messages = NULL;
26
+ return config;
27
+ }
28
+
29
+ struct ModelFixture {
30
+ ModelFixture() : m(TestLocation(), SilentConfig()) {}
31
+
32
+ RestProbingModel m;
33
+ };
34
+
35
+ BOOST_FIXTURE_TEST_SUITE(suite, ModelFixture)
36
+
37
+ BOOST_AUTO_TEST_CASE(SimpleBefore) {
38
+ Left left;
39
+ left.full = false;
40
+ left.length = 0;
41
+ Right right;
42
+ right.length = 0;
43
+
44
+ Right reveal;
45
+ reveal.length = 1;
46
+ WordIndex period = m.GetVocabulary().Index(".");
47
+ reveal.words[0] = period;
48
+ reveal.backoff[0] = -0.845098;
49
+
50
+ BOOST_CHECK_CLOSE(0.0, RevealBefore(m, reveal, 0, false, left, right), 0.001);
51
+ BOOST_CHECK_EQUAL(0, left.length);
52
+ BOOST_CHECK(!left.full);
53
+ BOOST_CHECK_EQUAL(1, right.length);
54
+ BOOST_CHECK_EQUAL(period, right.words[0]);
55
+ BOOST_CHECK_CLOSE(-0.845098, right.backoff[0], 0.001);
56
+
57
+ WordIndex more = m.GetVocabulary().Index("more");
58
+ reveal.words[1] = more;
59
+ reveal.backoff[1] = -0.4771212;
60
+ reveal.length = 2;
61
+ BOOST_CHECK_CLOSE(0.0, RevealBefore(m, reveal, 1, false, left, right), 0.001);
62
+ BOOST_CHECK_EQUAL(0, left.length);
63
+ BOOST_CHECK(!left.full);
64
+ BOOST_CHECK_EQUAL(2, right.length);
65
+ BOOST_CHECK_EQUAL(period, right.words[0]);
66
+ BOOST_CHECK_EQUAL(more, right.words[1]);
67
+ BOOST_CHECK_CLOSE(-0.845098, right.backoff[0], 0.001);
68
+ BOOST_CHECK_CLOSE(-0.4771212, right.backoff[1], 0.001);
69
+ }
70
+
71
+ BOOST_AUTO_TEST_CASE(AlsoWouldConsider) {
72
+ WordIndex would = m.GetVocabulary().Index("would");
73
+ WordIndex consider = m.GetVocabulary().Index("consider");
74
+
75
+ ChartState current;
76
+ current.left.length = 1;
77
+ current.left.pointers[0] = would;
78
+ current.left.full = false;
79
+ current.right.length = 1;
80
+ current.right.words[0] = would;
81
+ current.right.backoff[0] = -0.30103;
82
+
83
+ Left after;
84
+ after.full = false;
85
+ after.length = 1;
86
+ after.pointers[0] = consider;
87
+
88
+ // adjustment for would consider
89
+ BOOST_CHECK_CLOSE(-1.687872 - -0.2922095 - 0.30103, RevealAfter(m, current.left, current.right, after, 0), 0.001);
90
+
91
+ BOOST_CHECK_EQUAL(2, current.left.length);
92
+ BOOST_CHECK_EQUAL(would, current.left.pointers[0]);
93
+ BOOST_CHECK_EQUAL(false, current.left.full);
94
+
95
+ WordIndex also = m.GetVocabulary().Index("also");
96
+ Right before;
97
+ before.length = 1;
98
+ before.words[0] = also;
99
+ before.backoff[0] = -0.30103;
100
+ // r(would) = -0.2922095 [i would], r(would -> consider) = -1.988902 [b(would) + p(consider)]
101
+ // p(also -> would) = -2, p(also would -> consider) = -3
102
+ BOOST_CHECK_CLOSE(-2 + 0.2922095 -3 + 1.988902, RevealBefore(m, before, 0, false, current.left, current.right), 0.001);
103
+ BOOST_CHECK_EQUAL(0, current.left.length);
104
+ BOOST_CHECK(current.left.full);
105
+ BOOST_CHECK_EQUAL(2, current.right.length);
106
+ BOOST_CHECK_EQUAL(would, current.right.words[0]);
107
+ BOOST_CHECK_EQUAL(also, current.right.words[1]);
108
+ }
109
+
110
+ BOOST_AUTO_TEST_CASE(EndSentence) {
111
+ WordIndex loin = m.GetVocabulary().Index("loin");
112
+ WordIndex period = m.GetVocabulary().Index(".");
113
+ WordIndex eos = m.GetVocabulary().EndSentence();
114
+
115
+ ChartState between;
116
+ between.left.length = 1;
117
+ between.left.pointers[0] = eos;
118
+ between.left.full = true;
119
+ between.right.length = 0;
120
+
121
+ Right before;
122
+ before.words[0] = period;
123
+ before.words[1] = loin;
124
+ before.backoff[0] = -0.845098;
125
+ before.backoff[1] = 0.0;
126
+
127
+ before.length = 1;
128
+ BOOST_CHECK_CLOSE(-0.0410707, RevealBefore(m, before, 0, true, between.left, between.right), 0.001);
129
+ BOOST_CHECK_EQUAL(0, between.left.length);
130
+ }
131
+
132
+ float ScoreFragment(const RestProbingModel &model, unsigned int *begin, unsigned int *end, ChartState &out) {
133
+ RuleScore<RestProbingModel> scorer(model, out);
134
+ for (unsigned int *i = begin; i < end; ++i) {
135
+ scorer.Terminal(*i);
136
+ }
137
+ return scorer.Finish();
138
+ }
139
+
140
+ void CheckAdjustment(const RestProbingModel &model, float expect, const Right &before_in, bool before_full, ChartState between, const Left &after_in) {
141
+ Right before(before_in);
142
+ Left after(after_in);
143
+ after.full = false;
144
+ float got = 0.0;
145
+ for (unsigned int i = 1; i < 5; ++i) {
146
+ if (before_in.length >= i) {
147
+ before.length = i;
148
+ got += RevealBefore(model, before, i - 1, false, between.left, between.right);
149
+ }
150
+ if (after_in.length >= i) {
151
+ after.length = i;
152
+ got += RevealAfter(model, between.left, between.right, after, i - 1);
153
+ }
154
+ }
155
+ if (after_in.full) {
156
+ after.full = true;
157
+ got += RevealAfter(model, between.left, between.right, after, after.length);
158
+ }
159
+ if (before_full) {
160
+ got += RevealBefore(model, before, before.length, true, between.left, between.right);
161
+ }
162
+ // Sometimes they're zero and BOOST_CHECK_CLOSE fails for this.
163
+ BOOST_CHECK(fabs(expect - got) < 0.001);
164
+ }
165
+
166
+ void FullDivide(const RestProbingModel &model, StringPiece str) {
167
+ std::vector<WordIndex> indices;
168
+ for (util::TokenIter<util::SingleCharacter, true> i(str, ' '); i; ++i) {
169
+ indices.push_back(model.GetVocabulary().Index(*i));
170
+ }
171
+ ChartState full_state;
172
+ float full = ScoreFragment(model, &indices.front(), &indices.back() + 1, full_state);
173
+
174
+ ChartState before_state;
175
+ before_state.left.full = false;
176
+ RuleScore<RestProbingModel> before_scorer(model, before_state);
177
+ float before_score = 0.0;
178
+ for (unsigned int before = 0; before < indices.size(); ++before) {
179
+ for (unsigned int after = before; after <= indices.size(); ++after) {
180
+ ChartState after_state, between_state;
181
+ float after_score = ScoreFragment(model, &indices.front() + after, &indices.front() + indices.size(), after_state);
182
+ float between_score = ScoreFragment(model, &indices.front() + before, &indices.front() + after, between_state);
183
+ CheckAdjustment(model, full - before_score - after_score - between_score, before_state.right, before_state.left.full, between_state, after_state.left);
184
+ }
185
+ before_scorer.Terminal(indices[before]);
186
+ before_score = before_scorer.Finish();
187
+ }
188
+ }
189
+
190
+ BOOST_AUTO_TEST_CASE(Strings) {
191
+ FullDivide(m, "also would consider");
192
+ FullDivide(m, "looking on a little more loin . </s>");
193
+ FullDivide(m, "in biarritz watching considering looking . on a little more loin also would consider higher to look good unknown the screening foo bar , unknown however unknown </s>");
194
+ }
195
+
196
+ BOOST_AUTO_TEST_SUITE_END()
197
+ } // namespace
198
+ } // namespace ngram
199
+ } // namespace lm
cc-multilingual-main/cc_net/third_party/kenlm/lm/quantize.cc ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Quantize into bins of equal size as described in
2
+ * M. Federico and N. Bertoldi. 2006. How many bits are needed
3
+ * to store probabilities for phrase-based translation? In Proc.
4
+ * of the Workshop on Statistical Machine Translation, pages
5
+ * 94–101, New York City, June. Association for Computa-
6
+ * tional Linguistics.
7
+ */
8
+
9
+ #include "quantize.hh"
10
+
11
+ #include "binary_format.hh"
12
+ #include "lm_exception.hh"
13
+ #include "../util/file.hh"
14
+
15
+ #include <algorithm>
16
+ #include <numeric>
17
+
18
+ namespace lm {
19
+ namespace ngram {
20
+
21
+ namespace {
22
+
23
+ void MakeBins(std::vector<float> &values, float *centers, uint32_t bins) {
24
+ std::sort(values.begin(), values.end());
25
+ std::vector<float>::const_iterator start = values.begin(), finish;
26
+ for (uint32_t i = 0; i < bins; ++i, ++centers, start = finish) {
27
+ finish = values.begin() + ((values.size() * static_cast<uint64_t>(i + 1)) / bins);
28
+ if (finish == start) {
29
+ // zero length bucket.
30
+ *centers = i ? *(centers - 1) : -std::numeric_limits<float>::infinity();
31
+ } else {
32
+ *centers = std::accumulate(start, finish, 0.0) / static_cast<float>(finish - start);
33
+ }
34
+ }
35
+ }
36
+
37
+ const char kSeparatelyQuantizeVersion = 2;
38
+
39
+ } // namespace
40
+
41
+ void SeparatelyQuantize::UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config) {
42
+ unsigned char buffer[3];
43
+ file.ReadForConfig(buffer, 3, offset);
44
+ char version = buffer[0];
45
+ config.prob_bits = buffer[1];
46
+ config.backoff_bits = buffer[2];
47
+ if (version != kSeparatelyQuantizeVersion) UTIL_THROW(FormatLoadException, "This file has quantization version " << (unsigned)version << " but the code expects version " << (unsigned)kSeparatelyQuantizeVersion);
48
+ }
49
+
50
+ void SeparatelyQuantize::SetupMemory(void *base, unsigned char order, const Config &config) {
51
+ prob_bits_ = config.prob_bits;
52
+ backoff_bits_ = config.backoff_bits;
53
+ // We need the reserved values.
54
+ if (config.prob_bits == 0) UTIL_THROW(ConfigException, "You can't quantize probability to zero");
55
+ if (config.backoff_bits == 0) UTIL_THROW(ConfigException, "You can't quantize backoff to zero");
56
+ if (config.prob_bits > 25) UTIL_THROW(ConfigException, "For efficiency reasons, quantizing probability supports at most 25 bits. Currently you have requested " << static_cast<unsigned>(config.prob_bits) << " bits.");
57
+ if (config.backoff_bits > 25) UTIL_THROW(ConfigException, "For efficiency reasons, quantizing backoff supports at most 25 bits. Currently you have requested " << static_cast<unsigned>(config.backoff_bits) << " bits.");
58
+ // Reserve 8 byte header for bit counts.
59
+ actual_base_ = static_cast<uint8_t*>(base);
60
+ float *start = reinterpret_cast<float*>(actual_base_ + 8);
61
+ for (unsigned char i = 0; i < order - 2; ++i) {
62
+ tables_[i][0] = Bins(prob_bits_, start);
63
+ start += (1ULL << prob_bits_);
64
+ tables_[i][1] = Bins(backoff_bits_, start);
65
+ start += (1ULL << backoff_bits_);
66
+ }
67
+ longest_ = tables_[order - 2][0] = Bins(prob_bits_, start);
68
+ }
69
+
70
+ void SeparatelyQuantize::Train(uint8_t order, std::vector<float> &prob, std::vector<float> &backoff) {
71
+ TrainProb(order, prob);
72
+
73
+ // Backoff
74
+ float *centers = tables_[order - 2][1].Populate();
75
+ *(centers++) = kNoExtensionBackoff;
76
+ *(centers++) = kExtensionBackoff;
77
+ MakeBins(backoff, centers, (1ULL << backoff_bits_) - 2);
78
+ }
79
+
80
+ void SeparatelyQuantize::TrainProb(uint8_t order, std::vector<float> &prob) {
81
+ float *centers = tables_[order - 2][0].Populate();
82
+ MakeBins(prob, centers, (1ULL << prob_bits_));
83
+ }
84
+
85
+ void SeparatelyQuantize::FinishedLoading(const Config &config) {
86
+ uint8_t *actual_base = actual_base_;
87
+ *(actual_base++) = kSeparatelyQuantizeVersion; // version
88
+ *(actual_base++) = config.prob_bits;
89
+ *(actual_base++) = config.backoff_bits;
90
+ }
91
+
92
+ } // namespace ngram
93
+ } // namespace lm
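
A toy sketch of the equal-size binning idea MakeBins implements above, not taken from the sources: sort the values, cut them into bins holding equal counts, and store each bin's mean as its center. The values and bin count are made up.

  #include <algorithm>
  #include <cstdio>
  #include <numeric>
  #include <vector>

  int main() {
    std::vector<float> values = {-4.0f, -3.0f, -2.5f, -2.0f, -1.5f, -1.0f, -0.5f, -0.1f};
    const unsigned bins = 4;
    std::sort(values.begin(), values.end());
    for (unsigned i = 0; i < bins; ++i) {
      // Equal-count split as in MakeBins; the zero-length-bucket case is omitted here.
      std::vector<float>::const_iterator start = values.begin() + (values.size() * i) / bins;
      std::vector<float>::const_iterator finish = values.begin() + (values.size() * (i + 1)) / bins;
      float center = std::accumulate(start, finish, 0.0f) / static_cast<float>(finish - start);
      std::printf("bin %u: center %.2f\n", i, center);
    }
    return 0;
  }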
cc-multilingual-main/cc_net/third_party/kenlm/lm/quantize.hh ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef LM_QUANTIZE_H
2
+ #define LM_QUANTIZE_H
3
+
4
+ #include "blank.hh"
5
+ #include "config.hh"
6
+ #include "max_order.hh"
7
+ #include "model_type.hh"
8
+ #include "../util/bit_packing.hh"
9
+
10
+ #include <algorithm>
11
+ #include <vector>
12
+
13
+ #include <stdint.h>
14
+
15
+ #include <iostream>
16
+
17
+ namespace lm {
18
+ namespace ngram {
19
+
20
+ struct Config;
21
+ class BinaryFormat;
22
+
23
+ /* Store values directly and don't quantize. */
24
+ class DontQuantize {
25
+ public:
26
+ static const ModelType kModelTypeAdd = static_cast<ModelType>(0);
27
+ static void UpdateConfigFromBinary(const BinaryFormat &, uint64_t, Config &) {}
28
+ static uint64_t Size(uint8_t /*order*/, const Config &/*config*/) { return 0; }
29
+ static uint8_t MiddleBits(const Config &/*config*/) { return 63; }
30
+ static uint8_t LongestBits(const Config &/*config*/) { return 31; }
31
+
32
+ class MiddlePointer {
33
+ public:
34
+ MiddlePointer(const DontQuantize & /*quant*/, unsigned char /*order_minus_2*/, util::BitAddress address) : address_(address) {}
35
+
36
+ MiddlePointer() : address_(NULL, 0) {}
37
+
38
+ bool Found() const {
39
+ return address_.base != NULL;
40
+ }
41
+
42
+ float Prob() const {
43
+ return util::ReadNonPositiveFloat31(address_.base, address_.offset);
44
+ }
45
+
46
+ float Backoff() const {
47
+ return util::ReadFloat32(address_.base, address_.offset + 31);
48
+ }
49
+
50
+ float Rest() const { return Prob(); }
51
+
52
+ void Write(float prob, float backoff) {
53
+ util::WriteNonPositiveFloat31(address_.base, address_.offset, prob);
54
+ util::WriteFloat32(address_.base, address_.offset + 31, backoff);
55
+ }
56
+
57
+ private:
58
+ util::BitAddress address_;
59
+ };
60
+
61
+ class LongestPointer {
62
+ public:
63
+ explicit LongestPointer(const DontQuantize &/*quant*/, util::BitAddress address) : address_(address) {}
64
+
65
+ LongestPointer() : address_(NULL, 0) {}
66
+
67
+ bool Found() const {
68
+ return address_.base != NULL;
69
+ }
70
+
71
+ float Prob() const {
72
+ return util::ReadNonPositiveFloat31(address_.base, address_.offset);
73
+ }
74
+
75
+ void Write(float prob) {
76
+ util::WriteNonPositiveFloat31(address_.base, address_.offset, prob);
77
+ }
78
+
79
+ private:
80
+ util::BitAddress address_;
81
+ };
82
+
83
+ DontQuantize() {}
84
+
85
+ void SetupMemory(void * /*start*/, unsigned char /*order*/, const Config & /*config*/) {}
86
+
87
+ static const bool kTrain = false;
88
+ // These should never be called because kTrain is false.
89
+ void Train(uint8_t /*order*/, std::vector<float> &/*prob*/, std::vector<float> &/*backoff*/) {}
90
+ void TrainProb(uint8_t, std::vector<float> &/*prob*/) {}
91
+
92
+ void FinishedLoading(const Config &) {}
93
+ };
94
+
95
+ class SeparatelyQuantize {
96
+ private:
97
+ class Bins {
98
+ public:
99
+ // Sigh C++ default constructor
100
+ Bins() {}
101
+
102
+ Bins(uint8_t bits, float *begin) : begin_(begin), end_(begin_ + (1ULL << bits)), bits_(bits), mask_((1ULL << bits) - 1) {}
103
+
104
+ float *Populate() { return begin_; }
105
+
106
+ uint64_t EncodeProb(float value) const {
107
+ return Encode(value, 0);
108
+ }
109
+
110
+ uint64_t EncodeBackoff(float value) const {
111
+ if (value == 0.0) {
112
+ return HasExtension(value) ? kExtensionQuant : kNoExtensionQuant;
113
+ }
114
+ return Encode(value, 2);
115
+ }
116
+
117
+ float Decode(std::size_t off) const { return begin_[off]; }
118
+
119
+ uint8_t Bits() const { return bits_; }
120
+
121
+ uint64_t Mask() const { return mask_; }
122
+
123
+ private:
124
+ uint64_t Encode(float value, size_t reserved) const {
125
+ const float *above = std::lower_bound(static_cast<const float*>(begin_) + reserved, end_, value);
126
+ if (above == begin_ + reserved) return reserved;
127
+ if (above == end_) return end_ - begin_ - 1;
128
+ return above - begin_ - (value - *(above - 1) < *above - value);
129
+ }
130
+
131
+ float *begin_;
132
+ const float *end_;
133
+ uint8_t bits_;
134
+ uint64_t mask_;
135
+ };
136
+
137
+ public:
138
+ static const ModelType kModelTypeAdd = kQuantAdd;
139
+
140
+ static void UpdateConfigFromBinary(const BinaryFormat &file, uint64_t offset, Config &config);
141
+
142
+ static uint64_t Size(uint8_t order, const Config &config) {
143
+ uint64_t longest_table = (static_cast<uint64_t>(1) << static_cast<uint64_t>(config.prob_bits)) * sizeof(float);
144
+ uint64_t middle_table = (static_cast<uint64_t>(1) << static_cast<uint64_t>(config.backoff_bits)) * sizeof(float) + longest_table;
145
+ // unigrams are currently not quantized so no need for a table.
146
+ return (order - 2) * middle_table + longest_table + /* for the bit counts and alignment padding) */ 8;
147
+ }
148
+
149
+ static uint8_t MiddleBits(const Config &config) { return config.prob_bits + config.backoff_bits; }
150
+ static uint8_t LongestBits(const Config &config) { return config.prob_bits; }
151
+
152
+ class MiddlePointer {
153
+ public:
154
+ MiddlePointer(const SeparatelyQuantize &quant, unsigned char order_minus_2, const util::BitAddress &address) : bins_(quant.GetTables(order_minus_2)), address_(address) {}
155
+
156
+ MiddlePointer() : address_(NULL, 0) {}
157
+
158
+ bool Found() const { return address_.base != NULL; }
159
+
160
+ float Prob() const {
161
+ return ProbBins().Decode(util::ReadInt25(address_.base, address_.offset + BackoffBins().Bits(), ProbBins().Bits(), ProbBins().Mask()));
162
+ }
163
+
164
+ float Backoff() const {
165
+ return BackoffBins().Decode(util::ReadInt25(address_.base, address_.offset, BackoffBins().Bits(), BackoffBins().Mask()));
166
+ }
167
+
168
+ float Rest() const { return Prob(); }
169
+
170
+ void Write(float prob, float backoff) const {
171
+ uint64_t prob_encoded = ProbBins().EncodeProb(prob);
172
+ uint64_t backoff_encoded = BackoffBins().EncodeBackoff(backoff);
173
+ #if BYTE_ORDER == LITTLE_ENDIAN
174
+ prob_encoded <<= BackoffBins().Bits();
175
+ #elif BYTE_ORDER == BIG_ENDIAN
176
+ backoff_encoded <<= ProbBins().Bits();
177
+ #endif
178
+ util::WriteInt57(address_.base, address_.offset, ProbBins().Bits() + BackoffBins().Bits(),
179
+ prob_encoded | backoff_encoded);
180
+ }
181
+
182
+ private:
183
+ const Bins &ProbBins() const { return bins_[0]; }
184
+ const Bins &BackoffBins() const { return bins_[1]; }
185
+ const Bins *bins_;
186
+
187
+ util::BitAddress address_;
188
+ };
189
+
190
+ class LongestPointer {
191
+ public:
192
+ LongestPointer(const SeparatelyQuantize &quant, const util::BitAddress &address) : table_(&quant.LongestTable()), address_(address) {}
193
+
194
+ LongestPointer() : address_(NULL, 0) {}
195
+
196
+ bool Found() const { return address_.base != NULL; }
197
+
198
+ void Write(float prob) const {
199
+ util::WriteInt25(address_.base, address_.offset, table_->Bits(), table_->EncodeProb(prob));
200
+ }
201
+
202
+ float Prob() const {
203
+ return table_->Decode(util::ReadInt25(address_.base, address_.offset, table_->Bits(), table_->Mask()));
204
+ }
205
+
206
+ private:
207
+ const Bins *table_;
208
+ util::BitAddress address_;
209
+ };
210
+
211
+ SeparatelyQuantize() {}
212
+
213
+ void SetupMemory(void *start, unsigned char order, const Config &config);
214
+
215
+ static const bool kTrain = true;
216
+ // Assumes 0.0 is removed from backoff.
217
+ void Train(uint8_t order, std::vector<float> &prob, std::vector<float> &backoff);
218
+ // Train just probabilities (for longest order).
219
+ void TrainProb(uint8_t order, std::vector<float> &prob);
220
+
221
+ void FinishedLoading(const Config &config);
222
+
223
+ const Bins *GetTables(unsigned char order_minus_2) const { return tables_[order_minus_2]; }
224
+
225
+ const Bins &LongestTable() const { return longest_; }
226
+
227
+ private:
228
+ Bins tables_[KENLM_MAX_ORDER - 1][2];
229
+
230
+ Bins longest_;
231
+
232
+ uint8_t *actual_base_;
233
+
234
+ uint8_t prob_bits_, backoff_bits_;
235
+ };
236
+
237
+ } // namespace ngram
238
+ } // namespace lm
239
+
240
+ #endif // LM_QUANTIZE_H
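
To make the Size() arithmetic above concrete (a hypothetical example, not from the source): with order 5 and, say, 8 probability bits and 8 backoff bits, each probability table holds 2^8 floats (1024 bytes) and each backoff table another 1024 bytes, so the three middle orders cost 3 * 2048 bytes, the longest order adds 1024 bytes, and the 8-byte header brings the total to 3 * 2048 + 1024 + 8 = 7176 bytes.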
cc-multilingual-main/cc_net/third_party/kenlm/lm/query_main.cc ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include "ngram_query.hh"
2
+ #include "../util/getopt.hh"
3
+
4
+ #ifdef WITH_NPLM
5
+ #include "wrappers/nplm.hh"
6
+ #endif
7
+
8
+ #include <stdlib.h>
9
+
10
+ void Usage(const char *name) {
11
+ std::cerr <<
12
+ "KenLM was compiled with maximum order " << KENLM_MAX_ORDER << ".\n"
13
+ "Usage: " << name << " [-b] [-n] [-w] [-s] lm_file\n"
14
+ "-b: Do not buffer output.\n"
15
+ "-n: Do not wrap the input in <s> and </s>.\n"
16
+ "-v summary|sentence|word: Print statistics at this level.\n"
17
+ " Can be used multiple times: -v summary -v sentence -v word\n"
18
+ "-l lazy|populate|read|parallel: Load lazily, with populate, or malloc+read\n"
19
+ "The default loading method is populate on Linux and read on others.\n\n"
20
+ "Each word in the output is formatted as:\n"
21
+ " word=vocab_id ngram_length log10(p(word|context))\n"
22
+ "where ngram_length is the length of n-gram matched. A vocab_id of 0 indicates\n"
23
+ "the unknown word. Sentence-level output includes log10 probability of the\n"
24
+ "sentence and OOV count.\n";
25
+ exit(1);
26
+ }
27
+
28
+ int main(int argc, char *argv[]) {
29
+ if (argc == 1 || (argc == 2 && !strcmp(argv[1], "--help")))
30
+ Usage(argv[0]);
31
+
32
+ lm::ngram::Config config;
33
+ bool sentence_context = true;
34
+ bool print_word = false;
35
+ bool print_line = false;
36
+ bool print_summary = false;
37
+ bool flush = false;
38
+
39
+ int opt;
40
+ while ((opt = getopt(argc, argv, "bnv:l:")) != -1) {
41
+ switch (opt) {
42
+ case 'b':
43
+ flush = true;
44
+ break;
45
+ case 'n':
46
+ sentence_context = false;
47
+ break;
48
+ case 'v':
49
+ if (!strcmp(optarg, "2")) {
50
+ print_word = true;
51
+ print_line = true;
52
+ print_summary = true;
53
+ } else if (!strcmp(optarg, "1")) {
54
+ print_word = false;
55
+ print_line = true;
56
+ print_summary = true;
57
+ } else if (!strcmp(optarg, "0")) {
58
+ print_word = false;
59
+ print_line = false;
60
+ print_summary = true;
61
+ } else if (!strcmp(optarg, "word")) {
62
+ print_word = true;
63
+ } else if (!strcmp(optarg, "sentence")) {
64
+ print_line = true;
65
+ } else if (!strcmp(optarg, "summary")) {
66
+ print_summary = true;
67
+ } else {
68
+ Usage(argv[0]);
69
+ }
70
+ break;
71
+ case 'l':
72
+ if (!strcmp(optarg, "lazy")) {
73
+ config.load_method = util::LAZY;
74
+ } else if (!strcmp(optarg, "populate")) {
75
+ config.load_method = util::POPULATE_OR_READ;
76
+ } else if (!strcmp(optarg, "read")) {
77
+ config.load_method = util::READ;
78
+ } else if (!strcmp(optarg, "parallel")) {
79
+ config.load_method = util::PARALLEL_READ;
80
+ } else {
81
+ Usage(argv[0]);
82
+ }
83
+ break;
84
+ case 'h':
85
+ default:
86
+ Usage(argv[0]);
87
+ }
88
+ }
89
+ if (optind + 1 != argc)
90
+ Usage(argv[0]);
91
+ // No verbosity argument specified.
92
+ if (!print_word && !print_line && !print_summary) {
93
+ print_word = true;
94
+ print_line = true;
95
+ print_summary = true;
96
+ }
97
+ lm::ngram::QueryPrinter printer(1, print_word, print_line, print_summary, flush);
98
+ const char *file = argv[optind];
99
+ try {
100
+ using namespace lm::ngram;
101
+ ModelType model_type;
102
+ if (RecognizeBinary(file, model_type)) {
103
+ std::cerr << "This binary file contains " << lm::ngram::kModelNames[model_type] << "." << std::endl;
104
+ switch(model_type) {
105
+ case PROBING:
106
+ Query<lm::ngram::ProbingModel>(file, config, sentence_context, printer);
107
+ break;
108
+ case REST_PROBING:
109
+ Query<lm::ngram::RestProbingModel>(file, config, sentence_context, printer);
110
+ break;
111
+ case TRIE:
112
+ Query<TrieModel>(file, config, sentence_context, printer);
113
+ break;
114
+ case QUANT_TRIE:
115
+ Query<QuantTrieModel>(file, config, sentence_context, printer);
116
+ break;
117
+ case ARRAY_TRIE:
118
+ Query<ArrayTrieModel>(file, config, sentence_context, printer);
119
+ break;
120
+ case QUANT_ARRAY_TRIE:
121
+ Query<QuantArrayTrieModel>(file, config, sentence_context, printer);
122
+ break;
123
+ default:
124
+ std::cerr << "Unrecognized kenlm model type " << model_type << std::endl;
125
+ abort();
126
+ }
127
+ #ifdef WITH_NPLM
128
+ } else if (lm::np::Model::Recognize(file)) {
129
+ lm::np::Model model(file);
130
+ Query<lm::np::Model, lm::ngram::QueryPrinter>(model, sentence_context, printer);
132
+ #endif
133
+ } else {
134
+ Query<ProbingModel>(file, config, sentence_context, printer);
135
+ }
136
+ util::PrintUsage(std::cerr);
137
+ } catch (const std::exception &e) {
138
+ std::cerr << e.what() << std::endl;
139
+ return 1;
140
+ }
141
+ return 0;
142
+ }
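
A hypothetical invocation, based only on the Usage() text above and assuming the binary is built under its usual name query: feed one sentence per line on stdin, for example

  echo "looking on a little more loin" | query -v summary test.arpa

which prints only the corpus-level perplexity and OOV summary.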