applied-ai-018 commited on
Commit
89f370a
·
verified ·
1 Parent(s): be99f12

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.py +36 -0
  2. llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.pyi +12 -0
  3. llmeval-env/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/aiosignal/py.typed +0 -0
  5. llmeval-env/lib/python3.10/site-packages/charset_normalizer/__init__.py +46 -0
  6. llmeval-env/lib/python3.10/site-packages/charset_normalizer/__main__.py +4 -0
  7. llmeval-env/lib/python3.10/site-packages/charset_normalizer/api.py +626 -0
  8. llmeval-env/lib/python3.10/site-packages/charset_normalizer/cd.py +395 -0
  9. llmeval-env/lib/python3.10/site-packages/charset_normalizer/constant.py +1995 -0
  10. llmeval-env/lib/python3.10/site-packages/charset_normalizer/legacy.py +54 -0
  11. llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so +0 -0
  12. llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.py +615 -0
  13. llmeval-env/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so +0 -0
  14. llmeval-env/lib/python3.10/site-packages/charset_normalizer/models.py +340 -0
  15. llmeval-env/lib/python3.10/site-packages/charset_normalizer/py.typed +0 -0
  16. llmeval-env/lib/python3.10/site-packages/charset_normalizer/utils.py +421 -0
  17. llmeval-env/lib/python3.10/site-packages/charset_normalizer/version.py +6 -0
  18. llmeval-env/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py +18 -0
  20. llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py +1487 -0
  24. llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py +13 -0
  25. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py +14 -0
  32. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py +378 -0
  47. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py +173 -0
  48. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py +236 -0
  49. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py +224 -0
  50. llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py +181 -0
llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from frozenlist import FrozenList
2
+
3
+ __version__ = "1.3.1"
4
+
5
+ __all__ = ("Signal",)
6
+
7
+
8
+ class Signal(FrozenList):
9
+ """Coroutine-based signal implementation.
10
+
11
+ To connect a callback to a signal, use any list method.
12
+
13
+ Signals are fired using the send() coroutine, which takes named
14
+ arguments.
15
+ """
16
+
17
+ __slots__ = ("_owner",)
18
+
19
+ def __init__(self, owner):
20
+ super().__init__()
21
+ self._owner = owner
22
+
23
+ def __repr__(self):
24
+ return "<Signal owner={}, frozen={}, {!r}>".format(
25
+ self._owner, self.frozen, list(self)
26
+ )
27
+
28
+ async def send(self, *args, **kwargs):
29
+ """
30
+ Sends data to all registered receivers.
31
+ """
32
+ if not self.frozen:
33
+ raise RuntimeError("Cannot send non-frozen signal.")
34
+
35
+ for receiver in self:
36
+ await receiver(*args, **kwargs) # type: ignore
llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.pyi ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Generic, TypeVar
2
+
3
+ from frozenlist import FrozenList
4
+
5
+ __all__ = ("Signal",)
6
+
7
+ _T = TypeVar("_T")
8
+
9
+ class Signal(FrozenList[_T], Generic[_T]):
10
+ def __init__(self, owner: Any) -> None: ...
11
+ def __repr__(self) -> str: ...
12
+ async def send(self, *args: Any, **kwargs: Any) -> None: ...
llmeval-env/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.34 kB). View file
 
llmeval-env/lib/python3.10/site-packages/aiosignal/py.typed ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/charset_normalizer/__init__.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Charset-Normalizer
4
+ ~~~~~~~~~~~~~~
5
+ The Real First Universal Charset Detector.
6
+ A library that helps you read text from an unknown charset encoding.
7
+ Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
8
+ All IANA character set names for which the Python core library provides codecs are supported.
9
+
10
+ Basic usage:
11
+ >>> from charset_normalizer import from_bytes
12
+ >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
13
+ >>> best_guess = results.best()
14
+ >>> str(best_guess)
15
+ 'Bсеки човек има право на образование. Oбразованието!'
16
+
17
+ Others methods and usages are available - see the full documentation
18
+ at <https://github.com/Ousret/charset_normalizer>.
19
+ :copyright: (c) 2021 by Ahmed TAHRI
20
+ :license: MIT, see LICENSE for more details.
21
+ """
22
+ import logging
23
+
24
+ from .api import from_bytes, from_fp, from_path, is_binary
25
+ from .legacy import detect
26
+ from .models import CharsetMatch, CharsetMatches
27
+ from .utils import set_logging_handler
28
+ from .version import VERSION, __version__
29
+
30
+ __all__ = (
31
+ "from_fp",
32
+ "from_path",
33
+ "from_bytes",
34
+ "is_binary",
35
+ "detect",
36
+ "CharsetMatch",
37
+ "CharsetMatches",
38
+ "__version__",
39
+ "VERSION",
40
+ "set_logging_handler",
41
+ )
42
+
43
+ # Attach a NullHandler to the top level logger by default
44
+ # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
45
+
46
+ logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
llmeval-env/lib/python3.10/site-packages/charset_normalizer/__main__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .cli import cli_detect
2
+
3
+ if __name__ == "__main__":
4
+ cli_detect()
llmeval-env/lib/python3.10/site-packages/charset_normalizer/api.py ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from os import PathLike
3
+ from typing import BinaryIO, List, Optional, Set, Union
4
+
5
+ from .cd import (
6
+ coherence_ratio,
7
+ encoding_languages,
8
+ mb_encoding_languages,
9
+ merge_coherence_ratios,
10
+ )
11
+ from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
12
+ from .md import mess_ratio
13
+ from .models import CharsetMatch, CharsetMatches
14
+ from .utils import (
15
+ any_specified_encoding,
16
+ cut_sequence_chunks,
17
+ iana_name,
18
+ identify_sig_or_bom,
19
+ is_cp_similar,
20
+ is_multi_byte_encoding,
21
+ should_strip_sig_or_bom,
22
+ )
23
+
24
+ # Will most likely be controversial
25
+ # logging.addLevelName(TRACE, "TRACE")
26
+ logger = logging.getLogger("charset_normalizer")
27
+ explain_handler = logging.StreamHandler()
28
+ explain_handler.setFormatter(
29
+ logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
30
+ )
31
+
32
+
33
+ def from_bytes(
34
+ sequences: Union[bytes, bytearray],
35
+ steps: int = 5,
36
+ chunk_size: int = 512,
37
+ threshold: float = 0.2,
38
+ cp_isolation: Optional[List[str]] = None,
39
+ cp_exclusion: Optional[List[str]] = None,
40
+ preemptive_behaviour: bool = True,
41
+ explain: bool = False,
42
+ language_threshold: float = 0.1,
43
+ enable_fallback: bool = True,
44
+ ) -> CharsetMatches:
45
+ """
46
+ Given a raw bytes sequence, return the best possibles charset usable to render str objects.
47
+ If there is no results, it is a strong indicator that the source is binary/not text.
48
+ By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
49
+ And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
50
+
51
+ The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
52
+ but never take it for granted. Can improve the performance.
53
+
54
+ You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
55
+ purpose.
56
+
57
+ This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
58
+ By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
59
+ toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
60
+ Custom logging format and handler can be set manually.
61
+ """
62
+
63
+ if not isinstance(sequences, (bytearray, bytes)):
64
+ raise TypeError(
65
+ "Expected object of type bytes or bytearray, got: {0}".format(
66
+ type(sequences)
67
+ )
68
+ )
69
+
70
+ if explain:
71
+ previous_logger_level: int = logger.level
72
+ logger.addHandler(explain_handler)
73
+ logger.setLevel(TRACE)
74
+
75
+ length: int = len(sequences)
76
+
77
+ if length == 0:
78
+ logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
79
+ if explain:
80
+ logger.removeHandler(explain_handler)
81
+ logger.setLevel(previous_logger_level or logging.WARNING)
82
+ return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
83
+
84
+ if cp_isolation is not None:
85
+ logger.log(
86
+ TRACE,
87
+ "cp_isolation is set. use this flag for debugging purpose. "
88
+ "limited list of encoding allowed : %s.",
89
+ ", ".join(cp_isolation),
90
+ )
91
+ cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
92
+ else:
93
+ cp_isolation = []
94
+
95
+ if cp_exclusion is not None:
96
+ logger.log(
97
+ TRACE,
98
+ "cp_exclusion is set. use this flag for debugging purpose. "
99
+ "limited list of encoding excluded : %s.",
100
+ ", ".join(cp_exclusion),
101
+ )
102
+ cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
103
+ else:
104
+ cp_exclusion = []
105
+
106
+ if length <= (chunk_size * steps):
107
+ logger.log(
108
+ TRACE,
109
+ "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
110
+ steps,
111
+ chunk_size,
112
+ length,
113
+ )
114
+ steps = 1
115
+ chunk_size = length
116
+
117
+ if steps > 1 and length / steps < chunk_size:
118
+ chunk_size = int(length / steps)
119
+
120
+ is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
121
+ is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
122
+
123
+ if is_too_small_sequence:
124
+ logger.log(
125
+ TRACE,
126
+ "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
127
+ length
128
+ ),
129
+ )
130
+ elif is_too_large_sequence:
131
+ logger.log(
132
+ TRACE,
133
+ "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
134
+ length
135
+ ),
136
+ )
137
+
138
+ prioritized_encodings: List[str] = []
139
+
140
+ specified_encoding: Optional[str] = (
141
+ any_specified_encoding(sequences) if preemptive_behaviour else None
142
+ )
143
+
144
+ if specified_encoding is not None:
145
+ prioritized_encodings.append(specified_encoding)
146
+ logger.log(
147
+ TRACE,
148
+ "Detected declarative mark in sequence. Priority +1 given for %s.",
149
+ specified_encoding,
150
+ )
151
+
152
+ tested: Set[str] = set()
153
+ tested_but_hard_failure: List[str] = []
154
+ tested_but_soft_failure: List[str] = []
155
+
156
+ fallback_ascii: Optional[CharsetMatch] = None
157
+ fallback_u8: Optional[CharsetMatch] = None
158
+ fallback_specified: Optional[CharsetMatch] = None
159
+
160
+ results: CharsetMatches = CharsetMatches()
161
+
162
+ sig_encoding, sig_payload = identify_sig_or_bom(sequences)
163
+
164
+ if sig_encoding is not None:
165
+ prioritized_encodings.append(sig_encoding)
166
+ logger.log(
167
+ TRACE,
168
+ "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
169
+ len(sig_payload),
170
+ sig_encoding,
171
+ )
172
+
173
+ prioritized_encodings.append("ascii")
174
+
175
+ if "utf_8" not in prioritized_encodings:
176
+ prioritized_encodings.append("utf_8")
177
+
178
+ for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
179
+ if cp_isolation and encoding_iana not in cp_isolation:
180
+ continue
181
+
182
+ if cp_exclusion and encoding_iana in cp_exclusion:
183
+ continue
184
+
185
+ if encoding_iana in tested:
186
+ continue
187
+
188
+ tested.add(encoding_iana)
189
+
190
+ decoded_payload: Optional[str] = None
191
+ bom_or_sig_available: bool = sig_encoding == encoding_iana
192
+ strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
193
+ encoding_iana
194
+ )
195
+
196
+ if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
197
+ logger.log(
198
+ TRACE,
199
+ "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
200
+ encoding_iana,
201
+ )
202
+ continue
203
+ if encoding_iana in {"utf_7"} and not bom_or_sig_available:
204
+ logger.log(
205
+ TRACE,
206
+ "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
207
+ encoding_iana,
208
+ )
209
+ continue
210
+
211
+ try:
212
+ is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
213
+ except (ModuleNotFoundError, ImportError):
214
+ logger.log(
215
+ TRACE,
216
+ "Encoding %s does not provide an IncrementalDecoder",
217
+ encoding_iana,
218
+ )
219
+ continue
220
+
221
+ try:
222
+ if is_too_large_sequence and is_multi_byte_decoder is False:
223
+ str(
224
+ sequences[: int(50e4)]
225
+ if strip_sig_or_bom is False
226
+ else sequences[len(sig_payload) : int(50e4)],
227
+ encoding=encoding_iana,
228
+ )
229
+ else:
230
+ decoded_payload = str(
231
+ sequences
232
+ if strip_sig_or_bom is False
233
+ else sequences[len(sig_payload) :],
234
+ encoding=encoding_iana,
235
+ )
236
+ except (UnicodeDecodeError, LookupError) as e:
237
+ if not isinstance(e, LookupError):
238
+ logger.log(
239
+ TRACE,
240
+ "Code page %s does not fit given bytes sequence at ALL. %s",
241
+ encoding_iana,
242
+ str(e),
243
+ )
244
+ tested_but_hard_failure.append(encoding_iana)
245
+ continue
246
+
247
+ similar_soft_failure_test: bool = False
248
+
249
+ for encoding_soft_failed in tested_but_soft_failure:
250
+ if is_cp_similar(encoding_iana, encoding_soft_failed):
251
+ similar_soft_failure_test = True
252
+ break
253
+
254
+ if similar_soft_failure_test:
255
+ logger.log(
256
+ TRACE,
257
+ "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
258
+ encoding_iana,
259
+ encoding_soft_failed,
260
+ )
261
+ continue
262
+
263
+ r_ = range(
264
+ 0 if not bom_or_sig_available else len(sig_payload),
265
+ length,
266
+ int(length / steps),
267
+ )
268
+
269
+ multi_byte_bonus: bool = (
270
+ is_multi_byte_decoder
271
+ and decoded_payload is not None
272
+ and len(decoded_payload) < length
273
+ )
274
+
275
+ if multi_byte_bonus:
276
+ logger.log(
277
+ TRACE,
278
+ "Code page %s is a multi byte encoding table and it appear that at least one character "
279
+ "was encoded using n-bytes.",
280
+ encoding_iana,
281
+ )
282
+
283
+ max_chunk_gave_up: int = int(len(r_) / 4)
284
+
285
+ max_chunk_gave_up = max(max_chunk_gave_up, 2)
286
+ early_stop_count: int = 0
287
+ lazy_str_hard_failure = False
288
+
289
+ md_chunks: List[str] = []
290
+ md_ratios = []
291
+
292
+ try:
293
+ for chunk in cut_sequence_chunks(
294
+ sequences,
295
+ encoding_iana,
296
+ r_,
297
+ chunk_size,
298
+ bom_or_sig_available,
299
+ strip_sig_or_bom,
300
+ sig_payload,
301
+ is_multi_byte_decoder,
302
+ decoded_payload,
303
+ ):
304
+ md_chunks.append(chunk)
305
+
306
+ md_ratios.append(
307
+ mess_ratio(
308
+ chunk,
309
+ threshold,
310
+ explain is True and 1 <= len(cp_isolation) <= 2,
311
+ )
312
+ )
313
+
314
+ if md_ratios[-1] >= threshold:
315
+ early_stop_count += 1
316
+
317
+ if (early_stop_count >= max_chunk_gave_up) or (
318
+ bom_or_sig_available and strip_sig_or_bom is False
319
+ ):
320
+ break
321
+ except (
322
+ UnicodeDecodeError
323
+ ) as e: # Lazy str loading may have missed something there
324
+ logger.log(
325
+ TRACE,
326
+ "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
327
+ encoding_iana,
328
+ str(e),
329
+ )
330
+ early_stop_count = max_chunk_gave_up
331
+ lazy_str_hard_failure = True
332
+
333
+ # We might want to check the sequence again with the whole content
334
+ # Only if initial MD tests passes
335
+ if (
336
+ not lazy_str_hard_failure
337
+ and is_too_large_sequence
338
+ and not is_multi_byte_decoder
339
+ ):
340
+ try:
341
+ sequences[int(50e3) :].decode(encoding_iana, errors="strict")
342
+ except UnicodeDecodeError as e:
343
+ logger.log(
344
+ TRACE,
345
+ "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
346
+ encoding_iana,
347
+ str(e),
348
+ )
349
+ tested_but_hard_failure.append(encoding_iana)
350
+ continue
351
+
352
+ mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
353
+ if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
354
+ tested_but_soft_failure.append(encoding_iana)
355
+ logger.log(
356
+ TRACE,
357
+ "%s was excluded because of initial chaos probing. Gave up %i time(s). "
358
+ "Computed mean chaos is %f %%.",
359
+ encoding_iana,
360
+ early_stop_count,
361
+ round(mean_mess_ratio * 100, ndigits=3),
362
+ )
363
+ # Preparing those fallbacks in case we got nothing.
364
+ if (
365
+ enable_fallback
366
+ and encoding_iana in ["ascii", "utf_8", specified_encoding]
367
+ and not lazy_str_hard_failure
368
+ ):
369
+ fallback_entry = CharsetMatch(
370
+ sequences, encoding_iana, threshold, False, [], decoded_payload
371
+ )
372
+ if encoding_iana == specified_encoding:
373
+ fallback_specified = fallback_entry
374
+ elif encoding_iana == "ascii":
375
+ fallback_ascii = fallback_entry
376
+ else:
377
+ fallback_u8 = fallback_entry
378
+ continue
379
+
380
+ logger.log(
381
+ TRACE,
382
+ "%s passed initial chaos probing. Mean measured chaos is %f %%",
383
+ encoding_iana,
384
+ round(mean_mess_ratio * 100, ndigits=3),
385
+ )
386
+
387
+ if not is_multi_byte_decoder:
388
+ target_languages: List[str] = encoding_languages(encoding_iana)
389
+ else:
390
+ target_languages = mb_encoding_languages(encoding_iana)
391
+
392
+ if target_languages:
393
+ logger.log(
394
+ TRACE,
395
+ "{} should target any language(s) of {}".format(
396
+ encoding_iana, str(target_languages)
397
+ ),
398
+ )
399
+
400
+ cd_ratios = []
401
+
402
+ # We shall skip the CD when its about ASCII
403
+ # Most of the time its not relevant to run "language-detection" on it.
404
+ if encoding_iana != "ascii":
405
+ for chunk in md_chunks:
406
+ chunk_languages = coherence_ratio(
407
+ chunk,
408
+ language_threshold,
409
+ ",".join(target_languages) if target_languages else None,
410
+ )
411
+
412
+ cd_ratios.append(chunk_languages)
413
+
414
+ cd_ratios_merged = merge_coherence_ratios(cd_ratios)
415
+
416
+ if cd_ratios_merged:
417
+ logger.log(
418
+ TRACE,
419
+ "We detected language {} using {}".format(
420
+ cd_ratios_merged, encoding_iana
421
+ ),
422
+ )
423
+
424
+ results.append(
425
+ CharsetMatch(
426
+ sequences,
427
+ encoding_iana,
428
+ mean_mess_ratio,
429
+ bom_or_sig_available,
430
+ cd_ratios_merged,
431
+ decoded_payload,
432
+ )
433
+ )
434
+
435
+ if (
436
+ encoding_iana in [specified_encoding, "ascii", "utf_8"]
437
+ and mean_mess_ratio < 0.1
438
+ ):
439
+ logger.debug(
440
+ "Encoding detection: %s is most likely the one.", encoding_iana
441
+ )
442
+ if explain:
443
+ logger.removeHandler(explain_handler)
444
+ logger.setLevel(previous_logger_level)
445
+ return CharsetMatches([results[encoding_iana]])
446
+
447
+ if encoding_iana == sig_encoding:
448
+ logger.debug(
449
+ "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
450
+ "the beginning of the sequence.",
451
+ encoding_iana,
452
+ )
453
+ if explain:
454
+ logger.removeHandler(explain_handler)
455
+ logger.setLevel(previous_logger_level)
456
+ return CharsetMatches([results[encoding_iana]])
457
+
458
+ if len(results) == 0:
459
+ if fallback_u8 or fallback_ascii or fallback_specified:
460
+ logger.log(
461
+ TRACE,
462
+ "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
463
+ )
464
+
465
+ if fallback_specified:
466
+ logger.debug(
467
+ "Encoding detection: %s will be used as a fallback match",
468
+ fallback_specified.encoding,
469
+ )
470
+ results.append(fallback_specified)
471
+ elif (
472
+ (fallback_u8 and fallback_ascii is None)
473
+ or (
474
+ fallback_u8
475
+ and fallback_ascii
476
+ and fallback_u8.fingerprint != fallback_ascii.fingerprint
477
+ )
478
+ or (fallback_u8 is not None)
479
+ ):
480
+ logger.debug("Encoding detection: utf_8 will be used as a fallback match")
481
+ results.append(fallback_u8)
482
+ elif fallback_ascii:
483
+ logger.debug("Encoding detection: ascii will be used as a fallback match")
484
+ results.append(fallback_ascii)
485
+
486
+ if results:
487
+ logger.debug(
488
+ "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
489
+ results.best().encoding, # type: ignore
490
+ len(results) - 1,
491
+ )
492
+ else:
493
+ logger.debug("Encoding detection: Unable to determine any suitable charset.")
494
+
495
+ if explain:
496
+ logger.removeHandler(explain_handler)
497
+ logger.setLevel(previous_logger_level)
498
+
499
+ return results
500
+
501
+
502
+ def from_fp(
503
+ fp: BinaryIO,
504
+ steps: int = 5,
505
+ chunk_size: int = 512,
506
+ threshold: float = 0.20,
507
+ cp_isolation: Optional[List[str]] = None,
508
+ cp_exclusion: Optional[List[str]] = None,
509
+ preemptive_behaviour: bool = True,
510
+ explain: bool = False,
511
+ language_threshold: float = 0.1,
512
+ enable_fallback: bool = True,
513
+ ) -> CharsetMatches:
514
+ """
515
+ Same thing than the function from_bytes but using a file pointer that is already ready.
516
+ Will not close the file pointer.
517
+ """
518
+ return from_bytes(
519
+ fp.read(),
520
+ steps,
521
+ chunk_size,
522
+ threshold,
523
+ cp_isolation,
524
+ cp_exclusion,
525
+ preemptive_behaviour,
526
+ explain,
527
+ language_threshold,
528
+ enable_fallback,
529
+ )
530
+
531
+
532
+ def from_path(
533
+ path: Union[str, bytes, PathLike], # type: ignore[type-arg]
534
+ steps: int = 5,
535
+ chunk_size: int = 512,
536
+ threshold: float = 0.20,
537
+ cp_isolation: Optional[List[str]] = None,
538
+ cp_exclusion: Optional[List[str]] = None,
539
+ preemptive_behaviour: bool = True,
540
+ explain: bool = False,
541
+ language_threshold: float = 0.1,
542
+ enable_fallback: bool = True,
543
+ ) -> CharsetMatches:
544
+ """
545
+ Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
546
+ Can raise IOError.
547
+ """
548
+ with open(path, "rb") as fp:
549
+ return from_fp(
550
+ fp,
551
+ steps,
552
+ chunk_size,
553
+ threshold,
554
+ cp_isolation,
555
+ cp_exclusion,
556
+ preemptive_behaviour,
557
+ explain,
558
+ language_threshold,
559
+ enable_fallback,
560
+ )
561
+
562
+
563
+ def is_binary(
564
+ fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg]
565
+ steps: int = 5,
566
+ chunk_size: int = 512,
567
+ threshold: float = 0.20,
568
+ cp_isolation: Optional[List[str]] = None,
569
+ cp_exclusion: Optional[List[str]] = None,
570
+ preemptive_behaviour: bool = True,
571
+ explain: bool = False,
572
+ language_threshold: float = 0.1,
573
+ enable_fallback: bool = False,
574
+ ) -> bool:
575
+ """
576
+ Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
577
+ Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
578
+ are disabled to be stricter around ASCII-compatible but unlikely to be a string.
579
+ """
580
+ if isinstance(fp_or_path_or_payload, (str, PathLike)):
581
+ guesses = from_path(
582
+ fp_or_path_or_payload,
583
+ steps=steps,
584
+ chunk_size=chunk_size,
585
+ threshold=threshold,
586
+ cp_isolation=cp_isolation,
587
+ cp_exclusion=cp_exclusion,
588
+ preemptive_behaviour=preemptive_behaviour,
589
+ explain=explain,
590
+ language_threshold=language_threshold,
591
+ enable_fallback=enable_fallback,
592
+ )
593
+ elif isinstance(
594
+ fp_or_path_or_payload,
595
+ (
596
+ bytes,
597
+ bytearray,
598
+ ),
599
+ ):
600
+ guesses = from_bytes(
601
+ fp_or_path_or_payload,
602
+ steps=steps,
603
+ chunk_size=chunk_size,
604
+ threshold=threshold,
605
+ cp_isolation=cp_isolation,
606
+ cp_exclusion=cp_exclusion,
607
+ preemptive_behaviour=preemptive_behaviour,
608
+ explain=explain,
609
+ language_threshold=language_threshold,
610
+ enable_fallback=enable_fallback,
611
+ )
612
+ else:
613
+ guesses = from_fp(
614
+ fp_or_path_or_payload,
615
+ steps=steps,
616
+ chunk_size=chunk_size,
617
+ threshold=threshold,
618
+ cp_isolation=cp_isolation,
619
+ cp_exclusion=cp_exclusion,
620
+ preemptive_behaviour=preemptive_behaviour,
621
+ explain=explain,
622
+ language_threshold=language_threshold,
623
+ enable_fallback=enable_fallback,
624
+ )
625
+
626
+ return not guesses
llmeval-env/lib/python3.10/site-packages/charset_normalizer/cd.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ from codecs import IncrementalDecoder
3
+ from collections import Counter
4
+ from functools import lru_cache
5
+ from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
6
+
7
+ from .constant import (
8
+ FREQUENCIES,
9
+ KO_NAMES,
10
+ LANGUAGE_SUPPORTED_COUNT,
11
+ TOO_SMALL_SEQUENCE,
12
+ ZH_NAMES,
13
+ )
14
+ from .md import is_suspiciously_successive_range
15
+ from .models import CoherenceMatches
16
+ from .utils import (
17
+ is_accentuated,
18
+ is_latin,
19
+ is_multi_byte_encoding,
20
+ is_unicode_range_secondary,
21
+ unicode_range,
22
+ )
23
+
24
+
25
+ def encoding_unicode_range(iana_name: str) -> List[str]:
26
+ """
27
+ Return associated unicode ranges in a single byte code page.
28
+ """
29
+ if is_multi_byte_encoding(iana_name):
30
+ raise IOError("Function not supported on multi-byte code page")
31
+
32
+ decoder = importlib.import_module(
33
+ "encodings.{}".format(iana_name)
34
+ ).IncrementalDecoder
35
+
36
+ p: IncrementalDecoder = decoder(errors="ignore")
37
+ seen_ranges: Dict[str, int] = {}
38
+ character_count: int = 0
39
+
40
+ for i in range(0x40, 0xFF):
41
+ chunk: str = p.decode(bytes([i]))
42
+
43
+ if chunk:
44
+ character_range: Optional[str] = unicode_range(chunk)
45
+
46
+ if character_range is None:
47
+ continue
48
+
49
+ if is_unicode_range_secondary(character_range) is False:
50
+ if character_range not in seen_ranges:
51
+ seen_ranges[character_range] = 0
52
+ seen_ranges[character_range] += 1
53
+ character_count += 1
54
+
55
+ return sorted(
56
+ [
57
+ character_range
58
+ for character_range in seen_ranges
59
+ if seen_ranges[character_range] / character_count >= 0.15
60
+ ]
61
+ )
62
+
63
+
64
+ def unicode_range_languages(primary_range: str) -> List[str]:
65
+ """
66
+ Return inferred languages used with a unicode range.
67
+ """
68
+ languages: List[str] = []
69
+
70
+ for language, characters in FREQUENCIES.items():
71
+ for character in characters:
72
+ if unicode_range(character) == primary_range:
73
+ languages.append(language)
74
+ break
75
+
76
+ return languages
77
+
78
+
79
+ @lru_cache()
80
+ def encoding_languages(iana_name: str) -> List[str]:
81
+ """
82
+ Single-byte encoding language association. Some code page are heavily linked to particular language(s).
83
+ This function does the correspondence.
84
+ """
85
+ unicode_ranges: List[str] = encoding_unicode_range(iana_name)
86
+ primary_range: Optional[str] = None
87
+
88
+ for specified_range in unicode_ranges:
89
+ if "Latin" not in specified_range:
90
+ primary_range = specified_range
91
+ break
92
+
93
+ if primary_range is None:
94
+ return ["Latin Based"]
95
+
96
+ return unicode_range_languages(primary_range)
97
+
98
+
99
+ @lru_cache()
100
+ def mb_encoding_languages(iana_name: str) -> List[str]:
101
+ """
102
+ Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
103
+ This function does the correspondence.
104
+ """
105
+ if (
106
+ iana_name.startswith("shift_")
107
+ or iana_name.startswith("iso2022_jp")
108
+ or iana_name.startswith("euc_j")
109
+ or iana_name == "cp932"
110
+ ):
111
+ return ["Japanese"]
112
+ if iana_name.startswith("gb") or iana_name in ZH_NAMES:
113
+ return ["Chinese"]
114
+ if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
115
+ return ["Korean"]
116
+
117
+ return []
118
+
119
+
120
+ @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
121
+ def get_target_features(language: str) -> Tuple[bool, bool]:
122
+ """
123
+ Determine main aspects from a supported language if it contains accents and if is pure Latin.
124
+ """
125
+ target_have_accents: bool = False
126
+ target_pure_latin: bool = True
127
+
128
+ for character in FREQUENCIES[language]:
129
+ if not target_have_accents and is_accentuated(character):
130
+ target_have_accents = True
131
+ if target_pure_latin and is_latin(character) is False:
132
+ target_pure_latin = False
133
+
134
+ return target_have_accents, target_pure_latin
135
+
136
+
137
+ def alphabet_languages(
138
+ characters: List[str], ignore_non_latin: bool = False
139
+ ) -> List[str]:
140
+ """
141
+ Return associated languages associated to given characters.
142
+ """
143
+ languages: List[Tuple[str, float]] = []
144
+
145
+ source_have_accents = any(is_accentuated(character) for character in characters)
146
+
147
+ for language, language_characters in FREQUENCIES.items():
148
+ target_have_accents, target_pure_latin = get_target_features(language)
149
+
150
+ if ignore_non_latin and target_pure_latin is False:
151
+ continue
152
+
153
+ if target_have_accents is False and source_have_accents:
154
+ continue
155
+
156
+ character_count: int = len(language_characters)
157
+
158
+ character_match_count: int = len(
159
+ [c for c in language_characters if c in characters]
160
+ )
161
+
162
+ ratio: float = character_match_count / character_count
163
+
164
+ if ratio >= 0.2:
165
+ languages.append((language, ratio))
166
+
167
+ languages = sorted(languages, key=lambda x: x[1], reverse=True)
168
+
169
+ return [compatible_language[0] for compatible_language in languages]
170
+
171
+
172
+ def characters_popularity_compare(
173
+ language: str, ordered_characters: List[str]
174
+ ) -> float:
175
+ """
176
+ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
177
+ The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
178
+ Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
179
+ """
180
+ if language not in FREQUENCIES:
181
+ raise ValueError("{} not available".format(language))
182
+
183
+ character_approved_count: int = 0
184
+ FREQUENCIES_language_set = set(FREQUENCIES[language])
185
+
186
+ ordered_characters_count: int = len(ordered_characters)
187
+ target_language_characters_count: int = len(FREQUENCIES[language])
188
+
189
+ large_alphabet: bool = target_language_characters_count > 26
190
+
191
+ for character, character_rank in zip(
192
+ ordered_characters, range(0, ordered_characters_count)
193
+ ):
194
+ if character not in FREQUENCIES_language_set:
195
+ continue
196
+
197
+ character_rank_in_language: int = FREQUENCIES[language].index(character)
198
+ expected_projection_ratio: float = (
199
+ target_language_characters_count / ordered_characters_count
200
+ )
201
+ character_rank_projection: int = int(character_rank * expected_projection_ratio)
202
+
203
+ if (
204
+ large_alphabet is False
205
+ and abs(character_rank_projection - character_rank_in_language) > 4
206
+ ):
207
+ continue
208
+
209
+ if (
210
+ large_alphabet is True
211
+ and abs(character_rank_projection - character_rank_in_language)
212
+ < target_language_characters_count / 3
213
+ ):
214
+ character_approved_count += 1
215
+ continue
216
+
217
+ characters_before_source: List[str] = FREQUENCIES[language][
218
+ 0:character_rank_in_language
219
+ ]
220
+ characters_after_source: List[str] = FREQUENCIES[language][
221
+ character_rank_in_language:
222
+ ]
223
+ characters_before: List[str] = ordered_characters[0:character_rank]
224
+ characters_after: List[str] = ordered_characters[character_rank:]
225
+
226
+ before_match_count: int = len(
227
+ set(characters_before) & set(characters_before_source)
228
+ )
229
+
230
+ after_match_count: int = len(
231
+ set(characters_after) & set(characters_after_source)
232
+ )
233
+
234
+ if len(characters_before_source) == 0 and before_match_count <= 4:
235
+ character_approved_count += 1
236
+ continue
237
+
238
+ if len(characters_after_source) == 0 and after_match_count <= 4:
239
+ character_approved_count += 1
240
+ continue
241
+
242
+ if (
243
+ before_match_count / len(characters_before_source) >= 0.4
244
+ or after_match_count / len(characters_after_source) >= 0.4
245
+ ):
246
+ character_approved_count += 1
247
+ continue
248
+
249
+ return character_approved_count / len(ordered_characters)
250
+
251
+
252
+ def alpha_unicode_split(decoded_sequence: str) -> List[str]:
253
+ """
254
+ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
255
+ Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
256
+ One containing the latin letters and the other hebrew.
257
+ """
258
+ layers: Dict[str, str] = {}
259
+
260
+ for character in decoded_sequence:
261
+ if character.isalpha() is False:
262
+ continue
263
+
264
+ character_range: Optional[str] = unicode_range(character)
265
+
266
+ if character_range is None:
267
+ continue
268
+
269
+ layer_target_range: Optional[str] = None
270
+
271
+ for discovered_range in layers:
272
+ if (
273
+ is_suspiciously_successive_range(discovered_range, character_range)
274
+ is False
275
+ ):
276
+ layer_target_range = discovered_range
277
+ break
278
+
279
+ if layer_target_range is None:
280
+ layer_target_range = character_range
281
+
282
+ if layer_target_range not in layers:
283
+ layers[layer_target_range] = character.lower()
284
+ continue
285
+
286
+ layers[layer_target_range] += character.lower()
287
+
288
+ return list(layers.values())
289
+
290
+
291
+ def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
292
+ """
293
+ This function merge results previously given by the function coherence_ratio.
294
+ The return type is the same as coherence_ratio.
295
+ """
296
+ per_language_ratios: Dict[str, List[float]] = {}
297
+ for result in results:
298
+ for sub_result in result:
299
+ language, ratio = sub_result
300
+ if language not in per_language_ratios:
301
+ per_language_ratios[language] = [ratio]
302
+ continue
303
+ per_language_ratios[language].append(ratio)
304
+
305
+ merge = [
306
+ (
307
+ language,
308
+ round(
309
+ sum(per_language_ratios[language]) / len(per_language_ratios[language]),
310
+ 4,
311
+ ),
312
+ )
313
+ for language in per_language_ratios
314
+ ]
315
+
316
+ return sorted(merge, key=lambda x: x[1], reverse=True)
317
+
318
+
319
+ def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
320
+ """
321
+ We shall NOT return "English—" in CoherenceMatches because it is an alternative
322
+ of "English". This function only keeps the best match and remove the em-dash in it.
323
+ """
324
+ index_results: Dict[str, List[float]] = dict()
325
+
326
+ for result in results:
327
+ language, ratio = result
328
+ no_em_name: str = language.replace("—", "")
329
+
330
+ if no_em_name not in index_results:
331
+ index_results[no_em_name] = []
332
+
333
+ index_results[no_em_name].append(ratio)
334
+
335
+ if any(len(index_results[e]) > 1 for e in index_results):
336
+ filtered_results: CoherenceMatches = []
337
+
338
+ for language in index_results:
339
+ filtered_results.append((language, max(index_results[language])))
340
+
341
+ return filtered_results
342
+
343
+ return results
344
+
345
+
346
+ @lru_cache(maxsize=2048)
347
+ def coherence_ratio(
348
+ decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
349
+ ) -> CoherenceMatches:
350
+ """
351
+ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
352
+ A layer = Character extraction by alphabets/ranges.
353
+ """
354
+
355
+ results: List[Tuple[str, float]] = []
356
+ ignore_non_latin: bool = False
357
+
358
+ sufficient_match_count: int = 0
359
+
360
+ lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
361
+ if "Latin Based" in lg_inclusion_list:
362
+ ignore_non_latin = True
363
+ lg_inclusion_list.remove("Latin Based")
364
+
365
+ for layer in alpha_unicode_split(decoded_sequence):
366
+ sequence_frequencies: TypeCounter[str] = Counter(layer)
367
+ most_common = sequence_frequencies.most_common()
368
+
369
+ character_count: int = sum(o for c, o in most_common)
370
+
371
+ if character_count <= TOO_SMALL_SEQUENCE:
372
+ continue
373
+
374
+ popular_character_ordered: List[str] = [c for c, o in most_common]
375
+
376
+ for language in lg_inclusion_list or alphabet_languages(
377
+ popular_character_ordered, ignore_non_latin
378
+ ):
379
+ ratio: float = characters_popularity_compare(
380
+ language, popular_character_ordered
381
+ )
382
+
383
+ if ratio < threshold:
384
+ continue
385
+ elif ratio >= 0.8:
386
+ sufficient_match_count += 1
387
+
388
+ results.append((language, round(ratio, 4)))
389
+
390
+ if sufficient_match_count >= 3:
391
+ break
392
+
393
+ return sorted(
394
+ filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
395
+ )
llmeval-env/lib/python3.10/site-packages/charset_normalizer/constant.py ADDED
@@ -0,0 +1,1995 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
3
+ from encodings.aliases import aliases
4
+ from re import IGNORECASE, compile as re_compile
5
+ from typing import Dict, List, Set, Union
6
+
7
+ # Contain for each eligible encoding a list of/item bytes SIG/BOM
8
+ ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = {
9
+ "utf_8": BOM_UTF8,
10
+ "utf_7": [
11
+ b"\x2b\x2f\x76\x38",
12
+ b"\x2b\x2f\x76\x39",
13
+ b"\x2b\x2f\x76\x2b",
14
+ b"\x2b\x2f\x76\x2f",
15
+ b"\x2b\x2f\x76\x38\x2d",
16
+ ],
17
+ "gb18030": b"\x84\x31\x95\x33",
18
+ "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE],
19
+ "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE],
20
+ }
21
+
22
+ TOO_SMALL_SEQUENCE: int = 32
23
+ TOO_BIG_SEQUENCE: int = int(10e6)
24
+
25
+ UTF8_MAXIMAL_ALLOCATION: int = 1_112_064
26
+
27
+ # Up-to-date Unicode ucd/15.0.0
28
+ UNICODE_RANGES_COMBINED: Dict[str, range] = {
29
+ "Control character": range(32),
30
+ "Basic Latin": range(32, 128),
31
+ "Latin-1 Supplement": range(128, 256),
32
+ "Latin Extended-A": range(256, 384),
33
+ "Latin Extended-B": range(384, 592),
34
+ "IPA Extensions": range(592, 688),
35
+ "Spacing Modifier Letters": range(688, 768),
36
+ "Combining Diacritical Marks": range(768, 880),
37
+ "Greek and Coptic": range(880, 1024),
38
+ "Cyrillic": range(1024, 1280),
39
+ "Cyrillic Supplement": range(1280, 1328),
40
+ "Armenian": range(1328, 1424),
41
+ "Hebrew": range(1424, 1536),
42
+ "Arabic": range(1536, 1792),
43
+ "Syriac": range(1792, 1872),
44
+ "Arabic Supplement": range(1872, 1920),
45
+ "Thaana": range(1920, 1984),
46
+ "NKo": range(1984, 2048),
47
+ "Samaritan": range(2048, 2112),
48
+ "Mandaic": range(2112, 2144),
49
+ "Syriac Supplement": range(2144, 2160),
50
+ "Arabic Extended-B": range(2160, 2208),
51
+ "Arabic Extended-A": range(2208, 2304),
52
+ "Devanagari": range(2304, 2432),
53
+ "Bengali": range(2432, 2560),
54
+ "Gurmukhi": range(2560, 2688),
55
+ "Gujarati": range(2688, 2816),
56
+ "Oriya": range(2816, 2944),
57
+ "Tamil": range(2944, 3072),
58
+ "Telugu": range(3072, 3200),
59
+ "Kannada": range(3200, 3328),
60
+ "Malayalam": range(3328, 3456),
61
+ "Sinhala": range(3456, 3584),
62
+ "Thai": range(3584, 3712),
63
+ "Lao": range(3712, 3840),
64
+ "Tibetan": range(3840, 4096),
65
+ "Myanmar": range(4096, 4256),
66
+ "Georgian": range(4256, 4352),
67
+ "Hangul Jamo": range(4352, 4608),
68
+ "Ethiopic": range(4608, 4992),
69
+ "Ethiopic Supplement": range(4992, 5024),
70
+ "Cherokee": range(5024, 5120),
71
+ "Unified Canadian Aboriginal Syllabics": range(5120, 5760),
72
+ "Ogham": range(5760, 5792),
73
+ "Runic": range(5792, 5888),
74
+ "Tagalog": range(5888, 5920),
75
+ "Hanunoo": range(5920, 5952),
76
+ "Buhid": range(5952, 5984),
77
+ "Tagbanwa": range(5984, 6016),
78
+ "Khmer": range(6016, 6144),
79
+ "Mongolian": range(6144, 6320),
80
+ "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6400),
81
+ "Limbu": range(6400, 6480),
82
+ "Tai Le": range(6480, 6528),
83
+ "New Tai Lue": range(6528, 6624),
84
+ "Khmer Symbols": range(6624, 6656),
85
+ "Buginese": range(6656, 6688),
86
+ "Tai Tham": range(6688, 6832),
87
+ "Combining Diacritical Marks Extended": range(6832, 6912),
88
+ "Balinese": range(6912, 7040),
89
+ "Sundanese": range(7040, 7104),
90
+ "Batak": range(7104, 7168),
91
+ "Lepcha": range(7168, 7248),
92
+ "Ol Chiki": range(7248, 7296),
93
+ "Cyrillic Extended-C": range(7296, 7312),
94
+ "Georgian Extended": range(7312, 7360),
95
+ "Sundanese Supplement": range(7360, 7376),
96
+ "Vedic Extensions": range(7376, 7424),
97
+ "Phonetic Extensions": range(7424, 7552),
98
+ "Phonetic Extensions Supplement": range(7552, 7616),
99
+ "Combining Diacritical Marks Supplement": range(7616, 7680),
100
+ "Latin Extended Additional": range(7680, 7936),
101
+ "Greek Extended": range(7936, 8192),
102
+ "General Punctuation": range(8192, 8304),
103
+ "Superscripts and Subscripts": range(8304, 8352),
104
+ "Currency Symbols": range(8352, 8400),
105
+ "Combining Diacritical Marks for Symbols": range(8400, 8448),
106
+ "Letterlike Symbols": range(8448, 8528),
107
+ "Number Forms": range(8528, 8592),
108
+ "Arrows": range(8592, 8704),
109
+ "Mathematical Operators": range(8704, 8960),
110
+ "Miscellaneous Technical": range(8960, 9216),
111
+ "Control Pictures": range(9216, 9280),
112
+ "Optical Character Recognition": range(9280, 9312),
113
+ "Enclosed Alphanumerics": range(9312, 9472),
114
+ "Box Drawing": range(9472, 9600),
115
+ "Block Elements": range(9600, 9632),
116
+ "Geometric Shapes": range(9632, 9728),
117
+ "Miscellaneous Symbols": range(9728, 9984),
118
+ "Dingbats": range(9984, 10176),
119
+ "Miscellaneous Mathematical Symbols-A": range(10176, 10224),
120
+ "Supplemental Arrows-A": range(10224, 10240),
121
+ "Braille Patterns": range(10240, 10496),
122
+ "Supplemental Arrows-B": range(10496, 10624),
123
+ "Miscellaneous Mathematical Symbols-B": range(10624, 10752),
124
+ "Supplemental Mathematical Operators": range(10752, 11008),
125
+ "Miscellaneous Symbols and Arrows": range(11008, 11264),
126
+ "Glagolitic": range(11264, 11360),
127
+ "Latin Extended-C": range(11360, 11392),
128
+ "Coptic": range(11392, 11520),
129
+ "Georgian Supplement": range(11520, 11568),
130
+ "Tifinagh": range(11568, 11648),
131
+ "Ethiopic Extended": range(11648, 11744),
132
+ "Cyrillic Extended-A": range(11744, 11776),
133
+ "Supplemental Punctuation": range(11776, 11904),
134
+ "CJK Radicals Supplement": range(11904, 12032),
135
+ "Kangxi Radicals": range(12032, 12256),
136
+ "Ideographic Description Characters": range(12272, 12288),
137
+ "CJK Symbols and Punctuation": range(12288, 12352),
138
+ "Hiragana": range(12352, 12448),
139
+ "Katakana": range(12448, 12544),
140
+ "Bopomofo": range(12544, 12592),
141
+ "Hangul Compatibility Jamo": range(12592, 12688),
142
+ "Kanbun": range(12688, 12704),
143
+ "Bopomofo Extended": range(12704, 12736),
144
+ "CJK Strokes": range(12736, 12784),
145
+ "Katakana Phonetic Extensions": range(12784, 12800),
146
+ "Enclosed CJK Letters and Months": range(12800, 13056),
147
+ "CJK Compatibility": range(13056, 13312),
148
+ "CJK Unified Ideographs Extension A": range(13312, 19904),
149
+ "Yijing Hexagram Symbols": range(19904, 19968),
150
+ "CJK Unified Ideographs": range(19968, 40960),
151
+ "Yi Syllables": range(40960, 42128),
152
+ "Yi Radicals": range(42128, 42192),
153
+ "Lisu": range(42192, 42240),
154
+ "Vai": range(42240, 42560),
155
+ "Cyrillic Extended-B": range(42560, 42656),
156
+ "Bamum": range(42656, 42752),
157
+ "Modifier Tone Letters": range(42752, 42784),
158
+ "Latin Extended-D": range(42784, 43008),
159
+ "Syloti Nagri": range(43008, 43056),
160
+ "Common Indic Number Forms": range(43056, 43072),
161
+ "Phags-pa": range(43072, 43136),
162
+ "Saurashtra": range(43136, 43232),
163
+ "Devanagari Extended": range(43232, 43264),
164
+ "Kayah Li": range(43264, 43312),
165
+ "Rejang": range(43312, 43360),
166
+ "Hangul Jamo Extended-A": range(43360, 43392),
167
+ "Javanese": range(43392, 43488),
168
+ "Myanmar Extended-B": range(43488, 43520),
169
+ "Cham": range(43520, 43616),
170
+ "Myanmar Extended-A": range(43616, 43648),
171
+ "Tai Viet": range(43648, 43744),
172
+ "Meetei Mayek Extensions": range(43744, 43776),
173
+ "Ethiopic Extended-A": range(43776, 43824),
174
+ "Latin Extended-E": range(43824, 43888),
175
+ "Cherokee Supplement": range(43888, 43968),
176
+ "Meetei Mayek": range(43968, 44032),
177
+ "Hangul Syllables": range(44032, 55216),
178
+ "Hangul Jamo Extended-B": range(55216, 55296),
179
+ "High Surrogates": range(55296, 56192),
180
+ "High Private Use Surrogates": range(56192, 56320),
181
+ "Low Surrogates": range(56320, 57344),
182
+ "Private Use Area": range(57344, 63744),
183
+ "CJK Compatibility Ideographs": range(63744, 64256),
184
+ "Alphabetic Presentation Forms": range(64256, 64336),
185
+ "Arabic Presentation Forms-A": range(64336, 65024),
186
+ "Variation Selectors": range(65024, 65040),
187
+ "Vertical Forms": range(65040, 65056),
188
+ "Combining Half Marks": range(65056, 65072),
189
+ "CJK Compatibility Forms": range(65072, 65104),
190
+ "Small Form Variants": range(65104, 65136),
191
+ "Arabic Presentation Forms-B": range(65136, 65280),
192
+ "Halfwidth and Fullwidth Forms": range(65280, 65520),
193
+ "Specials": range(65520, 65536),
194
+ "Linear B Syllabary": range(65536, 65664),
195
+ "Linear B Ideograms": range(65664, 65792),
196
+ "Aegean Numbers": range(65792, 65856),
197
+ "Ancient Greek Numbers": range(65856, 65936),
198
+ "Ancient Symbols": range(65936, 66000),
199
+ "Phaistos Disc": range(66000, 66048),
200
+ "Lycian": range(66176, 66208),
201
+ "Carian": range(66208, 66272),
202
+ "Coptic Epact Numbers": range(66272, 66304),
203
+ "Old Italic": range(66304, 66352),
204
+ "Gothic": range(66352, 66384),
205
+ "Old Permic": range(66384, 66432),
206
+ "Ugaritic": range(66432, 66464),
207
+ "Old Persian": range(66464, 66528),
208
+ "Deseret": range(66560, 66640),
209
+ "Shavian": range(66640, 66688),
210
+ "Osmanya": range(66688, 66736),
211
+ "Osage": range(66736, 66816),
212
+ "Elbasan": range(66816, 66864),
213
+ "Caucasian Albanian": range(66864, 66928),
214
+ "Vithkuqi": range(66928, 67008),
215
+ "Linear A": range(67072, 67456),
216
+ "Latin Extended-F": range(67456, 67520),
217
+ "Cypriot Syllabary": range(67584, 67648),
218
+ "Imperial Aramaic": range(67648, 67680),
219
+ "Palmyrene": range(67680, 67712),
220
+ "Nabataean": range(67712, 67760),
221
+ "Hatran": range(67808, 67840),
222
+ "Phoenician": range(67840, 67872),
223
+ "Lydian": range(67872, 67904),
224
+ "Meroitic Hieroglyphs": range(67968, 68000),
225
+ "Meroitic Cursive": range(68000, 68096),
226
+ "Kharoshthi": range(68096, 68192),
227
+ "Old South Arabian": range(68192, 68224),
228
+ "Old North Arabian": range(68224, 68256),
229
+ "Manichaean": range(68288, 68352),
230
+ "Avestan": range(68352, 68416),
231
+ "Inscriptional Parthian": range(68416, 68448),
232
+ "Inscriptional Pahlavi": range(68448, 68480),
233
+ "Psalter Pahlavi": range(68480, 68528),
234
+ "Old Turkic": range(68608, 68688),
235
+ "Old Hungarian": range(68736, 68864),
236
+ "Hanifi Rohingya": range(68864, 68928),
237
+ "Rumi Numeral Symbols": range(69216, 69248),
238
+ "Yezidi": range(69248, 69312),
239
+ "Arabic Extended-C": range(69312, 69376),
240
+ "Old Sogdian": range(69376, 69424),
241
+ "Sogdian": range(69424, 69488),
242
+ "Old Uyghur": range(69488, 69552),
243
+ "Chorasmian": range(69552, 69600),
244
+ "Elymaic": range(69600, 69632),
245
+ "Brahmi": range(69632, 69760),
246
+ "Kaithi": range(69760, 69840),
247
+ "Sora Sompeng": range(69840, 69888),
248
+ "Chakma": range(69888, 69968),
249
+ "Mahajani": range(69968, 70016),
250
+ "Sharada": range(70016, 70112),
251
+ "Sinhala Archaic Numbers": range(70112, 70144),
252
+ "Khojki": range(70144, 70224),
253
+ "Multani": range(70272, 70320),
254
+ "Khudawadi": range(70320, 70400),
255
+ "Grantha": range(70400, 70528),
256
+ "Newa": range(70656, 70784),
257
+ "Tirhuta": range(70784, 70880),
258
+ "Siddham": range(71040, 71168),
259
+ "Modi": range(71168, 71264),
260
+ "Mongolian Supplement": range(71264, 71296),
261
+ "Takri": range(71296, 71376),
262
+ "Ahom": range(71424, 71504),
263
+ "Dogra": range(71680, 71760),
264
+ "Warang Citi": range(71840, 71936),
265
+ "Dives Akuru": range(71936, 72032),
266
+ "Nandinagari": range(72096, 72192),
267
+ "Zanabazar Square": range(72192, 72272),
268
+ "Soyombo": range(72272, 72368),
269
+ "Unified Canadian Aboriginal Syllabics Extended-A": range(72368, 72384),
270
+ "Pau Cin Hau": range(72384, 72448),
271
+ "Devanagari Extended-A": range(72448, 72544),
272
+ "Bhaiksuki": range(72704, 72816),
273
+ "Marchen": range(72816, 72896),
274
+ "Masaram Gondi": range(72960, 73056),
275
+ "Gunjala Gondi": range(73056, 73136),
276
+ "Makasar": range(73440, 73472),
277
+ "Kawi": range(73472, 73568),
278
+ "Lisu Supplement": range(73648, 73664),
279
+ "Tamil Supplement": range(73664, 73728),
280
+ "Cuneiform": range(73728, 74752),
281
+ "Cuneiform Numbers and Punctuation": range(74752, 74880),
282
+ "Early Dynastic Cuneiform": range(74880, 75088),
283
+ "Cypro-Minoan": range(77712, 77824),
284
+ "Egyptian Hieroglyphs": range(77824, 78896),
285
+ "Egyptian Hieroglyph Format Controls": range(78896, 78944),
286
+ "Anatolian Hieroglyphs": range(82944, 83584),
287
+ "Bamum Supplement": range(92160, 92736),
288
+ "Mro": range(92736, 92784),
289
+ "Tangsa": range(92784, 92880),
290
+ "Bassa Vah": range(92880, 92928),
291
+ "Pahawh Hmong": range(92928, 93072),
292
+ "Medefaidrin": range(93760, 93856),
293
+ "Miao": range(93952, 94112),
294
+ "Ideographic Symbols and Punctuation": range(94176, 94208),
295
+ "Tangut": range(94208, 100352),
296
+ "Tangut Components": range(100352, 101120),
297
+ "Khitan Small Script": range(101120, 101632),
298
+ "Tangut Supplement": range(101632, 101760),
299
+ "Kana Extended-B": range(110576, 110592),
300
+ "Kana Supplement": range(110592, 110848),
301
+ "Kana Extended-A": range(110848, 110896),
302
+ "Small Kana Extension": range(110896, 110960),
303
+ "Nushu": range(110960, 111360),
304
+ "Duployan": range(113664, 113824),
305
+ "Shorthand Format Controls": range(113824, 113840),
306
+ "Znamenny Musical Notation": range(118528, 118736),
307
+ "Byzantine Musical Symbols": range(118784, 119040),
308
+ "Musical Symbols": range(119040, 119296),
309
+ "Ancient Greek Musical Notation": range(119296, 119376),
310
+ "Kaktovik Numerals": range(119488, 119520),
311
+ "Mayan Numerals": range(119520, 119552),
312
+ "Tai Xuan Jing Symbols": range(119552, 119648),
313
+ "Counting Rod Numerals": range(119648, 119680),
314
+ "Mathematical Alphanumeric Symbols": range(119808, 120832),
315
+ "Sutton SignWriting": range(120832, 121520),
316
+ "Latin Extended-G": range(122624, 122880),
317
+ "Glagolitic Supplement": range(122880, 122928),
318
+ "Cyrillic Extended-D": range(122928, 123024),
319
+ "Nyiakeng Puachue Hmong": range(123136, 123216),
320
+ "Toto": range(123536, 123584),
321
+ "Wancho": range(123584, 123648),
322
+ "Nag Mundari": range(124112, 124160),
323
+ "Ethiopic Extended-B": range(124896, 124928),
324
+ "Mende Kikakui": range(124928, 125152),
325
+ "Adlam": range(125184, 125280),
326
+ "Indic Siyaq Numbers": range(126064, 126144),
327
+ "Ottoman Siyaq Numbers": range(126208, 126288),
328
+ "Arabic Mathematical Alphabetic Symbols": range(126464, 126720),
329
+ "Mahjong Tiles": range(126976, 127024),
330
+ "Domino Tiles": range(127024, 127136),
331
+ "Playing Cards": range(127136, 127232),
332
+ "Enclosed Alphanumeric Supplement": range(127232, 127488),
333
+ "Enclosed Ideographic Supplement": range(127488, 127744),
334
+ "Miscellaneous Symbols and Pictographs": range(127744, 128512),
335
+ "Emoticons range(Emoji)": range(128512, 128592),
336
+ "Ornamental Dingbats": range(128592, 128640),
337
+ "Transport and Map Symbols": range(128640, 128768),
338
+ "Alchemical Symbols": range(128768, 128896),
339
+ "Geometric Shapes Extended": range(128896, 129024),
340
+ "Supplemental Arrows-C": range(129024, 129280),
341
+ "Supplemental Symbols and Pictographs": range(129280, 129536),
342
+ "Chess Symbols": range(129536, 129648),
343
+ "Symbols and Pictographs Extended-A": range(129648, 129792),
344
+ "Symbols for Legacy Computing": range(129792, 130048),
345
+ "CJK Unified Ideographs Extension B": range(131072, 173792),
346
+ "CJK Unified Ideographs Extension C": range(173824, 177984),
347
+ "CJK Unified Ideographs Extension D": range(177984, 178208),
348
+ "CJK Unified Ideographs Extension E": range(178208, 183984),
349
+ "CJK Unified Ideographs Extension F": range(183984, 191472),
350
+ "CJK Compatibility Ideographs Supplement": range(194560, 195104),
351
+ "CJK Unified Ideographs Extension G": range(196608, 201552),
352
+ "CJK Unified Ideographs Extension H": range(201552, 205744),
353
+ "Tags": range(917504, 917632),
354
+ "Variation Selectors Supplement": range(917760, 918000),
355
+ "Supplementary Private Use Area-A": range(983040, 1048576),
356
+ "Supplementary Private Use Area-B": range(1048576, 1114112),
357
+ }
358
+
359
+
360
+ UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [
361
+ "Supplement",
362
+ "Extended",
363
+ "Extensions",
364
+ "Modifier",
365
+ "Marks",
366
+ "Punctuation",
367
+ "Symbols",
368
+ "Forms",
369
+ "Operators",
370
+ "Miscellaneous",
371
+ "Drawing",
372
+ "Block",
373
+ "Shapes",
374
+ "Supplemental",
375
+ "Tags",
376
+ ]
377
+
378
+ RE_POSSIBLE_ENCODING_INDICATION = re_compile(
379
+ r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
380
+ IGNORECASE,
381
+ )
382
+
383
+ IANA_NO_ALIASES = [
384
+ "cp720",
385
+ "cp737",
386
+ "cp856",
387
+ "cp874",
388
+ "cp875",
389
+ "cp1006",
390
+ "koi8_r",
391
+ "koi8_t",
392
+ "koi8_u",
393
+ ]
394
+
395
+ IANA_SUPPORTED: List[str] = sorted(
396
+ filter(
397
+ lambda x: x.endswith("_codec") is False
398
+ and x not in {"rot_13", "tactis", "mbcs"},
399
+ list(set(aliases.values())) + IANA_NO_ALIASES,
400
+ )
401
+ )
402
+
403
+ IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED)
404
+
405
+ # Pre-computed code pages that are similar, as determined by the function cp_similarity.
406
+ IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = {
407
+ "cp037": ["cp1026", "cp1140", "cp273", "cp500"],
408
+ "cp1026": ["cp037", "cp1140", "cp273", "cp500"],
409
+ "cp1125": ["cp866"],
410
+ "cp1140": ["cp037", "cp1026", "cp273", "cp500"],
411
+ "cp1250": ["iso8859_2"],
412
+ "cp1251": ["kz1048", "ptcp154"],
413
+ "cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
414
+ "cp1253": ["iso8859_7"],
415
+ "cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
416
+ "cp1257": ["iso8859_13"],
417
+ "cp273": ["cp037", "cp1026", "cp1140", "cp500"],
418
+ "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
419
+ "cp500": ["cp037", "cp1026", "cp1140", "cp273"],
420
+ "cp850": ["cp437", "cp857", "cp858", "cp865"],
421
+ "cp857": ["cp850", "cp858", "cp865"],
422
+ "cp858": ["cp437", "cp850", "cp857", "cp865"],
423
+ "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
424
+ "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
425
+ "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
426
+ "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
427
+ "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
428
+ "cp866": ["cp1125"],
429
+ "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
430
+ "iso8859_11": ["tis_620"],
431
+ "iso8859_13": ["cp1257"],
432
+ "iso8859_14": [
433
+ "iso8859_10",
434
+ "iso8859_15",
435
+ "iso8859_16",
436
+ "iso8859_3",
437
+ "iso8859_9",
438
+ "latin_1",
439
+ ],
440
+ "iso8859_15": [
441
+ "cp1252",
442
+ "cp1254",
443
+ "iso8859_10",
444
+ "iso8859_14",
445
+ "iso8859_16",
446
+ "iso8859_3",
447
+ "iso8859_9",
448
+ "latin_1",
449
+ ],
450
+ "iso8859_16": [
451
+ "iso8859_14",
452
+ "iso8859_15",
453
+ "iso8859_2",
454
+ "iso8859_3",
455
+ "iso8859_9",
456
+ "latin_1",
457
+ ],
458
+ "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
459
+ "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
460
+ "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
461
+ "iso8859_7": ["cp1253"],
462
+ "iso8859_9": [
463
+ "cp1252",
464
+ "cp1254",
465
+ "cp1258",
466
+ "iso8859_10",
467
+ "iso8859_14",
468
+ "iso8859_15",
469
+ "iso8859_16",
470
+ "iso8859_3",
471
+ "iso8859_4",
472
+ "latin_1",
473
+ ],
474
+ "kz1048": ["cp1251", "ptcp154"],
475
+ "latin_1": [
476
+ "cp1252",
477
+ "cp1254",
478
+ "cp1258",
479
+ "iso8859_10",
480
+ "iso8859_14",
481
+ "iso8859_15",
482
+ "iso8859_16",
483
+ "iso8859_3",
484
+ "iso8859_4",
485
+ "iso8859_9",
486
+ ],
487
+ "mac_iceland": ["mac_roman", "mac_turkish"],
488
+ "mac_roman": ["mac_iceland", "mac_turkish"],
489
+ "mac_turkish": ["mac_iceland", "mac_roman"],
490
+ "ptcp154": ["cp1251", "kz1048"],
491
+ "tis_620": ["iso8859_11"],
492
+ }
493
+
494
+
495
+ CHARDET_CORRESPONDENCE: Dict[str, str] = {
496
+ "iso2022_kr": "ISO-2022-KR",
497
+ "iso2022_jp": "ISO-2022-JP",
498
+ "euc_kr": "EUC-KR",
499
+ "tis_620": "TIS-620",
500
+ "utf_32": "UTF-32",
501
+ "euc_jp": "EUC-JP",
502
+ "koi8_r": "KOI8-R",
503
+ "iso8859_1": "ISO-8859-1",
504
+ "iso8859_2": "ISO-8859-2",
505
+ "iso8859_5": "ISO-8859-5",
506
+ "iso8859_6": "ISO-8859-6",
507
+ "iso8859_7": "ISO-8859-7",
508
+ "iso8859_8": "ISO-8859-8",
509
+ "utf_16": "UTF-16",
510
+ "cp855": "IBM855",
511
+ "mac_cyrillic": "MacCyrillic",
512
+ "gb2312": "GB2312",
513
+ "gb18030": "GB18030",
514
+ "cp932": "CP932",
515
+ "cp866": "IBM866",
516
+ "utf_8": "utf-8",
517
+ "utf_8_sig": "UTF-8-SIG",
518
+ "shift_jis": "SHIFT_JIS",
519
+ "big5": "Big5",
520
+ "cp1250": "windows-1250",
521
+ "cp1251": "windows-1251",
522
+ "cp1252": "Windows-1252",
523
+ "cp1253": "windows-1253",
524
+ "cp1255": "windows-1255",
525
+ "cp1256": "windows-1256",
526
+ "cp1254": "Windows-1254",
527
+ "cp949": "CP949",
528
+ }
529
+
530
+
531
+ COMMON_SAFE_ASCII_CHARACTERS: Set[str] = {
532
+ "<",
533
+ ">",
534
+ "=",
535
+ ":",
536
+ "/",
537
+ "&",
538
+ ";",
539
+ "{",
540
+ "}",
541
+ "[",
542
+ "]",
543
+ ",",
544
+ "|",
545
+ '"',
546
+ "-",
547
+ }
548
+
549
+
550
+ KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"}
551
+ ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"}
552
+
553
+ # Logging LEVEL below DEBUG
554
+ TRACE: int = 5
555
+
556
+
557
+ # Language labels that contain the em dash "—"
+ # character are to be considered alternative sequences of the original
559
+ FREQUENCIES: Dict[str, List[str]] = {
560
+ "English": [
561
+ "e",
562
+ "a",
563
+ "t",
564
+ "i",
565
+ "o",
566
+ "n",
567
+ "s",
568
+ "r",
569
+ "h",
570
+ "l",
571
+ "d",
572
+ "c",
573
+ "u",
574
+ "m",
575
+ "f",
576
+ "p",
577
+ "g",
578
+ "w",
579
+ "y",
580
+ "b",
581
+ "v",
582
+ "k",
583
+ "x",
584
+ "j",
585
+ "z",
586
+ "q",
587
+ ],
588
+ "English—": [
589
+ "e",
590
+ "a",
591
+ "t",
592
+ "i",
593
+ "o",
594
+ "n",
595
+ "s",
596
+ "r",
597
+ "h",
598
+ "l",
599
+ "d",
600
+ "c",
601
+ "m",
602
+ "u",
603
+ "f",
604
+ "p",
605
+ "g",
606
+ "w",
607
+ "b",
608
+ "y",
609
+ "v",
610
+ "k",
611
+ "j",
612
+ "x",
613
+ "z",
614
+ "q",
615
+ ],
616
+ "German": [
617
+ "e",
618
+ "n",
619
+ "i",
620
+ "r",
621
+ "s",
622
+ "t",
623
+ "a",
624
+ "d",
625
+ "h",
626
+ "u",
627
+ "l",
628
+ "g",
629
+ "o",
630
+ "c",
631
+ "m",
632
+ "b",
633
+ "f",
634
+ "k",
635
+ "w",
636
+ "z",
637
+ "p",
638
+ "v",
639
+ "ü",
640
+ "ä",
641
+ "ö",
642
+ "j",
643
+ ],
644
+ "French": [
645
+ "e",
646
+ "a",
647
+ "s",
648
+ "n",
649
+ "i",
650
+ "t",
651
+ "r",
652
+ "l",
653
+ "u",
654
+ "o",
655
+ "d",
656
+ "c",
657
+ "p",
658
+ "m",
659
+ "é",
660
+ "v",
661
+ "g",
662
+ "f",
663
+ "b",
664
+ "h",
665
+ "q",
666
+ "à",
667
+ "x",
668
+ "è",
669
+ "y",
670
+ "j",
671
+ ],
672
+ "Dutch": [
673
+ "e",
674
+ "n",
675
+ "a",
676
+ "i",
677
+ "r",
678
+ "t",
679
+ "o",
680
+ "d",
681
+ "s",
682
+ "l",
683
+ "g",
684
+ "h",
685
+ "v",
686
+ "m",
687
+ "u",
688
+ "k",
689
+ "c",
690
+ "p",
691
+ "b",
692
+ "w",
693
+ "j",
694
+ "z",
695
+ "f",
696
+ "y",
697
+ "x",
698
+ "ë",
699
+ ],
700
+ "Italian": [
701
+ "e",
702
+ "i",
703
+ "a",
704
+ "o",
705
+ "n",
706
+ "l",
707
+ "t",
708
+ "r",
709
+ "s",
710
+ "c",
711
+ "d",
712
+ "u",
713
+ "p",
714
+ "m",
715
+ "g",
716
+ "v",
717
+ "f",
718
+ "b",
719
+ "z",
720
+ "h",
721
+ "q",
722
+ "è",
723
+ "à",
724
+ "k",
725
+ "y",
726
+ "ò",
727
+ ],
728
+ "Polish": [
729
+ "a",
730
+ "i",
731
+ "o",
732
+ "e",
733
+ "n",
734
+ "r",
735
+ "z",
736
+ "w",
737
+ "s",
738
+ "c",
739
+ "t",
740
+ "k",
741
+ "y",
742
+ "d",
743
+ "p",
744
+ "m",
745
+ "u",
746
+ "l",
747
+ "j",
748
+ "ł",
749
+ "g",
750
+ "b",
751
+ "h",
752
+ "ą",
753
+ "ę",
754
+ "ó",
755
+ ],
756
+ "Spanish": [
757
+ "e",
758
+ "a",
759
+ "o",
760
+ "n",
761
+ "s",
762
+ "r",
763
+ "i",
764
+ "l",
765
+ "d",
766
+ "t",
767
+ "c",
768
+ "u",
769
+ "m",
770
+ "p",
771
+ "b",
772
+ "g",
773
+ "v",
774
+ "f",
775
+ "y",
776
+ "ó",
777
+ "h",
778
+ "q",
779
+ "í",
780
+ "j",
781
+ "z",
782
+ "á",
783
+ ],
784
+ "Russian": [
785
+ "о",
786
+ "а",
787
+ "е",
788
+ "и",
789
+ "н",
790
+ "с",
791
+ "т",
792
+ "р",
793
+ "в",
794
+ "л",
795
+ "к",
796
+ "м",
797
+ "д",
798
+ "п",
799
+ "у",
800
+ "г",
801
+ "я",
802
+ "ы",
803
+ "з",
804
+ "б",
805
+ "й",
806
+ "ь",
807
+ "ч",
808
+ "х",
809
+ "ж",
810
+ "ц",
811
+ ],
812
+ # Jap-Kanji
813
+ "Japanese": [
814
+ "人",
815
+ "一",
816
+ "大",
817
+ "亅",
818
+ "丁",
819
+ "丨",
820
+ "竹",
821
+ "笑",
822
+ "口",
823
+ "日",
824
+ "今",
825
+ "二",
826
+ "彳",
827
+ "行",
828
+ "十",
829
+ "土",
830
+ "丶",
831
+ "寸",
832
+ "寺",
833
+ "時",
834
+ "乙",
835
+ "丿",
836
+ "乂",
837
+ "气",
838
+ "気",
839
+ "冂",
840
+ "巾",
841
+ "亠",
842
+ "市",
843
+ "目",
844
+ "儿",
845
+ "見",
846
+ "八",
847
+ "小",
848
+ "凵",
849
+ "県",
850
+ "月",
851
+ "彐",
852
+ "門",
853
+ "間",
854
+ "木",
855
+ "東",
856
+ "山",
857
+ "出",
858
+ "本",
859
+ "中",
860
+ "刀",
861
+ "分",
862
+ "耳",
863
+ "又",
864
+ "取",
865
+ "最",
866
+ "言",
867
+ "田",
868
+ "心",
869
+ "思",
870
+ "刂",
871
+ "前",
872
+ "京",
873
+ "尹",
874
+ "事",
875
+ "生",
876
+ "厶",
877
+ "云",
878
+ "会",
879
+ "未",
880
+ "来",
881
+ "白",
882
+ "冫",
883
+ "楽",
884
+ "灬",
885
+ "馬",
886
+ "尸",
887
+ "尺",
888
+ "駅",
889
+ "明",
890
+ "耂",
891
+ "者",
892
+ "了",
893
+ "阝",
894
+ "都",
895
+ "高",
896
+ "卜",
897
+ "占",
898
+ "厂",
899
+ "广",
900
+ "店",
901
+ "子",
902
+ "申",
903
+ "奄",
904
+ "亻",
905
+ "俺",
906
+ "上",
907
+ "方",
908
+ "冖",
909
+ "学",
910
+ "衣",
911
+ "艮",
912
+ "食",
913
+ "自",
914
+ ],
915
+ # Jap-Katakana
916
+ "Japanese—": [
917
+ "ー",
918
+ "ン",
919
+ "ス",
920
+ "・",
921
+ "ル",
922
+ "ト",
923
+ "リ",
924
+ "イ",
925
+ "ア",
926
+ "ラ",
927
+ "ッ",
928
+ "ク",
929
+ "ド",
930
+ "シ",
931
+ "レ",
932
+ "ジ",
933
+ "タ",
934
+ "フ",
935
+ "ロ",
936
+ "カ",
937
+ "テ",
938
+ "マ",
939
+ "ィ",
940
+ "グ",
941
+ "バ",
942
+ "ム",
943
+ "プ",
944
+ "オ",
945
+ "コ",
946
+ "デ",
947
+ "ニ",
948
+ "ウ",
949
+ "メ",
950
+ "サ",
951
+ "ビ",
952
+ "ナ",
953
+ "ブ",
954
+ "ャ",
955
+ "エ",
956
+ "ュ",
957
+ "チ",
958
+ "キ",
959
+ "ズ",
960
+ "ダ",
961
+ "パ",
962
+ "ミ",
963
+ "ェ",
964
+ "ョ",
965
+ "ハ",
966
+ "セ",
967
+ "ベ",
968
+ "ガ",
969
+ "モ",
970
+ "ツ",
971
+ "ネ",
972
+ "ボ",
973
+ "ソ",
974
+ "ノ",
975
+ "ァ",
976
+ "ヴ",
977
+ "ワ",
978
+ "ポ",
979
+ "ペ",
980
+ "ピ",
981
+ "ケ",
982
+ "ゴ",
983
+ "ギ",
984
+ "ザ",
985
+ "ホ",
986
+ "ゲ",
987
+ "ォ",
988
+ "ヤ",
989
+ "ヒ",
990
+ "ユ",
991
+ "ヨ",
992
+ "ヘ",
993
+ "ゼ",
994
+ "ヌ",
995
+ "ゥ",
996
+ "ゾ",
997
+ "ヶ",
998
+ "ヂ",
999
+ "ヲ",
1000
+ "ヅ",
1001
+ "ヵ",
1002
+ "ヱ",
1003
+ "ヰ",
1004
+ "ヮ",
1005
+ "ヽ",
1006
+ "゠",
1007
+ "ヾ",
1008
+ "ヷ",
1009
+ "ヿ",
1010
+ "ヸ",
1011
+ "ヹ",
1012
+ "ヺ",
1013
+ ],
1014
+ # Jap-Hiragana
1015
+ "Japanese——": [
1016
+ "の",
1017
+ "に",
1018
+ "る",
1019
+ "た",
1020
+ "と",
1021
+ "は",
1022
+ "し",
1023
+ "い",
1024
+ "を",
1025
+ "で",
1026
+ "て",
1027
+ "が",
1028
+ "な",
1029
+ "れ",
1030
+ "か",
1031
+ "ら",
1032
+ "さ",
1033
+ "っ",
1034
+ "り",
1035
+ "す",
1036
+ "あ",
1037
+ "も",
1038
+ "こ",
1039
+ "ま",
1040
+ "う",
1041
+ "く",
1042
+ "よ",
1043
+ "き",
1044
+ "ん",
1045
+ "め",
1046
+ "お",
1047
+ "け",
1048
+ "そ",
1049
+ "つ",
1050
+ "だ",
1051
+ "や",
1052
+ "え",
1053
+ "ど",
1054
+ "わ",
1055
+ "ち",
1056
+ "み",
1057
+ "せ",
1058
+ "じ",
1059
+ "ば",
1060
+ "へ",
1061
+ "び",
1062
+ "ず",
1063
+ "ろ",
1064
+ "ほ",
1065
+ "げ",
1066
+ "む",
1067
+ "べ",
1068
+ "ひ",
1069
+ "ょ",
1070
+ "ゆ",
1071
+ "ぶ",
1072
+ "ご",
1073
+ "ゃ",
1074
+ "ね",
1075
+ "ふ",
1076
+ "ぐ",
1077
+ "ぎ",
1078
+ "ぼ",
1079
+ "ゅ",
1080
+ "づ",
1081
+ "ざ",
1082
+ "ぞ",
1083
+ "ぬ",
1084
+ "ぜ",
1085
+ "ぱ",
1086
+ "ぽ",
1087
+ "ぷ",
1088
+ "ぴ",
1089
+ "ぃ",
1090
+ "ぁ",
1091
+ "ぇ",
1092
+ "ぺ",
1093
+ "ゞ",
1094
+ "ぢ",
1095
+ "ぉ",
1096
+ "ぅ",
1097
+ "ゐ",
1098
+ "ゝ",
1099
+ "ゑ",
1100
+ "゛",
1101
+ "゜",
1102
+ "ゎ",
1103
+ "ゔ",
1104
+ "゚",
1105
+ "ゟ",
1106
+ "゙",
1107
+ "ゕ",
1108
+ "ゖ",
1109
+ ],
1110
+ "Portuguese": [
1111
+ "a",
1112
+ "e",
1113
+ "o",
1114
+ "s",
1115
+ "i",
1116
+ "r",
1117
+ "d",
1118
+ "n",
1119
+ "t",
1120
+ "m",
1121
+ "u",
1122
+ "c",
1123
+ "l",
1124
+ "p",
1125
+ "g",
1126
+ "v",
1127
+ "b",
1128
+ "f",
1129
+ "h",
1130
+ "ã",
1131
+ "q",
1132
+ "é",
1133
+ "ç",
1134
+ "á",
1135
+ "z",
1136
+ "í",
1137
+ ],
1138
+ "Swedish": [
1139
+ "e",
1140
+ "a",
1141
+ "n",
1142
+ "r",
1143
+ "t",
1144
+ "s",
1145
+ "i",
1146
+ "l",
1147
+ "d",
1148
+ "o",
1149
+ "m",
1150
+ "k",
1151
+ "g",
1152
+ "v",
1153
+ "h",
1154
+ "f",
1155
+ "u",
1156
+ "p",
1157
+ "ä",
1158
+ "c",
1159
+ "b",
1160
+ "ö",
1161
+ "å",
1162
+ "y",
1163
+ "j",
1164
+ "x",
1165
+ ],
1166
+ "Chinese": [
1167
+ "的",
1168
+ "一",
1169
+ "是",
1170
+ "不",
1171
+ "了",
1172
+ "在",
1173
+ "人",
1174
+ "有",
1175
+ "我",
1176
+ "他",
1177
+ "这",
1178
+ "个",
1179
+ "们",
1180
+ "中",
1181
+ "来",
1182
+ "上",
1183
+ "大",
1184
+ "为",
1185
+ "和",
1186
+ "国",
1187
+ "地",
1188
+ "到",
1189
+ "以",
1190
+ "说",
1191
+ "时",
1192
+ "要",
1193
+ "就",
1194
+ "出",
1195
+ "会",
1196
+ "可",
1197
+ "也",
1198
+ "你",
1199
+ "对",
1200
+ "生",
1201
+ "能",
1202
+ "而",
1203
+ "子",
1204
+ "那",
1205
+ "得",
1206
+ "于",
1207
+ "着",
1208
+ "下",
1209
+ "自",
1210
+ "之",
1211
+ "年",
1212
+ "过",
1213
+ "发",
1214
+ "后",
1215
+ "作",
1216
+ "里",
1217
+ "用",
1218
+ "道",
1219
+ "行",
1220
+ "所",
1221
+ "然",
1222
+ "家",
1223
+ "种",
1224
+ "事",
1225
+ "成",
1226
+ "方",
1227
+ "多",
1228
+ "经",
1229
+ "么",
1230
+ "去",
1231
+ "法",
1232
+ "学",
1233
+ "如",
1234
+ "都",
1235
+ "同",
1236
+ "现",
1237
+ "当",
1238
+ "没",
1239
+ "动",
1240
+ "面",
1241
+ "起",
1242
+ "看",
1243
+ "定",
1244
+ "天",
1245
+ "分",
1246
+ "还",
1247
+ "进",
1248
+ "好",
1249
+ "小",
1250
+ "部",
1251
+ "其",
1252
+ "些",
1253
+ "主",
1254
+ "样",
1255
+ "理",
1256
+ "心",
1257
+ "她",
1258
+ "本",
1259
+ "前",
1260
+ "开",
1261
+ "但",
1262
+ "因",
1263
+ "只",
1264
+ "从",
1265
+ "想",
1266
+ "实",
1267
+ ],
1268
+ "Ukrainian": [
1269
+ "о",
1270
+ "а",
1271
+ "н",
1272
+ "і",
1273
+ "и",
1274
+ "р",
1275
+ "в",
1276
+ "т",
1277
+ "е",
1278
+ "с",
1279
+ "к",
1280
+ "л",
1281
+ "у",
1282
+ "д",
1283
+ "м",
1284
+ "п",
1285
+ "з",
1286
+ "я",
1287
+ "ь",
1288
+ "б",
1289
+ "г",
1290
+ "й",
1291
+ "ч",
1292
+ "х",
1293
+ "ц",
1294
+ "ї",
1295
+ ],
1296
+ "Norwegian": [
1297
+ "e",
1298
+ "r",
1299
+ "n",
1300
+ "t",
1301
+ "a",
1302
+ "s",
1303
+ "i",
1304
+ "o",
1305
+ "l",
1306
+ "d",
1307
+ "g",
1308
+ "k",
1309
+ "m",
1310
+ "v",
1311
+ "f",
1312
+ "p",
1313
+ "u",
1314
+ "b",
1315
+ "h",
1316
+ "å",
1317
+ "y",
1318
+ "j",
1319
+ "ø",
1320
+ "c",
1321
+ "æ",
1322
+ "w",
1323
+ ],
1324
+ "Finnish": [
1325
+ "a",
1326
+ "i",
1327
+ "n",
1328
+ "t",
1329
+ "e",
1330
+ "s",
1331
+ "l",
1332
+ "o",
1333
+ "u",
1334
+ "k",
1335
+ "ä",
1336
+ "m",
1337
+ "r",
1338
+ "v",
1339
+ "j",
1340
+ "h",
1341
+ "p",
1342
+ "y",
1343
+ "d",
1344
+ "ö",
1345
+ "g",
1346
+ "c",
1347
+ "b",
1348
+ "f",
1349
+ "w",
1350
+ "z",
1351
+ ],
1352
+ "Vietnamese": [
1353
+ "n",
1354
+ "h",
1355
+ "t",
1356
+ "i",
1357
+ "c",
1358
+ "g",
1359
+ "a",
1360
+ "o",
1361
+ "u",
1362
+ "m",
1363
+ "l",
1364
+ "r",
1365
+ "à",
1366
+ "đ",
1367
+ "s",
1368
+ "e",
1369
+ "v",
1370
+ "p",
1371
+ "b",
1372
+ "y",
1373
+ "ư",
1374
+ "d",
1375
+ "á",
1376
+ "k",
1377
+ "ộ",
1378
+ "ế",
1379
+ ],
1380
+ "Czech": [
1381
+ "o",
1382
+ "e",
1383
+ "a",
1384
+ "n",
1385
+ "t",
1386
+ "s",
1387
+ "i",
1388
+ "l",
1389
+ "v",
1390
+ "r",
1391
+ "k",
1392
+ "d",
1393
+ "u",
1394
+ "m",
1395
+ "p",
1396
+ "í",
1397
+ "c",
1398
+ "h",
1399
+ "z",
1400
+ "á",
1401
+ "y",
1402
+ "j",
1403
+ "b",
1404
+ "ě",
1405
+ "é",
1406
+ "ř",
1407
+ ],
1408
+ "Hungarian": [
1409
+ "e",
1410
+ "a",
1411
+ "t",
1412
+ "l",
1413
+ "s",
1414
+ "n",
1415
+ "k",
1416
+ "r",
1417
+ "i",
1418
+ "o",
1419
+ "z",
1420
+ "á",
1421
+ "é",
1422
+ "g",
1423
+ "m",
1424
+ "b",
1425
+ "y",
1426
+ "v",
1427
+ "d",
1428
+ "h",
1429
+ "u",
1430
+ "p",
1431
+ "j",
1432
+ "ö",
1433
+ "f",
1434
+ "c",
1435
+ ],
1436
+ "Korean": [
1437
+ "이",
1438
+ "다",
1439
+ "에",
1440
+ "의",
1441
+ "는",
1442
+ "로",
1443
+ "하",
1444
+ "을",
1445
+ "가",
1446
+ "고",
1447
+ "지",
1448
+ "서",
1449
+ "한",
1450
+ "은",
1451
+ "기",
1452
+ "으",
1453
+ "년",
1454
+ "대",
1455
+ "사",
1456
+ "시",
1457
+ "를",
1458
+ "리",
1459
+ "도",
1460
+ "인",
1461
+ "스",
1462
+ "일",
1463
+ ],
1464
+ "Indonesian": [
1465
+ "a",
1466
+ "n",
1467
+ "e",
1468
+ "i",
1469
+ "r",
1470
+ "t",
1471
+ "u",
1472
+ "s",
1473
+ "d",
1474
+ "k",
1475
+ "m",
1476
+ "l",
1477
+ "g",
1478
+ "p",
1479
+ "b",
1480
+ "o",
1481
+ "h",
1482
+ "y",
1483
+ "j",
1484
+ "c",
1485
+ "w",
1486
+ "f",
1487
+ "v",
1488
+ "z",
1489
+ "x",
1490
+ "q",
1491
+ ],
1492
+ "Turkish": [
1493
+ "a",
1494
+ "e",
1495
+ "i",
1496
+ "n",
1497
+ "r",
1498
+ "l",
1499
+ "ı",
1500
+ "k",
1501
+ "d",
1502
+ "t",
1503
+ "s",
1504
+ "m",
1505
+ "y",
1506
+ "u",
1507
+ "o",
1508
+ "b",
1509
+ "ü",
1510
+ "ş",
1511
+ "v",
1512
+ "g",
1513
+ "z",
1514
+ "h",
1515
+ "c",
1516
+ "p",
1517
+ "ç",
1518
+ "ğ",
1519
+ ],
1520
+ "Romanian": [
1521
+ "e",
1522
+ "i",
1523
+ "a",
1524
+ "r",
1525
+ "n",
1526
+ "t",
1527
+ "u",
1528
+ "l",
1529
+ "o",
1530
+ "c",
1531
+ "s",
1532
+ "d",
1533
+ "p",
1534
+ "m",
1535
+ "ă",
1536
+ "f",
1537
+ "v",
1538
+ "î",
1539
+ "g",
1540
+ "b",
1541
+ "ș",
1542
+ "ț",
1543
+ "z",
1544
+ "h",
1545
+ "â",
1546
+ "j",
1547
+ ],
1548
+ "Farsi": [
1549
+ "ا",
1550
+ "ی",
1551
+ "ر",
1552
+ "د",
1553
+ "ن",
1554
+ "ه",
1555
+ "و",
1556
+ "م",
1557
+ "ت",
1558
+ "ب",
1559
+ "س",
1560
+ "ل",
1561
+ "ک",
1562
+ "ش",
1563
+ "ز",
1564
+ "ف",
1565
+ "گ",
1566
+ "ع",
1567
+ "خ",
1568
+ "ق",
1569
+ "ج",
1570
+ "آ",
1571
+ "پ",
1572
+ "ح",
1573
+ "ط",
1574
+ "ص",
1575
+ ],
1576
+ "Arabic": [
1577
+ "ا",
1578
+ "ل",
1579
+ "ي",
1580
+ "م",
1581
+ "و",
1582
+ "ن",
1583
+ "ر",
1584
+ "ت",
1585
+ "ب",
1586
+ "ة",
1587
+ "ع",
1588
+ "د",
1589
+ "س",
1590
+ "ف",
1591
+ "ه",
1592
+ "ك",
1593
+ "ق",
1594
+ "أ",
1595
+ "ح",
1596
+ "ج",
1597
+ "ش",
1598
+ "ط",
1599
+ "ص",
1600
+ "ى",
1601
+ "خ",
1602
+ "إ",
1603
+ ],
1604
+ "Danish": [
1605
+ "e",
1606
+ "r",
1607
+ "n",
1608
+ "t",
1609
+ "a",
1610
+ "i",
1611
+ "s",
1612
+ "d",
1613
+ "l",
1614
+ "o",
1615
+ "g",
1616
+ "m",
1617
+ "k",
1618
+ "f",
1619
+ "v",
1620
+ "u",
1621
+ "b",
1622
+ "h",
1623
+ "p",
1624
+ "å",
1625
+ "y",
1626
+ "ø",
1627
+ "æ",
1628
+ "c",
1629
+ "j",
1630
+ "w",
1631
+ ],
1632
+ "Serbian": [
1633
+ "а",
1634
+ "и",
1635
+ "о",
1636
+ "е",
1637
+ "н",
1638
+ "р",
1639
+ "с",
1640
+ "у",
1641
+ "т",
1642
+ "к",
1643
+ "ј",
1644
+ "в",
1645
+ "д",
1646
+ "м",
1647
+ "п",
1648
+ "л",
1649
+ "г",
1650
+ "з",
1651
+ "б",
1652
+ "a",
1653
+ "i",
1654
+ "e",
1655
+ "o",
1656
+ "n",
1657
+ "ц",
1658
+ "ш",
1659
+ ],
1660
+ "Lithuanian": [
1661
+ "i",
1662
+ "a",
1663
+ "s",
1664
+ "o",
1665
+ "r",
1666
+ "e",
1667
+ "t",
1668
+ "n",
1669
+ "u",
1670
+ "k",
1671
+ "m",
1672
+ "l",
1673
+ "p",
1674
+ "v",
1675
+ "d",
1676
+ "j",
1677
+ "g",
1678
+ "ė",
1679
+ "b",
1680
+ "y",
1681
+ "ų",
1682
+ "š",
1683
+ "ž",
1684
+ "c",
1685
+ "ą",
1686
+ "į",
1687
+ ],
1688
+ "Slovene": [
1689
+ "e",
1690
+ "a",
1691
+ "i",
1692
+ "o",
1693
+ "n",
1694
+ "r",
1695
+ "s",
1696
+ "l",
1697
+ "t",
1698
+ "j",
1699
+ "v",
1700
+ "k",
1701
+ "d",
1702
+ "p",
1703
+ "m",
1704
+ "u",
1705
+ "z",
1706
+ "b",
1707
+ "g",
1708
+ "h",
1709
+ "č",
1710
+ "c",
1711
+ "š",
1712
+ "ž",
1713
+ "f",
1714
+ "y",
1715
+ ],
1716
+ "Slovak": [
1717
+ "o",
1718
+ "a",
1719
+ "e",
1720
+ "n",
1721
+ "i",
1722
+ "r",
1723
+ "v",
1724
+ "t",
1725
+ "s",
1726
+ "l",
1727
+ "k",
1728
+ "d",
1729
+ "m",
1730
+ "p",
1731
+ "u",
1732
+ "c",
1733
+ "h",
1734
+ "j",
1735
+ "b",
1736
+ "z",
1737
+ "á",
1738
+ "y",
1739
+ "ý",
1740
+ "í",
1741
+ "č",
1742
+ "é",
1743
+ ],
1744
+ "Hebrew": [
1745
+ "י",
1746
+ "ו",
1747
+ "ה",
1748
+ "ל",
1749
+ "ר",
1750
+ "ב",
1751
+ "ת",
1752
+ "מ",
1753
+ "א",
1754
+ "ש",
1755
+ "נ",
1756
+ "ע",
1757
+ "ם",
1758
+ "ד",
1759
+ "ק",
1760
+ "ח",
1761
+ "פ",
1762
+ "ס",
1763
+ "כ",
1764
+ "ג",
1765
+ "ט",
1766
+ "צ",
1767
+ "ן",
1768
+ "ז",
1769
+ "ך",
1770
+ ],
1771
+ "Bulgarian": [
1772
+ "а",
1773
+ "и",
1774
+ "о",
1775
+ "е",
1776
+ "н",
1777
+ "т",
1778
+ "р",
1779
+ "с",
1780
+ "в",
1781
+ "л",
1782
+ "к",
1783
+ "д",
1784
+ "п",
1785
+ "м",
1786
+ "з",
1787
+ "г",
1788
+ "я",
1789
+ "ъ",
1790
+ "у",
1791
+ "б",
1792
+ "ч",
1793
+ "ц",
1794
+ "й",
1795
+ "ж",
1796
+ "щ",
1797
+ "х",
1798
+ ],
1799
+ "Croatian": [
1800
+ "a",
1801
+ "i",
1802
+ "o",
1803
+ "e",
1804
+ "n",
1805
+ "r",
1806
+ "j",
1807
+ "s",
1808
+ "t",
1809
+ "u",
1810
+ "k",
1811
+ "l",
1812
+ "v",
1813
+ "d",
1814
+ "m",
1815
+ "p",
1816
+ "g",
1817
+ "z",
1818
+ "b",
1819
+ "c",
1820
+ "č",
1821
+ "h",
1822
+ "š",
1823
+ "ž",
1824
+ "ć",
1825
+ "f",
1826
+ ],
1827
+ "Hindi": [
1828
+ "क",
1829
+ "र",
1830
+ "स",
1831
+ "न",
1832
+ "त",
1833
+ "म",
1834
+ "ह",
1835
+ "प",
1836
+ "य",
1837
+ "ल",
1838
+ "व",
1839
+ "ज",
1840
+ "द",
1841
+ "ग",
1842
+ "ब",
1843
+ "श",
1844
+ "ट",
1845
+ "अ",
1846
+ "ए",
1847
+ "थ",
1848
+ "भ",
1849
+ "ड",
1850
+ "च",
1851
+ "ध",
1852
+ "ष",
1853
+ "इ",
1854
+ ],
1855
+ "Estonian": [
1856
+ "a",
1857
+ "i",
1858
+ "e",
1859
+ "s",
1860
+ "t",
1861
+ "l",
1862
+ "u",
1863
+ "n",
1864
+ "o",
1865
+ "k",
1866
+ "r",
1867
+ "d",
1868
+ "m",
1869
+ "v",
1870
+ "g",
1871
+ "p",
1872
+ "j",
1873
+ "h",
1874
+ "ä",
1875
+ "b",
1876
+ "õ",
1877
+ "ü",
1878
+ "f",
1879
+ "c",
1880
+ "ö",
1881
+ "y",
1882
+ ],
1883
+ "Thai": [
1884
+ "า",
1885
+ "น",
1886
+ "ร",
1887
+ "อ",
1888
+ "ก",
1889
+ "เ",
1890
+ "ง",
1891
+ "ม",
1892
+ "ย",
1893
+ "ล",
1894
+ "ว",
1895
+ "ด",
1896
+ "ท",
1897
+ "ส",
1898
+ "ต",
1899
+ "ะ",
1900
+ "ป",
1901
+ "บ",
1902
+ "ค",
1903
+ "ห",
1904
+ "แ",
1905
+ "จ",
1906
+ "พ",
1907
+ "ช",
1908
+ "ข",
1909
+ "ใ",
1910
+ ],
1911
+ "Greek": [
1912
+ "α",
1913
+ "τ",
1914
+ "ο",
1915
+ "ι",
1916
+ "ε",
1917
+ "ν",
1918
+ "ρ",
1919
+ "σ",
1920
+ "κ",
1921
+ "η",
1922
+ "π",
1923
+ "ς",
1924
+ "υ",
1925
+ "μ",
1926
+ "λ",
1927
+ "ί",
1928
+ "ό",
1929
+ "ά",
1930
+ "γ",
1931
+ "έ",
1932
+ "δ",
1933
+ "ή",
1934
+ "ω",
1935
+ "χ",
1936
+ "θ",
1937
+ "ύ",
1938
+ ],
1939
+ "Tamil": [
1940
+ "க",
1941
+ "த",
1942
+ "ப",
1943
+ "ட",
1944
+ "ர",
1945
+ "ம",
1946
+ "ல",
1947
+ "ன",
1948
+ "வ",
1949
+ "ற",
1950
+ "ய",
1951
+ "ள",
1952
+ "ச",
1953
+ "ந",
1954
+ "இ",
1955
+ "ண",
1956
+ "அ",
1957
+ "ஆ",
1958
+ "ழ",
1959
+ "ங",
1960
+ "எ",
1961
+ "உ",
1962
+ "ஒ",
1963
+ "ஸ",
1964
+ ],
1965
+ "Kazakh": [
1966
+ "а",
1967
+ "ы",
1968
+ "е",
1969
+ "н",
1970
+ "т",
1971
+ "р",
1972
+ "л",
1973
+ "і",
1974
+ "д",
1975
+ "с",
1976
+ "м",
1977
+ "қ",
1978
+ "к",
1979
+ "о",
1980
+ "б",
1981
+ "и",
1982
+ "у",
1983
+ "ғ",
1984
+ "ж",
1985
+ "ң",
1986
+ "з",
1987
+ "ш",
1988
+ "й",
1989
+ "п",
1990
+ "г",
1991
+ "ө",
1992
+ ],
1993
+ }
1994
+
1995
+ LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)
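As a quick orientation, here is a minimal sketch of how the constants above can be inspected from Python. This snippet is illustrative only and not part of the uploaded constant.py; it assumes the charset-normalizer package bundled in this environment is importable.

# Minimal sketch (illustrative): inspecting constants from charset_normalizer/constant.py.
from charset_normalizer.constant import (
    CHARDET_CORRESPONDENCE,
    FREQUENCIES,
    LANGUAGE_SUPPORTED_COUNT,
)

# Number of languages that ship a character-frequency profile.
print(LANGUAGE_SUPPORTED_COUNT, "languages profiled")

# The most frequent characters recorded for English, in descending order of frequency.
print(FREQUENCIES["English"][:10])

# Python codec name mapped to the name chardet historically reported.
print(CHARDET_CORRESPONDENCE["cp1252"])  # "Windows-1252"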
llmeval-env/lib/python3.10/site-packages/charset_normalizer/legacy.py ADDED
@@ -0,0 +1,54 @@
1
+ from typing import Any, Dict, Optional, Union
2
+ from warnings import warn
3
+
4
+ from .api import from_bytes
5
+ from .constant import CHARDET_CORRESPONDENCE
6
+
7
+
8
+ def detect(
9
+ byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
10
+ ) -> Dict[str, Optional[Union[str, float]]]:
11
+ """
12
+ chardet legacy method
13
+ Detect the encoding of the given byte string. It should be mostly backward-compatible.
14
+ The encoding name will match Chardet's own spelling whenever possible (except for encoding names Chardet does not support).
+ This function is deprecated; it is kept to ease migration away from chardet. Consult the documentation for
+ further information. Not planned for removal.
17
+
18
+ :param byte_str: The byte sequence to examine.
19
+ :param should_rename_legacy: Should we rename legacy encodings
20
+ to their more modern equivalents?
21
+ """
22
+ if len(kwargs):
23
+ warn(
24
+ f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()"
25
+ )
26
+
27
+ if not isinstance(byte_str, (bytearray, bytes)):
28
+ raise TypeError( # pragma: nocover
29
+ "Expected object of type bytes or bytearray, got: "
30
+ "{0}".format(type(byte_str))
31
+ )
32
+
33
+ if isinstance(byte_str, bytearray):
34
+ byte_str = bytes(byte_str)
35
+
36
+ r = from_bytes(byte_str).best()
37
+
38
+ encoding = r.encoding if r is not None else None
39
+ language = r.language if r is not None and r.language != "Unknown" else ""
40
+ confidence = 1.0 - r.chaos if r is not None else None
41
+
42
+ # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig gets stripped in the detection/normalization process
43
+ # but chardet does return 'utf-8-sig' and it is a valid codec name.
44
+ if r is not None and encoding == "utf_8" and r.bom:
45
+ encoding += "_sig"
46
+
47
+ if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
48
+ encoding = CHARDET_CORRESPONDENCE[encoding]
49
+
50
+ return {
51
+ "encoding": encoding,
52
+ "language": language,
53
+ "confidence": confidence,
54
+ }
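For orientation, a minimal usage sketch of the legacy detect() wrapper defined above. It is illustrative only, not part of the uploaded legacy.py, and assumes the bundled charset-normalizer package is importable.

# Minimal sketch: chardet-compatible detection via the wrapper above.
from charset_normalizer.legacy import detect

result = detect("Bonjour, ceci est un café très sympa.".encode("utf-8"))
# Mirrors chardet's output shape: encoding name, language, confidence.
print(result["encoding"], result["language"], result["confidence"])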
llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (16.1 kB).
 
llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.py ADDED
@@ -0,0 +1,615 @@
1
+ from functools import lru_cache
2
+ from logging import getLogger
3
+ from typing import List, Optional
4
+
5
+ from .constant import (
6
+ COMMON_SAFE_ASCII_CHARACTERS,
7
+ TRACE,
8
+ UNICODE_SECONDARY_RANGE_KEYWORD,
9
+ )
10
+ from .utils import (
11
+ is_accentuated,
12
+ is_arabic,
13
+ is_arabic_isolated_form,
14
+ is_case_variable,
15
+ is_cjk,
16
+ is_emoticon,
17
+ is_hangul,
18
+ is_hiragana,
19
+ is_katakana,
20
+ is_latin,
21
+ is_punctuation,
22
+ is_separator,
23
+ is_symbol,
24
+ is_thai,
25
+ is_unprintable,
26
+ remove_accent,
27
+ unicode_range,
28
+ )
29
+
30
+
31
+ class MessDetectorPlugin:
32
+ """
33
+ Base abstract class used for mess detection plugins.
34
+ All detectors MUST extend and implement given methods.
35
+ """
36
+
37
+ def eligible(self, character: str) -> bool:
38
+ """
39
+ Determine if given character should be fed in.
40
+ """
41
+ raise NotImplementedError # pragma: nocover
42
+
43
+ def feed(self, character: str) -> None:
44
+ """
45
+ The main routine to be executed upon each character.
+ Insert the logic in which the text would be considered chaotic.
47
+ """
48
+ raise NotImplementedError # pragma: nocover
49
+
50
+ def reset(self) -> None: # pragma: no cover
51
+ """
52
+ Permit to reset the plugin to the initial state.
53
+ """
54
+ raise NotImplementedError
55
+
56
+ @property
57
+ def ratio(self) -> float:
58
+ """
59
+ Compute the chaos ratio based on what your feed() has seen.
60
+ Must NOT be lower than 0.; there is no upper bound.
61
+ """
62
+ raise NotImplementedError # pragma: nocover
63
+
64
+
65
+ class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
66
+ def __init__(self) -> None:
67
+ self._punctuation_count: int = 0
68
+ self._symbol_count: int = 0
69
+ self._character_count: int = 0
70
+
71
+ self._last_printable_char: Optional[str] = None
72
+ self._frenzy_symbol_in_word: bool = False
73
+
74
+ def eligible(self, character: str) -> bool:
75
+ return character.isprintable()
76
+
77
+ def feed(self, character: str) -> None:
78
+ self._character_count += 1
79
+
80
+ if (
81
+ character != self._last_printable_char
82
+ and character not in COMMON_SAFE_ASCII_CHARACTERS
83
+ ):
84
+ if is_punctuation(character):
85
+ self._punctuation_count += 1
86
+ elif (
87
+ character.isdigit() is False
88
+ and is_symbol(character)
89
+ and is_emoticon(character) is False
90
+ ):
91
+ self._symbol_count += 2
92
+
93
+ self._last_printable_char = character
94
+
95
+ def reset(self) -> None: # pragma: no cover
96
+ self._punctuation_count = 0
97
+ self._character_count = 0
98
+ self._symbol_count = 0
99
+
100
+ @property
101
+ def ratio(self) -> float:
102
+ if self._character_count == 0:
103
+ return 0.0
104
+
105
+ ratio_of_punctuation: float = (
106
+ self._punctuation_count + self._symbol_count
107
+ ) / self._character_count
108
+
109
+ return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
110
+
111
+
112
+ class TooManyAccentuatedPlugin(MessDetectorPlugin):
113
+ def __init__(self) -> None:
114
+ self._character_count: int = 0
115
+ self._accentuated_count: int = 0
116
+
117
+ def eligible(self, character: str) -> bool:
118
+ return character.isalpha()
119
+
120
+ def feed(self, character: str) -> None:
121
+ self._character_count += 1
122
+
123
+ if is_accentuated(character):
124
+ self._accentuated_count += 1
125
+
126
+ def reset(self) -> None: # pragma: no cover
127
+ self._character_count = 0
128
+ self._accentuated_count = 0
129
+
130
+ @property
131
+ def ratio(self) -> float:
132
+ if self._character_count < 8:
133
+ return 0.0
134
+
135
+ ratio_of_accentuation: float = self._accentuated_count / self._character_count
136
+ return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
137
+
138
+
139
+ class UnprintablePlugin(MessDetectorPlugin):
140
+ def __init__(self) -> None:
141
+ self._unprintable_count: int = 0
142
+ self._character_count: int = 0
143
+
144
+ def eligible(self, character: str) -> bool:
145
+ return True
146
+
147
+ def feed(self, character: str) -> None:
148
+ if is_unprintable(character):
149
+ self._unprintable_count += 1
150
+ self._character_count += 1
151
+
152
+ def reset(self) -> None: # pragma: no cover
153
+ self._unprintable_count = 0
154
+
155
+ @property
156
+ def ratio(self) -> float:
157
+ if self._character_count == 0:
158
+ return 0.0
159
+
160
+ return (self._unprintable_count * 8) / self._character_count
161
+
162
+
163
+ class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
164
+ def __init__(self) -> None:
165
+ self._successive_count: int = 0
166
+ self._character_count: int = 0
167
+
168
+ self._last_latin_character: Optional[str] = None
169
+
170
+ def eligible(self, character: str) -> bool:
171
+ return character.isalpha() and is_latin(character)
172
+
173
+ def feed(self, character: str) -> None:
174
+ self._character_count += 1
175
+ if (
176
+ self._last_latin_character is not None
177
+ and is_accentuated(character)
178
+ and is_accentuated(self._last_latin_character)
179
+ ):
180
+ if character.isupper() and self._last_latin_character.isupper():
181
+ self._successive_count += 1
182
+ # Worse if its the same char duplicated with different accent.
183
+ if remove_accent(character) == remove_accent(self._last_latin_character):
184
+ self._successive_count += 1
185
+ self._last_latin_character = character
186
+
187
+ def reset(self) -> None: # pragma: no cover
188
+ self._successive_count = 0
189
+ self._character_count = 0
190
+ self._last_latin_character = None
191
+
192
+ @property
193
+ def ratio(self) -> float:
194
+ if self._character_count == 0:
195
+ return 0.0
196
+
197
+ return (self._successive_count * 2) / self._character_count
198
+
199
+
200
+ class SuspiciousRange(MessDetectorPlugin):
201
+ def __init__(self) -> None:
202
+ self._suspicious_successive_range_count: int = 0
203
+ self._character_count: int = 0
204
+ self._last_printable_seen: Optional[str] = None
205
+
206
+ def eligible(self, character: str) -> bool:
207
+ return character.isprintable()
208
+
209
+ def feed(self, character: str) -> None:
210
+ self._character_count += 1
211
+
212
+ if (
213
+ character.isspace()
214
+ or is_punctuation(character)
215
+ or character in COMMON_SAFE_ASCII_CHARACTERS
216
+ ):
217
+ self._last_printable_seen = None
218
+ return
219
+
220
+ if self._last_printable_seen is None:
221
+ self._last_printable_seen = character
222
+ return
223
+
224
+ unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen)
225
+ unicode_range_b: Optional[str] = unicode_range(character)
226
+
227
+ if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
228
+ self._suspicious_successive_range_count += 1
229
+
230
+ self._last_printable_seen = character
231
+
232
+ def reset(self) -> None: # pragma: no cover
233
+ self._character_count = 0
234
+ self._suspicious_successive_range_count = 0
235
+ self._last_printable_seen = None
236
+
237
+ @property
238
+ def ratio(self) -> float:
239
+ if self._character_count <= 24:
240
+ return 0.0
241
+
242
+ ratio_of_suspicious_range_usage: float = (
243
+ self._suspicious_successive_range_count * 2
244
+ ) / self._character_count
245
+
246
+ return ratio_of_suspicious_range_usage
247
+
248
+
249
+ class SuperWeirdWordPlugin(MessDetectorPlugin):
250
+ def __init__(self) -> None:
251
+ self._word_count: int = 0
252
+ self._bad_word_count: int = 0
253
+ self._foreign_long_count: int = 0
254
+
255
+ self._is_current_word_bad: bool = False
256
+ self._foreign_long_watch: bool = False
257
+
258
+ self._character_count: int = 0
259
+ self._bad_character_count: int = 0
260
+
261
+ self._buffer: str = ""
262
+ self._buffer_accent_count: int = 0
263
+
264
+ def eligible(self, character: str) -> bool:
265
+ return True
266
+
267
+ def feed(self, character: str) -> None:
268
+ if character.isalpha():
269
+ self._buffer += character
270
+ if is_accentuated(character):
271
+ self._buffer_accent_count += 1
272
+ if (
273
+ self._foreign_long_watch is False
274
+ and (is_latin(character) is False or is_accentuated(character))
275
+ and is_cjk(character) is False
276
+ and is_hangul(character) is False
277
+ and is_katakana(character) is False
278
+ and is_hiragana(character) is False
279
+ and is_thai(character) is False
280
+ ):
281
+ self._foreign_long_watch = True
282
+ return
283
+ if not self._buffer:
284
+ return
285
+ if (
286
+ character.isspace() or is_punctuation(character) or is_separator(character)
287
+ ) and self._buffer:
288
+ self._word_count += 1
289
+ buffer_length: int = len(self._buffer)
290
+
291
+ self._character_count += buffer_length
292
+
293
+ if buffer_length >= 4:
294
+ if self._buffer_accent_count / buffer_length > 0.34:
295
+ self._is_current_word_bad = True
296
+ # Word/Buffer ending with an upper case accentuated letter are so rare,
297
+ # that we will consider them all as suspicious. Same weight as foreign_long suspicious.
298
+ if (
299
+ is_accentuated(self._buffer[-1])
300
+ and self._buffer[-1].isupper()
301
+ and all(_.isupper() for _ in self._buffer) is False
302
+ ):
303
+ self._foreign_long_count += 1
304
+ self._is_current_word_bad = True
305
+ if buffer_length >= 24 and self._foreign_long_watch:
306
+ camel_case_dst = [
307
+ i
308
+ for c, i in zip(self._buffer, range(0, buffer_length))
309
+ if c.isupper()
310
+ ]
311
+ probable_camel_cased: bool = False
312
+
313
+ if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3):
314
+ probable_camel_cased = True
315
+
316
+ if not probable_camel_cased:
317
+ self._foreign_long_count += 1
318
+ self._is_current_word_bad = True
319
+
320
+ if self._is_current_word_bad:
321
+ self._bad_word_count += 1
322
+ self._bad_character_count += len(self._buffer)
323
+ self._is_current_word_bad = False
324
+
325
+ self._foreign_long_watch = False
326
+ self._buffer = ""
327
+ self._buffer_accent_count = 0
328
+ elif (
329
+ character not in {"<", ">", "-", "=", "~", "|", "_"}
330
+ and character.isdigit() is False
331
+ and is_symbol(character)
332
+ ):
333
+ self._is_current_word_bad = True
334
+ self._buffer += character
335
+
336
+ def reset(self) -> None: # pragma: no cover
337
+ self._buffer = ""
338
+ self._is_current_word_bad = False
339
+ self._foreign_long_watch = False
340
+ self._bad_word_count = 0
341
+ self._word_count = 0
342
+ self._character_count = 0
343
+ self._bad_character_count = 0
344
+ self._foreign_long_count = 0
345
+
346
+ @property
347
+ def ratio(self) -> float:
348
+ if self._word_count <= 10 and self._foreign_long_count == 0:
349
+ return 0.0
350
+
351
+ return self._bad_character_count / self._character_count
352
+
353
+
354
+ class CjkInvalidStopPlugin(MessDetectorPlugin):
355
+ """
356
+ GB (Chinese) based encodings often render the stop incorrectly when the content does not fit, and
+ this can be easily detected. We search for an overuse of '丅' and '丄'.
358
+ """
359
+
360
+ def __init__(self) -> None:
361
+ self._wrong_stop_count: int = 0
362
+ self._cjk_character_count: int = 0
363
+
364
+ def eligible(self, character: str) -> bool:
365
+ return True
366
+
367
+ def feed(self, character: str) -> None:
368
+ if character in {"丅", "丄"}:
369
+ self._wrong_stop_count += 1
370
+ return
371
+ if is_cjk(character):
372
+ self._cjk_character_count += 1
373
+
374
+ def reset(self) -> None: # pragma: no cover
375
+ self._wrong_stop_count = 0
376
+ self._cjk_character_count = 0
377
+
378
+ @property
379
+ def ratio(self) -> float:
380
+ if self._cjk_character_count < 16:
381
+ return 0.0
382
+ return self._wrong_stop_count / self._cjk_character_count
383
+
384
+
385
+ class ArchaicUpperLowerPlugin(MessDetectorPlugin):
386
+ def __init__(self) -> None:
387
+ self._buf: bool = False
388
+
389
+ self._character_count_since_last_sep: int = 0
390
+
391
+ self._successive_upper_lower_count: int = 0
392
+ self._successive_upper_lower_count_final: int = 0
393
+
394
+ self._character_count: int = 0
395
+
396
+ self._last_alpha_seen: Optional[str] = None
397
+ self._current_ascii_only: bool = True
398
+
399
+ def eligible(self, character: str) -> bool:
400
+ return True
401
+
402
+ def feed(self, character: str) -> None:
403
+ is_concerned = character.isalpha() and is_case_variable(character)
404
+ chunk_sep = is_concerned is False
405
+
406
+ if chunk_sep and self._character_count_since_last_sep > 0:
407
+ if (
408
+ self._character_count_since_last_sep <= 64
409
+ and character.isdigit() is False
410
+ and self._current_ascii_only is False
411
+ ):
412
+ self._successive_upper_lower_count_final += (
413
+ self._successive_upper_lower_count
414
+ )
415
+
416
+ self._successive_upper_lower_count = 0
417
+ self._character_count_since_last_sep = 0
418
+ self._last_alpha_seen = None
419
+ self._buf = False
420
+ self._character_count += 1
421
+ self._current_ascii_only = True
422
+
423
+ return
424
+
425
+ if self._current_ascii_only is True and character.isascii() is False:
426
+ self._current_ascii_only = False
427
+
428
+ if self._last_alpha_seen is not None:
429
+ if (character.isupper() and self._last_alpha_seen.islower()) or (
430
+ character.islower() and self._last_alpha_seen.isupper()
431
+ ):
432
+ if self._buf is True:
433
+ self._successive_upper_lower_count += 2
434
+ self._buf = False
435
+ else:
436
+ self._buf = True
437
+ else:
438
+ self._buf = False
439
+
440
+ self._character_count += 1
441
+ self._character_count_since_last_sep += 1
442
+ self._last_alpha_seen = character
443
+
444
+ def reset(self) -> None: # pragma: no cover
445
+ self._character_count = 0
446
+ self._character_count_since_last_sep = 0
447
+ self._successive_upper_lower_count = 0
448
+ self._successive_upper_lower_count_final = 0
449
+ self._last_alpha_seen = None
450
+ self._buf = False
451
+ self._current_ascii_only = True
452
+
453
+ @property
454
+ def ratio(self) -> float:
455
+ if self._character_count == 0:
456
+ return 0.0
457
+
458
+ return self._successive_upper_lower_count_final / self._character_count
459
+
460
+
461
+ class ArabicIsolatedFormPlugin(MessDetectorPlugin):
462
+ def __init__(self) -> None:
463
+ self._character_count: int = 0
464
+ self._isolated_form_count: int = 0
465
+
466
+ def reset(self) -> None: # pragma: no cover
467
+ self._character_count = 0
468
+ self._isolated_form_count = 0
469
+
470
+ def eligible(self, character: str) -> bool:
471
+ return is_arabic(character)
472
+
473
+ def feed(self, character: str) -> None:
474
+ self._character_count += 1
475
+
476
+ if is_arabic_isolated_form(character):
477
+ self._isolated_form_count += 1
478
+
479
+ @property
480
+ def ratio(self) -> float:
481
+ if self._character_count < 8:
482
+ return 0.0
483
+
484
+ isolated_form_usage: float = self._isolated_form_count / self._character_count
485
+
486
+ return isolated_form_usage
487
+
488
+
489
+ @lru_cache(maxsize=1024)
490
+ def is_suspiciously_successive_range(
491
+ unicode_range_a: Optional[str], unicode_range_b: Optional[str]
492
+ ) -> bool:
493
+ """
494
+ Determine if two Unicode ranges seen next to each other can be considered suspicious.
495
+ """
496
+ if unicode_range_a is None or unicode_range_b is None:
497
+ return True
498
+
499
+ if unicode_range_a == unicode_range_b:
500
+ return False
501
+
502
+ if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
503
+ return False
504
+
505
+ if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
506
+ return False
507
+
508
+ # Latin characters can be accompanied with a combining diacritical mark
509
+ # eg. Vietnamese.
510
+ if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
511
+ "Combining" in unicode_range_a or "Combining" in unicode_range_b
512
+ ):
513
+ return False
514
+
515
+ keywords_range_a, keywords_range_b = unicode_range_a.split(
516
+ " "
517
+ ), unicode_range_b.split(" ")
518
+
519
+ for el in keywords_range_a:
520
+ if el in UNICODE_SECONDARY_RANGE_KEYWORD:
521
+ continue
522
+ if el in keywords_range_b:
523
+ return False
524
+
525
+ # Japanese Exception
526
+ range_a_jp_chars, range_b_jp_chars = (
527
+ unicode_range_a
528
+ in (
529
+ "Hiragana",
530
+ "Katakana",
531
+ ),
532
+ unicode_range_b in ("Hiragana", "Katakana"),
533
+ )
534
+ if (range_a_jp_chars or range_b_jp_chars) and (
535
+ "CJK" in unicode_range_a or "CJK" in unicode_range_b
536
+ ):
537
+ return False
538
+ if range_a_jp_chars and range_b_jp_chars:
539
+ return False
540
+
541
+ if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
542
+ if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
543
+ return False
544
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
545
+ return False
546
+
547
+ # Chinese/Japanese use dedicated range for punctuation and/or separators.
548
+ if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
549
+ unicode_range_a in ["Katakana", "Hiragana"]
550
+ and unicode_range_b in ["Katakana", "Hiragana"]
551
+ ):
552
+ if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
553
+ return False
554
+ if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
555
+ return False
556
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
557
+ return False
558
+
559
+ return True
560
+
561
+
562
+ @lru_cache(maxsize=2048)
563
+ def mess_ratio(
564
+ decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
565
+ ) -> float:
566
+ """
567
+ Compute a mess ratio given a decoded byte sequence. The maximum threshold stops the computation early.
568
+ """
569
+
570
+ detectors: List[MessDetectorPlugin] = [
571
+ md_class() for md_class in MessDetectorPlugin.__subclasses__()
572
+ ]
573
+
574
+ length: int = len(decoded_sequence) + 1
575
+
576
+ mean_mess_ratio: float = 0.0
577
+
578
+ if length < 512:
579
+ intermediary_mean_mess_ratio_calc: int = 32
580
+ elif length <= 1024:
581
+ intermediary_mean_mess_ratio_calc = 64
582
+ else:
583
+ intermediary_mean_mess_ratio_calc = 128
584
+
585
+ for character, index in zip(decoded_sequence + "\n", range(length)):
586
+ for detector in detectors:
587
+ if detector.eligible(character):
588
+ detector.feed(character)
589
+
590
+ if (
591
+ index > 0 and index % intermediary_mean_mess_ratio_calc == 0
592
+ ) or index == length - 1:
593
+ mean_mess_ratio = sum(dt.ratio for dt in detectors)
594
+
595
+ if mean_mess_ratio >= maximum_threshold:
596
+ break
597
+
598
+ if debug:
599
+ logger = getLogger("charset_normalizer")
600
+
601
+ logger.log(
602
+ TRACE,
603
+ "Mess-detector extended-analysis start. "
604
+ f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} "
605
+ f"maximum_threshold={maximum_threshold}",
606
+ )
607
+
608
+ if len(decoded_sequence) > 16:
609
+ logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}")
610
+ logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}")
611
+
612
+ for dt in detectors: # pragma: nocover
613
+ logger.log(TRACE, f"{dt.__class__}: {dt.ratio}")
614
+
615
+ return round(mean_mess_ratio, 3)
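For orientation, a minimal sketch of how mess_ratio() defined above behaves. It is illustrative only, not part of the uploaded md.py; exact values depend on the active plugins.

# Minimal sketch: mess_ratio() stays near 0.0 for clean text and grows as the
# decoded text looks more chaotic (mojibake-like).
from charset_normalizer.md import mess_ratio

clean = "I am a standard, perfectly readable sentence."
garbled = "ÃƒÂ©Ã‚Â»Ã‚Â¥ noise Ã¢â‚¬â„¢ÃƒÂ¨"

print(mess_ratio(clean))    # expected to be close to 0.0
print(mess_ratio(garbled))  # expected to be noticeably higher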
llmeval-env/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (268 kB).
 
llmeval-env/lib/python3.10/site-packages/charset_normalizer/models.py ADDED
@@ -0,0 +1,340 @@
1
+ from encodings.aliases import aliases
2
+ from hashlib import sha256
3
+ from json import dumps
4
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
5
+
6
+ from .constant import TOO_BIG_SEQUENCE
7
+ from .utils import iana_name, is_multi_byte_encoding, unicode_range
8
+
9
+
10
+ class CharsetMatch:
11
+ def __init__(
12
+ self,
13
+ payload: bytes,
14
+ guessed_encoding: str,
15
+ mean_mess_ratio: float,
16
+ has_sig_or_bom: bool,
17
+ languages: "CoherenceMatches",
18
+ decoded_payload: Optional[str] = None,
19
+ ):
20
+ self._payload: bytes = payload
21
+
22
+ self._encoding: str = guessed_encoding
23
+ self._mean_mess_ratio: float = mean_mess_ratio
24
+ self._languages: CoherenceMatches = languages
25
+ self._has_sig_or_bom: bool = has_sig_or_bom
26
+ self._unicode_ranges: Optional[List[str]] = None
27
+
28
+ self._leaves: List[CharsetMatch] = []
29
+ self._mean_coherence_ratio: float = 0.0
30
+
31
+ self._output_payload: Optional[bytes] = None
32
+ self._output_encoding: Optional[str] = None
33
+
34
+ self._string: Optional[str] = decoded_payload
35
+
36
+ def __eq__(self, other: object) -> bool:
37
+ if not isinstance(other, CharsetMatch):
38
+ raise TypeError(
39
+ "__eq__ cannot be invoked on {} and {}.".format(
40
+ str(other.__class__), str(self.__class__)
41
+ )
42
+ )
43
+ return self.encoding == other.encoding and self.fingerprint == other.fingerprint
44
+
45
+ def __lt__(self, other: object) -> bool:
46
+ """
47
+ Implemented to make sorted() available on CharsetMatch items.
48
+ """
49
+ if not isinstance(other, CharsetMatch):
50
+ raise ValueError
51
+
52
+ chaos_difference: float = abs(self.chaos - other.chaos)
53
+ coherence_difference: float = abs(self.coherence - other.coherence)
54
+
55
+ # Below 1% difference --> Use Coherence
56
+ if chaos_difference < 0.01 and coherence_difference > 0.02:
57
+ return self.coherence > other.coherence
58
+ elif chaos_difference < 0.01 and coherence_difference <= 0.02:
59
+ # When having a difficult decision, use the result that decoded as many multi-byte as possible.
60
+ # preserve RAM usage!
61
+ if len(self._payload) >= TOO_BIG_SEQUENCE:
62
+ return self.chaos < other.chaos
63
+ return self.multi_byte_usage > other.multi_byte_usage
64
+
65
+ return self.chaos < other.chaos
66
+
67
+ @property
68
+ def multi_byte_usage(self) -> float:
69
+ return 1.0 - (len(str(self)) / len(self.raw))
70
+
71
+ def __str__(self) -> str:
72
+ # Lazy Str Loading
73
+ if self._string is None:
74
+ self._string = str(self._payload, self._encoding, "strict")
75
+ return self._string
76
+
77
+ def __repr__(self) -> str:
78
+ return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
79
+
80
+ def add_submatch(self, other: "CharsetMatch") -> None:
81
+ if not isinstance(other, CharsetMatch) or other == self:
82
+ raise ValueError(
83
+ "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
84
+ other.__class__
85
+ )
86
+ )
87
+
88
+ other._string = None # Unload RAM usage; dirty trick.
89
+ self._leaves.append(other)
90
+
91
+ @property
92
+ def encoding(self) -> str:
93
+ return self._encoding
94
+
95
+ @property
96
+ def encoding_aliases(self) -> List[str]:
97
+ """
98
+ Encodings are known by many names; this can help when searching for IBM855 when it is listed as CP855.
99
+ """
100
+ also_known_as: List[str] = []
101
+ for u, p in aliases.items():
102
+ if self.encoding == u:
103
+ also_known_as.append(p)
104
+ elif self.encoding == p:
105
+ also_known_as.append(u)
106
+ return also_known_as
107
+
108
+ @property
109
+ def bom(self) -> bool:
110
+ return self._has_sig_or_bom
111
+
112
+ @property
113
+ def byte_order_mark(self) -> bool:
114
+ return self._has_sig_or_bom
115
+
116
+ @property
117
+ def languages(self) -> List[str]:
118
+ """
119
+ Return the complete list of possible languages found in the decoded sequence.
+ Usually not very useful. The returned list may be empty even if the 'language' property returns something other than 'Unknown'.
121
+ """
122
+ return [e[0] for e in self._languages]
123
+
124
+ @property
125
+ def language(self) -> str:
126
+ """
127
+ Most probable language found in decoded sequence. If none were detected or inferred, the property will return
128
+ "Unknown".
129
+ """
130
+ if not self._languages:
131
+ # Trying to infer the language based on the given encoding
132
+ # It's either English or we should not take a guess in certain cases.
133
+ if "ascii" in self.could_be_from_charset:
134
+ return "English"
135
+
136
+ # doing it there to avoid circular import
137
+ from charset_normalizer.cd import encoding_languages, mb_encoding_languages
138
+
139
+ languages = (
140
+ mb_encoding_languages(self.encoding)
141
+ if is_multi_byte_encoding(self.encoding)
142
+ else encoding_languages(self.encoding)
143
+ )
144
+
145
+ if len(languages) == 0 or "Latin Based" in languages:
146
+ return "Unknown"
147
+
148
+ return languages[0]
149
+
150
+ return self._languages[0][0]
151
+
152
+ @property
153
+ def chaos(self) -> float:
154
+ return self._mean_mess_ratio
155
+
156
+ @property
157
+ def coherence(self) -> float:
158
+ if not self._languages:
159
+ return 0.0
160
+ return self._languages[0][1]
161
+
162
+ @property
163
+ def percent_chaos(self) -> float:
164
+ return round(self.chaos * 100, ndigits=3)
165
+
166
+ @property
167
+ def percent_coherence(self) -> float:
168
+ return round(self.coherence * 100, ndigits=3)
169
+
170
+ @property
171
+ def raw(self) -> bytes:
172
+ """
173
+ Original untouched bytes.
174
+ """
175
+ return self._payload
176
+
177
+ @property
178
+ def submatch(self) -> List["CharsetMatch"]:
179
+ return self._leaves
180
+
181
+ @property
182
+ def has_submatch(self) -> bool:
183
+ return len(self._leaves) > 0
184
+
185
+ @property
186
+ def alphabets(self) -> List[str]:
187
+ if self._unicode_ranges is not None:
188
+ return self._unicode_ranges
189
+ # list detected ranges
190
+ detected_ranges: List[Optional[str]] = [
191
+ unicode_range(char) for char in str(self)
192
+ ]
193
+ # filter and sort
194
+ self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
195
+ return self._unicode_ranges
196
+
197
+ @property
198
+ def could_be_from_charset(self) -> List[str]:
199
+ """
200
+ The complete list of encodings that produce the exact SAME str result and therefore could be the originating
201
+ encoding.
202
+ This list does include the encoding available in the 'encoding' property.
203
+ """
204
+ return [self._encoding] + [m.encoding for m in self._leaves]
205
+
206
+ def output(self, encoding: str = "utf_8") -> bytes:
207
+ """
208
+ Method to get the re-encoded bytes payload using the given target encoding. Defaults to UTF-8.
209
+ Characters that cannot be encoded are replaced by the encoder (errors="replace"), not dropped.
210
+ """
211
+ if self._output_encoding is None or self._output_encoding != encoding:
212
+ self._output_encoding = encoding
213
+ self._output_payload = str(self).encode(encoding, "replace")
214
+
215
+ return self._output_payload # type: ignore
216
+
217
+ @property
218
+ def fingerprint(self) -> str:
219
+ """
220
+ Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
221
+ """
222
+ return sha256(self.output()).hexdigest()
223
+
224
+
225
+ class CharsetMatches:
226
+ """
227
+ Container holding CharsetMatch items, ordered by default from the most probable to the least probable.
228
+ Acts like a list (iterable) but does not implement all related methods.
229
+ """
230
+
231
+ def __init__(self, results: Optional[List[CharsetMatch]] = None):
232
+ self._results: List[CharsetMatch] = sorted(results) if results else []
233
+
234
+ def __iter__(self) -> Iterator[CharsetMatch]:
235
+ yield from self._results
236
+
237
+ def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
238
+ """
239
+ Retrieve a single item either by its position or encoding name (alias may be used here).
240
+ Raise KeyError upon invalid index or encoding not present in results.
241
+ """
242
+ if isinstance(item, int):
243
+ return self._results[item]
244
+ if isinstance(item, str):
245
+ item = iana_name(item, False)
246
+ for result in self._results:
247
+ if item in result.could_be_from_charset:
248
+ return result
249
+ raise KeyError
250
+
251
+ def __len__(self) -> int:
252
+ return len(self._results)
253
+
254
+ def __bool__(self) -> bool:
255
+ return len(self._results) > 0
256
+
257
+ def append(self, item: CharsetMatch) -> None:
258
+ """
259
+ Insert a single match. It will be inserted so as to preserve the sort order.
260
+ It may instead be attached as a submatch of an equivalent existing result.
261
+ """
262
+ if not isinstance(item, CharsetMatch):
263
+ raise ValueError(
264
+ "Cannot append instance '{}' to CharsetMatches".format(
265
+ str(item.__class__)
266
+ )
267
+ )
268
+ # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
269
+ if len(item.raw) <= TOO_BIG_SEQUENCE:
270
+ for match in self._results:
271
+ if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
272
+ match.add_submatch(item)
273
+ return
274
+ self._results.append(item)
275
+ self._results = sorted(self._results)
276
+
277
+ def best(self) -> Optional["CharsetMatch"]:
278
+ """
279
+ Simply return the first match. Strict equivalent to matches[0].
280
+ """
281
+ if not self._results:
282
+ return None
283
+ return self._results[0]
284
+
285
+ def first(self) -> Optional["CharsetMatch"]:
286
+ """
287
+ Redundant method; simply calls best(). Kept for backward-compatibility reasons.
288
+ """
289
+ return self.best()
290
+
291
+
292
+ CoherenceMatch = Tuple[str, float]
293
+ CoherenceMatches = List[CoherenceMatch]
294
+
295
+
296
+ class CliDetectionResult:
297
+ def __init__(
298
+ self,
299
+ path: str,
300
+ encoding: Optional[str],
301
+ encoding_aliases: List[str],
302
+ alternative_encodings: List[str],
303
+ language: str,
304
+ alphabets: List[str],
305
+ has_sig_or_bom: bool,
306
+ chaos: float,
307
+ coherence: float,
308
+ unicode_path: Optional[str],
309
+ is_preferred: bool,
310
+ ):
311
+ self.path: str = path
312
+ self.unicode_path: Optional[str] = unicode_path
313
+ self.encoding: Optional[str] = encoding
314
+ self.encoding_aliases: List[str] = encoding_aliases
315
+ self.alternative_encodings: List[str] = alternative_encodings
316
+ self.language: str = language
317
+ self.alphabets: List[str] = alphabets
318
+ self.has_sig_or_bom: bool = has_sig_or_bom
319
+ self.chaos: float = chaos
320
+ self.coherence: float = coherence
321
+ self.is_preferred: bool = is_preferred
322
+
323
+ @property
324
+ def __dict__(self) -> Dict[str, Any]: # type: ignore
325
+ return {
326
+ "path": self.path,
327
+ "encoding": self.encoding,
328
+ "encoding_aliases": self.encoding_aliases,
329
+ "alternative_encodings": self.alternative_encodings,
330
+ "language": self.language,
331
+ "alphabets": self.alphabets,
332
+ "has_sig_or_bom": self.has_sig_or_bom,
333
+ "chaos": self.chaos,
334
+ "coherence": self.coherence,
335
+ "unicode_path": self.unicode_path,
336
+ "is_preferred": self.is_preferred,
337
+ }
338
+
339
+ def to_json(self) -> str:
340
+ return dumps(self.__dict__, ensure_ascii=True, indent=4)
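A minimal usage sketch for the CharsetMatch / CharsetMatches classes defined above. It assumes the usual charset_normalizer.from_bytes() entry point (defined in api.py, not in this file); everything else shown comes from the methods and properties above.

    from charset_normalizer import from_bytes

    payload = "Bonjour, où êtes-vous ?".encode("cp1252")
    results = from_bytes(payload)          # a CharsetMatches container
    best = results.best()                  # most probable CharsetMatch, or None
    if best is not None:
        print(best.encoding, best.language, best.percent_chaos)
        print(best.could_be_from_charset)  # encodings yielding the same str
        utf8_payload = best.output()       # re-encoded bytes, UTF-8 by default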
llmeval-env/lib/python3.10/site-packages/charset_normalizer/py.typed ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/charset_normalizer/utils.py ADDED
@@ -0,0 +1,421 @@
1
+ import importlib
2
+ import logging
3
+ import unicodedata
4
+ from codecs import IncrementalDecoder
5
+ from encodings.aliases import aliases
6
+ from functools import lru_cache
7
+ from re import findall
8
+ from typing import Generator, List, Optional, Set, Tuple, Union
9
+
10
+ from _multibytecodec import MultibyteIncrementalDecoder
11
+
12
+ from .constant import (
13
+ ENCODING_MARKS,
14
+ IANA_SUPPORTED_SIMILAR,
15
+ RE_POSSIBLE_ENCODING_INDICATION,
16
+ UNICODE_RANGES_COMBINED,
17
+ UNICODE_SECONDARY_RANGE_KEYWORD,
18
+ UTF8_MAXIMAL_ALLOCATION,
19
+ )
20
+
21
+
22
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
23
+ def is_accentuated(character: str) -> bool:
24
+ try:
25
+ description: str = unicodedata.name(character)
26
+ except ValueError:
27
+ return False
28
+ return (
29
+ "WITH GRAVE" in description
30
+ or "WITH ACUTE" in description
31
+ or "WITH CEDILLA" in description
32
+ or "WITH DIAERESIS" in description
33
+ or "WITH CIRCUMFLEX" in description
34
+ or "WITH TILDE" in description
35
+ or "WITH MACRON" in description
36
+ or "WITH RING ABOVE" in description
37
+ )
38
+
39
+
40
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
41
+ def remove_accent(character: str) -> str:
42
+ decomposed: str = unicodedata.decomposition(character)
43
+ if not decomposed:
44
+ return character
45
+
46
+ codes: List[str] = decomposed.split(" ")
47
+
48
+ return chr(int(codes[0], 16))
49
+
50
+
51
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
52
+ def unicode_range(character: str) -> Optional[str]:
53
+ """
54
+ Retrieve the Unicode range official name from a single character.
55
+ """
56
+ character_ord: int = ord(character)
57
+
58
+ for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
59
+ if character_ord in ord_range:
60
+ return range_name
61
+
62
+ return None
63
+
64
+
65
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
66
+ def is_latin(character: str) -> bool:
67
+ try:
68
+ description: str = unicodedata.name(character)
69
+ except ValueError:
70
+ return False
71
+ return "LATIN" in description
72
+
73
+
74
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
75
+ def is_punctuation(character: str) -> bool:
76
+ character_category: str = unicodedata.category(character)
77
+
78
+ if "P" in character_category:
79
+ return True
80
+
81
+ character_range: Optional[str] = unicode_range(character)
82
+
83
+ if character_range is None:
84
+ return False
85
+
86
+ return "Punctuation" in character_range
87
+
88
+
89
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
90
+ def is_symbol(character: str) -> bool:
91
+ character_category: str = unicodedata.category(character)
92
+
93
+ if "S" in character_category or "N" in character_category:
94
+ return True
95
+
96
+ character_range: Optional[str] = unicode_range(character)
97
+
98
+ if character_range is None:
99
+ return False
100
+
101
+ return "Forms" in character_range and character_category != "Lo"
102
+
103
+
104
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
105
+ def is_emoticon(character: str) -> bool:
106
+ character_range: Optional[str] = unicode_range(character)
107
+
108
+ if character_range is None:
109
+ return False
110
+
111
+ return "Emoticons" in character_range or "Pictographs" in character_range
112
+
113
+
114
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
115
+ def is_separator(character: str) -> bool:
116
+ if character.isspace() or character in {"|", "+", "<", ">"}:
117
+ return True
118
+
119
+ character_category: str = unicodedata.category(character)
120
+
121
+ return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
122
+
123
+
124
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
125
+ def is_case_variable(character: str) -> bool:
126
+ return character.islower() != character.isupper()
127
+
128
+
129
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
130
+ def is_cjk(character: str) -> bool:
131
+ try:
132
+ character_name = unicodedata.name(character)
133
+ except ValueError:
134
+ return False
135
+
136
+ return "CJK" in character_name
137
+
138
+
139
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
140
+ def is_hiragana(character: str) -> bool:
141
+ try:
142
+ character_name = unicodedata.name(character)
143
+ except ValueError:
144
+ return False
145
+
146
+ return "HIRAGANA" in character_name
147
+
148
+
149
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
150
+ def is_katakana(character: str) -> bool:
151
+ try:
152
+ character_name = unicodedata.name(character)
153
+ except ValueError:
154
+ return False
155
+
156
+ return "KATAKANA" in character_name
157
+
158
+
159
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
160
+ def is_hangul(character: str) -> bool:
161
+ try:
162
+ character_name = unicodedata.name(character)
163
+ except ValueError:
164
+ return False
165
+
166
+ return "HANGUL" in character_name
167
+
168
+
169
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
170
+ def is_thai(character: str) -> bool:
171
+ try:
172
+ character_name = unicodedata.name(character)
173
+ except ValueError:
174
+ return False
175
+
176
+ return "THAI" in character_name
177
+
178
+
179
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
180
+ def is_arabic(character: str) -> bool:
181
+ try:
182
+ character_name = unicodedata.name(character)
183
+ except ValueError:
184
+ return False
185
+
186
+ return "ARABIC" in character_name
187
+
188
+
189
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
190
+ def is_arabic_isolated_form(character: str) -> bool:
191
+ try:
192
+ character_name = unicodedata.name(character)
193
+ except ValueError:
194
+ return False
195
+
196
+ return "ARABIC" in character_name and "ISOLATED FORM" in character_name
197
+
198
+
199
+ @lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
200
+ def is_unicode_range_secondary(range_name: str) -> bool:
201
+ return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
202
+
203
+
204
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
205
+ def is_unprintable(character: str) -> bool:
206
+ return (
207
+ character.isspace() is False # includes \n \t \r \v
208
+ and character.isprintable() is False
209
+ and character != "\x1A" # Why? It's the ASCII substitute character.
210
+ and character != "\ufeff" # bug discovered in Python,
211
+ # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
212
+ )
213
+
214
+
215
+ def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> Optional[str]:
216
+ """
217
+ Extract, using an ASCII-only decoder, any specified encoding declared in the first n bytes.
218
+ """
219
+ if not isinstance(sequence, bytes):
220
+ raise TypeError
221
+
222
+ seq_len: int = len(sequence)
223
+
224
+ results: List[str] = findall(
225
+ RE_POSSIBLE_ENCODING_INDICATION,
226
+ sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
227
+ )
228
+
229
+ if len(results) == 0:
230
+ return None
231
+
232
+ for specified_encoding in results:
233
+ specified_encoding = specified_encoding.lower().replace("-", "_")
234
+
235
+ encoding_alias: str
236
+ encoding_iana: str
237
+
238
+ for encoding_alias, encoding_iana in aliases.items():
239
+ if encoding_alias == specified_encoding:
240
+ return encoding_iana
241
+ if encoding_iana == specified_encoding:
242
+ return encoding_iana
243
+
244
+ return None
245
+
246
+
247
+ @lru_cache(maxsize=128)
248
+ def is_multi_byte_encoding(name: str) -> bool:
249
+ """
250
+ Verify whether a specific encoding is a multi-byte one, based on its IANA name.
251
+ """
252
+ return name in {
253
+ "utf_8",
254
+ "utf_8_sig",
255
+ "utf_16",
256
+ "utf_16_be",
257
+ "utf_16_le",
258
+ "utf_32",
259
+ "utf_32_le",
260
+ "utf_32_be",
261
+ "utf_7",
262
+ } or issubclass(
263
+ importlib.import_module("encodings.{}".format(name)).IncrementalDecoder,
264
+ MultibyteIncrementalDecoder,
265
+ )
266
+
267
+
268
+ def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
269
+ """
270
+ Identify and extract SIG/BOM in given sequence.
271
+ """
272
+
273
+ for iana_encoding in ENCODING_MARKS:
274
+ marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding]
275
+
276
+ if isinstance(marks, bytes):
277
+ marks = [marks]
278
+
279
+ for mark in marks:
280
+ if sequence.startswith(mark):
281
+ return iana_encoding, mark
282
+
283
+ return None, b""
284
+
285
+
286
+ def should_strip_sig_or_bom(iana_encoding: str) -> bool:
287
+ return iana_encoding not in {"utf_16", "utf_32"}
288
+
289
+
290
+ def iana_name(cp_name: str, strict: bool = True) -> str:
291
+ cp_name = cp_name.lower().replace("-", "_")
292
+
293
+ encoding_alias: str
294
+ encoding_iana: str
295
+
296
+ for encoding_alias, encoding_iana in aliases.items():
297
+ if cp_name in [encoding_alias, encoding_iana]:
298
+ return encoding_iana
299
+
300
+ if strict:
301
+ raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
302
+
303
+ return cp_name
304
+
305
+
306
+ def range_scan(decoded_sequence: str) -> List[str]:
307
+ ranges: Set[str] = set()
308
+
309
+ for character in decoded_sequence:
310
+ character_range: Optional[str] = unicode_range(character)
311
+
312
+ if character_range is None:
313
+ continue
314
+
315
+ ranges.add(character_range)
316
+
317
+ return list(ranges)
318
+
319
+
320
+ def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
321
+ if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
322
+ return 0.0
323
+
324
+ decoder_a = importlib.import_module(
325
+ "encodings.{}".format(iana_name_a)
326
+ ).IncrementalDecoder
327
+ decoder_b = importlib.import_module(
328
+ "encodings.{}".format(iana_name_b)
329
+ ).IncrementalDecoder
330
+
331
+ id_a: IncrementalDecoder = decoder_a(errors="ignore")
332
+ id_b: IncrementalDecoder = decoder_b(errors="ignore")
333
+
334
+ character_match_count: int = 0
335
+
336
+ for i in range(255):
337
+ to_be_decoded: bytes = bytes([i])
338
+ if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
339
+ character_match_count += 1
340
+
341
+ return character_match_count / 254
342
+
343
+
344
+ def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
345
+ """
346
+ Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR dict was generated using
347
+ the cp_similarity function.
348
+ """
349
+ return (
350
+ iana_name_a in IANA_SUPPORTED_SIMILAR
351
+ and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
352
+ )
353
+
354
+
355
+ def set_logging_handler(
356
+ name: str = "charset_normalizer",
357
+ level: int = logging.INFO,
358
+ format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
359
+ ) -> None:
360
+ logger = logging.getLogger(name)
361
+ logger.setLevel(level)
362
+
363
+ handler = logging.StreamHandler()
364
+ handler.setFormatter(logging.Formatter(format_string))
365
+ logger.addHandler(handler)
366
+
367
+
368
+ def cut_sequence_chunks(
369
+ sequences: bytes,
370
+ encoding_iana: str,
371
+ offsets: range,
372
+ chunk_size: int,
373
+ bom_or_sig_available: bool,
374
+ strip_sig_or_bom: bool,
375
+ sig_payload: bytes,
376
+ is_multi_byte_decoder: bool,
377
+ decoded_payload: Optional[str] = None,
378
+ ) -> Generator[str, None, None]:
379
+ if decoded_payload and is_multi_byte_decoder is False:
380
+ for i in offsets:
381
+ chunk = decoded_payload[i : i + chunk_size]
382
+ if not chunk:
383
+ break
384
+ yield chunk
385
+ else:
386
+ for i in offsets:
387
+ chunk_end = i + chunk_size
388
+ if chunk_end > len(sequences) + 8:
389
+ continue
390
+
391
+ cut_sequence = sequences[i : i + chunk_size]
392
+
393
+ if bom_or_sig_available and strip_sig_or_bom is False:
394
+ cut_sequence = sig_payload + cut_sequence
395
+
396
+ chunk = cut_sequence.decode(
397
+ encoding_iana,
398
+ errors="ignore" if is_multi_byte_decoder else "strict",
399
+ )
400
+
401
+ # multi-byte bad cutting detector and adjustment
402
+ # not the cleanest way to perform that fix but clever enough for now.
403
+ if is_multi_byte_decoder and i > 0:
404
+ chunk_partial_size_chk: int = min(chunk_size, 16)
405
+
406
+ if (
407
+ decoded_payload
408
+ and chunk[:chunk_partial_size_chk] not in decoded_payload
409
+ ):
410
+ for j in range(i, i - 4, -1):
411
+ cut_sequence = sequences[j:chunk_end]
412
+
413
+ if bom_or_sig_available and strip_sig_or_bom is False:
414
+ cut_sequence = sig_payload + cut_sequence
415
+
416
+ chunk = cut_sequence.decode(encoding_iana, errors="ignore")
417
+
418
+ if chunk[:chunk_partial_size_chk] in decoded_payload:
419
+ break
420
+
421
+ yield chunk
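A short illustration of the helpers above (all defined in this utils.py). The expected outputs in the comments follow from the constant tables in constant.py and are assumptions that may differ slightly between versions.

    from charset_normalizer.utils import (
        iana_name, identify_sig_or_bom, is_multi_byte_encoding, unicode_range,
    )

    print(unicode_range("é"))                # e.g. 'Latin-1 Supplement'
    print(iana_name("UTF-8"))                # 'utf_8' (alias normalization)
    print(is_multi_byte_encoding("utf_16"))  # True
    print(identify_sig_or_bom(b"\xef\xbb\xbfabc"))  # ('utf_8', b'\xef\xbb\xbf') if the UTF-8 BOM is registered in ENCODING_MARKS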
llmeval-env/lib/python3.10/site-packages/charset_normalizer/version.py ADDED
@@ -0,0 +1,6 @@
1
+ """
2
+ Expose version
3
+ """
4
+
5
+ __version__ = "3.3.2"
6
+ VERSION = __version__.split(".")
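As a quick sanity check of what version.py exposes (both values follow directly from the two assignments above):

    import charset_normalizer.version as v
    print(v.__version__)  # '3.3.2'
    print(v.VERSION)      # ['3', '3', '2'] -- a list of strings, not integers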
llmeval-env/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ from . import cloudpickle
2
+ from .cloudpickle import * # noqa
3
+
4
+ __doc__ = cloudpickle.__doc__
5
+
6
+ __version__ = "3.0.0"
7
+
8
+ __all__ = [ # noqa
9
+ "__version__",
10
+ "Pickler",
11
+ "CloudPickler",
12
+ "dumps",
13
+ "loads",
14
+ "dump",
15
+ "load",
16
+ "register_pickle_by_value",
17
+ "unregister_pickle_by_value",
18
+ ]
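A brief sketch of how this vendored cloudpickle package is typically used, based only on the names exported in __all__ above; some_module below is a placeholder, not a real module.

    from joblib.externals import cloudpickle

    def local_double(x):            # locally defined, so pickled by value
        return x * 2

    blob = cloudpickle.dumps(local_double)
    restored = cloudpickle.loads(blob)
    assert restored(21) == 42

    # Optionally force an importable module to be pickled by value:
    # cloudpickle.register_pickle_by_value(some_module)
    # cloudpickle.unregister_pickle_by_value(some_module)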
llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (424 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc ADDED
Binary file (36.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc ADDED
Binary file (614 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py ADDED
@@ -0,0 +1,1487 @@
1
+ """Pickler class to extend the standard pickle.Pickler functionality
2
+
3
+ The main objective is to make it natural to perform distributed computing on
4
+ clusters (such as PySpark, Dask, Ray...) with interactively defined code
5
+ (functions, classes, ...) written in notebooks or console.
6
+
7
+ In particular this pickler adds the following features:
8
+ - serialize interactively-defined or locally-defined functions, classes,
9
+ enums, typevars, lambdas and nested functions to compiled byte code;
10
+ - deal with some other non-serializable objects in an ad-hoc manner where
11
+ applicable.
12
+
13
+ This pickler is therefore meant to be used for the communication between short
14
+ lived Python processes running the same version of Python and libraries. In
15
+ particular, it is not meant to be used for long term storage of Python objects.
16
+
17
+ It does not include an unpickler, as standard Python unpickling suffices.
18
+
19
+ This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
20
+ <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
21
+
22
+ Copyright (c) 2012-now, CloudPickle developers and contributors.
23
+ Copyright (c) 2012, Regents of the University of California.
24
+ Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
25
+ All rights reserved.
26
+
27
+ Redistribution and use in source and binary forms, with or without
28
+ modification, are permitted provided that the following conditions
29
+ are met:
30
+ * Redistributions of source code must retain the above copyright
31
+ notice, this list of conditions and the following disclaimer.
32
+ * Redistributions in binary form must reproduce the above copyright
33
+ notice, this list of conditions and the following disclaimer in the
34
+ documentation and/or other materials provided with the distribution.
35
+ * Neither the name of the University of California, Berkeley nor the
36
+ names of its contributors may be used to endorse or promote
37
+ products derived from this software without specific prior written
38
+ permission.
39
+
40
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
43
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
44
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
46
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
47
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
48
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
49
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
50
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51
+ """
52
+
53
+ import _collections_abc
54
+ from collections import ChainMap, OrderedDict
55
+ import abc
56
+ import builtins
57
+ import copyreg
58
+ import dataclasses
59
+ import dis
60
+ from enum import Enum
61
+ import io
62
+ import itertools
63
+ import logging
64
+ import opcode
65
+ import pickle
66
+ from pickle import _getattribute
67
+ import platform
68
+ import struct
69
+ import sys
70
+ import threading
71
+ import types
72
+ import typing
73
+ import uuid
74
+ import warnings
75
+ import weakref
76
+
77
+ # The following import is required to be imported in the cloudpickle
78
+ # namespace to be able to load pickle files generated with older versions of
79
+ # cloudpickle. See: tests/test_backward_compat.py
80
+ from types import CellType # noqa: F401
81
+
82
+
83
+ # cloudpickle is meant for inter process communication: we expect all
84
+ # communicating processes to run the same Python version hence we favor
85
+ # communication speed over compatibility:
86
+ DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
87
+
88
+ # Names of modules whose resources should be treated as dynamic.
89
+ _PICKLE_BY_VALUE_MODULES = set()
90
+
91
+ # Track the provenance of reconstructed dynamic classes to make it possible to
92
+ # reconstruct instances from the matching singleton class definition when
93
+ # appropriate and preserve the usual "isinstance" semantics of Python objects.
94
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
95
+ _DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
96
+ _DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
97
+
98
+ PYPY = platform.python_implementation() == "PyPy"
99
+
100
+ builtin_code_type = None
101
+ if PYPY:
102
+ # builtin-code objects only exist in pypy
103
+ builtin_code_type = type(float.__new__.__code__)
104
+
105
+ _extract_code_globals_cache = weakref.WeakKeyDictionary()
106
+
107
+
108
+ def _get_or_create_tracker_id(class_def):
109
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
110
+ class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
111
+ if class_tracker_id is None:
112
+ class_tracker_id = uuid.uuid4().hex
113
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
114
+ _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
115
+ return class_tracker_id
116
+
117
+
118
+ def _lookup_class_or_track(class_tracker_id, class_def):
119
+ if class_tracker_id is not None:
120
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
121
+ class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
122
+ class_tracker_id, class_def
123
+ )
124
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
125
+ return class_def
126
+
127
+
128
+ def register_pickle_by_value(module):
129
+ """Register a module to make its functions and classes picklable by value.
130
+
131
+ By default, functions and classes that are attributes of an importable
132
+ module are to be pickled by reference, that is relying on re-importing
133
+ the attribute from the module at load time.
134
+
135
+ If `register_pickle_by_value(module)` is called, all its functions and
136
+ classes are subsequently to be pickled by value, meaning that they can
137
+ be loaded in Python processes where the module is not importable.
138
+
139
+ This is especially useful when developing a module in a distributed
140
+ execution environment: restarting the client Python process with the new
141
+ source code is enough: there is no need to re-install the new version
142
+ of the module on all the worker nodes nor to restart the workers.
143
+
144
+ Note: this feature is considered experimental. See the cloudpickle
145
+ README.md file for more details and limitations.
146
+ """
147
+ if not isinstance(module, types.ModuleType):
148
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
149
+ # In the future, cloudpickle may need a way to access any module registered
150
+ # for pickling by value in order to introspect relative imports inside
151
+ # functions pickled by value. (see
152
+ # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
153
+ # This access can be ensured by checking that module is present in
154
+ # sys.modules at registering time and assuming that it will still be in
155
+ # there when accessed during pickling. Another alternative would be to
156
+ # store a weakref to the module. Even though cloudpickle does not implement
157
+ # this introspection yet, in order to avoid a possible breaking change
158
+ # later, we still enforce the presence of module inside sys.modules.
159
+ if module.__name__ not in sys.modules:
160
+ raise ValueError(
161
+ f"{module} was not imported correctly, have you used an "
162
+ "`import` statement to access it?"
163
+ )
164
+ _PICKLE_BY_VALUE_MODULES.add(module.__name__)
165
+
166
+
167
+ def unregister_pickle_by_value(module):
168
+ """Unregister that the input module should be pickled by value."""
169
+ if not isinstance(module, types.ModuleType):
170
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
171
+ if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
172
+ raise ValueError(f"{module} is not registered for pickle by value")
173
+ else:
174
+ _PICKLE_BY_VALUE_MODULES.remove(module.__name__)
175
+
176
+
177
+ def list_registry_pickle_by_value():
178
+ return _PICKLE_BY_VALUE_MODULES.copy()
179
+
180
+
181
+ def _is_registered_pickle_by_value(module):
182
+ module_name = module.__name__
183
+ if module_name in _PICKLE_BY_VALUE_MODULES:
184
+ return True
185
+ while True:
186
+ parent_name = module_name.rsplit(".", 1)[0]
187
+ if parent_name == module_name:
188
+ break
189
+ if parent_name in _PICKLE_BY_VALUE_MODULES:
190
+ return True
191
+ module_name = parent_name
192
+ return False
193
+
194
+
195
+ def _whichmodule(obj, name):
196
+ """Find the module an object belongs to.
197
+
198
+ This function differs from ``pickle.whichmodule`` in two ways:
199
+ - it does not mangle the cases where obj's module is __main__ and obj was
200
+ not found in any module.
201
+ - Errors arising during module introspection are ignored, as those errors
202
+ are considered unwanted side effects.
203
+ """
204
+ module_name = getattr(obj, "__module__", None)
205
+
206
+ if module_name is not None:
207
+ return module_name
208
+ # Protect the iteration by using a copy of sys.modules against dynamic
209
+ # modules that trigger imports of other modules upon calls to getattr or
210
+ # other threads importing at the same time.
211
+ for module_name, module in sys.modules.copy().items():
212
+ # Some modules such as coverage can inject non-module objects inside
213
+ # sys.modules
214
+ if (
215
+ module_name == "__main__"
216
+ or module is None
217
+ or not isinstance(module, types.ModuleType)
218
+ ):
219
+ continue
220
+ try:
221
+ if _getattribute(module, name)[0] is obj:
222
+ return module_name
223
+ except Exception:
224
+ pass
225
+ return None
226
+
227
+
228
+ def _should_pickle_by_reference(obj, name=None):
229
+ """Test whether a function or a class should be pickled by reference
230
+
231
+ Pickling by reference means that the object (typically a function or a
232
+ class) is an attribute of a module that is assumed to be importable in the
233
+ target Python environment. Loading will therefore rely on importing the
234
+ module and then calling `getattr` on it to access the function or class.
235
+
236
+ Pickling by reference is the only option to pickle functions and classes
237
+ in the standard library. In cloudpickle the alternative option is to
238
+ pickle by value (for instance for interactively or locally defined
239
+ functions and classes or for attributes of modules that have been
240
+ explicitly registered to be pickled by value).
241
+ """
242
+ if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
243
+ module_and_name = _lookup_module_and_qualname(obj, name=name)
244
+ if module_and_name is None:
245
+ return False
246
+ module, name = module_and_name
247
+ return not _is_registered_pickle_by_value(module)
248
+
249
+ elif isinstance(obj, types.ModuleType):
250
+ # We assume that sys.modules is primarily used as a cache mechanism for
251
+ # the Python import machinery. Checking if a module has been added to
253
+ # sys.modules is therefore a cheap and simple heuristic to tell us
253
+ # whether we can assume that a given module could be imported by name
254
+ # in another Python process.
255
+ if _is_registered_pickle_by_value(obj):
256
+ return False
257
+ return obj.__name__ in sys.modules
258
+ else:
259
+ raise TypeError(
260
+ "cannot check importability of {} instances".format(type(obj).__name__)
261
+ )
262
+
263
+
264
+ def _lookup_module_and_qualname(obj, name=None):
265
+ if name is None:
266
+ name = getattr(obj, "__qualname__", None)
267
+ if name is None: # pragma: no cover
268
+ # This used to be needed for Python 2.7 support but is probably not
269
+ # needed anymore. However we keep the __name__ introspection in case
270
+ # users of cloudpickle rely on this old behavior for unknown reasons.
271
+ name = getattr(obj, "__name__", None)
272
+
273
+ module_name = _whichmodule(obj, name)
274
+
275
+ if module_name is None:
276
+ # In this case, obj.__module__ is None AND obj was not found in any
277
+ # imported module. obj is thus treated as dynamic.
278
+ return None
279
+
280
+ if module_name == "__main__":
281
+ return None
282
+
283
+ # Note: if module_name is in sys.modules, the corresponding module is
284
+ # assumed importable at unpickling time. See #357
285
+ module = sys.modules.get(module_name, None)
286
+ if module is None:
287
+ # The main reason why obj's module would not be imported is that this
288
+ # module has been dynamically created, using for example
289
+ # types.ModuleType. The other possibility is that module was removed
290
+ # from sys.modules after obj was created/imported. But this case is not
291
+ # supported, as the standard pickle does not support it either.
292
+ return None
293
+
294
+ try:
295
+ obj2, parent = _getattribute(module, name)
296
+ except AttributeError:
297
+ # obj was not found inside the module it points to
298
+ return None
299
+ if obj2 is not obj:
300
+ return None
301
+ return module, name
302
+
303
+
304
+ def _extract_code_globals(co):
305
+ """Find all global names read or written to by code block co."""
306
+ out_names = _extract_code_globals_cache.get(co)
307
+ if out_names is None:
308
+ # We use a dict with None values instead of a set to get a
309
+ # deterministic order and avoid introducing non-deterministic pickle
310
+ # bytes as a result.
311
+ out_names = {name: None for name in _walk_global_ops(co)}
312
+
313
+ # Declaring a function inside another one using the "def ..." syntax
314
+ # generates a constant code object corresponding to that of the
315
+ # nested function. As the nested function may itself need global
316
+ # variables, we need to introspect its code, extract its globals (look
317
+ # for code objects in its co_consts attribute) and add the result to
318
+ # code_globals.
319
+ if co.co_consts:
320
+ for const in co.co_consts:
321
+ if isinstance(const, types.CodeType):
322
+ out_names.update(_extract_code_globals(const))
323
+
324
+ _extract_code_globals_cache[co] = out_names
325
+
326
+ return out_names
327
+
328
+
329
+ def _find_imported_submodules(code, top_level_dependencies):
330
+ """Find currently imported submodules used by a function.
331
+
332
+ Submodules used by a function need to be detected and referenced for the
333
+ function to work correctly at depickling time. Because submodules can be
334
+ referenced as attributes of their parent package (``package.submodule``), we
335
+ need a special introspection technique that does not rely on GLOBAL-related
336
+ opcodes to find references of them in a code object.
337
+
338
+ Example:
339
+ ```
340
+ import concurrent.futures
341
+ import cloudpickle
342
+ def func():
343
+ x = concurrent.futures.ThreadPoolExecutor
344
+ if __name__ == '__main__':
345
+ cloudpickle.dumps(func)
346
+ ```
347
+ The globals extracted by cloudpickle in the function's state include the
348
+ concurrent package, but not its submodule (here, concurrent.futures), which
349
+ is the module used by func. _find_imported_submodules will detect the usage
351
+ of concurrent.futures. Saving this module alongside func will ensure
352
+ that calling func once depickled does not fail due to concurrent.futures
353
+ not being imported.
353
+ """
354
+
355
+ subimports = []
356
+ # check if any known dependency is an imported package
357
+ for x in top_level_dependencies:
358
+ if (
359
+ isinstance(x, types.ModuleType)
360
+ and hasattr(x, "__package__")
361
+ and x.__package__
362
+ ):
363
+ # check if the package has any currently loaded sub-imports
364
+ prefix = x.__name__ + "."
365
+ # A concurrent thread could mutate sys.modules,
366
+ # make sure we iterate over a copy to avoid exceptions
367
+ for name in list(sys.modules):
368
+ # Older versions of pytest will add a "None" module to
369
+ # sys.modules.
370
+ if name is not None and name.startswith(prefix):
371
+ # check whether the function can address the sub-module
372
+ tokens = set(name[len(prefix) :].split("."))
373
+ if not tokens - set(code.co_names):
374
+ subimports.append(sys.modules[name])
375
+ return subimports
376
+
377
+
378
+ # relevant opcodes
379
+ STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
380
+ DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
381
+ LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
382
+ GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
383
+ HAVE_ARGUMENT = dis.HAVE_ARGUMENT
384
+ EXTENDED_ARG = dis.EXTENDED_ARG
385
+
386
+
387
+ _BUILTIN_TYPE_NAMES = {}
388
+ for k, v in types.__dict__.items():
389
+ if type(v) is type:
390
+ _BUILTIN_TYPE_NAMES[v] = k
391
+
392
+
393
+ def _builtin_type(name):
394
+ if name == "ClassType": # pragma: no cover
395
+ # Backward compat to load pickle files generated with cloudpickle
396
+ # < 1.3 even if loading pickle files from older versions is not
397
+ # officially supported.
398
+ return type
399
+ return getattr(types, name)
400
+
401
+
402
+ def _walk_global_ops(code):
403
+ """Yield referenced name for global-referencing instructions in code."""
404
+ for instr in dis.get_instructions(code):
405
+ op = instr.opcode
406
+ if op in GLOBAL_OPS:
407
+ yield instr.argval
408
+
409
+
410
+ def _extract_class_dict(cls):
411
+ """Retrieve a copy of the dict of a class without the inherited method."""
412
+ clsdict = dict(cls.__dict__) # copy dict proxy to a dict
413
+ if len(cls.__bases__) == 1:
414
+ inherited_dict = cls.__bases__[0].__dict__
415
+ else:
416
+ inherited_dict = {}
417
+ for base in reversed(cls.__bases__):
418
+ inherited_dict.update(base.__dict__)
419
+ to_remove = []
420
+ for name, value in clsdict.items():
421
+ try:
422
+ base_value = inherited_dict[name]
423
+ if value is base_value:
424
+ to_remove.append(name)
425
+ except KeyError:
426
+ pass
427
+ for name in to_remove:
428
+ clsdict.pop(name)
429
+ return clsdict
430
+
431
+
432
+ def is_tornado_coroutine(func):
433
+ """Return whether `func` is a Tornado coroutine function.
434
+
435
+ Running coroutines are not supported.
436
+ """
437
+ warnings.warn(
438
+ "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
439
+ "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
440
+ "directly instead.",
441
+ category=DeprecationWarning,
442
+ )
443
+ if "tornado.gen" not in sys.modules:
444
+ return False
445
+ gen = sys.modules["tornado.gen"]
446
+ if not hasattr(gen, "is_coroutine_function"):
447
+ # Tornado version is too old
448
+ return False
449
+ return gen.is_coroutine_function(func)
450
+
451
+
452
+ def subimport(name):
453
+ # We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is
454
+ # the name of a submodule, __import__ will return the top-level root module
455
+ # of this submodule. For instance, __import__('os.path') returns the `os`
456
+ # module.
457
+ __import__(name)
458
+ return sys.modules[name]
459
+
460
+
461
+ def dynamic_subimport(name, vars):
462
+ mod = types.ModuleType(name)
463
+ mod.__dict__.update(vars)
464
+ mod.__dict__["__builtins__"] = builtins.__dict__
465
+ return mod
466
+
467
+
468
+ def _get_cell_contents(cell):
469
+ try:
470
+ return cell.cell_contents
471
+ except ValueError:
472
+ # Handle empty cells explicitly with a sentinel value.
473
+ return _empty_cell_value
474
+
475
+
476
+ def instance(cls):
477
+ """Create a new instance of a class.
478
+
479
+ Parameters
480
+ ----------
481
+ cls : type
482
+ The class to create an instance of.
483
+
484
+ Returns
485
+ -------
486
+ instance : cls
487
+ A new instance of ``cls``.
488
+ """
489
+ return cls()
490
+
491
+
492
+ @instance
493
+ class _empty_cell_value:
494
+ """Sentinel for empty closures."""
495
+
496
+ @classmethod
497
+ def __reduce__(cls):
498
+ return cls.__name__
499
+
500
+
501
+ def _make_function(code, globals, name, argdefs, closure):
502
+ # Setting __builtins__ in globals is needed for nogil CPython.
503
+ globals["__builtins__"] = __builtins__
504
+ return types.FunctionType(code, globals, name, argdefs, closure)
505
+
506
+
507
+ def _make_empty_cell():
508
+ if False:
509
+ # trick the compiler into creating an empty cell in our lambda
510
+ cell = None
511
+ raise AssertionError("this route should not be executed")
512
+
513
+ return (lambda: cell).__closure__[0]
514
+
515
+
516
+ def _make_cell(value=_empty_cell_value):
517
+ cell = _make_empty_cell()
518
+ if value is not _empty_cell_value:
519
+ cell.cell_contents = value
520
+ return cell
521
+
522
+
523
+ def _make_skeleton_class(
524
+ type_constructor, name, bases, type_kwargs, class_tracker_id, extra
525
+ ):
526
+ """Build dynamic class with an empty __dict__ to be filled once memoized
527
+
528
+ If class_tracker_id is not None, try to lookup an existing class definition
529
+ matching that id. If none is found, track a newly reconstructed class
530
+ definition under that id so that other instances stemming from the same
531
+ class id will also reuse this class definition.
532
+
533
+ The "extra" variable is meant to be a dict (or None) that can be used for
534
+ forward compatibility shall the need arise.
535
+ """
536
+ skeleton_class = types.new_class(
537
+ name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
538
+ )
539
+ return _lookup_class_or_track(class_tracker_id, skeleton_class)
540
+
541
+
542
+ def _make_skeleton_enum(
543
+ bases, name, qualname, members, module, class_tracker_id, extra
544
+ ):
545
+ """Build dynamic enum with an empty __dict__ to be filled once memoized
546
+
547
+ The creation of the enum class is inspired by the code of
548
+ EnumMeta._create_.
549
+
550
+ If class_tracker_id is not None, try to lookup an existing enum definition
551
+ matching that id. If none is found, track a newly reconstructed enum
552
+ definition under that id so that other instances stemming from the same
553
+ class id will also reuse this enum definition.
554
+
555
+ The "extra" variable is meant to be a dict (or None) that can be used for
556
+ forward compatibility shall the need arise.
557
+ """
558
+ # enums always inherit from their base Enum class at the last position in
559
+ # the list of base classes:
560
+ enum_base = bases[-1]
561
+ metacls = enum_base.__class__
562
+ classdict = metacls.__prepare__(name, bases)
563
+
564
+ for member_name, member_value in members.items():
565
+ classdict[member_name] = member_value
566
+ enum_class = metacls.__new__(metacls, name, bases, classdict)
567
+ enum_class.__module__ = module
568
+ enum_class.__qualname__ = qualname
569
+
570
+ return _lookup_class_or_track(class_tracker_id, enum_class)
571
+
572
+
573
+ def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
574
+ tv = typing.TypeVar(
575
+ name,
576
+ *constraints,
577
+ bound=bound,
578
+ covariant=covariant,
579
+ contravariant=contravariant,
580
+ )
581
+ return _lookup_class_or_track(class_tracker_id, tv)
582
+
583
+
584
+ def _decompose_typevar(obj):
585
+ return (
586
+ obj.__name__,
587
+ obj.__bound__,
588
+ obj.__constraints__,
589
+ obj.__covariant__,
590
+ obj.__contravariant__,
591
+ _get_or_create_tracker_id(obj),
592
+ )
593
+
594
+
595
+ def _typevar_reduce(obj):
596
+ # TypeVar instances require the module information hence why we
597
+ # are not using the _should_pickle_by_reference directly
598
+ module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
599
+
600
+ if module_and_name is None:
601
+ return (_make_typevar, _decompose_typevar(obj))
602
+ elif _is_registered_pickle_by_value(module_and_name[0]):
603
+ return (_make_typevar, _decompose_typevar(obj))
604
+
605
+ return (getattr, module_and_name)
606
+
607
+
608
+ def _get_bases(typ):
609
+ if "__orig_bases__" in getattr(typ, "__dict__", {}):
610
+ # For generic types (see PEP 560)
611
+ # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
612
+ # correct. Subclasses of a fully-parameterized generic class does not
613
+ # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
614
+ # will return True because it's defined in the base class.
615
+ bases_attr = "__orig_bases__"
616
+ else:
617
+ # For regular class objects
618
+ bases_attr = "__bases__"
619
+ return getattr(typ, bases_attr)
620
+
621
+
622
+ def _make_dict_keys(obj, is_ordered=False):
623
+ if is_ordered:
624
+ return OrderedDict.fromkeys(obj).keys()
625
+ else:
626
+ return dict.fromkeys(obj).keys()
627
+
628
+
629
+ def _make_dict_values(obj, is_ordered=False):
630
+ if is_ordered:
631
+ return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
632
+ else:
633
+ return {i: _ for i, _ in enumerate(obj)}.values()
634
+
635
+
636
+ def _make_dict_items(obj, is_ordered=False):
637
+ if is_ordered:
638
+ return OrderedDict(obj).items()
639
+ else:
640
+ return obj.items()
641
+
642
+
643
+ # COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
644
+ # -------------------------------------------------
645
+
646
+
647
+ def _class_getnewargs(obj):
648
+ type_kwargs = {}
649
+ if "__module__" in obj.__dict__:
650
+ type_kwargs["__module__"] = obj.__module__
651
+
652
+ __dict__ = obj.__dict__.get("__dict__", None)
653
+ if isinstance(__dict__, property):
654
+ type_kwargs["__dict__"] = __dict__
655
+
656
+ return (
657
+ type(obj),
658
+ obj.__name__,
659
+ _get_bases(obj),
660
+ type_kwargs,
661
+ _get_or_create_tracker_id(obj),
662
+ None,
663
+ )
664
+
665
+
666
+ def _enum_getnewargs(obj):
667
+ members = {e.name: e.value for e in obj}
668
+ return (
669
+ obj.__bases__,
670
+ obj.__name__,
671
+ obj.__qualname__,
672
+ members,
673
+ obj.__module__,
674
+ _get_or_create_tracker_id(obj),
675
+ None,
676
+ )
677
+
678
+
679
+ # COLLECTION OF OBJECTS RECONSTRUCTORS
680
+ # ------------------------------------
681
+ def _file_reconstructor(retval):
682
+ return retval
683
+
684
+
685
+ # COLLECTION OF OBJECTS STATE GETTERS
686
+ # -----------------------------------
687
+
688
+
689
+ def _function_getstate(func):
690
+ # - Put func's dynamic attributes (stored in func.__dict__) in state. These
691
+ # attributes will be restored at unpickling time using
692
+ # f.__dict__.update(state)
693
+ # - Put func's members into slotstate. Such attributes will be restored at
694
+ # unpickling time by iterating over slotstate and calling setattr(func,
695
+ # slotname, slotvalue)
696
+ slotstate = {
697
+ "__name__": func.__name__,
698
+ "__qualname__": func.__qualname__,
699
+ "__annotations__": func.__annotations__,
700
+ "__kwdefaults__": func.__kwdefaults__,
701
+ "__defaults__": func.__defaults__,
702
+ "__module__": func.__module__,
703
+ "__doc__": func.__doc__,
704
+ "__closure__": func.__closure__,
705
+ }
706
+
707
+ f_globals_ref = _extract_code_globals(func.__code__)
708
+ f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}
709
+
710
+ if func.__closure__ is not None:
711
+ closure_values = list(map(_get_cell_contents, func.__closure__))
712
+ else:
713
+ closure_values = ()
714
+
715
+ # Extract currently-imported submodules used by func. Storing these modules
716
+ # in a smoke _cloudpickle_subimports attribute of the object's state will
717
+ # trigger the side effect of importing these modules at unpickling time
718
+ # (which is necessary for func to work correctly once depickled)
719
+ slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
720
+ func.__code__, itertools.chain(f_globals.values(), closure_values)
721
+ )
722
+ slotstate["__globals__"] = f_globals
723
+
724
+ state = func.__dict__
725
+ return state, slotstate
726
+
727
+
728
+ def _class_getstate(obj):
729
+ clsdict = _extract_class_dict(obj)
730
+ clsdict.pop("__weakref__", None)
731
+
732
+ if issubclass(type(obj), abc.ABCMeta):
733
+ # If obj is an instance of an ABCMeta subclass, don't pickle the
734
+ # cache/negative caches populated during isinstance/issubclass
735
+ # checks, but pickle the list of registered subclasses of obj.
736
+ clsdict.pop("_abc_cache", None)
737
+ clsdict.pop("_abc_negative_cache", None)
738
+ clsdict.pop("_abc_negative_cache_version", None)
739
+ registry = clsdict.pop("_abc_registry", None)
740
+ if registry is None:
741
+ # The abc caches and registered subclasses of a
742
+ # class are bundled into the single _abc_impl attribute
743
+ clsdict.pop("_abc_impl", None)
744
+ (registry, _, _, _) = abc._get_dump(obj)
745
+
746
+ clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
747
+ else:
748
+ # In the above if clause, registry is a set of weakrefs -- in
749
+ # this case, registry is a WeakSet
750
+ clsdict["_abc_impl"] = [type_ for type_ in registry]
751
+
752
+ if "__slots__" in clsdict:
753
+ # pickle string length optimization: member descriptors of obj are
754
+ # created automatically from obj's __slots__ attribute, no need to
755
+ # save them in obj's state
756
+ if isinstance(obj.__slots__, str):
757
+ clsdict.pop(obj.__slots__)
758
+ else:
759
+ for k in obj.__slots__:
760
+ clsdict.pop(k, None)
761
+
762
+ clsdict.pop("__dict__", None) # unpicklable property object
763
+
764
+ return (clsdict, {})
765
+
766
+
767
+ def _enum_getstate(obj):
768
+ clsdict, slotstate = _class_getstate(obj)
769
+
770
+ members = {e.name: e.value for e in obj}
771
+ # Cleanup the clsdict that will be passed to _make_skeleton_enum:
772
+ # Those attributes are already handled by the metaclass.
773
+ for attrname in [
774
+ "_generate_next_value_",
775
+ "_member_names_",
776
+ "_member_map_",
777
+ "_member_type_",
778
+ "_value2member_map_",
779
+ ]:
780
+ clsdict.pop(attrname, None)
781
+ for member in members:
782
+ clsdict.pop(member)
783
+ # Special handling of Enum subclasses
784
+ return clsdict, slotstate
785
+
786
+
787
+ # COLLECTIONS OF OBJECTS REDUCERS
788
+ # -------------------------------
789
+ # A reducer is a function taking a single argument (obj), and that returns a
790
+ # tuple with all the necessary data to re-construct obj. Apart from a few
791
+ # exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
792
+ # correctly pickle an object.
793
+ # While many built-in objects (Exceptions objects, instances of the "object"
794
+ # class, etc), are shipped with their own built-in reducer (invoked using
795
+ # obj.__reduce__), some do not. The following methods were created to "fill
796
+ # these holes".
797
+
798
+
799
+ def _code_reduce(obj):
800
+ """code object reducer."""
801
+ # If you are not sure about the order of arguments, take a look at help
802
+ # of the specific type from types, for example:
803
+ # >>> from types import CodeType
804
+ # >>> help(CodeType)
805
+ if hasattr(obj, "co_exceptiontable"):
806
+ # Python 3.11 and later: there are some new attributes
807
+ # related to the enhanced exceptions.
808
+ args = (
809
+ obj.co_argcount,
810
+ obj.co_posonlyargcount,
811
+ obj.co_kwonlyargcount,
812
+ obj.co_nlocals,
813
+ obj.co_stacksize,
814
+ obj.co_flags,
815
+ obj.co_code,
816
+ obj.co_consts,
817
+ obj.co_names,
818
+ obj.co_varnames,
819
+ obj.co_filename,
820
+ obj.co_name,
821
+ obj.co_qualname,
822
+ obj.co_firstlineno,
823
+ obj.co_linetable,
824
+ obj.co_exceptiontable,
825
+ obj.co_freevars,
826
+ obj.co_cellvars,
827
+ )
828
+ elif hasattr(obj, "co_linetable"):
829
+ # Python 3.10 and later: obj.co_lnotab is deprecated and constructor
830
+ # expects obj.co_linetable instead.
831
+ args = (
832
+ obj.co_argcount,
833
+ obj.co_posonlyargcount,
834
+ obj.co_kwonlyargcount,
835
+ obj.co_nlocals,
836
+ obj.co_stacksize,
837
+ obj.co_flags,
838
+ obj.co_code,
839
+ obj.co_consts,
840
+ obj.co_names,
841
+ obj.co_varnames,
842
+ obj.co_filename,
843
+ obj.co_name,
844
+ obj.co_firstlineno,
845
+ obj.co_linetable,
846
+ obj.co_freevars,
847
+ obj.co_cellvars,
848
+ )
849
+ elif hasattr(obj, "co_nmeta"): # pragma: no cover
850
+ # "nogil" Python: modified attributes from 3.9
851
+ args = (
852
+ obj.co_argcount,
853
+ obj.co_posonlyargcount,
854
+ obj.co_kwonlyargcount,
855
+ obj.co_nlocals,
856
+ obj.co_framesize,
857
+ obj.co_ndefaultargs,
858
+ obj.co_nmeta,
859
+ obj.co_flags,
860
+ obj.co_code,
861
+ obj.co_consts,
862
+ obj.co_varnames,
863
+ obj.co_filename,
864
+ obj.co_name,
865
+ obj.co_firstlineno,
866
+ obj.co_lnotab,
867
+ obj.co_exc_handlers,
868
+ obj.co_jump_table,
869
+ obj.co_freevars,
870
+ obj.co_cellvars,
871
+ obj.co_free2reg,
872
+ obj.co_cell2reg,
873
+ )
874
+ else:
875
+ # Backward compat for 3.8 and 3.9
876
+ args = (
877
+ obj.co_argcount,
878
+ obj.co_posonlyargcount,
879
+ obj.co_kwonlyargcount,
880
+ obj.co_nlocals,
881
+ obj.co_stacksize,
882
+ obj.co_flags,
883
+ obj.co_code,
884
+ obj.co_consts,
885
+ obj.co_names,
886
+ obj.co_varnames,
887
+ obj.co_filename,
888
+ obj.co_name,
889
+ obj.co_firstlineno,
890
+ obj.co_lnotab,
891
+ obj.co_freevars,
892
+ obj.co_cellvars,
893
+ )
894
+ return types.CodeType, args
895
+
896
+
897
+ def _cell_reduce(obj):
898
+ """Cell (containing values of a function's free variables) reducer."""
899
+ try:
900
+ obj.cell_contents
901
+ except ValueError: # cell is empty
902
+ return _make_empty_cell, ()
903
+ else:
904
+ return _make_cell, (obj.cell_contents,)
905
+
906
+
907
+ def _classmethod_reduce(obj):
908
+ orig_func = obj.__func__
909
+ return type(obj), (orig_func,)
910
+
911
+
912
+ def _file_reduce(obj):
913
+ """Save a file."""
914
+ import io
915
+
916
+ if not hasattr(obj, "name") or not hasattr(obj, "mode"):
917
+ raise pickle.PicklingError(
918
+ "Cannot pickle files that do not map to an actual file"
919
+ )
920
+ if obj is sys.stdout:
921
+ return getattr, (sys, "stdout")
922
+ if obj is sys.stderr:
923
+ return getattr, (sys, "stderr")
924
+ if obj is sys.stdin:
925
+ raise pickle.PicklingError("Cannot pickle standard input")
926
+ if obj.closed:
927
+ raise pickle.PicklingError("Cannot pickle closed files")
928
+ if hasattr(obj, "isatty") and obj.isatty():
929
+ raise pickle.PicklingError("Cannot pickle files that map to tty objects")
930
+ if "r" not in obj.mode and "+" not in obj.mode:
931
+ raise pickle.PicklingError(
932
+ "Cannot pickle files that are not opened for reading: %s" % obj.mode
933
+ )
934
+
935
+ name = obj.name
936
+
937
+ retval = io.StringIO()
938
+
939
+ try:
940
+ # Read the whole file
941
+ curloc = obj.tell()
942
+ obj.seek(0)
943
+ contents = obj.read()
944
+ obj.seek(curloc)
945
+ except OSError as e:
946
+ raise pickle.PicklingError(
947
+ "Cannot pickle file %s as it cannot be read" % name
948
+ ) from e
949
+ retval.write(contents)
950
+ retval.seek(curloc)
951
+
952
+ retval.name = name
953
+ return _file_reconstructor, (retval,)
954
+
955
+
956
+ def _getset_descriptor_reduce(obj):
957
+ return getattr, (obj.__objclass__, obj.__name__)
958
+
959
+
960
+ def _mappingproxy_reduce(obj):
961
+ return types.MappingProxyType, (dict(obj),)
962
+
963
+
964
+ def _memoryview_reduce(obj):
965
+ return bytes, (obj.tobytes(),)
966
+
967
+
968
+ def _module_reduce(obj):
969
+ if _should_pickle_by_reference(obj):
970
+ return subimport, (obj.__name__,)
971
+ else:
972
+ # Some external libraries can populate the "__builtins__" entry of a
973
+ # module's `__dict__` with unpicklable objects (see #316). For that
974
+ # reason, we do not attempt to pickle the "__builtins__" entry, and
975
+ # restore a default value for it at unpickling time.
976
+ state = obj.__dict__.copy()
977
+ state.pop("__builtins__", None)
978
+ return dynamic_subimport, (obj.__name__, state)
979
+
980
+
981
+ def _method_reduce(obj):
982
+ return (types.MethodType, (obj.__func__, obj.__self__))
983
+
984
+
985
+ def _logger_reduce(obj):
986
+ return logging.getLogger, (obj.name,)
987
+
988
+
989
+ def _root_logger_reduce(obj):
990
+ return logging.getLogger, ()
991
+
992
+
993
+ def _property_reduce(obj):
994
+ return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
995
+
996
+
997
+ def _weakset_reduce(obj):
998
+ return weakref.WeakSet, (list(obj),)
999
+
1000
+
1001
+ def _dynamic_class_reduce(obj):
1002
+ """Save a class that can't be referenced as a module attribute.
1003
+
1004
+ This method is used to serialize classes that are defined inside
1005
+ functions, or that otherwise can't be serialized as attribute lookups
1006
+ from importable modules.
1007
+ """
1008
+ if Enum is not None and issubclass(obj, Enum):
1009
+ return (
1010
+ _make_skeleton_enum,
1011
+ _enum_getnewargs(obj),
1012
+ _enum_getstate(obj),
1013
+ None,
1014
+ None,
1015
+ _class_setstate,
1016
+ )
1017
+ else:
1018
+ return (
1019
+ _make_skeleton_class,
1020
+ _class_getnewargs(obj),
1021
+ _class_getstate(obj),
1022
+ None,
1023
+ None,
1024
+ _class_setstate,
1025
+ )
1026
+
1027
+
1028
+ def _class_reduce(obj):
1029
+ """Select the reducer depending on the dynamic nature of the class obj."""
1030
+ if obj is type(None): # noqa
1031
+ return type, (None,)
1032
+ elif obj is type(Ellipsis):
1033
+ return type, (Ellipsis,)
1034
+ elif obj is type(NotImplemented):
1035
+ return type, (NotImplemented,)
1036
+ elif obj in _BUILTIN_TYPE_NAMES:
1037
+ return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
1038
+ elif not _should_pickle_by_reference(obj):
1039
+ return _dynamic_class_reduce(obj)
1040
+ return NotImplemented
1041
+
1042
+
1043
+ def _dict_keys_reduce(obj):
1044
+ # Safer not to ship the full dict as sending the rest might
1045
+ # be unintended and could potentially cause leaking of
1046
+ # sensitive information
1047
+ return _make_dict_keys, (list(obj),)
1048
+
1049
+
1050
+ def _dict_values_reduce(obj):
1051
+ # Safer not to ship the full dict as sending the rest might
1052
+ # be unintended and could potentially cause leaking of
1053
+ # sensitive information
1054
+ return _make_dict_values, (list(obj),)
1055
+
1056
+
1057
+ def _dict_items_reduce(obj):
1058
+ return _make_dict_items, (dict(obj),)
1059
+
1060
+
1061
+ def _odict_keys_reduce(obj):
1062
+ # Safer not to ship the full dict as sending the rest might
1063
+ # be unintended and could potentially cause leaking of
1064
+ # sensitive information
1065
+ return _make_dict_keys, (list(obj), True)
1066
+
1067
+
1068
+ def _odict_values_reduce(obj):
1069
+ # Safer not to ship the full dict as sending the rest might
1070
+ # be unintended and could potentially cause leaking of
1071
+ # sensitive information
1072
+ return _make_dict_values, (list(obj), True)
1073
+
1074
+
1075
+ def _odict_items_reduce(obj):
1076
+ return _make_dict_items, (dict(obj), True)
1077
+
1078
+
1079
+ def _dataclass_field_base_reduce(obj):
1080
+ return _get_dataclass_field_type_sentinel, (obj.name,)
1081
+
1082
+
1083
+ # COLLECTIONS OF OBJECTS STATE SETTERS
1084
+ # ------------------------------------
1085
+ # state setters are called at unpickling time, once the object is created and
1086
+ # it has to be updated to how it was at pickling time.
1087
+
1088
+
1089
+ def _function_setstate(obj, state):
1090
+ """Update the state of a dynamic function.
1091
+
1092
+ As __closure__ and __globals__ are readonly attributes of a function, we
1093
+ cannot rely on the native setstate routine of pickle.load_build, that calls
1094
+ setattr on items of the slotstate. Instead, we have to modify them in place.
1095
+ """
1096
+ state, slotstate = state
1097
+ obj.__dict__.update(state)
1098
+
1099
+ obj_globals = slotstate.pop("__globals__")
1100
+ obj_closure = slotstate.pop("__closure__")
1101
+ # _cloudpickle_submodules is a set of submodules that must be loaded for
1102
+ # the pickled function to work correctly at unpickling time. Now that these
1103
+ # submodules are unpickled (hence imported), they can be removed from the
1104
+ # object's state (the object state only served as a reference holder to
1105
+ # these submodules)
1106
+ slotstate.pop("_cloudpickle_submodules")
1107
+
1108
+ obj.__globals__.update(obj_globals)
1109
+ obj.__globals__["__builtins__"] = __builtins__
1110
+
1111
+ if obj_closure is not None:
1112
+ for i, cell in enumerate(obj_closure):
1113
+ try:
1114
+ value = cell.cell_contents
1115
+ except ValueError: # cell is empty
1116
+ continue
1117
+ obj.__closure__[i].cell_contents = value
1118
+
1119
+ for k, v in slotstate.items():
1120
+ setattr(obj, k, v)
1121
+
1122
+
1123
+ def _class_setstate(obj, state):
1124
+ state, slotstate = state
1125
+ registry = None
1126
+ for attrname, attr in state.items():
1127
+ if attrname == "_abc_impl":
1128
+ registry = attr
1129
+ else:
1130
+ setattr(obj, attrname, attr)
1131
+ if registry is not None:
1132
+ for subclass in registry:
1133
+ obj.register(subclass)
1134
+
1135
+ return obj
1136
+
1137
+
1138
+ # COLLECTION OF DATACLASS UTILITIES
1139
+ # ---------------------------------
1140
+ # There are some internal sentinel values whose identity must be preserved when
1141
+ # unpickling dataclass fields. Each sentinel value has a unique name that we can
1142
+ # use to retrieve its identity at unpickling time.
1143
+
1144
+
1145
+ _DATACLASSE_FIELD_TYPE_SENTINELS = {
1146
+ dataclasses._FIELD.name: dataclasses._FIELD,
1147
+ dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
1148
+ dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
1149
+ }
1150
+
1151
+
1152
+ def _get_dataclass_field_type_sentinel(name):
1153
+ return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
1154
+
1155
+
1156
+ class Pickler(pickle.Pickler):
1157
+ # set of reducers defined and used by cloudpickle (private)
1158
+ _dispatch_table = {}
1159
+ _dispatch_table[classmethod] = _classmethod_reduce
1160
+ _dispatch_table[io.TextIOWrapper] = _file_reduce
1161
+ _dispatch_table[logging.Logger] = _logger_reduce
1162
+ _dispatch_table[logging.RootLogger] = _root_logger_reduce
1163
+ _dispatch_table[memoryview] = _memoryview_reduce
1164
+ _dispatch_table[property] = _property_reduce
1165
+ _dispatch_table[staticmethod] = _classmethod_reduce
1166
+ _dispatch_table[CellType] = _cell_reduce
1167
+ _dispatch_table[types.CodeType] = _code_reduce
1168
+ _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
1169
+ _dispatch_table[types.ModuleType] = _module_reduce
1170
+ _dispatch_table[types.MethodType] = _method_reduce
1171
+ _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
1172
+ _dispatch_table[weakref.WeakSet] = _weakset_reduce
1173
+ _dispatch_table[typing.TypeVar] = _typevar_reduce
1174
+ _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
1175
+ _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
1176
+ _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
1177
+ _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
1178
+ _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
1179
+ _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
1180
+ _dispatch_table[abc.abstractmethod] = _classmethod_reduce
1181
+ _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
1182
+ _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
1183
+ _dispatch_table[abc.abstractproperty] = _property_reduce
1184
+ _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce
1185
+
1186
+ dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
1187
+
1188
+ # function reducers are defined as instance methods of cloudpickle.Pickler
1189
+ # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
1190
+ def _dynamic_function_reduce(self, func):
1191
+ """Reduce a function that is not pickleable via attribute lookup."""
1192
+ newargs = self._function_getnewargs(func)
1193
+ state = _function_getstate(func)
1194
+ return (_make_function, newargs, state, None, None, _function_setstate)
1195
+
1196
+ def _function_reduce(self, obj):
1197
+ """Reducer for function objects.
1198
+
1199
+ If obj is a top-level attribute of a file-backed module, this reducer
1200
+ returns NotImplemented, making the cloudpickle.Pickler fall back to
1201
+ traditional pickle.Pickler routines to save obj. Otherwise, it reduces
1202
+ obj using a custom cloudpickle reducer designed specifically to handle
1203
+ dynamic functions.
1204
+ """
1205
+ if _should_pickle_by_reference(obj):
1206
+ return NotImplemented
1207
+ else:
1208
+ return self._dynamic_function_reduce(obj)
1209
+
1210
+ def _function_getnewargs(self, func):
1211
+ code = func.__code__
1212
+
1213
+ # base_globals represents the future global namespace of func at
1214
+ # unpickling time. Looking it up and storing it in
1215
+ # cloudpickle.Pickler.globals_ref allow functions sharing the same
1216
+ # globals at pickling time to also share them once unpickled, at one
1217
+ # condition: since globals_ref is an attribute of a cloudpickle.Pickler
1218
+ # instance, and that a new cloudpickle.Pickler is created each time
1219
+ # cloudpickle.dump or cloudpickle.dumps is called, functions also need
1220
+ # to be saved within the same invocation of
1221
+ # cloudpickle.dump/cloudpickle.dumps (for example:
1222
+ # cloudpickle.dumps([f1, f2])). There is no such limitation when using
1223
+ # cloudpickle.Pickler.dump, as long as the multiple invocations are
1224
+ # bound to the same cloudpickle.Pickler instance.
1225
+ base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
1226
+
1227
+ if base_globals == {}:
1228
+ # Add module attributes used to resolve relative imports
1229
+ # instructions inside func.
1230
+ for k in ["__package__", "__name__", "__path__", "__file__"]:
1231
+ if k in func.__globals__:
1232
+ base_globals[k] = func.__globals__[k]
1233
+
1234
+ # Do not bind the free variables before the function is created to
1235
+ # avoid infinite recursion.
1236
+ if func.__closure__ is None:
1237
+ closure = None
1238
+ else:
1239
+ closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))
1240
+
1241
+ return code, base_globals, None, None, closure
1242
+
1243
+ def dump(self, obj):
1244
+ try:
1245
+ return super().dump(obj)
1246
+ except RuntimeError as e:
1247
+ if len(e.args) > 0 and "recursion" in e.args[0]:
1248
+ msg = "Could not pickle object as excessively deep recursion required."
1249
+ raise pickle.PicklingError(msg) from e
1250
+ else:
1251
+ raise
1252
+
1253
+ def __init__(self, file, protocol=None, buffer_callback=None):
1254
+ if protocol is None:
1255
+ protocol = DEFAULT_PROTOCOL
1256
+ super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
1257
+ # map functions __globals__ attribute ids, to ensure that functions
1258
+ # sharing the same global namespace at pickling time also share
1259
+ # their global namespace at unpickling time.
1260
+ self.globals_ref = {}
1261
+ self.proto = int(protocol)
1262
+
1263
+ if not PYPY:
1264
+ # pickle.Pickler is the C implementation of the CPython pickler and
1265
+ # therefore we rely on reduce_override method to customize the pickler
1266
+ # behavior.
1267
+
1268
+ # `cloudpickle.Pickler.dispatch` is only left for backward
1269
+ # compatibility - note that when using protocol 5,
1270
+ # `cloudpickle.Pickler.dispatch` is not an extension of
1271
+ # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
1272
+ # subclasses the C-implemented `pickle.Pickler`, which does not expose
1273
+ # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
1274
+ # used `cloudpickle.Pickler.dispatch` as a class-level attribute
1275
+ # storing all reducers implemented by cloudpickle, but the attribute
1276
+ # name was not a great choice given that it would collide with a
1277
+ # similarly named attribute in the pure-Python `pickle._Pickler`
1278
+ # implementation in the standard library.
1279
+ dispatch = dispatch_table
1280
+
1281
+ # Implementation of the reducer_override callback, in order to
1282
+ # efficiently serialize dynamic functions and classes by subclassing
1283
+ # the C-implemented `pickle.Pickler`.
1284
+ # TODO: decorrelate reducer_override (which is tied to CPython's
1285
+ # implementation - would it make sense to backport it to pypy?) and
1286
+ # pickle's protocol 5, which is implementation agnostic. Currently, the
1287
+ # availability of both notions coincide on CPython's pickle, but it may
1288
+ # not be the case anymore when pypy implements protocol 5.
1289
+
1290
+ def reducer_override(self, obj):
1291
+ """Type-agnostic reducing callback for function and classes.
1292
+
1293
+ For performance reasons, subclasses of the C `pickle.Pickler` class
1294
+ cannot register custom reducers for functions and classes in the
1295
+ dispatch_table attribute. Reducers for such types must instead
1296
+ be implemented via the special `reducer_override` method.
1297
+
1298
+ Note that this method will be called for any object except a few
1299
+ builtin-types (int, lists, dicts etc.), which differs from reducers
1300
+ in the Pickler's dispatch_table, each of them being invoked for
1301
+ objects of a specific type only.
1302
+
1303
+ This property comes in handy for classes: although most classes are
1304
+ instances of the ``type`` metaclass, some of them can be instances
1305
+ of other custom metaclasses (such as enum.EnumMeta for example). In
1306
+ particular, the metaclass will likely not be known in advance, and
1307
+ thus cannot be special-cased using an entry in the dispatch_table.
1308
+ reducer_override, among other things, allows us to register a
1309
+ reducer that will be called for any class, independently of its
1310
+ type.
1311
+
1312
+ Notes:
1313
+
1314
+ * reducer_override has the priority over dispatch_table-registered
1315
+ reducers.
1316
+ * reducer_override can be used to fix other limitations of
1317
+ cloudpickle for other types that suffered from type-specific
1318
+ reducers, such as Exceptions. See
1319
+ https://github.com/cloudpipe/cloudpickle/issues/248
1320
+ """
1321
+ t = type(obj)
1322
+ try:
1323
+ is_anyclass = issubclass(t, type)
1324
+ except TypeError: # t is not a class (old Boost; see SF #502085)
1325
+ is_anyclass = False
1326
+
1327
+ if is_anyclass:
1328
+ return _class_reduce(obj)
1329
+ elif isinstance(obj, types.FunctionType):
1330
+ return self._function_reduce(obj)
1331
+ else:
1332
+ # fallback to save_global, including the Pickler's
1333
+ # dispatch_table
1334
+ return NotImplemented
1335
+
1336
+ else:
1337
+ # When reducer_override is not available, hack the pure-Python
1338
+ # Pickler's types.FunctionType and type savers. Note: the type saver
1339
+ # must override Pickler.save_global, because pickle.py contains a
1340
+ # hard-coded call to save_global when pickling meta-classes.
1341
+ dispatch = pickle.Pickler.dispatch.copy()
1342
+
1343
+ def _save_reduce_pickle5(
1344
+ self,
1345
+ func,
1346
+ args,
1347
+ state=None,
1348
+ listitems=None,
1349
+ dictitems=None,
1350
+ state_setter=None,
1351
+ obj=None,
1352
+ ):
1353
+ save = self.save
1354
+ write = self.write
1355
+ self.save_reduce(
1356
+ func,
1357
+ args,
1358
+ state=None,
1359
+ listitems=listitems,
1360
+ dictitems=dictitems,
1361
+ obj=obj,
1362
+ )
1363
+ # backport of the Python 3.8 state_setter pickle operations
1364
+ save(state_setter)
1365
+ save(obj) # simple BINGET opcode as obj is already memoized.
1366
+ save(state)
1367
+ write(pickle.TUPLE2)
1368
+ # Trigger a state_setter(obj, state) function call.
1369
+ write(pickle.REDUCE)
1370
+ # The purpose of state_setter is to carry out an
1371
+ # in-place modification of obj. We do not care about what the
1372
+ # method might return, so its output is eventually removed from
1373
+ # the stack.
1374
+ write(pickle.POP)
1375
+
1376
+ def save_global(self, obj, name=None, pack=struct.pack):
1377
+ """Main dispatch method.
1378
+
1379
+ The name of this method is somewhat misleading: all types get
1380
+ dispatched here.
1381
+ """
1382
+ if obj is type(None): # noqa
1383
+ return self.save_reduce(type, (None,), obj=obj)
1384
+ elif obj is type(Ellipsis):
1385
+ return self.save_reduce(type, (Ellipsis,), obj=obj)
1386
+ elif obj is type(NotImplemented):
1387
+ return self.save_reduce(type, (NotImplemented,), obj=obj)
1388
+ elif obj in _BUILTIN_TYPE_NAMES:
1389
+ return self.save_reduce(
1390
+ _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
1391
+ )
1392
+
1393
+ if name is not None:
1394
+ super().save_global(obj, name=name)
1395
+ elif not _should_pickle_by_reference(obj, name=name):
1396
+ self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
1397
+ else:
1398
+ super().save_global(obj, name=name)
1399
+
1400
+ dispatch[type] = save_global
1401
+
1402
+ def save_function(self, obj, name=None):
1403
+ """Registered with the dispatch to handle all function types.
1404
+
1405
+ Determines what kind of function obj is (e.g. lambda, defined at
1406
+ interactive prompt, etc) and handles the pickling appropriately.
1407
+ """
1408
+ if _should_pickle_by_reference(obj, name=name):
1409
+ return super().save_global(obj, name=name)
1410
+ elif PYPY and isinstance(obj.__code__, builtin_code_type):
1411
+ return self.save_pypy_builtin_func(obj)
1412
+ else:
1413
+ return self._save_reduce_pickle5(
1414
+ *self._dynamic_function_reduce(obj), obj=obj
1415
+ )
1416
+
1417
+ def save_pypy_builtin_func(self, obj):
1418
+ """Save pypy equivalent of builtin functions.
1419
+
1420
+ PyPy does not have the concept of builtin-functions. Instead,
1421
+ builtin-functions are simple function instances, but with a
1422
+ builtin-code attribute.
1423
+ Most of the time, builtin functions should be pickled by attribute.
1424
+ But PyPy has flaky support for __qualname__, so some builtin
1425
+ functions such as float.__new__ will be classified as dynamic. For
1426
+ this reason only, we created this special routine. Because
1427
+ builtin-functions are not expected to have closure or globals,
1428
+ there is no additional hack (compared the one already implemented
1429
+ in pickle) to protect ourselves from reference cycles. A simple
1430
+ (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
1431
+ also that PyPy improved their support for __qualname__ in v3.6, so
1432
+ this routine should be removed when cloudpickle supports only PyPy
1433
+ 3.6 and later.
1434
+ """
1435
+ rv = (
1436
+ types.FunctionType,
1437
+ (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
1438
+ obj.__dict__,
1439
+ )
1440
+ self.save_reduce(*rv, obj=obj)
1441
+
1442
+ dispatch[types.FunctionType] = save_function
1443
+
1444
+
1445
+ # Shorthands similar to pickle.dump/pickle.dumps
1446
+
1447
+
1448
+ def dump(obj, file, protocol=None, buffer_callback=None):
1449
+ """Serialize obj as bytes streamed into file
1450
+
1451
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
1452
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
1453
+ speed between processes running the same Python version.
1454
+
1455
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
1456
+ compatibility with older versions of Python (although this is not always
1457
+ guaranteed to work because cloudpickle relies on some internal
1458
+ implementation details that can change from one Python version to the
1459
+ next).
1460
+ """
1461
+ Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj)
1462
+
1463
+
1464
+ def dumps(obj, protocol=None, buffer_callback=None):
1465
+ """Serialize obj as a string of bytes allocated in memory
1466
+
1467
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
1468
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
1469
+ speed between processes running the same Python version.
1470
+
1471
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
1472
+ compatibility with older versions of Python (although this is not always
1473
+ guaranteed to work because cloudpickle relies on some internal
1474
+ implementation details that can change from one Python version to the
1475
+ next).
1476
+ """
1477
+ with io.BytesIO() as file:
1478
+ cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
1479
+ cp.dump(obj)
1480
+ return file.getvalue()
1481
+
1482
+
1483
+ # Include pickles unloading functions in this namespace for convenience.
1484
+ load, loads = pickle.load, pickle.loads
1485
+
1486
+ # Backward compat alias.
1487
+ CloudPickler = Pickler
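Editorial illustration (not part of the vendored file): a brief usage sketch of the dump/dumps/load/loads entry points defined above, assuming joblib is importable so the vendored cloudpickle can be reached under joblib.externals.

import io
from joblib.externals import cloudpickle

# A lambda cannot be pickled by reference, so cloudpickle's dynamic-function
# reducer is used instead of the plain pickle machinery.
square = lambda x: x * x

payload = cloudpickle.dumps(square)          # protocol defaults to pickle.HIGHEST_PROTOCOL
assert cloudpickle.loads(payload)(3) == 9    # loads is pickle.loads, re-exported above

# The file-based variants mirror pickle.dump/pickle.load.
buffer = io.BytesIO()
cloudpickle.dump(square, buffer)
buffer.seek(0)
assert cloudpickle.load(buffer)(4) == 16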
llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compatibility module.
2
+
3
+ It can be necessary to load files generated by previous versions of cloudpickle
4
+ that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
5
+ namespace.
6
+
7
+ See: tests/test_backward_compat.py
8
+ """
9
+ from . import cloudpickle
10
+
11
+
12
+ def __getattr__(name):
13
+ return getattr(cloudpickle, name)
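Editorial note: the module-level __getattr__ above (PEP 562) forwards any attribute not defined in this shim to the main cloudpickle module, so pickles produced by older versions that reference the cloudpickle.cloudpickle_fast namespace still resolve. A small sketch of the effect, assuming joblib is importable:

from joblib.externals.cloudpickle import cloudpickle, cloudpickle_fast

assert cloudpickle_fast.dumps is cloudpickle.dumps
assert cloudpickle_fast.CloudPickler is cloudpickle.Pickler   # backward-compat alias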
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc ADDED
Binary file (749 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc ADDED
Binary file (3.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc ADDED
Binary file (33 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc ADDED
Binary file (7.51 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from multiprocessing import synchronize
3
+
4
+ from .context import get_context
5
+
6
+
7
+ def _make_name():
8
+ return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}"
9
+
10
+
11
+ # monkey patch the name creation for multiprocessing
12
+ synchronize.SemLock._make_name = staticmethod(_make_name)
13
+
14
+ __all__ = ["get_context"]
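Editorial note: the module above monkey-patches multiprocessing's semaphore name factory so every SemLock created under loky gets a "/loky-<pid>-<random>" name, and re-exports get_context. A minimal usage sketch (illustrative only):

from joblib.externals.loky.backend import get_context

ctx = get_context()            # defaults to the "loky" start method
print(ctx._name)               # "loky"
queue = ctx.SimpleQueue()      # loky's SimpleQueue, which accepts custom reducers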
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (547 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc ADDED
Binary file (1.82 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc ADDED
Binary file (504 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc ADDED
Binary file (9.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc ADDED
Binary file (5.05 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc ADDED
Binary file (4.83 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc ADDED
Binary file (4.96 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc ADDED
Binary file (7.83 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc ADDED
Binary file (5.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.34 kB). View file
 
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Basic context management with LokyContext
3
+ #
4
+ # author: Thomas Moreau and Olivier Grisel
5
+ #
6
+ # adapted from multiprocessing/context.py
7
+ # * Create a context ensuring loky uses only objects that are compatible
8
+ # * Add LokyContext to the list of context of multiprocessing so loky can be
9
+ # used with multiprocessing.set_start_method
10
+ # * Implement a CFS-aware and physical-core-aware cpu_count function.
11
+ #
12
+ import os
13
+ import sys
14
+ import math
15
+ import subprocess
16
+ import traceback
17
+ import warnings
18
+ import multiprocessing as mp
19
+ from multiprocessing import get_context as mp_get_context
20
+ from multiprocessing.context import BaseContext
21
+
22
+
23
+ from .process import LokyProcess, LokyInitMainProcess
24
+
25
+ # Apparently, on older Python versions, loky cannot use 61 workers on Windows
26
+ # but only 60: ¯\_(ツ)_/¯
27
+ if sys.version_info >= (3, 8):
28
+ from concurrent.futures.process import _MAX_WINDOWS_WORKERS
29
+
30
+ if sys.version_info < (3, 10):
31
+ _MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1
32
+ else:
33
+ # compat for versions before 3.8 which do not define this.
34
+ _MAX_WINDOWS_WORKERS = 60
35
+
36
+ START_METHODS = ["loky", "loky_init_main", "spawn"]
37
+ if sys.platform != "win32":
38
+ START_METHODS += ["fork", "forkserver"]
39
+
40
+ _DEFAULT_START_METHOD = None
41
+
42
+ # Cache for the number of physical cores to avoid repeating subprocess calls.
43
+ # It should not change during the lifetime of the program.
44
+ physical_cores_cache = None
45
+
46
+
47
+ def get_context(method=None):
48
+ # Try to overload the default context
49
+ method = method or _DEFAULT_START_METHOD or "loky"
50
+ if method == "fork":
51
+ # If 'fork' is explicitly requested, warn user about potential issues.
52
+ warnings.warn(
53
+ "`fork` start method should not be used with "
54
+ "`loky` as it does not respect POSIX. Try using "
55
+ "`spawn` or `loky` instead.",
56
+ UserWarning,
57
+ )
58
+ try:
59
+ return mp_get_context(method)
60
+ except ValueError:
61
+ raise ValueError(
62
+ f"Unknown context '{method}'. Value should be in "
63
+ f"{START_METHODS}."
64
+ )
65
+
66
+
67
+ def set_start_method(method, force=False):
68
+ global _DEFAULT_START_METHOD
69
+ if _DEFAULT_START_METHOD is not None and not force:
70
+ raise RuntimeError("context has already been set")
71
+ assert method is None or method in START_METHODS, (
72
+ f"'{method}' is not a valid start_method. It should be in "
73
+ f"{START_METHODS}"
74
+ )
75
+
76
+ _DEFAULT_START_METHOD = method
77
+
78
+
79
+ def get_start_method():
80
+ return _DEFAULT_START_METHOD
81
+
82
+
83
+ def cpu_count(only_physical_cores=False):
84
+ """Return the number of CPUs the current process can use.
85
+
86
+ The returned number of CPUs accounts for:
87
+ * the number of CPUs in the system, as given by
88
+ ``multiprocessing.cpu_count``;
89
+ * the CPU affinity settings of the current process
90
+ (available on some Unix systems);
91
+ * Cgroup CPU bandwidth limit (available on Linux only, typically
92
+ set by docker and similar container orchestration systems);
93
+ * the value of the LOKY_MAX_CPU_COUNT environment variable if defined;
94
+ and is given as the minimum of these constraints.
95
+
96
+ If ``only_physical_cores`` is True, return the number of physical cores
97
+ instead of the number of logical cores (hyperthreading / SMT). Note that
98
+ this option is not enforced if the number of usable cores is controlled in
99
+ any other way such as: process affinity, Cgroup restricted CPU bandwidth
100
+ or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
101
+ cores is not found, return the number of logical cores.
102
+
103
+ Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for
104
+ Python < 3.10), see:
105
+ https://bugs.python.org/issue26903.
106
+
107
+ It is also always larger or equal to 1.
108
+ """
109
+ # Note: os.cpu_count() is allowed to return None in its docstring
110
+ os_cpu_count = os.cpu_count() or 1
111
+ if sys.platform == "win32":
112
+ # On Windows, attempting to use more than 61 CPUs would result in an
113
+ # OS-level error. See https://bugs.python.org/issue26903. According to
114
+ # https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups
115
+ # it might be possible to go beyond with a lot of extra work but this
116
+ # does not look easy.
117
+ os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)
118
+
119
+ cpu_count_user = _cpu_count_user(os_cpu_count)
120
+ aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)
121
+
122
+ if not only_physical_cores:
123
+ return aggregate_cpu_count
124
+
125
+ if cpu_count_user < os_cpu_count:
126
+ # Respect user setting
127
+ return max(cpu_count_user, 1)
128
+
129
+ cpu_count_physical, exception = _count_physical_cores()
130
+ if cpu_count_physical != "not found":
131
+ return cpu_count_physical
132
+
133
+ # Fallback to default behavior
134
+ if exception is not None:
135
+ # warns only the first time
136
+ warnings.warn(
137
+ "Could not find the number of physical cores for the "
138
+ f"following reason:\n{exception}\n"
139
+ "Returning the number of logical cores instead. You can "
140
+ "silence this warning by setting LOKY_MAX_CPU_COUNT to "
141
+ "the number of cores you want to use."
142
+ )
143
+ traceback.print_tb(exception.__traceback__)
144
+
145
+ return aggregate_cpu_count
146
+
147
+
148
+ def _cpu_count_cgroup(os_cpu_count):
149
+ # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
150
+ cpu_max_fname = "/sys/fs/cgroup/cpu.max"
151
+ cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
152
+ cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
153
+ if os.path.exists(cpu_max_fname):
154
+ # cgroup v2
155
+ # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
156
+ with open(cpu_max_fname) as fh:
157
+ cpu_quota_us, cpu_period_us = fh.read().strip().split()
158
+ elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
159
+ # cgroup v1
160
+ # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
161
+ with open(cfs_quota_fname) as fh:
162
+ cpu_quota_us = fh.read().strip()
163
+ with open(cfs_period_fname) as fh:
164
+ cpu_period_us = fh.read().strip()
165
+ else:
166
+ # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
167
+ cpu_quota_us = "max"
168
+ cpu_period_us = 100_000 # unused, for consistency with default values
169
+
170
+ if cpu_quota_us == "max":
171
+ # No active Cgroup quota on a Cgroup-capable platform
172
+ return os_cpu_count
173
+ else:
174
+ cpu_quota_us = int(cpu_quota_us)
175
+ cpu_period_us = int(cpu_period_us)
176
+ if cpu_quota_us > 0 and cpu_period_us > 0:
177
+ return math.ceil(cpu_quota_us / cpu_period_us)
178
+ else: # pragma: no cover
179
+ # Setting a negative cpu_quota_us value is a valid way to disable
180
+ # cgroup CPU bandwidth limits
181
+ return os_cpu_count
182
+
183
+
184
+ def _cpu_count_affinity(os_cpu_count):
185
+ # Number of available CPUs given affinity settings
186
+ if hasattr(os, "sched_getaffinity"):
187
+ try:
188
+ return len(os.sched_getaffinity(0))
189
+ except NotImplementedError:
190
+ pass
191
+
192
+ # On PyPy and possibly other platforms, os.sched_getaffinity does not exist
193
+ # or raises NotImplementedError, let's try with psutil if installed.
194
+ try:
195
+ import psutil
196
+
197
+ p = psutil.Process()
198
+ if hasattr(p, "cpu_affinity"):
199
+ return len(p.cpu_affinity())
200
+
201
+ except ImportError: # pragma: no cover
202
+ if (
203
+ sys.platform == "linux"
204
+ and os.environ.get("LOKY_MAX_CPU_COUNT") is None
205
+ ):
206
+ # PyPy does not implement os.sched_getaffinity on Linux which
207
+ # can cause severe oversubscription problems. Better warn the
208
+ # user in this particularly pathological case which can wreak
209
+ # havoc, typically on CI workers.
210
+ warnings.warn(
211
+ "Failed to inspect CPU affinity constraints on this system. "
212
+ "Please install psutil or explictly set LOKY_MAX_CPU_COUNT."
213
+ )
214
+
215
+ # This can happen for platforms that do not implement any kind of CPU
216
+ # affinity such as macOS-based platforms.
217
+ return os_cpu_count
218
+
219
+
220
+ def _cpu_count_user(os_cpu_count):
221
+ """Number of user defined available CPUs"""
222
+ cpu_count_affinity = _cpu_count_affinity(os_cpu_count)
223
+
224
+ cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
225
+
226
+ # User defined soft-limit passed as a loky specific environment variable.
227
+ cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
228
+
229
+ return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
230
+
231
+
232
+ def _count_physical_cores():
233
+ """Return a tuple (number of physical cores, exception)
234
+
235
+ If the number of physical cores is found, exception is set to None.
236
+ If it has not been found, return ("not found", exception).
237
+
238
+ The number of physical cores is cached to avoid repeating subprocess calls.
239
+ """
240
+ exception = None
241
+
242
+ # First check if the value is cached
243
+ global physical_cores_cache
244
+ if physical_cores_cache is not None:
245
+ return physical_cores_cache, exception
246
+
247
+ # Not cached yet, find it
248
+ try:
249
+ if sys.platform == "linux":
250
+ cpu_info = subprocess.run(
251
+ "lscpu --parse=core".split(), capture_output=True, text=True
252
+ )
253
+ cpu_info = cpu_info.stdout.splitlines()
254
+ cpu_info = {line for line in cpu_info if not line.startswith("#")}
255
+ cpu_count_physical = len(cpu_info)
256
+ elif sys.platform == "win32":
257
+ cpu_info = subprocess.run(
258
+ "wmic CPU Get NumberOfCores /Format:csv".split(),
259
+ capture_output=True,
260
+ text=True,
261
+ )
262
+ cpu_info = cpu_info.stdout.splitlines()
263
+ cpu_info = [
264
+ l.split(",")[1]
265
+ for l in cpu_info
266
+ if (l and l != "Node,NumberOfCores")
267
+ ]
268
+ cpu_count_physical = sum(map(int, cpu_info))
269
+ elif sys.platform == "darwin":
270
+ cpu_info = subprocess.run(
271
+ "sysctl -n hw.physicalcpu".split(),
272
+ capture_output=True,
273
+ text=True,
274
+ )
275
+ cpu_info = cpu_info.stdout
276
+ cpu_count_physical = int(cpu_info)
277
+ else:
278
+ raise NotImplementedError(f"unsupported platform: {sys.platform}")
279
+
280
+ # if cpu_count_physical < 1, we did not find a valid value
281
+ if cpu_count_physical < 1:
282
+ raise ValueError(f"found {cpu_count_physical} physical cores < 1")
283
+
284
+ except Exception as e:
285
+ exception = e
286
+ cpu_count_physical = "not found"
287
+
288
+ # Put the result in cache
289
+ physical_cores_cache = cpu_count_physical
290
+
291
+ return cpu_count_physical, exception
292
+
293
+
294
+ class LokyContext(BaseContext):
295
+ """Context relying on the LokyProcess."""
296
+
297
+ _name = "loky"
298
+ Process = LokyProcess
299
+ cpu_count = staticmethod(cpu_count)
300
+
301
+ def Queue(self, maxsize=0, reducers=None):
302
+ """Returns a queue object"""
303
+ from .queues import Queue
304
+
305
+ return Queue(maxsize, reducers=reducers, ctx=self.get_context())
306
+
307
+ def SimpleQueue(self, reducers=None):
308
+ """Returns a queue object"""
309
+ from .queues import SimpleQueue
310
+
311
+ return SimpleQueue(reducers=reducers, ctx=self.get_context())
312
+
313
+ if sys.platform != "win32":
314
+ """For Unix platform, use our custom implementation of synchronize
315
+ ensuring that we use the loky.backend.resource_tracker to clean up
316
+ the semaphores in case of a worker crash.
317
+ """
318
+
319
+ def Semaphore(self, value=1):
320
+ """Returns a semaphore object"""
321
+ from .synchronize import Semaphore
322
+
323
+ return Semaphore(value=value)
324
+
325
+ def BoundedSemaphore(self, value):
326
+ """Returns a bounded semaphore object"""
327
+ from .synchronize import BoundedSemaphore
328
+
329
+ return BoundedSemaphore(value)
330
+
331
+ def Lock(self):
332
+ """Returns a lock object"""
333
+ from .synchronize import Lock
334
+
335
+ return Lock()
336
+
337
+ def RLock(self):
338
+ """Returns a recurrent lock object"""
339
+ from .synchronize import RLock
340
+
341
+ return RLock()
342
+
343
+ def Condition(self, lock=None):
344
+ """Returns a condition object"""
345
+ from .synchronize import Condition
346
+
347
+ return Condition(lock)
348
+
349
+ def Event(self):
350
+ """Returns an event object"""
351
+ from .synchronize import Event
352
+
353
+ return Event()
354
+
355
+
356
+ class LokyInitMainContext(LokyContext):
357
+ """Extra context with LokyProcess, which does load the main module
358
+
359
+ This context is used for compatibility in the case ``cloudpickle`` is not
360
+ present on the running system. This permits loading functions defined in
361
+ the ``main`` module, using proper safeguards. The declaration of the
362
+ ``executor`` should be protected by ``if __name__ == "__main__":`` and the
363
+ functions and variables used from main should be outside this block.
364
+
365
+ This mimics the default behavior of multiprocessing under Windows and the
366
+ behavior of the ``spawn`` start method on a posix system.
367
+ For more details, see the end of the following section of python doc
368
+ https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
369
+ """
370
+
371
+ _name = "loky_init_main"
372
+ Process = LokyInitMainProcess
373
+
374
+
375
+ # Register loky context so it works with multiprocessing.get_context
376
+ ctx_loky = LokyContext()
377
+ mp.context._concrete_contexts["loky"] = ctx_loky
378
+ mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext()
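Editorial illustration of the helpers defined above (printed values depend on the machine; LOKY_MAX_CPU_COUNT is re-read on every call, so setting it before calling cpu_count() is enough):

import os
from joblib.externals.loky.backend.context import cpu_count

print(cpu_count())                           # logical CPUs usable by this process, always >= 1
print(cpu_count(only_physical_cores=True))   # physical cores, falling back to logical cores

os.environ["LOKY_MAX_CPU_COUNT"] = "2"       # user-defined soft limit
assert 1 <= cpu_count() <= 2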
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import msvcrt
4
+ import _winapi
5
+ from pickle import load
6
+ from multiprocessing import process, util
7
+ from multiprocessing.context import set_spawning_popen
8
+ from multiprocessing.popen_spawn_win32 import Popen as _Popen
9
+
10
+ from . import reduction, spawn
11
+
12
+
13
+ __all__ = ["Popen"]
14
+
15
+ #
16
+ #
17
+ #
18
+
19
+
20
+ def _path_eq(p1, p2):
21
+ return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
22
+
23
+
24
+ WINENV = hasattr(sys, "_base_executable") and not _path_eq(
25
+ sys.executable, sys._base_executable
26
+ )
27
+
28
+
29
+ def _close_handles(*handles):
30
+ for handle in handles:
31
+ _winapi.CloseHandle(handle)
32
+
33
+
34
+ #
35
+ # We define a Popen class similar to the one from subprocess, but
36
+ # whose constructor takes a process object as its argument.
37
+ #
38
+
39
+
40
+ class Popen(_Popen):
41
+ """
42
+ Start a subprocess to run the code of a process object.
43
+
44
+ We differ from the cpython implementation in the way we handle environment
45
+ variables, in order to be able to modify them in the child processes before
46
+ importing any library, so as to control the number of threads in C-level
47
+ threadpools.
48
+
49
+ We also use the loky preparation data, in particular to handle main_module
50
+ inits and the loky resource tracker.
51
+ """
52
+
53
+ method = "loky"
54
+
55
+ def __init__(self, process_obj):
56
+ prep_data = spawn.get_preparation_data(
57
+ process_obj._name, getattr(process_obj, "init_main_module", True)
58
+ )
59
+
60
+ # read end of pipe will be duplicated by the child process
61
+ # -- see spawn_main() in spawn.py.
62
+ #
63
+ # bpo-33929: Previously, the read end of pipe was "stolen" by the child
64
+ # process, but it leaked a handle if the child process had been
65
+ # terminated before it could steal the handle from the parent process.
66
+ rhandle, whandle = _winapi.CreatePipe(None, 0)
67
+ wfd = msvcrt.open_osfhandle(whandle, 0)
68
+ cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
69
+
70
+ python_exe = spawn.get_executable()
71
+
72
+ # copy the environment variables to set in the child process
73
+ child_env = {**os.environ, **process_obj.env}
74
+
75
+ # bpo-35797: When running in a venv, we bypass the redirect
76
+ # executor and launch our base Python.
77
+ if WINENV and _path_eq(python_exe, sys.executable):
78
+ cmd[0] = python_exe = sys._base_executable
79
+ child_env["__PYVENV_LAUNCHER__"] = sys.executable
80
+
81
+ cmd = " ".join(f'"{x}"' for x in cmd)
82
+
83
+ with open(wfd, "wb") as to_child:
84
+ # start process
85
+ try:
86
+ hp, ht, pid, _ = _winapi.CreateProcess(
87
+ python_exe,
88
+ cmd,
89
+ None,
90
+ None,
91
+ False,
92
+ 0,
93
+ child_env,
94
+ None,
95
+ None,
96
+ )
97
+ _winapi.CloseHandle(ht)
98
+ except BaseException:
99
+ _winapi.CloseHandle(rhandle)
100
+ raise
101
+
102
+ # set attributes of self
103
+ self.pid = pid
104
+ self.returncode = None
105
+ self._handle = hp
106
+ self.sentinel = int(hp)
107
+ self.finalizer = util.Finalize(
108
+ self, _close_handles, (self.sentinel, int(rhandle))
109
+ )
110
+
111
+ # send information to child
112
+ set_spawning_popen(self)
113
+ try:
114
+ reduction.dump(prep_data, to_child)
115
+ reduction.dump(process_obj, to_child)
116
+ finally:
117
+ set_spawning_popen(None)
118
+
119
+
120
+ def get_command_line(pipe_handle, parent_pid, **kwds):
121
+ """Returns prefix of command line used for spawning a child process."""
122
+ if getattr(sys, "frozen", False):
123
+ return [sys.executable, "--multiprocessing-fork", pipe_handle]
124
+ else:
125
+ prog = (
126
+ "from joblib.externals.loky.backend.popen_loky_win32 import main; "
127
+ f"main(pipe_handle={pipe_handle}, parent_pid={parent_pid})"
128
+ )
129
+ opts = util._args_from_interpreter_flags()
130
+ return [
131
+ spawn.get_executable(),
132
+ *opts,
133
+ "-c",
134
+ prog,
135
+ "--multiprocessing-fork",
136
+ ]
137
+
138
+
139
+ def is_forking(argv):
140
+ """Return whether commandline indicates we are forking."""
141
+ if len(argv) >= 2 and argv[1] == "--multiprocessing-fork":
142
+ return True
143
+ else:
144
+ return False
145
+
146
+
147
+ def main(pipe_handle, parent_pid=None):
148
+ """Run code specified by data received over pipe."""
149
+ assert is_forking(sys.argv), "Not forking"
150
+
151
+ if parent_pid is not None:
152
+ source_process = _winapi.OpenProcess(
153
+ _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid
154
+ )
155
+ else:
156
+ source_process = None
157
+ new_handle = reduction.duplicate(
158
+ pipe_handle, source_process=source_process
159
+ )
160
+ fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
161
+ parent_sentinel = source_process
162
+
163
+ with os.fdopen(fd, "rb", closefd=True) as from_parent:
164
+ process.current_process()._inheriting = True
165
+ try:
166
+ preparation_data = load(from_parent)
167
+ spawn.prepare(preparation_data, parent_sentinel)
168
+ self = load(from_parent)
169
+ finally:
170
+ del process.current_process()._inheriting
171
+
172
+ exitcode = self._bootstrap(parent_sentinel)
173
+ sys.exit(exitcode)
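Editorial note: in the spawned child, main() above asserts is_forking(sys.argv); the flag appended by get_command_line ends up at sys.argv[1] because the child is started with `python -c "<prog>" --multiprocessing-fork`. A small, platform-independent sketch of that contract (the module itself only imports on Windows because of msvcrt/_winapi):

def is_forking(argv):
    # same check as the module-level helper above
    return len(argv) >= 2 and argv[1] == "--multiprocessing-fork"

# sys.argv of a child launched with `python -c "<prog>" --multiprocessing-fork`
assert is_forking(["-c", "--multiprocessing-fork"])
assert not is_forking(["my_script.py"])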
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Queue and SimpleQueue implementation for loky
3
+ #
4
+ # authors: Thomas Moreau, Olivier Grisel
5
+ #
6
+ # based on multiprocessing/queues.py (16/02/2017)
7
+ # * Add some custom reducers for the Queues/SimpleQueue to tweak the
8
+ # pickling process. (overload Queue._feed/SimpleQueue.put)
9
+ #
10
+ import os
11
+ import sys
12
+ import errno
13
+ import weakref
14
+ import threading
15
+ from multiprocessing import util
16
+ from multiprocessing.queues import (
17
+ Full,
18
+ Queue as mp_Queue,
19
+ SimpleQueue as mp_SimpleQueue,
20
+ _sentinel,
21
+ )
22
+ from multiprocessing.context import assert_spawning
23
+
24
+ from .reduction import dumps
25
+
26
+
27
+ __all__ = ["Queue", "SimpleQueue", "Full"]
28
+
29
+
30
+ class Queue(mp_Queue):
31
+ def __init__(self, maxsize=0, reducers=None, ctx=None):
32
+ super().__init__(maxsize=maxsize, ctx=ctx)
33
+ self._reducers = reducers
34
+
35
+ # Use custom queue set/get state to be able to reduce the custom reducers
36
+ def __getstate__(self):
37
+ assert_spawning(self)
38
+ return (
39
+ self._ignore_epipe,
40
+ self._maxsize,
41
+ self._reader,
42
+ self._writer,
43
+ self._reducers,
44
+ self._rlock,
45
+ self._wlock,
46
+ self._sem,
47
+ self._opid,
48
+ )
49
+
50
+ def __setstate__(self, state):
51
+ (
52
+ self._ignore_epipe,
53
+ self._maxsize,
54
+ self._reader,
55
+ self._writer,
56
+ self._reducers,
57
+ self._rlock,
58
+ self._wlock,
59
+ self._sem,
60
+ self._opid,
61
+ ) = state
62
+ if sys.version_info >= (3, 9):
63
+ self._reset()
64
+ else:
65
+ self._after_fork()
66
+
67
+ # Overload _start_thread to correctly call our custom _feed
68
+ def _start_thread(self):
69
+ util.debug("Queue._start_thread()")
70
+
71
+ # Start thread which transfers data from buffer to pipe
72
+ self._buffer.clear()
73
+ self._thread = threading.Thread(
74
+ target=Queue._feed,
75
+ args=(
76
+ self._buffer,
77
+ self._notempty,
78
+ self._send_bytes,
79
+ self._wlock,
80
+ self._writer.close,
81
+ self._reducers,
82
+ self._ignore_epipe,
83
+ self._on_queue_feeder_error,
84
+ self._sem,
85
+ ),
86
+ name="QueueFeederThread",
87
+ )
88
+ self._thread.daemon = True
89
+
90
+ util.debug("doing self._thread.start()")
91
+ self._thread.start()
92
+ util.debug("... done self._thread.start()")
93
+
94
+ # On process exit we will wait for data to be flushed to pipe.
95
+ #
96
+ # However, if this process created the queue then all
97
+ # processes which use the queue will be descendants of this
98
+ # process. Therefore waiting for the queue to be flushed
99
+ # is pointless once all the child processes have been joined.
100
+ created_by_this_process = self._opid == os.getpid()
101
+ if not self._joincancelled and not created_by_this_process:
102
+ self._jointhread = util.Finalize(
103
+ self._thread,
104
+ Queue._finalize_join,
105
+ [weakref.ref(self._thread)],
106
+ exitpriority=-5,
107
+ )
108
+
109
+ # Send sentinel to the thread queue object when garbage collected
110
+ self._close = util.Finalize(
111
+ self,
112
+ Queue._finalize_close,
113
+ [self._buffer, self._notempty],
114
+ exitpriority=10,
115
+ )
116
+
117
+ # Overload the _feed methods to use our custom pickling strategy.
118
+ @staticmethod
119
+ def _feed(
120
+ buffer,
121
+ notempty,
122
+ send_bytes,
123
+ writelock,
124
+ close,
125
+ reducers,
126
+ ignore_epipe,
127
+ onerror,
128
+ queue_sem,
129
+ ):
130
+ util.debug("starting thread to feed data to pipe")
131
+ nacquire = notempty.acquire
132
+ nrelease = notempty.release
133
+ nwait = notempty.wait
134
+ bpopleft = buffer.popleft
135
+ sentinel = _sentinel
136
+ if sys.platform != "win32":
137
+ wacquire = writelock.acquire
138
+ wrelease = writelock.release
139
+ else:
140
+ wacquire = None
141
+
142
+ while True:
143
+ try:
144
+ nacquire()
145
+ try:
146
+ if not buffer:
147
+ nwait()
148
+ finally:
149
+ nrelease()
150
+ try:
151
+ while True:
152
+ obj = bpopleft()
153
+ if obj is sentinel:
154
+ util.debug("feeder thread got sentinel -- exiting")
155
+ close()
156
+ return
157
+
158
+ # serialize the data before acquiring the lock
159
+ obj_ = dumps(obj, reducers=reducers)
160
+ if wacquire is None:
161
+ send_bytes(obj_)
162
+ else:
163
+ wacquire()
164
+ try:
165
+ send_bytes(obj_)
166
+ finally:
167
+ wrelease()
168
+ # Remove references early to avoid leaking memory
169
+ del obj, obj_
170
+ except IndexError:
171
+ pass
172
+ except BaseException as e:
173
+ if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
174
+ return
175
+ # Since this runs in a daemon thread, the resources it uses
176
+ # may become unusable while the process is cleaning up.
177
+ # We ignore errors which happen after the process has
178
+ # started to clean up.
179
+ if util.is_exiting():
180
+ util.info(f"error in queue thread: {e}")
181
+ return
182
+ else:
183
+ queue_sem.release()
184
+ onerror(e, obj)
185
+
186
+ def _on_queue_feeder_error(self, e, obj):
187
+ """
188
+ Private API hook called when feeding data in the background thread
189
+ raises an exception. For overriding by concurrent.futures.
190
+ """
191
+ import traceback
192
+
193
+ traceback.print_exc()
194
+
195
+
196
+ class SimpleQueue(mp_SimpleQueue):
197
+ def __init__(self, reducers=None, ctx=None):
198
+ super().__init__(ctx=ctx)
199
+
200
+ # Add the possibility to use custom reducers
201
+ self._reducers = reducers
202
+
203
+ def close(self):
204
+ self._reader.close()
205
+ self._writer.close()
206
+
207
+ # Use custom queue set/get state to be able to reduce the custom reducers
208
+ def __getstate__(self):
209
+ assert_spawning(self)
210
+ return (
211
+ self._reader,
212
+ self._writer,
213
+ self._reducers,
214
+ self._rlock,
215
+ self._wlock,
216
+ )
217
+
218
+ def __setstate__(self, state):
219
+ (
220
+ self._reader,
221
+ self._writer,
222
+ self._reducers,
223
+ self._rlock,
224
+ self._wlock,
225
+ ) = state
226
+
227
+ # Overload put to use our customizable reducer
228
+ def put(self, obj):
229
+ # serialize the data before acquiring the lock
230
+ obj = dumps(obj, reducers=self._reducers)
231
+ if self._wlock is None:
232
+ # writes to a message oriented win32 pipe are atomic
233
+ self._writer.send_bytes(obj)
234
+ else:
235
+ with self._wlock:
236
+ self._writer.send_bytes(obj)
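Editorial sketch of the reducers hook these queues expose: reducers is assumed to be a mapping from type to reduce function, applied by loky's pickler when an item is serialized in put()/_feed(). The Handle class and its reducer below are hypothetical:

from joblib.externals.loky.backend import get_context

class Handle:
    """Hypothetical object that needs a custom reduction."""
    def __init__(self, value):
        self.value = value

def reduce_handle(h):
    # standard reduce contract: (reconstructor, args)
    return Handle, (h.value,)

ctx = get_context("loky")
q = ctx.SimpleQueue(reducers={Handle: reduce_handle})
q.put(Handle(42))   # serialized with reduce_handle before being written to the pipe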
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Customizable Pickler with some basic reducers
3
+ #
4
+ # author: Thomas Moreau
5
+ #
6
+ # adapted from multiprocessing/reduction.py (17/02/2017)
7
+ # * Replace the ForkingPickler with a similar _LokyPickler,
8
+ # * Add CustomizableLokyPickler to allow customizing the pickling process
9
+ # on the fly.
10
+ #
11
+ import copyreg
12
+ import io
13
+ import functools
14
+ import types
15
+ import sys
16
+ import os
17
+
18
+ from multiprocessing import util
19
+ from pickle import loads, HIGHEST_PROTOCOL
20
+
21
+ ###############################################################################
22
+ # Enable custom pickling in Loky.
23
+
24
+ _dispatch_table = {}
25
+
26
+
27
+ def register(type_, reduce_function):
28
+ _dispatch_table[type_] = reduce_function
29
+
30
+
31
+ ###############################################################################
32
+ # Registers extra pickling routines to improve pickling support for loky
33
+
34
+
35
+ # make methods picklable
36
+ def _reduce_method(m):
37
+ if m.__self__ is None:
38
+ return getattr, (m.__class__, m.__func__.__name__)
39
+ else:
40
+ return getattr, (m.__self__, m.__func__.__name__)
41
+
42
+
43
+ class _C:
44
+ def f(self):
45
+ pass
46
+
47
+ @classmethod
48
+ def h(cls):
49
+ pass
50
+
51
+
52
+ register(type(_C().f), _reduce_method)
53
+ register(type(_C.h), _reduce_method)
54
+
55
+
56
+ if not hasattr(sys, "pypy_version_info"):
57
+ # PyPy uses functions instead of method_descriptors and wrapper_descriptors
58
+ def _reduce_method_descriptor(m):
59
+ return getattr, (m.__objclass__, m.__name__)
60
+
61
+ register(type(list.append), _reduce_method_descriptor)
62
+ register(type(int.__add__), _reduce_method_descriptor)
63
+
64
+
65
+ # Make partial func pickable
66
+ def _reduce_partial(p):
67
+ return _rebuild_partial, (p.func, p.args, p.keywords or {})
68
+
69
+
70
+ def _rebuild_partial(func, args, keywords):
71
+ return functools.partial(func, *args, **keywords)
72
+
73
+
74
+ register(functools.partial, _reduce_partial)
75
+
76
+ if sys.platform != "win32":
77
+ from ._posix_reduction import _mk_inheritable # noqa: F401
78
+ else:
79
+ from . import _win_reduction # noqa: F401
80
+
81
+ # global variable to change the pickler behavior
82
+ try:
83
+ from joblib.externals import cloudpickle # noqa: F401
84
+
85
+ DEFAULT_ENV = "cloudpickle"
86
+ except ImportError:
87
+ # If cloudpickle is not present, fallback to pickle
88
+ DEFAULT_ENV = "pickle"
89
+
90
+ ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
91
+ _LokyPickler = None
92
+ _loky_pickler_name = None
93
+
94
+
95
+ def set_loky_pickler(loky_pickler=None):
96
+ global _LokyPickler, _loky_pickler_name
97
+
98
+ if loky_pickler is None:
99
+ loky_pickler = ENV_LOKY_PICKLER
100
+
101
+ loky_pickler_cls = None
102
+
103
+ # The default loky_pickler is cloudpickle
104
+ if loky_pickler in ["", None]:
105
+ loky_pickler = "cloudpickle"
106
+
107
+ if loky_pickler == _loky_pickler_name:
108
+ return
109
+
110
+ if loky_pickler == "cloudpickle":
111
+ from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
112
+ else:
113
+ try:
114
+ from importlib import import_module
115
+
116
+ module_pickle = import_module(loky_pickler)
117
+ loky_pickler_cls = module_pickle.Pickler
118
+ except (ImportError, AttributeError) as e:
119
+ extra_info = (
120
+ "\nThis error occurred while setting loky_pickler to"
121
+ f" '{loky_pickler}', as required by the env variable "
122
+ "LOKY_PICKLER or the function set_loky_pickler."
123
+ )
124
+ e.args = (e.args[0] + extra_info,) + e.args[1:]
125
+ e.msg = e.args[0]
126
+ raise e
127
+
128
+ util.debug(
129
+ f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for "
130
+ "serialization."
131
+ )
132
+
133
+ class CustomizablePickler(loky_pickler_cls):
134
+ _loky_pickler_cls = loky_pickler_cls
135
+
136
+ def _set_dispatch_table(self, dispatch_table):
137
+ for ancestor_class in self._loky_pickler_cls.mro():
138
+ dt_attribute = getattr(ancestor_class, "dispatch_table", None)
139
+ if isinstance(dt_attribute, types.MemberDescriptorType):
140
+ # Ancestor class (typically _pickle.Pickler) has a
141
+ # member_descriptor for its "dispatch_table" attribute. Use
142
+ # it to set the dispatch_table as a member instead of a
143
+ # dynamic attribute in the __dict__ of the instance,
144
+ # otherwise it will not be taken into account by the C
145
+ # implementation of the dump method if a subclass defines a
146
+ # class-level dispatch_table attribute as was done in
147
+ # cloudpickle 1.6.0:
148
+ # https://github.com/joblib/loky/pull/260
149
+ dt_attribute.__set__(self, dispatch_table)
150
+ break
151
+
152
+ # On top of member descriptor set, also use setattr such that code
153
+ # that directly access self.dispatch_table gets a consistent view
154
+ # of the same table.
155
+ self.dispatch_table = dispatch_table
156
+
157
+ def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
158
+ loky_pickler_cls.__init__(self, writer, protocol=protocol)
159
+ if reducers is None:
160
+ reducers = {}
161
+
162
+ if hasattr(self, "dispatch_table"):
163
+ # Force a copy that we will update without mutating the
164
+ # any class level defined dispatch_table.
165
+ loky_dt = dict(self.dispatch_table)
166
+ else:
167
+ # Use standard reducers as bases
168
+ loky_dt = copyreg.dispatch_table.copy()
169
+
170
+ # Register loky specific reducers
171
+ loky_dt.update(_dispatch_table)
172
+
173
+ # Set the new dispatch table, taking care of the fact that we
174
+ # need to use the member_descriptor when we inherit from a
175
+ # subclass of the C implementation of the Pickler base class
176
+ # with an class level dispatch_table attribute.
177
+ self._set_dispatch_table(loky_dt)
178
+
179
+ # Register the reducers
180
+ for type, reduce_func in reducers.items():
181
+ self.register(type, reduce_func)
182
+
183
+ def register(self, type, reduce_func):
184
+ """Attach a reducer function to a given type in the dispatch table."""
185
+ self.dispatch_table[type] = reduce_func
186
+
187
+ _LokyPickler = CustomizablePickler
188
+ _loky_pickler_name = loky_pickler
189
+
190
+
191
+ def get_loky_pickler_name():
192
+ global _loky_pickler_name
193
+ return _loky_pickler_name
194
+
195
+
196
+ def get_loky_pickler():
197
+ global _LokyPickler
198
+ return _LokyPickler
199
+
200
+
201
+ # Set it to its default value
202
+ set_loky_pickler()
203
+
204
+
205
+ def dump(obj, file, reducers=None, protocol=None):
206
+ """Replacement for pickle.dump() using _LokyPickler."""
207
+ global _LokyPickler
208
+ _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
209
+
210
+
211
+ def dumps(obj, reducers=None, protocol=None):
212
+ global _LokyPickler
213
+
214
+ buf = io.BytesIO()
215
+ dump(obj, buf, reducers=reducers, protocol=protocol)
216
+ return buf.getbuffer()
217
+
218
+
219
+ __all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
220
+
221
+ if sys.platform == "win32":
222
+ from multiprocessing.reduction import duplicate
223
+
224
+ __all__ += ["duplicate"]
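Editor's note: register(), dump()/dumps() and loads above make up the module's public serialization API (see __all__). The following sketch is not part of the diff; it illustrates how a custom reducer could be registered and exercised with these helpers. The import path mirrors the vendored file added above and is an assumption about how the module would be imported in practice; the Point class and _reduce_point reducer are purely illustrative.

from joblib.externals.loky.backend.reduction import register, dumps, loads  # path assumed from the file above

class Point:
    """Tiny example class used only for this illustration."""
    def __init__(self, x, y):
        self.x, self.y = x, y

def _reduce_point(p):
    # Standard pickle reduce tuple: (callable, args) used to rebuild the object.
    return Point, (p.x, p.y)

register(Point, _reduce_point)   # added to _dispatch_table, merged into each new _LokyPickler
data = dumps(Point(1, 2))        # serialized through the customizable pickler
restored = loads(data)           # plain pickle.loads, re-exported by this module
assert (restored.x, restored.y) == (1, 2)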
llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py ADDED
@@ -0,0 +1,181 @@
+ import os
+ import sys
+ import time
+ import errno
+ import signal
+ import warnings
+ import subprocess
+ import traceback
+
+ try:
+ import psutil
+ except ImportError:
+ psutil = None
+
+
+ def kill_process_tree(process, use_psutil=True):
+ """Terminate a process and its descendants with SIGKILL."""
+ if use_psutil and psutil is not None:
+ _kill_process_tree_with_psutil(process)
+ else:
+ _kill_process_tree_without_psutil(process)
+
+
+ def recursive_terminate(process, use_psutil=True):
+ warnings.warn(
+ "recursive_terminate is deprecated in loky 3.2, use kill_process_tree"
+ " instead",
+ DeprecationWarning,
+ )
+ kill_process_tree(process, use_psutil=use_psutil)
+
+
+ def _kill_process_tree_with_psutil(process):
+ try:
+ descendants = psutil.Process(process.pid).children(recursive=True)
+ except psutil.NoSuchProcess:
+ return
+
+ # Kill the descendants in reverse order to avoid killing parents before
+ # their descendants in cases where processes are nested more deeply.
+ for descendant in descendants[::-1]:
+ try:
+ descendant.kill()
+ except psutil.NoSuchProcess:
+ pass
+
+ try:
+ psutil.Process(process.pid).kill()
+ except psutil.NoSuchProcess:
+ pass
+ process.join()
+
+
+ def _kill_process_tree_without_psutil(process):
+ """Terminate a process and its descendants."""
+ try:
+ if sys.platform == "win32":
+ _windows_taskkill_process_tree(process.pid)
+ else:
+ _posix_recursive_kill(process.pid)
+ except Exception: # pragma: no cover
+ details = traceback.format_exc()
+ warnings.warn(
+ "Failed to kill subprocesses on this platform. Please install "
+ "psutil: https://github.com/giampaolo/psutil\n"
+ f"Details:\n{details}"
+ )
+ # In case we cannot introspect or kill the descendants, we fall back to
+ # only killing the main process.
+ #
+ # Note: on Windows, process.kill() is an alias for process.terminate()
+ # which in turn calls the Win32 API function TerminateProcess().
+ process.kill()
+ process.join()
+
+
+ def _windows_taskkill_process_tree(pid):
+ # On Windows, the taskkill command with option `/T` terminates a given
+ # process pid and its children.
+ try:
+ subprocess.check_output(
+ ["taskkill", "/F", "/T", "/PID", str(pid)], stderr=None
+ )
+ except subprocess.CalledProcessError as e:
+ # On Windows, taskkill returns 128 or 255 when no process is found.
+ if e.returncode not in [128, 255]:
+ # Let's raise to let the caller log the error details in a
+ # warning and only kill the root process.
+ raise # pragma: no cover
+
+
+ def _kill(pid):
+ # Not all systems (e.g. Windows) have a SIGKILL, but the C specification
+ # mandates a SIGTERM signal. While Windows is handled specifically above,
+ # let's try to be safe for other hypothetical platforms that only have
+ # SIGTERM without SIGKILL.
+ kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
+ try:
+ os.kill(pid, kill_signal)
+ except OSError as e:
+ # If OSError is raised with [Errno 3] (no such process), the process
+ # is already terminated. Otherwise, re-raise the error and let the top
+ # level function emit a warning and retry killing the process.
+ if e.errno != errno.ESRCH:
+ raise # pragma: no cover
+
+
+ def _posix_recursive_kill(pid):
+ """Recursively kill the descendants of a process before killing it."""
+ try:
+ children_pids = subprocess.check_output(
+ ["pgrep", "-P", str(pid)], stderr=None, text=True
+ )
+ except subprocess.CalledProcessError as e:
+ # `pgrep` returns 1 when no child process has been found
+ if e.returncode == 1:
+ children_pids = ""
+ else:
+ raise # pragma: no cover
+
+ # Split the output into child pids and recursively kill each of them
+ for cpid in children_pids.splitlines():
+ cpid = int(cpid)
+ _posix_recursive_kill(cpid)
+
+ _kill(pid)
+
+
+ def get_exitcodes_terminated_worker(processes):
+ """Return a formatted string with the exitcodes of terminated workers.
+
+ If necessary, wait (up to .25s) for the system to correctly set the
+ exitcode of one terminated worker.
+ """
+ patience = 5
+
+ # Catch the exitcode of the terminated workers. There should at least be
+ # one. If not, wait a bit for the system to correctly set the exitcode of
+ # the terminated worker.
+ exitcodes = [
+ p.exitcode for p in list(processes.values()) if p.exitcode is not None
+ ]
+ while not exitcodes and patience > 0:
+ patience -= 1
+ exitcodes = [
+ p.exitcode
+ for p in list(processes.values())
+ if p.exitcode is not None
+ ]
+ time.sleep(0.05)
+
+ return _format_exitcodes(exitcodes)
+
+
+ def _format_exitcodes(exitcodes):
+ """Format a list of exit codes with the names of the signals if possible."""
+ str_exitcodes = [
+ f"{_get_exitcode_name(e)}({e})" for e in exitcodes if e is not None
+ ]
+ return "{" + ", ".join(str_exitcodes) + "}"
+
+
+ def _get_exitcode_name(exitcode):
+ if sys.platform == "win32":
+ # Exit codes are unreliable on Windows (see bpo-31863).
+ # For this case, return UNKNOWN
+ return "UNKNOWN"
+
+ if exitcode < 0:
+ try:
+ import signal
+
+ return signal.Signals(-exitcode).name
+ except ValueError:
+ return "UNKNOWN"
+ elif exitcode != 255:
+ # Exit codes are unreliable with forkserver, where 255 is always returned
+ # (see bpo-30589). For this case, return UNKNOWN
+ return "EXIT"
+
+ return "UNKNOWN"