applied-ai-018 commited on
Commit
aa5079b
·
verified ·
1 Parent(s): b49cbf3

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step80/zero/15.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/15.attention.dense.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step80/zero/15.attention.dense.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step80/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step80/zero/21.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step80/zero/23.input_layernorm.weight/exp_avg.pt +3 -0
  7. ckpts/universal/global_step80/zero/23.input_layernorm.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step80/zero/23.input_layernorm.weight/fp32.pt +3 -0
  9. ckpts/universal/global_step80/zero/7.input_layernorm.weight/exp_avg_sq.pt +3 -0
  10. venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/AUTHORS +7 -0
  11. venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/INSTALLER +1 -0
  12. venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/LICENSE +202 -0
  13. venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/METADATA +84 -0
  14. venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/RECORD +53 -0
  15. venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/WHEEL +5 -0
  16. venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/top_level.txt +1 -0
  17. venv/lib/python3.10/site-packages/charset_normalizer/__init__.py +46 -0
  18. venv/lib/python3.10/site-packages/charset_normalizer/__main__.py +4 -0
  19. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/api.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/cd.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/legacy.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/utils.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/version.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/charset_normalizer/cd.py +395 -0
  26. venv/lib/python3.10/site-packages/charset_normalizer/constant.py +1995 -0
  27. venv/lib/python3.10/site-packages/charset_normalizer/legacy.py +54 -0
  28. venv/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so +0 -0
  29. venv/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so +0 -0
  30. venv/lib/python3.10/site-packages/charset_normalizer/models.py +340 -0
  31. venv/lib/python3.10/site-packages/charset_normalizer/py.typed +0 -0
  32. venv/lib/python3.10/site-packages/charset_normalizer/utils.py +421 -0
  33. venv/lib/python3.10/site-packages/charset_normalizer/version.py +6 -0
  34. venv/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/functorch/_src/__init__.py +0 -0
  36. venv/lib/python3.10/site-packages/functorch/_src/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py +8 -0
  38. venv/lib/python3.10/site-packages/functorch/_src/aot_autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/functorch/_src/eager_transforms/__init__.py +7 -0
  40. venv/lib/python3.10/site-packages/functorch/_src/eager_transforms/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/functorch/_src/make_functional/__init__.py +4 -0
  42. venv/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/functorch/_src/vmap/__init__.py +16 -0
  44. venv/lib/python3.10/site-packages/functorch/_src/vmap/__pycache__/__init__.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/functorch/dim/dim.py +121 -0
  47. venv/lib/python3.10/site-packages/functorch/dim/reference.py +645 -0
  48. venv/lib/python3.10/site-packages/functorch/dim/tree_map.py +14 -0
  49. venv/lib/python3.10/site-packages/functorch/dim/wrap_type.py +71 -0
  50. venv/lib/python3.10/site-packages/functorch/einops/__init__.py +3 -0
ckpts/universal/global_step80/zero/15.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:295e96abb0750e08ad9feb17957890caa0d5090b123c71f5d83f046c6de803bb
3
+ size 16778396
ckpts/universal/global_step80/zero/15.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdec04eb7521f1b50f6b5145ce487080b890a4af3ad39ba0459c87de0ff19216
3
+ size 16778411
ckpts/universal/global_step80/zero/15.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ff29c27e5f7903b0c52006242cc555e2eed04ca2e788dd9fa6585e67d4254e2
3
+ size 16778317
ckpts/universal/global_step80/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f57bce0ea9e701b12fde6c27a5da4ad1dfcec5b38f992cf2665546dfd9effb16
3
+ size 33555612
ckpts/universal/global_step80/zero/21.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a16f0a448f66f0f5ec1839a952b84f941bbb961604d1cfc93eb451524cb8c2d1
3
+ size 33555533
ckpts/universal/global_step80/zero/23.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b531c7a4638dd059c82b3fb29677d0123d27afc407ce2cbe87f192a01154f90
3
+ size 9372
ckpts/universal/global_step80/zero/23.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84f9a2a0c156213eea3acedb32d535e03a0a8d9c495a4d1175b20118f8bbc0e5
3
+ size 9387
ckpts/universal/global_step80/zero/23.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6009135d22ebe426f0006466e3f0366f6857b8c3b2c192fe2f4b3f3253aed54
3
+ size 9293
ckpts/universal/global_step80/zero/7.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8698db993558c6aff1684e62e860af67cb99f4918bb40694af9023ff58086d6d
3
+ size 9387
venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/AUTHORS ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # This is the list of Abseil authors for copyright purposes.
2
+ #
3
+ # This does not necessarily list everyone who has contributed code, since in
4
+ # some cases, their employer may be the copyright holder. To see the full list
5
+ # of contributors, see the revision history in source control.
6
+
7
+ Google Inc.
venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: absl-py
3
+ Version: 2.1.0
4
+ Summary: Abseil Python Common Libraries, see https://github.com/abseil/abseil-py.
5
+ Home-page: https://github.com/abseil/abseil-py
6
+ Author: The Abseil Authors
7
+ License: Apache 2.0
8
+ Classifier: Programming Language :: Python
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Programming Language :: Python :: 3.7
11
+ Classifier: Programming Language :: Python :: 3.8
12
+ Classifier: Programming Language :: Python :: 3.9
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Intended Audience :: Developers
17
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
18
+ Classifier: License :: OSI Approved :: Apache Software License
19
+ Classifier: Operating System :: OS Independent
20
+ Requires-Python: >=3.7
21
+ Description-Content-Type: text/markdown
22
+ License-File: LICENSE
23
+ License-File: AUTHORS
24
+
25
+ # Abseil Python Common Libraries
26
+
27
+ This repository is a collection of Python library code for building Python
28
+ applications. The code is collected from Google's own Python code base, and has
29
+ been extensively tested and used in production.
30
+
31
+ ## Features
32
+
33
+ * Simple application startup
34
+ * Distributed commandline flags system
35
+ * Custom logging module with additional features
36
+ * Testing utilities
37
+
38
+ ## Getting Started
39
+
40
+ ### Installation
41
+
42
+ To install the package, simply run:
43
+
44
+ ```bash
45
+ pip install absl-py
46
+ ```
47
+
48
+ Or install from source:
49
+
50
+ ```bash
51
+ python setup.py install
52
+ ```
53
+
54
+ ### Running Tests
55
+
56
+ To run Abseil tests, you can clone the git repo and run
57
+ [bazel](https://bazel.build/):
58
+
59
+ ```bash
60
+ git clone https://github.com/abseil/abseil-py.git
61
+ cd abseil-py
62
+ bazel test absl/...
63
+ ```
64
+
65
+ ### Example Code
66
+
67
+ Please refer to
68
+ [smoke_tests/sample_app.py](https://github.com/abseil/abseil-py/blob/main/smoke_tests/sample_app.py)
69
+ as an example to get started.
70
+
71
+ ## Documentation
72
+
73
+ See the [Abseil Python Developer Guide](https://abseil.io/docs/python/).
74
+
75
+ ## Future Releases
76
+
77
+ The current repository includes an initial set of libraries for early adoption.
78
+ More components and interoperability with Abseil C++ Common Libraries
79
+ will come in future releases.
80
+
81
+ ## License
82
+
83
+ The Abseil Python library is licensed under the terms of the Apache
84
+ license. See [LICENSE](LICENSE) for more information.
venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584
2
+ absl/__pycache__/__init__.cpython-310.pyc,,
3
+ absl/__pycache__/app.cpython-310.pyc,,
4
+ absl/__pycache__/command_name.cpython-310.pyc,,
5
+ absl/app.py,sha256=DQROJ_Ovex6w2_nr_s7AHgXQle951XmcVtlNrMjfSFA,15374
6
+ absl/app.pyi,sha256=DqRvFRos3oFk00lZJSKaHZuL_3-LnZl-ylg_VAXtPcc,1737
7
+ absl/command_name.py,sha256=C7CuwMMedDLUOX88Et92QZb2se__nU7txgpO-01amxg,2301
8
+ absl/flags/__init__.py,sha256=FgR_NxQG1xLA2ZxLU51HTrLWV5kbN9eSCI-47Z7D3WA,7728
9
+ absl/flags/__pycache__/__init__.cpython-310.pyc,,
10
+ absl/flags/__pycache__/_argument_parser.cpython-310.pyc,,
11
+ absl/flags/__pycache__/_defines.cpython-310.pyc,,
12
+ absl/flags/__pycache__/_exceptions.cpython-310.pyc,,
13
+ absl/flags/__pycache__/_flag.cpython-310.pyc,,
14
+ absl/flags/__pycache__/_flagvalues.cpython-310.pyc,,
15
+ absl/flags/__pycache__/_helpers.cpython-310.pyc,,
16
+ absl/flags/__pycache__/_validators.cpython-310.pyc,,
17
+ absl/flags/__pycache__/_validators_classes.cpython-310.pyc,,
18
+ absl/flags/__pycache__/argparse_flags.cpython-310.pyc,,
19
+ absl/flags/_argument_parser.py,sha256=TQFhT0OcQuRO_1GTJoUvYC1KU6wV9f4Lc7jQmajBGi0,20934
20
+ absl/flags/_defines.py,sha256=s_YA_tAHFU4wxrJqKLH5uMldTl1DtlUfSvgBbflXkQ8,52783
21
+ absl/flags/_exceptions.py,sha256=Lws7ZZrlLJG83VHuOB4Z4CNfcSoKX5pJnsNRCtp-dMw,3657
22
+ absl/flags/_flag.py,sha256=Sv_d7kDSZh-VNr4JGrBy4g7VxnbRspOOd5hO6wA94qk,19895
23
+ absl/flags/_flagvalues.py,sha256=Gferpr9yg8Ntc6ij9tPiChliYz5jYWfVJoKzAREwNFw,54127
24
+ absl/flags/_helpers.py,sha256=uWWeqbhc19kTXonfM7mNZT68ZakmJgu-v5IHeS9A9Xc,14081
25
+ absl/flags/_validators.py,sha256=_hpVwThXQhL6PFOA9-L2ZRI-7zLu2UxU_hRJJWXYoHw,14144
26
+ absl/flags/_validators_classes.py,sha256=KLBJhJAt8C18gy2Uq-q7bUFNS_AhPBlxlwGiNm5gWXU,6157
27
+ absl/flags/argparse_flags.py,sha256=57E1HFa40tvnQ3DQzY3x1qdBUIxtfTTYAYONT_k8HOI,14485
28
+ absl/logging/__init__.py,sha256=mzF3rusWjzLbuVdZI8SfPiIoqfWO9kBUhxVOvGZQTv4,42082
29
+ absl/logging/__init__.pyi,sha256=NPAna_9rrYTVNIHLXUbdvsAZcNlv4IJs9yNnL59mxr8,5794
30
+ absl/logging/__pycache__/__init__.cpython-310.pyc,,
31
+ absl/logging/__pycache__/converter.cpython-310.pyc,,
32
+ absl/logging/converter.py,sha256=eTucx1Ojix7YWMQUyWKzPRTrxGLuCkNsTmJa1GW6k94,6353
33
+ absl/testing/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584
34
+ absl/testing/__pycache__/__init__.cpython-310.pyc,,
35
+ absl/testing/__pycache__/_bazelize_command.cpython-310.pyc,,
36
+ absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc,,
37
+ absl/testing/__pycache__/absltest.cpython-310.pyc,,
38
+ absl/testing/__pycache__/flagsaver.cpython-310.pyc,,
39
+ absl/testing/__pycache__/parameterized.cpython-310.pyc,,
40
+ absl/testing/__pycache__/xml_reporter.cpython-310.pyc,,
41
+ absl/testing/_bazelize_command.py,sha256=R4rV4j5AOSp3PNkVQKP1I-SKYzQbXyeuiOT3d23cTLA,2302
42
+ absl/testing/_pretty_print_reporter.py,sha256=nL5qSsYWF6O_C6L9PexwFSPxs68Wc85RhdhRBN2AgTw,3140
43
+ absl/testing/absltest.py,sha256=sgb0TPgNP0_nLKcxrHBlifvUsgufnYURVR8Vau3f278,101119
44
+ absl/testing/flagsaver.py,sha256=514JmVdCn-P0jsTntskCtUfxrHyp3urLdn2bzDd991s,13392
45
+ absl/testing/parameterized.py,sha256=PT1P3X__WkFC_NyGWifUdJeqn-BM4JI3yy-1zsGaFEI,27807
46
+ absl/testing/xml_reporter.py,sha256=k_9cWhw01RGCQImGDciTa_RrBEEuPZ3IPD5IASoRwwM,21720
47
+ absl_py-2.1.0.dist-info/AUTHORS,sha256=YoLudsylaQg7W5mLn4FroQMuEnuNx8RpQrhkd_xvv6U,296
48
+ absl_py-2.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
49
+ absl_py-2.1.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
50
+ absl_py-2.1.0.dist-info/METADATA,sha256=CTp5OILgEjYv4Y7dpCHzW5QmM57hl-2i-AizwFlnRYA,2311
51
+ absl_py-2.1.0.dist-info/RECORD,,
52
+ absl_py-2.1.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
53
+ absl_py-2.1.0.dist-info/top_level.txt,sha256=0M_1z27Hi5Bsj1EhTfE_ajdJdFxeP_aw0xXnR4BXXhI,5
venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.42.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
venv/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ absl
venv/lib/python3.10/site-packages/charset_normalizer/__init__.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Charset-Normalizer
4
+ ~~~~~~~~~~~~~~
5
+ The Real First Universal Charset Detector.
6
+ A library that helps you read text from an unknown charset encoding.
7
+ Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
8
+ All IANA character set names for which the Python core library provides codecs are supported.
9
+
10
+ Basic usage:
11
+ >>> from charset_normalizer import from_bytes
12
+ >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
13
+ >>> best_guess = results.best()
14
+ >>> str(best_guess)
15
+ 'Bсеки човек има право на образование. Oбразованието!'
16
+
17
+ Others methods and usages are available - see the full documentation
18
+ at <https://github.com/Ousret/charset_normalizer>.
19
+ :copyright: (c) 2021 by Ahmed TAHRI
20
+ :license: MIT, see LICENSE for more details.
21
+ """
22
+ import logging
23
+
24
+ from .api import from_bytes, from_fp, from_path, is_binary
25
+ from .legacy import detect
26
+ from .models import CharsetMatch, CharsetMatches
27
+ from .utils import set_logging_handler
28
+ from .version import VERSION, __version__
29
+
30
+ __all__ = (
31
+ "from_fp",
32
+ "from_path",
33
+ "from_bytes",
34
+ "is_binary",
35
+ "detect",
36
+ "CharsetMatch",
37
+ "CharsetMatches",
38
+ "__version__",
39
+ "VERSION",
40
+ "set_logging_handler",
41
+ )
42
+
43
+ # Attach a NullHandler to the top level logger by default
44
+ # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
45
+
46
+ logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
venv/lib/python3.10/site-packages/charset_normalizer/__main__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .cli import cli_detect
2
+
3
+ if __name__ == "__main__":
4
+ cli_detect()
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/api.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/cd.cpython-310.pyc ADDED
Binary file (9.66 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/constant.cpython-310.pyc ADDED
Binary file (30.5 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/legacy.cpython-310.pyc ADDED
Binary file (1.85 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/utils.cpython-310.pyc ADDED
Binary file (8.93 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/version.cpython-310.pyc ADDED
Binary file (270 Bytes). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/cd.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ from codecs import IncrementalDecoder
3
+ from collections import Counter
4
+ from functools import lru_cache
5
+ from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
6
+
7
+ from .constant import (
8
+ FREQUENCIES,
9
+ KO_NAMES,
10
+ LANGUAGE_SUPPORTED_COUNT,
11
+ TOO_SMALL_SEQUENCE,
12
+ ZH_NAMES,
13
+ )
14
+ from .md import is_suspiciously_successive_range
15
+ from .models import CoherenceMatches
16
+ from .utils import (
17
+ is_accentuated,
18
+ is_latin,
19
+ is_multi_byte_encoding,
20
+ is_unicode_range_secondary,
21
+ unicode_range,
22
+ )
23
+
24
+
25
def encoding_unicode_range(iana_name: str) -> List[str]:
    """
    Return associated unicode ranges in a single byte code page.

    :param iana_name: IANA name of a single-byte encoding (e.g. "cp1252").
    :raises IOError: if the given encoding is a multi-byte code page.
    """
    if is_multi_byte_encoding(iana_name):
        raise IOError("Function not supported on multi-byte code page")

    decoder = importlib.import_module(
        "encodings.{}".format(iana_name)
    ).IncrementalDecoder

    p: IncrementalDecoder = decoder(errors="ignore")
    seen_ranges: Dict[str, int] = {}
    character_count: int = 0

    # Probe bytes 0x40-0xFE only: the lower half is shared ASCII and carries
    # no discriminating information about the code page.
    for i in range(0x40, 0xFF):
        chunk: str = p.decode(bytes([i]))

        if chunk:
            character_range: Optional[str] = unicode_range(chunk)

            if character_range is None:
                continue

            # Secondary ranges (punctuation, symbols, ...) are ignored; they
            # do not identify an alphabet.
            if is_unicode_range_secondary(character_range) is False:
                if character_range not in seen_ranges:
                    seen_ranges[character_range] = 0
                seen_ranges[character_range] += 1
                character_count += 1

    # Guard against a code page that decodes nothing in the probed span;
    # previously this raised ZeroDivisionError below.
    if character_count == 0:
        return []

    # Keep only ranges accounting for at least 15% of decoded characters.
    return sorted(
        [
            character_range
            for character_range in seen_ranges
            if seen_ranges[character_range] / character_count >= 0.15
        ]
    )
62
+
63
+
64
def unicode_range_languages(primary_range: str) -> List[str]:
    """
    Return inferred languages used with a unicode range.
    """
    matching_languages: List[str] = []

    for language, characters in FREQUENCIES.items():
        # One character inside the target range is enough to associate
        # the language with that range.
        if any(unicode_range(character) == primary_range for character in characters):
            matching_languages.append(language)

    return matching_languages
77
+
78
+
79
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
    """
    Single-byte encoding language association. Some code page are heavily linked to particular language(s).
    This function does the correspondence.
    """
    # The first non-Latin range drives the association; Latin ranges are
    # shared by too many languages to be discriminating on their own.
    primary_range: Optional[str] = next(
        (
            specified_range
            for specified_range in encoding_unicode_range(iana_name)
            if "Latin" not in specified_range
        ),
        None,
    )

    if primary_range is None:
        return ["Latin Based"]

    return unicode_range_languages(primary_range)
97
+
98
+
99
+ @lru_cache()
100
+ def mb_encoding_languages(iana_name: str) -> List[str]:
101
+ """
102
+ Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
103
+ This function does the correspondence.
104
+ """
105
+ if (
106
+ iana_name.startswith("shift_")
107
+ or iana_name.startswith("iso2022_jp")
108
+ or iana_name.startswith("euc_j")
109
+ or iana_name == "cp932"
110
+ ):
111
+ return ["Japanese"]
112
+ if iana_name.startswith("gb") or iana_name in ZH_NAMES:
113
+ return ["Chinese"]
114
+ if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
115
+ return ["Korean"]
116
+
117
+ return []
118
+
119
+
120
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
    """
    Determine main aspects from a supported language if it contains accents and if is pure Latin.
    """
    has_accents: bool = False
    pure_latin: bool = True

    for character in FREQUENCIES[language]:
        # Short-circuit keeps the helper call count identical to a
        # flag-guarded loop.
        has_accents = has_accents or is_accentuated(character)
        pure_latin = pure_latin and is_latin(character) is not False

    return has_accents, pure_latin
135
+
136
+
137
def alphabet_languages(
    characters: List[str], ignore_non_latin: bool = False
) -> List[str]:
    """
    Return associated languages associated to given characters.

    :param characters: Characters observed in the analysed payload.
    :param ignore_non_latin: Skip languages that are not purely Latin based.
    """
    languages: List[Tuple[str, float]] = []

    source_have_accents = any(is_accentuated(character) for character in characters)

    # Build the set once: O(1) membership tests instead of scanning the
    # input list for every character of every language table.
    characters_set = set(characters)

    for language, language_characters in FREQUENCIES.items():
        target_have_accents, target_pure_latin = get_target_features(language)

        if ignore_non_latin and target_pure_latin is False:
            continue

        # A language without accents cannot explain an accentuated payload.
        if target_have_accents is False and source_have_accents:
            continue

        character_count: int = len(language_characters)

        character_match_count: int = len(
            [c for c in language_characters if c in characters_set]
        )

        ratio: float = character_match_count / character_count

        # At least 20% of the language's frequent characters must be present.
        if ratio >= 0.2:
            languages.append((language, ratio))

    languages = sorted(languages, key=lambda x: x[1], reverse=True)

    return [compatible_language[0] for compatible_language in languages]
170
+
171
+
172
def characters_popularity_compare(
    language: str, ordered_characters: List[str]
) -> float:
    """
    Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
    The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
    Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)

    :raises ValueError: if the given language has no frequency table in FREQUENCIES.
    """
    if language not in FREQUENCIES:
        raise ValueError("{} not available".format(language))

    character_approved_count: int = 0
    # Set view for O(1) membership tests against the language's table.
    FREQUENCIES_language_set = set(FREQUENCIES[language])

    ordered_characters_count: int = len(ordered_characters)
    target_language_characters_count: int = len(FREQUENCIES[language])

    # Alphabets larger than 26 letters get a looser rank comparison below.
    large_alphabet: bool = target_language_characters_count > 26

    for character, character_rank in zip(
        ordered_characters, range(0, ordered_characters_count)
    ):
        if character not in FREQUENCIES_language_set:
            continue

        # Project the observed rank onto the language table's scale so that
        # ranks from lists of different lengths are comparable.
        character_rank_in_language: int = FREQUENCIES[language].index(character)
        expected_projection_ratio: float = (
            target_language_characters_count / ordered_characters_count
        )
        character_rank_projection: int = int(character_rank * expected_projection_ratio)

        # Small alphabet: a rank drift above 4 disqualifies this character.
        if (
            large_alphabet is False
            and abs(character_rank_projection - character_rank_in_language) > 4
        ):
            continue

        # Large alphabet: accept when the drift stays under a third of the
        # frequency table's length.
        if (
            large_alphabet is True
            and abs(character_rank_projection - character_rank_in_language)
            < target_language_characters_count / 3
        ):
            character_approved_count += 1
            continue

        # Fallback: compare the neighbourhoods (characters ranked before and
        # after the current one) between the observation and the table.
        characters_before_source: List[str] = FREQUENCIES[language][
            0:character_rank_in_language
        ]
        characters_after_source: List[str] = FREQUENCIES[language][
            character_rank_in_language:
        ]
        characters_before: List[str] = ordered_characters[0:character_rank]
        characters_after: List[str] = ordered_characters[character_rank:]

        before_match_count: int = len(
            set(characters_before) & set(characters_before_source)
        )

        after_match_count: int = len(
            set(characters_after) & set(characters_after_source)
        )

        # Empty neighbourhood on either side: accept the character as-is.
        if len(characters_before_source) == 0 and before_match_count <= 4:
            character_approved_count += 1
            continue

        if len(characters_after_source) == 0 and after_match_count <= 4:
            character_approved_count += 1
            continue

        # Accept when at least 40% of either neighbourhood overlaps.
        if (
            before_match_count / len(characters_before_source) >= 0.4
            or after_match_count / len(characters_after_source) >= 0.4
        ):
            character_approved_count += 1
            continue

    return character_approved_count / len(ordered_characters)
250
+
251
+
252
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
    """
    Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
    Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
    One containing the latin letters and the other hebrew.
    """
    layers: Dict[str, str] = {}

    for character in decoded_sequence:
        # Only alphabetic characters carry alphabet information.
        if not character.isalpha():
            continue

        character_range: Optional[str] = unicode_range(character)

        if character_range is None:
            continue

        # Merge into an existing layer when the two ranges commonly co-occur;
        # otherwise a new layer is opened for this range.
        target_range: Optional[str] = None
        for known_range in layers:
            if (
                is_suspiciously_successive_range(known_range, character_range)
                is False
            ):
                target_range = known_range
                break

        if target_range is None:
            target_range = character_range

        layers[target_range] = layers.get(target_range, "") + character.lower()

    return list(layers.values())
289
+
290
+
291
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
    """
    This function merge results previously given by the function coherence_ratio.
    The return type is the same as coherence_ratio.
    """
    # Gather every observed ratio per language across all partial results.
    per_language_ratios: Dict[str, List[float]] = {}
    for partial in results:
        for language, ratio in partial:
            per_language_ratios.setdefault(language, []).append(ratio)

    # Average the ratios (rounded to 4 decimals) and rank best-first.
    merged = [
        (language, round(sum(ratios) / len(ratios), 4))
        for language, ratios in per_language_ratios.items()
    ]

    return sorted(merged, key=lambda x: x[1], reverse=True)
317
+
318
+
319
def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
    """
    We shall NOT return "English—" in CoherenceMatches because it is an alternative
    of "English". This function only keeps the best match and remove the em-dash in it.
    """
    # Group ratios under the language name stripped of any em-dash suffix.
    index_results: Dict[str, List[float]] = {}

    for language, ratio in results:
        index_results.setdefault(language.replace("—", ""), []).append(ratio)

    # No alternative collapsed into another entry: nothing to filter out.
    if all(len(ratios) == 1 for ratios in index_results.values()):
        return results

    # Keep only the best ratio for each merged language name.
    return [
        (language, max(ratios)) for language, ratios in index_results.items()
    ]
344
+
345
+
346
@lru_cache(maxsize=2048)
def coherence_ratio(
    decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
    """
    Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
    A layer = Character extraction by alphabets/ranges.
    """
    results: List[Tuple[str, float]] = []
    ignore_non_latin: bool = False
    sufficient_match_count: int = 0

    # An explicit language restriction may be passed as a comma separated str.
    lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
    if "Latin Based" in lg_inclusion_list:
        ignore_non_latin = True
        lg_inclusion_list.remove("Latin Based")

    for layer in alpha_unicode_split(decoded_sequence):
        occurrences: TypeCounter[str] = Counter(layer)
        most_common = occurrences.most_common()

        character_count: int = sum(count for _, count in most_common)

        # Too little material in this alphabet layer to judge reliably.
        if character_count <= TOO_SMALL_SEQUENCE:
            continue

        popular_character_ordered: List[str] = [char for char, _ in most_common]

        candidate_languages = lg_inclusion_list or alphabet_languages(
            popular_character_ordered, ignore_non_latin
        )

        for language in candidate_languages:
            ratio: float = characters_popularity_compare(
                language, popular_character_ordered
            )

            if ratio < threshold:
                continue
            if ratio >= 0.8:
                sufficient_match_count += 1

            results.append((language, round(ratio, 4)))

            # Three strong matches are plenty for this layer; stop early.
            if sufficient_match_count >= 3:
                break

    return sorted(
        filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
    )
venv/lib/python3.10/site-packages/charset_normalizer/constant.py ADDED
@@ -0,0 +1,1995 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
3
+ from encodings.aliases import aliases
4
+ from re import IGNORECASE, compile as re_compile
5
+ from typing import Dict, List, Set, Union
6
+
7
# Contain for each eligible encoding a list of/item bytes SIG/BOM
ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = {
    "utf_8": BOM_UTF8,
    # UTF-7 has several possible BOM-like signatures.
    "utf_7": [
        b"\x2b\x2f\x76\x38",
        b"\x2b\x2f\x76\x39",
        b"\x2b\x2f\x76\x2b",
        b"\x2b\x2f\x76\x2f",
        b"\x2b\x2f\x76\x38\x2d",
    ],
    "gb18030": b"\x84\x31\x95\x33",
    "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE],
    "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE],
}

# Payloads at or below this many characters are too small for confident detection.
TOO_SMALL_SEQUENCE: int = 32
# Payloads above this many bytes (10 MB) are considered too big to scan whole.
TOO_BIG_SEQUENCE: int = int(10e6)

# 1,112,064 is the number of Unicode scalar values representable in UTF-8.
UTF8_MAXIMAL_ALLOCATION: int = 1_112_064
26
+
27
+ # Up-to-date Unicode ucd/15.0.0
28
+ UNICODE_RANGES_COMBINED: Dict[str, range] = {
29
+ "Control character": range(32),
30
+ "Basic Latin": range(32, 128),
31
+ "Latin-1 Supplement": range(128, 256),
32
+ "Latin Extended-A": range(256, 384),
33
+ "Latin Extended-B": range(384, 592),
34
+ "IPA Extensions": range(592, 688),
35
+ "Spacing Modifier Letters": range(688, 768),
36
+ "Combining Diacritical Marks": range(768, 880),
37
+ "Greek and Coptic": range(880, 1024),
38
+ "Cyrillic": range(1024, 1280),
39
+ "Cyrillic Supplement": range(1280, 1328),
40
+ "Armenian": range(1328, 1424),
41
+ "Hebrew": range(1424, 1536),
42
+ "Arabic": range(1536, 1792),
43
+ "Syriac": range(1792, 1872),
44
+ "Arabic Supplement": range(1872, 1920),
45
+ "Thaana": range(1920, 1984),
46
+ "NKo": range(1984, 2048),
47
+ "Samaritan": range(2048, 2112),
48
+ "Mandaic": range(2112, 2144),
49
+ "Syriac Supplement": range(2144, 2160),
50
+ "Arabic Extended-B": range(2160, 2208),
51
+ "Arabic Extended-A": range(2208, 2304),
52
+ "Devanagari": range(2304, 2432),
53
+ "Bengali": range(2432, 2560),
54
+ "Gurmukhi": range(2560, 2688),
55
+ "Gujarati": range(2688, 2816),
56
+ "Oriya": range(2816, 2944),
57
+ "Tamil": range(2944, 3072),
58
+ "Telugu": range(3072, 3200),
59
+ "Kannada": range(3200, 3328),
60
+ "Malayalam": range(3328, 3456),
61
+ "Sinhala": range(3456, 3584),
62
+ "Thai": range(3584, 3712),
63
+ "Lao": range(3712, 3840),
64
+ "Tibetan": range(3840, 4096),
65
+ "Myanmar": range(4096, 4256),
66
+ "Georgian": range(4256, 4352),
67
+ "Hangul Jamo": range(4352, 4608),
68
+ "Ethiopic": range(4608, 4992),
69
+ "Ethiopic Supplement": range(4992, 5024),
70
+ "Cherokee": range(5024, 5120),
71
+ "Unified Canadian Aboriginal Syllabics": range(5120, 5760),
72
+ "Ogham": range(5760, 5792),
73
+ "Runic": range(5792, 5888),
74
+ "Tagalog": range(5888, 5920),
75
+ "Hanunoo": range(5920, 5952),
76
+ "Buhid": range(5952, 5984),
77
+ "Tagbanwa": range(5984, 6016),
78
+ "Khmer": range(6016, 6144),
79
+ "Mongolian": range(6144, 6320),
80
+ "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6400),
81
+ "Limbu": range(6400, 6480),
82
+ "Tai Le": range(6480, 6528),
83
+ "New Tai Lue": range(6528, 6624),
84
+ "Khmer Symbols": range(6624, 6656),
85
+ "Buginese": range(6656, 6688),
86
+ "Tai Tham": range(6688, 6832),
87
+ "Combining Diacritical Marks Extended": range(6832, 6912),
88
+ "Balinese": range(6912, 7040),
89
+ "Sundanese": range(7040, 7104),
90
+ "Batak": range(7104, 7168),
91
+ "Lepcha": range(7168, 7248),
92
+ "Ol Chiki": range(7248, 7296),
93
+ "Cyrillic Extended-C": range(7296, 7312),
94
+ "Georgian Extended": range(7312, 7360),
95
+ "Sundanese Supplement": range(7360, 7376),
96
+ "Vedic Extensions": range(7376, 7424),
97
+ "Phonetic Extensions": range(7424, 7552),
98
+ "Phonetic Extensions Supplement": range(7552, 7616),
99
+ "Combining Diacritical Marks Supplement": range(7616, 7680),
100
+ "Latin Extended Additional": range(7680, 7936),
101
+ "Greek Extended": range(7936, 8192),
102
+ "General Punctuation": range(8192, 8304),
103
+ "Superscripts and Subscripts": range(8304, 8352),
104
+ "Currency Symbols": range(8352, 8400),
105
+ "Combining Diacritical Marks for Symbols": range(8400, 8448),
106
+ "Letterlike Symbols": range(8448, 8528),
107
+ "Number Forms": range(8528, 8592),
108
+ "Arrows": range(8592, 8704),
109
+ "Mathematical Operators": range(8704, 8960),
110
+ "Miscellaneous Technical": range(8960, 9216),
111
+ "Control Pictures": range(9216, 9280),
112
+ "Optical Character Recognition": range(9280, 9312),
113
+ "Enclosed Alphanumerics": range(9312, 9472),
114
+ "Box Drawing": range(9472, 9600),
115
+ "Block Elements": range(9600, 9632),
116
+ "Geometric Shapes": range(9632, 9728),
117
+ "Miscellaneous Symbols": range(9728, 9984),
118
+ "Dingbats": range(9984, 10176),
119
+ "Miscellaneous Mathematical Symbols-A": range(10176, 10224),
120
+ "Supplemental Arrows-A": range(10224, 10240),
121
+ "Braille Patterns": range(10240, 10496),
122
+ "Supplemental Arrows-B": range(10496, 10624),
123
+ "Miscellaneous Mathematical Symbols-B": range(10624, 10752),
124
+ "Supplemental Mathematical Operators": range(10752, 11008),
125
+ "Miscellaneous Symbols and Arrows": range(11008, 11264),
126
+ "Glagolitic": range(11264, 11360),
127
+ "Latin Extended-C": range(11360, 11392),
128
+ "Coptic": range(11392, 11520),
129
+ "Georgian Supplement": range(11520, 11568),
130
+ "Tifinagh": range(11568, 11648),
131
+ "Ethiopic Extended": range(11648, 11744),
132
+ "Cyrillic Extended-A": range(11744, 11776),
133
+ "Supplemental Punctuation": range(11776, 11904),
134
+ "CJK Radicals Supplement": range(11904, 12032),
135
+ "Kangxi Radicals": range(12032, 12256),
136
+ "Ideographic Description Characters": range(12272, 12288),
137
+ "CJK Symbols and Punctuation": range(12288, 12352),
138
+ "Hiragana": range(12352, 12448),
139
+ "Katakana": range(12448, 12544),
140
+ "Bopomofo": range(12544, 12592),
141
+ "Hangul Compatibility Jamo": range(12592, 12688),
142
+ "Kanbun": range(12688, 12704),
143
+ "Bopomofo Extended": range(12704, 12736),
144
+ "CJK Strokes": range(12736, 12784),
145
+ "Katakana Phonetic Extensions": range(12784, 12800),
146
+ "Enclosed CJK Letters and Months": range(12800, 13056),
147
+ "CJK Compatibility": range(13056, 13312),
148
+ "CJK Unified Ideographs Extension A": range(13312, 19904),
149
+ "Yijing Hexagram Symbols": range(19904, 19968),
150
+ "CJK Unified Ideographs": range(19968, 40960),
151
+ "Yi Syllables": range(40960, 42128),
152
+ "Yi Radicals": range(42128, 42192),
153
+ "Lisu": range(42192, 42240),
154
+ "Vai": range(42240, 42560),
155
+ "Cyrillic Extended-B": range(42560, 42656),
156
+ "Bamum": range(42656, 42752),
157
+ "Modifier Tone Letters": range(42752, 42784),
158
+ "Latin Extended-D": range(42784, 43008),
159
+ "Syloti Nagri": range(43008, 43056),
160
+ "Common Indic Number Forms": range(43056, 43072),
161
+ "Phags-pa": range(43072, 43136),
162
+ "Saurashtra": range(43136, 43232),
163
+ "Devanagari Extended": range(43232, 43264),
164
+ "Kayah Li": range(43264, 43312),
165
+ "Rejang": range(43312, 43360),
166
+ "Hangul Jamo Extended-A": range(43360, 43392),
167
+ "Javanese": range(43392, 43488),
168
+ "Myanmar Extended-B": range(43488, 43520),
169
+ "Cham": range(43520, 43616),
170
+ "Myanmar Extended-A": range(43616, 43648),
171
+ "Tai Viet": range(43648, 43744),
172
+ "Meetei Mayek Extensions": range(43744, 43776),
173
+ "Ethiopic Extended-A": range(43776, 43824),
174
+ "Latin Extended-E": range(43824, 43888),
175
+ "Cherokee Supplement": range(43888, 43968),
176
+ "Meetei Mayek": range(43968, 44032),
177
+ "Hangul Syllables": range(44032, 55216),
178
+ "Hangul Jamo Extended-B": range(55216, 55296),
179
+ "High Surrogates": range(55296, 56192),
180
+ "High Private Use Surrogates": range(56192, 56320),
181
+ "Low Surrogates": range(56320, 57344),
182
+ "Private Use Area": range(57344, 63744),
183
+ "CJK Compatibility Ideographs": range(63744, 64256),
184
+ "Alphabetic Presentation Forms": range(64256, 64336),
185
+ "Arabic Presentation Forms-A": range(64336, 65024),
186
+ "Variation Selectors": range(65024, 65040),
187
+ "Vertical Forms": range(65040, 65056),
188
+ "Combining Half Marks": range(65056, 65072),
189
+ "CJK Compatibility Forms": range(65072, 65104),
190
+ "Small Form Variants": range(65104, 65136),
191
+ "Arabic Presentation Forms-B": range(65136, 65280),
192
+ "Halfwidth and Fullwidth Forms": range(65280, 65520),
193
+ "Specials": range(65520, 65536),
194
+ "Linear B Syllabary": range(65536, 65664),
195
+ "Linear B Ideograms": range(65664, 65792),
196
+ "Aegean Numbers": range(65792, 65856),
197
+ "Ancient Greek Numbers": range(65856, 65936),
198
+ "Ancient Symbols": range(65936, 66000),
199
+ "Phaistos Disc": range(66000, 66048),
200
+ "Lycian": range(66176, 66208),
201
+ "Carian": range(66208, 66272),
202
+ "Coptic Epact Numbers": range(66272, 66304),
203
+ "Old Italic": range(66304, 66352),
204
+ "Gothic": range(66352, 66384),
205
+ "Old Permic": range(66384, 66432),
206
+ "Ugaritic": range(66432, 66464),
207
+ "Old Persian": range(66464, 66528),
208
+ "Deseret": range(66560, 66640),
209
+ "Shavian": range(66640, 66688),
210
+ "Osmanya": range(66688, 66736),
211
+ "Osage": range(66736, 66816),
212
+ "Elbasan": range(66816, 66864),
213
+ "Caucasian Albanian": range(66864, 66928),
214
+ "Vithkuqi": range(66928, 67008),
215
+ "Linear A": range(67072, 67456),
216
+ "Latin Extended-F": range(67456, 67520),
217
+ "Cypriot Syllabary": range(67584, 67648),
218
+ "Imperial Aramaic": range(67648, 67680),
219
+ "Palmyrene": range(67680, 67712),
220
+ "Nabataean": range(67712, 67760),
221
+ "Hatran": range(67808, 67840),
222
+ "Phoenician": range(67840, 67872),
223
+ "Lydian": range(67872, 67904),
224
+ "Meroitic Hieroglyphs": range(67968, 68000),
225
+ "Meroitic Cursive": range(68000, 68096),
226
+ "Kharoshthi": range(68096, 68192),
227
+ "Old South Arabian": range(68192, 68224),
228
+ "Old North Arabian": range(68224, 68256),
229
+ "Manichaean": range(68288, 68352),
230
+ "Avestan": range(68352, 68416),
231
+ "Inscriptional Parthian": range(68416, 68448),
232
+ "Inscriptional Pahlavi": range(68448, 68480),
233
+ "Psalter Pahlavi": range(68480, 68528),
234
+ "Old Turkic": range(68608, 68688),
235
+ "Old Hungarian": range(68736, 68864),
236
+ "Hanifi Rohingya": range(68864, 68928),
237
+ "Rumi Numeral Symbols": range(69216, 69248),
238
+ "Yezidi": range(69248, 69312),
239
+ "Arabic Extended-C": range(69312, 69376),
240
+ "Old Sogdian": range(69376, 69424),
241
+ "Sogdian": range(69424, 69488),
242
+ "Old Uyghur": range(69488, 69552),
243
+ "Chorasmian": range(69552, 69600),
244
+ "Elymaic": range(69600, 69632),
245
+ "Brahmi": range(69632, 69760),
246
+ "Kaithi": range(69760, 69840),
247
+ "Sora Sompeng": range(69840, 69888),
248
+ "Chakma": range(69888, 69968),
249
+ "Mahajani": range(69968, 70016),
250
+ "Sharada": range(70016, 70112),
251
+ "Sinhala Archaic Numbers": range(70112, 70144),
252
+ "Khojki": range(70144, 70224),
253
+ "Multani": range(70272, 70320),
254
+ "Khudawadi": range(70320, 70400),
255
+ "Grantha": range(70400, 70528),
256
+ "Newa": range(70656, 70784),
257
+ "Tirhuta": range(70784, 70880),
258
+ "Siddham": range(71040, 71168),
259
+ "Modi": range(71168, 71264),
260
+ "Mongolian Supplement": range(71264, 71296),
261
+ "Takri": range(71296, 71376),
262
+ "Ahom": range(71424, 71504),
263
+ "Dogra": range(71680, 71760),
264
+ "Warang Citi": range(71840, 71936),
265
+ "Dives Akuru": range(71936, 72032),
266
+ "Nandinagari": range(72096, 72192),
267
+ "Zanabazar Square": range(72192, 72272),
268
+ "Soyombo": range(72272, 72368),
269
+ "Unified Canadian Aboriginal Syllabics Extended-A": range(72368, 72384),
270
+ "Pau Cin Hau": range(72384, 72448),
271
+ "Devanagari Extended-A": range(72448, 72544),
272
+ "Bhaiksuki": range(72704, 72816),
273
+ "Marchen": range(72816, 72896),
274
+ "Masaram Gondi": range(72960, 73056),
275
+ "Gunjala Gondi": range(73056, 73136),
276
+ "Makasar": range(73440, 73472),
277
+ "Kawi": range(73472, 73568),
278
+ "Lisu Supplement": range(73648, 73664),
279
+ "Tamil Supplement": range(73664, 73728),
280
+ "Cuneiform": range(73728, 74752),
281
+ "Cuneiform Numbers and Punctuation": range(74752, 74880),
282
+ "Early Dynastic Cuneiform": range(74880, 75088),
283
+ "Cypro-Minoan": range(77712, 77824),
284
+ "Egyptian Hieroglyphs": range(77824, 78896),
285
+ "Egyptian Hieroglyph Format Controls": range(78896, 78944),
286
+ "Anatolian Hieroglyphs": range(82944, 83584),
287
+ "Bamum Supplement": range(92160, 92736),
288
+ "Mro": range(92736, 92784),
289
+ "Tangsa": range(92784, 92880),
290
+ "Bassa Vah": range(92880, 92928),
291
+ "Pahawh Hmong": range(92928, 93072),
292
+ "Medefaidrin": range(93760, 93856),
293
+ "Miao": range(93952, 94112),
294
+ "Ideographic Symbols and Punctuation": range(94176, 94208),
295
+ "Tangut": range(94208, 100352),
296
+ "Tangut Components": range(100352, 101120),
297
+ "Khitan Small Script": range(101120, 101632),
298
+ "Tangut Supplement": range(101632, 101760),
299
+ "Kana Extended-B": range(110576, 110592),
300
+ "Kana Supplement": range(110592, 110848),
301
+ "Kana Extended-A": range(110848, 110896),
302
+ "Small Kana Extension": range(110896, 110960),
303
+ "Nushu": range(110960, 111360),
304
+ "Duployan": range(113664, 113824),
305
+ "Shorthand Format Controls": range(113824, 113840),
306
+ "Znamenny Musical Notation": range(118528, 118736),
307
+ "Byzantine Musical Symbols": range(118784, 119040),
308
+ "Musical Symbols": range(119040, 119296),
309
+ "Ancient Greek Musical Notation": range(119296, 119376),
310
+ "Kaktovik Numerals": range(119488, 119520),
311
+ "Mayan Numerals": range(119520, 119552),
312
+ "Tai Xuan Jing Symbols": range(119552, 119648),
313
+ "Counting Rod Numerals": range(119648, 119680),
314
+ "Mathematical Alphanumeric Symbols": range(119808, 120832),
315
+ "Sutton SignWriting": range(120832, 121520),
316
+ "Latin Extended-G": range(122624, 122880),
317
+ "Glagolitic Supplement": range(122880, 122928),
318
+ "Cyrillic Extended-D": range(122928, 123024),
319
+ "Nyiakeng Puachue Hmong": range(123136, 123216),
320
+ "Toto": range(123536, 123584),
321
+ "Wancho": range(123584, 123648),
322
+ "Nag Mundari": range(124112, 124160),
323
+ "Ethiopic Extended-B": range(124896, 124928),
324
+ "Mende Kikakui": range(124928, 125152),
325
+ "Adlam": range(125184, 125280),
326
+ "Indic Siyaq Numbers": range(126064, 126144),
327
+ "Ottoman Siyaq Numbers": range(126208, 126288),
328
+ "Arabic Mathematical Alphabetic Symbols": range(126464, 126720),
329
+ "Mahjong Tiles": range(126976, 127024),
330
+ "Domino Tiles": range(127024, 127136),
331
+ "Playing Cards": range(127136, 127232),
332
+ "Enclosed Alphanumeric Supplement": range(127232, 127488),
333
+ "Enclosed Ideographic Supplement": range(127488, 127744),
334
+ "Miscellaneous Symbols and Pictographs": range(127744, 128512),
335
+ "Emoticons range(Emoji)": range(128512, 128592),
336
+ "Ornamental Dingbats": range(128592, 128640),
337
+ "Transport and Map Symbols": range(128640, 128768),
338
+ "Alchemical Symbols": range(128768, 128896),
339
+ "Geometric Shapes Extended": range(128896, 129024),
340
+ "Supplemental Arrows-C": range(129024, 129280),
341
+ "Supplemental Symbols and Pictographs": range(129280, 129536),
342
+ "Chess Symbols": range(129536, 129648),
343
+ "Symbols and Pictographs Extended-A": range(129648, 129792),
344
+ "Symbols for Legacy Computing": range(129792, 130048),
345
+ "CJK Unified Ideographs Extension B": range(131072, 173792),
346
+ "CJK Unified Ideographs Extension C": range(173824, 177984),
347
+ "CJK Unified Ideographs Extension D": range(177984, 178208),
348
+ "CJK Unified Ideographs Extension E": range(178208, 183984),
349
+ "CJK Unified Ideographs Extension F": range(183984, 191472),
350
+ "CJK Compatibility Ideographs Supplement": range(194560, 195104),
351
+ "CJK Unified Ideographs Extension G": range(196608, 201552),
352
+ "CJK Unified Ideographs Extension H": range(201552, 205744),
353
+ "Tags": range(917504, 917632),
354
+ "Variation Selectors Supplement": range(917760, 918000),
355
+ "Supplementary Private Use Area-A": range(983040, 1048576),
356
+ "Supplementary Private Use Area-B": range(1048576, 1114112),
357
+ }
358
+
359
+
360
# Keywords that mark a Unicode block name as a "secondary" range — an
# extension, supplement or symbol block rather than a primary script block.
UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [
    "Supplement", "Extended", "Extensions", "Modifier", "Marks",
    "Punctuation", "Symbols", "Forms", "Operators", "Miscellaneous",
    "Drawing", "Block", "Shapes", "Supplemental", "Tags",
]
377
+
378
# Detects an in-document encoding declaration such as `charset="utf-8"`,
# `encoding: latin-1` or `coding=ascii`; group(1) captures the codec name.
RE_POSSIBLE_ENCODING_INDICATION = re_compile(
    r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
    IGNORECASE,
)
382
+
383
# Codecs present in Python's codec registry that have no entry in
# encodings.aliases — they must be added to the supported list by hand.
IANA_NO_ALIASES = [
    "cp720",
    "cp737",
    "cp856",
    "cp874",
    "cp875",
    "cp1006",
    "koi8_r",
    "koi8_t",
    "koi8_u",
]

# Every codec name charset-normalizer is willing to try, sorted
# alphabetically. Pseudo-codecs ("*_codec", rot_13, tactis, mbcs) are
# excluded because they are not real character encodings.
IANA_SUPPORTED: List[str] = sorted(
    name
    for name in list(set(aliases.values())) + IANA_NO_ALIASES
    if not name.endswith("_codec") and name not in {"rot_13", "tactis", "mbcs"}
)

IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED)
404
+
405
# Pre-computed groups of code pages whose byte-to-character tables are close
# to one another, as measured by the cp_similarity function.
IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = {
    "cp037": ["cp1026", "cp1140", "cp273", "cp500"],
    "cp1026": ["cp037", "cp1140", "cp273", "cp500"],
    "cp1125": ["cp866"],
    "cp1140": ["cp037", "cp1026", "cp273", "cp500"],
    "cp1250": ["iso8859_2"],
    "cp1251": ["kz1048", "ptcp154"],
    "cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
    "cp1253": ["iso8859_7"],
    "cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
    "cp1257": ["iso8859_13"],
    "cp273": ["cp037", "cp1026", "cp1140", "cp500"],
    "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
    "cp500": ["cp037", "cp1026", "cp1140", "cp273"],
    "cp850": ["cp437", "cp857", "cp858", "cp865"],
    "cp857": ["cp850", "cp858", "cp865"],
    "cp858": ["cp437", "cp850", "cp857", "cp865"],
    "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
    "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
    "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
    "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
    "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
    "cp866": ["cp1125"],
    "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
    "iso8859_11": ["tis_620"],
    "iso8859_13": ["cp1257"],
    "iso8859_14": [
        "iso8859_10",
        "iso8859_15",
        "iso8859_16",
        "iso8859_3",
        "iso8859_9",
        "latin_1",
    ],
    "iso8859_15": [
        "cp1252",
        "cp1254",
        "iso8859_10",
        "iso8859_14",
        "iso8859_16",
        "iso8859_3",
        "iso8859_9",
        "latin_1",
    ],
    "iso8859_16": [
        "iso8859_14",
        "iso8859_15",
        "iso8859_2",
        "iso8859_3",
        "iso8859_9",
        "latin_1",
    ],
    "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
    "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
    "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
    "iso8859_7": ["cp1253"],
    "iso8859_9": [
        "cp1252",
        "cp1254",
        "cp1258",
        "iso8859_10",
        "iso8859_14",
        "iso8859_15",
        "iso8859_16",
        "iso8859_3",
        "iso8859_4",
        "latin_1",
    ],
    "kz1048": ["cp1251", "ptcp154"],
    "latin_1": [
        "cp1252",
        "cp1254",
        "cp1258",
        "iso8859_10",
        "iso8859_14",
        "iso8859_15",
        "iso8859_16",
        "iso8859_3",
        "iso8859_4",
        "iso8859_9",
    ],
    "mac_iceland": ["mac_roman", "mac_turkish"],
    "mac_roman": ["mac_iceland", "mac_turkish"],
    "mac_turkish": ["mac_iceland", "mac_roman"],
    "ptcp154": ["cp1251", "kz1048"],
    "tis_620": ["iso8859_11"],
}
493
+
494
+
495
# Mapping from the Python codec name to the spelling chardet reports; used
# by the legacy detect() shim to stay output-compatible with chardet.
CHARDET_CORRESPONDENCE: Dict[str, str] = {
    "iso2022_kr": "ISO-2022-KR",
    "iso2022_jp": "ISO-2022-JP",
    "euc_kr": "EUC-KR",
    "tis_620": "TIS-620",
    "utf_32": "UTF-32",
    "euc_jp": "EUC-JP",
    "koi8_r": "KOI8-R",
    "iso8859_1": "ISO-8859-1",
    "iso8859_2": "ISO-8859-2",
    "iso8859_5": "ISO-8859-5",
    "iso8859_6": "ISO-8859-6",
    "iso8859_7": "ISO-8859-7",
    "iso8859_8": "ISO-8859-8",
    "utf_16": "UTF-16",
    "cp855": "IBM855",
    "mac_cyrillic": "MacCyrillic",
    "gb2312": "GB2312",
    "gb18030": "GB18030",
    "cp932": "CP932",
    "cp866": "IBM866",
    "utf_8": "utf-8",
    "utf_8_sig": "UTF-8-SIG",
    "shift_jis": "SHIFT_JIS",
    "big5": "Big5",
    "cp1250": "windows-1250",
    "cp1251": "windows-1251",
    "cp1252": "Windows-1252",
    "cp1253": "windows-1253",
    "cp1255": "windows-1255",
    "cp1256": "windows-1256",
    "cp1254": "Windows-1254",
    "cp949": "CP949",
}
529
+
530
+
531
# Punctuation routinely found in machine-readable ASCII payloads (markup,
# JSON, CSV, URIs); their presence alone is not treated as "mess".
COMMON_SAFE_ASCII_CHARACTERS: Set[str] = set('<>=:/&;{}[],|"-')
548
+
549
+
550
# Codec names associated with Korean text.
KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"}
# Codec names associated with Chinese text.
ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"}

# Custom logging level, one step below DEBUG, used for very verbose tracing.
TRACE: int = 5
555
+
556
+
557
# Language labels that contain the em dash "—"
# character are to be considered alternative sequences of the original.
559
+ FREQUENCIES: Dict[str, List[str]] = {
560
+ "English": [
561
+ "e",
562
+ "a",
563
+ "t",
564
+ "i",
565
+ "o",
566
+ "n",
567
+ "s",
568
+ "r",
569
+ "h",
570
+ "l",
571
+ "d",
572
+ "c",
573
+ "u",
574
+ "m",
575
+ "f",
576
+ "p",
577
+ "g",
578
+ "w",
579
+ "y",
580
+ "b",
581
+ "v",
582
+ "k",
583
+ "x",
584
+ "j",
585
+ "z",
586
+ "q",
587
+ ],
588
+ "English—": [
589
+ "e",
590
+ "a",
591
+ "t",
592
+ "i",
593
+ "o",
594
+ "n",
595
+ "s",
596
+ "r",
597
+ "h",
598
+ "l",
599
+ "d",
600
+ "c",
601
+ "m",
602
+ "u",
603
+ "f",
604
+ "p",
605
+ "g",
606
+ "w",
607
+ "b",
608
+ "y",
609
+ "v",
610
+ "k",
611
+ "j",
612
+ "x",
613
+ "z",
614
+ "q",
615
+ ],
616
+ "German": [
617
+ "e",
618
+ "n",
619
+ "i",
620
+ "r",
621
+ "s",
622
+ "t",
623
+ "a",
624
+ "d",
625
+ "h",
626
+ "u",
627
+ "l",
628
+ "g",
629
+ "o",
630
+ "c",
631
+ "m",
632
+ "b",
633
+ "f",
634
+ "k",
635
+ "w",
636
+ "z",
637
+ "p",
638
+ "v",
639
+ "ü",
640
+ "ä",
641
+ "ö",
642
+ "j",
643
+ ],
644
+ "French": [
645
+ "e",
646
+ "a",
647
+ "s",
648
+ "n",
649
+ "i",
650
+ "t",
651
+ "r",
652
+ "l",
653
+ "u",
654
+ "o",
655
+ "d",
656
+ "c",
657
+ "p",
658
+ "m",
659
+ "é",
660
+ "v",
661
+ "g",
662
+ "f",
663
+ "b",
664
+ "h",
665
+ "q",
666
+ "à",
667
+ "x",
668
+ "è",
669
+ "y",
670
+ "j",
671
+ ],
672
+ "Dutch": [
673
+ "e",
674
+ "n",
675
+ "a",
676
+ "i",
677
+ "r",
678
+ "t",
679
+ "o",
680
+ "d",
681
+ "s",
682
+ "l",
683
+ "g",
684
+ "h",
685
+ "v",
686
+ "m",
687
+ "u",
688
+ "k",
689
+ "c",
690
+ "p",
691
+ "b",
692
+ "w",
693
+ "j",
694
+ "z",
695
+ "f",
696
+ "y",
697
+ "x",
698
+ "ë",
699
+ ],
700
+ "Italian": [
701
+ "e",
702
+ "i",
703
+ "a",
704
+ "o",
705
+ "n",
706
+ "l",
707
+ "t",
708
+ "r",
709
+ "s",
710
+ "c",
711
+ "d",
712
+ "u",
713
+ "p",
714
+ "m",
715
+ "g",
716
+ "v",
717
+ "f",
718
+ "b",
719
+ "z",
720
+ "h",
721
+ "q",
722
+ "è",
723
+ "à",
724
+ "k",
725
+ "y",
726
+ "ò",
727
+ ],
728
+ "Polish": [
729
+ "a",
730
+ "i",
731
+ "o",
732
+ "e",
733
+ "n",
734
+ "r",
735
+ "z",
736
+ "w",
737
+ "s",
738
+ "c",
739
+ "t",
740
+ "k",
741
+ "y",
742
+ "d",
743
+ "p",
744
+ "m",
745
+ "u",
746
+ "l",
747
+ "j",
748
+ "ł",
749
+ "g",
750
+ "b",
751
+ "h",
752
+ "ą",
753
+ "ę",
754
+ "ó",
755
+ ],
756
+ "Spanish": [
757
+ "e",
758
+ "a",
759
+ "o",
760
+ "n",
761
+ "s",
762
+ "r",
763
+ "i",
764
+ "l",
765
+ "d",
766
+ "t",
767
+ "c",
768
+ "u",
769
+ "m",
770
+ "p",
771
+ "b",
772
+ "g",
773
+ "v",
774
+ "f",
775
+ "y",
776
+ "ó",
777
+ "h",
778
+ "q",
779
+ "í",
780
+ "j",
781
+ "z",
782
+ "á",
783
+ ],
784
+ "Russian": [
785
+ "о",
786
+ "а",
787
+ "е",
788
+ "и",
789
+ "н",
790
+ "с",
791
+ "т",
792
+ "р",
793
+ "в",
794
+ "л",
795
+ "к",
796
+ "м",
797
+ "д",
798
+ "п",
799
+ "у",
800
+ "г",
801
+ "я",
802
+ "ы",
803
+ "з",
804
+ "б",
805
+ "й",
806
+ "ь",
807
+ "ч",
808
+ "х",
809
+ "ж",
810
+ "ц",
811
+ ],
812
+ # Jap-Kanji
813
+ "Japanese": [
814
+ "人",
815
+ "一",
816
+ "大",
817
+ "亅",
818
+ "丁",
819
+ "丨",
820
+ "竹",
821
+ "笑",
822
+ "口",
823
+ "日",
824
+ "今",
825
+ "二",
826
+ "彳",
827
+ "行",
828
+ "十",
829
+ "土",
830
+ "丶",
831
+ "寸",
832
+ "寺",
833
+ "時",
834
+ "乙",
835
+ "丿",
836
+ "乂",
837
+ "气",
838
+ "気",
839
+ "冂",
840
+ "巾",
841
+ "亠",
842
+ "市",
843
+ "目",
844
+ "儿",
845
+ "見",
846
+ "八",
847
+ "小",
848
+ "凵",
849
+ "県",
850
+ "月",
851
+ "彐",
852
+ "門",
853
+ "間",
854
+ "木",
855
+ "東",
856
+ "山",
857
+ "出",
858
+ "本",
859
+ "中",
860
+ "刀",
861
+ "分",
862
+ "耳",
863
+ "又",
864
+ "取",
865
+ "最",
866
+ "言",
867
+ "田",
868
+ "心",
869
+ "思",
870
+ "刂",
871
+ "前",
872
+ "京",
873
+ "尹",
874
+ "事",
875
+ "生",
876
+ "厶",
877
+ "云",
878
+ "会",
879
+ "未",
880
+ "来",
881
+ "白",
882
+ "冫",
883
+ "楽",
884
+ "灬",
885
+ "馬",
886
+ "尸",
887
+ "尺",
888
+ "駅",
889
+ "明",
890
+ "耂",
891
+ "者",
892
+ "了",
893
+ "阝",
894
+ "都",
895
+ "高",
896
+ "卜",
897
+ "占",
898
+ "厂",
899
+ "广",
900
+ "店",
901
+ "子",
902
+ "申",
903
+ "奄",
904
+ "亻",
905
+ "俺",
906
+ "上",
907
+ "方",
908
+ "冖",
909
+ "学",
910
+ "衣",
911
+ "艮",
912
+ "食",
913
+ "自",
914
+ ],
915
+ # Jap-Katakana
916
+ "Japanese—": [
917
+ "ー",
918
+ "ン",
919
+ "ス",
920
+ "・",
921
+ "ル",
922
+ "ト",
923
+ "リ",
924
+ "イ",
925
+ "ア",
926
+ "ラ",
927
+ "ッ",
928
+ "ク",
929
+ "ド",
930
+ "シ",
931
+ "レ",
932
+ "ジ",
933
+ "タ",
934
+ "フ",
935
+ "ロ",
936
+ "カ",
937
+ "テ",
938
+ "マ",
939
+ "ィ",
940
+ "グ",
941
+ "バ",
942
+ "ム",
943
+ "プ",
944
+ "オ",
945
+ "コ",
946
+ "デ",
947
+ "ニ",
948
+ "ウ",
949
+ "メ",
950
+ "サ",
951
+ "ビ",
952
+ "ナ",
953
+ "ブ",
954
+ "ャ",
955
+ "エ",
956
+ "ュ",
957
+ "チ",
958
+ "キ",
959
+ "ズ",
960
+ "ダ",
961
+ "パ",
962
+ "ミ",
963
+ "ェ",
964
+ "ョ",
965
+ "ハ",
966
+ "セ",
967
+ "ベ",
968
+ "ガ",
969
+ "モ",
970
+ "ツ",
971
+ "ネ",
972
+ "ボ",
973
+ "ソ",
974
+ "ノ",
975
+ "ァ",
976
+ "ヴ",
977
+ "ワ",
978
+ "ポ",
979
+ "ペ",
980
+ "ピ",
981
+ "ケ",
982
+ "ゴ",
983
+ "ギ",
984
+ "ザ",
985
+ "ホ",
986
+ "ゲ",
987
+ "ォ",
988
+ "ヤ",
989
+ "ヒ",
990
+ "ユ",
991
+ "ヨ",
992
+ "ヘ",
993
+ "ゼ",
994
+ "ヌ",
995
+ "ゥ",
996
+ "ゾ",
997
+ "ヶ",
998
+ "ヂ",
999
+ "ヲ",
1000
+ "ヅ",
1001
+ "ヵ",
1002
+ "ヱ",
1003
+ "ヰ",
1004
+ "ヮ",
1005
+ "ヽ",
1006
+ "゠",
1007
+ "ヾ",
1008
+ "ヷ",
1009
+ "ヿ",
1010
+ "ヸ",
1011
+ "ヹ",
1012
+ "ヺ",
1013
+ ],
1014
+ # Jap-Hiragana
1015
+ "Japanese——": [
1016
+ "の",
1017
+ "に",
1018
+ "る",
1019
+ "た",
1020
+ "と",
1021
+ "は",
1022
+ "し",
1023
+ "い",
1024
+ "を",
1025
+ "で",
1026
+ "て",
1027
+ "が",
1028
+ "な",
1029
+ "れ",
1030
+ "か",
1031
+ "ら",
1032
+ "さ",
1033
+ "っ",
1034
+ "り",
1035
+ "す",
1036
+ "あ",
1037
+ "も",
1038
+ "こ",
1039
+ "ま",
1040
+ "う",
1041
+ "く",
1042
+ "よ",
1043
+ "き",
1044
+ "ん",
1045
+ "め",
1046
+ "お",
1047
+ "け",
1048
+ "そ",
1049
+ "つ",
1050
+ "だ",
1051
+ "や",
1052
+ "え",
1053
+ "ど",
1054
+ "わ",
1055
+ "ち",
1056
+ "み",
1057
+ "せ",
1058
+ "じ",
1059
+ "ば",
1060
+ "へ",
1061
+ "び",
1062
+ "ず",
1063
+ "ろ",
1064
+ "ほ",
1065
+ "げ",
1066
+ "む",
1067
+ "べ",
1068
+ "ひ",
1069
+ "ょ",
1070
+ "ゆ",
1071
+ "ぶ",
1072
+ "ご",
1073
+ "ゃ",
1074
+ "ね",
1075
+ "ふ",
1076
+ "ぐ",
1077
+ "ぎ",
1078
+ "ぼ",
1079
+ "ゅ",
1080
+ "づ",
1081
+ "ざ",
1082
+ "ぞ",
1083
+ "ぬ",
1084
+ "ぜ",
1085
+ "ぱ",
1086
+ "ぽ",
1087
+ "ぷ",
1088
+ "ぴ",
1089
+ "ぃ",
1090
+ "ぁ",
1091
+ "ぇ",
1092
+ "ぺ",
1093
+ "ゞ",
1094
+ "ぢ",
1095
+ "ぉ",
1096
+ "ぅ",
1097
+ "ゐ",
1098
+ "ゝ",
1099
+ "ゑ",
1100
+ "゛",
1101
+ "゜",
1102
+ "ゎ",
1103
+ "ゔ",
1104
+ "゚",
1105
+ "ゟ",
1106
+ "゙",
1107
+ "ゕ",
1108
+ "ゖ",
1109
+ ],
1110
+ "Portuguese": [
1111
+ "a",
1112
+ "e",
1113
+ "o",
1114
+ "s",
1115
+ "i",
1116
+ "r",
1117
+ "d",
1118
+ "n",
1119
+ "t",
1120
+ "m",
1121
+ "u",
1122
+ "c",
1123
+ "l",
1124
+ "p",
1125
+ "g",
1126
+ "v",
1127
+ "b",
1128
+ "f",
1129
+ "h",
1130
+ "ã",
1131
+ "q",
1132
+ "é",
1133
+ "ç",
1134
+ "á",
1135
+ "z",
1136
+ "í",
1137
+ ],
1138
+ "Swedish": [
1139
+ "e",
1140
+ "a",
1141
+ "n",
1142
+ "r",
1143
+ "t",
1144
+ "s",
1145
+ "i",
1146
+ "l",
1147
+ "d",
1148
+ "o",
1149
+ "m",
1150
+ "k",
1151
+ "g",
1152
+ "v",
1153
+ "h",
1154
+ "f",
1155
+ "u",
1156
+ "p",
1157
+ "ä",
1158
+ "c",
1159
+ "b",
1160
+ "ö",
1161
+ "å",
1162
+ "y",
1163
+ "j",
1164
+ "x",
1165
+ ],
1166
+ "Chinese": [
1167
+ "的",
1168
+ "一",
1169
+ "是",
1170
+ "不",
1171
+ "了",
1172
+ "在",
1173
+ "人",
1174
+ "有",
1175
+ "我",
1176
+ "他",
1177
+ "这",
1178
+ "个",
1179
+ "们",
1180
+ "中",
1181
+ "来",
1182
+ "上",
1183
+ "大",
1184
+ "为",
1185
+ "和",
1186
+ "国",
1187
+ "地",
1188
+ "到",
1189
+ "以",
1190
+ "说",
1191
+ "时",
1192
+ "要",
1193
+ "就",
1194
+ "出",
1195
+ "会",
1196
+ "可",
1197
+ "也",
1198
+ "你",
1199
+ "对",
1200
+ "生",
1201
+ "能",
1202
+ "而",
1203
+ "子",
1204
+ "那",
1205
+ "得",
1206
+ "于",
1207
+ "着",
1208
+ "下",
1209
+ "自",
1210
+ "之",
1211
+ "年",
1212
+ "过",
1213
+ "发",
1214
+ "后",
1215
+ "作",
1216
+ "里",
1217
+ "用",
1218
+ "道",
1219
+ "行",
1220
+ "所",
1221
+ "然",
1222
+ "家",
1223
+ "种",
1224
+ "事",
1225
+ "成",
1226
+ "方",
1227
+ "多",
1228
+ "经",
1229
+ "么",
1230
+ "去",
1231
+ "法",
1232
+ "学",
1233
+ "如",
1234
+ "都",
1235
+ "同",
1236
+ "现",
1237
+ "当",
1238
+ "没",
1239
+ "动",
1240
+ "面",
1241
+ "起",
1242
+ "看",
1243
+ "定",
1244
+ "天",
1245
+ "分",
1246
+ "还",
1247
+ "进",
1248
+ "好",
1249
+ "小",
1250
+ "部",
1251
+ "其",
1252
+ "些",
1253
+ "主",
1254
+ "样",
1255
+ "理",
1256
+ "心",
1257
+ "她",
1258
+ "本",
1259
+ "前",
1260
+ "开",
1261
+ "但",
1262
+ "因",
1263
+ "只",
1264
+ "从",
1265
+ "想",
1266
+ "实",
1267
+ ],
1268
+ "Ukrainian": [
1269
+ "о",
1270
+ "а",
1271
+ "н",
1272
+ "і",
1273
+ "и",
1274
+ "р",
1275
+ "в",
1276
+ "т",
1277
+ "е",
1278
+ "с",
1279
+ "к",
1280
+ "л",
1281
+ "у",
1282
+ "д",
1283
+ "м",
1284
+ "п",
1285
+ "з",
1286
+ "я",
1287
+ "ь",
1288
+ "б",
1289
+ "г",
1290
+ "й",
1291
+ "ч",
1292
+ "х",
1293
+ "ц",
1294
+ "ї",
1295
+ ],
1296
+ "Norwegian": [
1297
+ "e",
1298
+ "r",
1299
+ "n",
1300
+ "t",
1301
+ "a",
1302
+ "s",
1303
+ "i",
1304
+ "o",
1305
+ "l",
1306
+ "d",
1307
+ "g",
1308
+ "k",
1309
+ "m",
1310
+ "v",
1311
+ "f",
1312
+ "p",
1313
+ "u",
1314
+ "b",
1315
+ "h",
1316
+ "å",
1317
+ "y",
1318
+ "j",
1319
+ "ø",
1320
+ "c",
1321
+ "æ",
1322
+ "w",
1323
+ ],
1324
+ "Finnish": [
1325
+ "a",
1326
+ "i",
1327
+ "n",
1328
+ "t",
1329
+ "e",
1330
+ "s",
1331
+ "l",
1332
+ "o",
1333
+ "u",
1334
+ "k",
1335
+ "ä",
1336
+ "m",
1337
+ "r",
1338
+ "v",
1339
+ "j",
1340
+ "h",
1341
+ "p",
1342
+ "y",
1343
+ "d",
1344
+ "ö",
1345
+ "g",
1346
+ "c",
1347
+ "b",
1348
+ "f",
1349
+ "w",
1350
+ "z",
1351
+ ],
1352
+ "Vietnamese": [
1353
+ "n",
1354
+ "h",
1355
+ "t",
1356
+ "i",
1357
+ "c",
1358
+ "g",
1359
+ "a",
1360
+ "o",
1361
+ "u",
1362
+ "m",
1363
+ "l",
1364
+ "r",
1365
+ "à",
1366
+ "đ",
1367
+ "s",
1368
+ "e",
1369
+ "v",
1370
+ "p",
1371
+ "b",
1372
+ "y",
1373
+ "ư",
1374
+ "d",
1375
+ "á",
1376
+ "k",
1377
+ "ộ",
1378
+ "ế",
1379
+ ],
1380
+ "Czech": [
1381
+ "o",
1382
+ "e",
1383
+ "a",
1384
+ "n",
1385
+ "t",
1386
+ "s",
1387
+ "i",
1388
+ "l",
1389
+ "v",
1390
+ "r",
1391
+ "k",
1392
+ "d",
1393
+ "u",
1394
+ "m",
1395
+ "p",
1396
+ "í",
1397
+ "c",
1398
+ "h",
1399
+ "z",
1400
+ "á",
1401
+ "y",
1402
+ "j",
1403
+ "b",
1404
+ "ě",
1405
+ "é",
1406
+ "ř",
1407
+ ],
1408
+ "Hungarian": [
1409
+ "e",
1410
+ "a",
1411
+ "t",
1412
+ "l",
1413
+ "s",
1414
+ "n",
1415
+ "k",
1416
+ "r",
1417
+ "i",
1418
+ "o",
1419
+ "z",
1420
+ "á",
1421
+ "é",
1422
+ "g",
1423
+ "m",
1424
+ "b",
1425
+ "y",
1426
+ "v",
1427
+ "d",
1428
+ "h",
1429
+ "u",
1430
+ "p",
1431
+ "j",
1432
+ "ö",
1433
+ "f",
1434
+ "c",
1435
+ ],
1436
+ "Korean": [
1437
+ "이",
1438
+ "다",
1439
+ "에",
1440
+ "의",
1441
+ "는",
1442
+ "로",
1443
+ "하",
1444
+ "을",
1445
+ "가",
1446
+ "고",
1447
+ "지",
1448
+ "서",
1449
+ "한",
1450
+ "은",
1451
+ "기",
1452
+ "으",
1453
+ "년",
1454
+ "대",
1455
+ "사",
1456
+ "시",
1457
+ "를",
1458
+ "리",
1459
+ "도",
1460
+ "인",
1461
+ "스",
1462
+ "일",
1463
+ ],
1464
+ "Indonesian": [
1465
+ "a",
1466
+ "n",
1467
+ "e",
1468
+ "i",
1469
+ "r",
1470
+ "t",
1471
+ "u",
1472
+ "s",
1473
+ "d",
1474
+ "k",
1475
+ "m",
1476
+ "l",
1477
+ "g",
1478
+ "p",
1479
+ "b",
1480
+ "o",
1481
+ "h",
1482
+ "y",
1483
+ "j",
1484
+ "c",
1485
+ "w",
1486
+ "f",
1487
+ "v",
1488
+ "z",
1489
+ "x",
1490
+ "q",
1491
+ ],
1492
+ "Turkish": [
1493
+ "a",
1494
+ "e",
1495
+ "i",
1496
+ "n",
1497
+ "r",
1498
+ "l",
1499
+ "ı",
1500
+ "k",
1501
+ "d",
1502
+ "t",
1503
+ "s",
1504
+ "m",
1505
+ "y",
1506
+ "u",
1507
+ "o",
1508
+ "b",
1509
+ "ü",
1510
+ "ş",
1511
+ "v",
1512
+ "g",
1513
+ "z",
1514
+ "h",
1515
+ "c",
1516
+ "p",
1517
+ "ç",
1518
+ "ğ",
1519
+ ],
1520
+ "Romanian": [
1521
+ "e",
1522
+ "i",
1523
+ "a",
1524
+ "r",
1525
+ "n",
1526
+ "t",
1527
+ "u",
1528
+ "l",
1529
+ "o",
1530
+ "c",
1531
+ "s",
1532
+ "d",
1533
+ "p",
1534
+ "m",
1535
+ "ă",
1536
+ "f",
1537
+ "v",
1538
+ "î",
1539
+ "g",
1540
+ "b",
1541
+ "ș",
1542
+ "ț",
1543
+ "z",
1544
+ "h",
1545
+ "â",
1546
+ "j",
1547
+ ],
1548
+ "Farsi": [
1549
+ "ا",
1550
+ "ی",
1551
+ "ر",
1552
+ "د",
1553
+ "ن",
1554
+ "ه",
1555
+ "و",
1556
+ "م",
1557
+ "ت",
1558
+ "ب",
1559
+ "س",
1560
+ "ل",
1561
+ "ک",
1562
+ "ش",
1563
+ "ز",
1564
+ "ف",
1565
+ "گ",
1566
+ "ع",
1567
+ "خ",
1568
+ "ق",
1569
+ "ج",
1570
+ "آ",
1571
+ "پ",
1572
+ "ح",
1573
+ "ط",
1574
+ "ص",
1575
+ ],
1576
+ "Arabic": [
1577
+ "ا",
1578
+ "ل",
1579
+ "ي",
1580
+ "م",
1581
+ "و",
1582
+ "ن",
1583
+ "ر",
1584
+ "ت",
1585
+ "ب",
1586
+ "ة",
1587
+ "ع",
1588
+ "د",
1589
+ "س",
1590
+ "ف",
1591
+ "ه",
1592
+ "ك",
1593
+ "ق",
1594
+ "أ",
1595
+ "ح",
1596
+ "ج",
1597
+ "ش",
1598
+ "ط",
1599
+ "ص",
1600
+ "ى",
1601
+ "خ",
1602
+ "إ",
1603
+ ],
1604
+ "Danish": [
1605
+ "e",
1606
+ "r",
1607
+ "n",
1608
+ "t",
1609
+ "a",
1610
+ "i",
1611
+ "s",
1612
+ "d",
1613
+ "l",
1614
+ "o",
1615
+ "g",
1616
+ "m",
1617
+ "k",
1618
+ "f",
1619
+ "v",
1620
+ "u",
1621
+ "b",
1622
+ "h",
1623
+ "p",
1624
+ "å",
1625
+ "y",
1626
+ "ø",
1627
+ "æ",
1628
+ "c",
1629
+ "j",
1630
+ "w",
1631
+ ],
1632
+ "Serbian": [
1633
+ "а",
1634
+ "и",
1635
+ "о",
1636
+ "е",
1637
+ "н",
1638
+ "р",
1639
+ "с",
1640
+ "у",
1641
+ "т",
1642
+ "к",
1643
+ "ј",
1644
+ "в",
1645
+ "д",
1646
+ "м",
1647
+ "п",
1648
+ "л",
1649
+ "г",
1650
+ "з",
1651
+ "б",
1652
+ "a",
1653
+ "i",
1654
+ "e",
1655
+ "o",
1656
+ "n",
1657
+ "ц",
1658
+ "ш",
1659
+ ],
1660
+ "Lithuanian": [
1661
+ "i",
1662
+ "a",
1663
+ "s",
1664
+ "o",
1665
+ "r",
1666
+ "e",
1667
+ "t",
1668
+ "n",
1669
+ "u",
1670
+ "k",
1671
+ "m",
1672
+ "l",
1673
+ "p",
1674
+ "v",
1675
+ "d",
1676
+ "j",
1677
+ "g",
1678
+ "ė",
1679
+ "b",
1680
+ "y",
1681
+ "ų",
1682
+ "š",
1683
+ "ž",
1684
+ "c",
1685
+ "ą",
1686
+ "į",
1687
+ ],
1688
+ "Slovene": [
1689
+ "e",
1690
+ "a",
1691
+ "i",
1692
+ "o",
1693
+ "n",
1694
+ "r",
1695
+ "s",
1696
+ "l",
1697
+ "t",
1698
+ "j",
1699
+ "v",
1700
+ "k",
1701
+ "d",
1702
+ "p",
1703
+ "m",
1704
+ "u",
1705
+ "z",
1706
+ "b",
1707
+ "g",
1708
+ "h",
1709
+ "č",
1710
+ "c",
1711
+ "š",
1712
+ "ž",
1713
+ "f",
1714
+ "y",
1715
+ ],
1716
+ "Slovak": [
1717
+ "o",
1718
+ "a",
1719
+ "e",
1720
+ "n",
1721
+ "i",
1722
+ "r",
1723
+ "v",
1724
+ "t",
1725
+ "s",
1726
+ "l",
1727
+ "k",
1728
+ "d",
1729
+ "m",
1730
+ "p",
1731
+ "u",
1732
+ "c",
1733
+ "h",
1734
+ "j",
1735
+ "b",
1736
+ "z",
1737
+ "á",
1738
+ "y",
1739
+ "ý",
1740
+ "í",
1741
+ "č",
1742
+ "é",
1743
+ ],
1744
+ "Hebrew": [
1745
+ "י",
1746
+ "ו",
1747
+ "ה",
1748
+ "ל",
1749
+ "ר",
1750
+ "ב",
1751
+ "ת",
1752
+ "מ",
1753
+ "א",
1754
+ "ש",
1755
+ "נ",
1756
+ "ע",
1757
+ "ם",
1758
+ "ד",
1759
+ "ק",
1760
+ "ח",
1761
+ "פ",
1762
+ "ס",
1763
+ "כ",
1764
+ "ג",
1765
+ "ט",
1766
+ "צ",
1767
+ "ן",
1768
+ "ז",
1769
+ "ך",
1770
+ ],
1771
+ "Bulgarian": [
1772
+ "а",
1773
+ "и",
1774
+ "о",
1775
+ "е",
1776
+ "н",
1777
+ "т",
1778
+ "р",
1779
+ "с",
1780
+ "в",
1781
+ "л",
1782
+ "к",
1783
+ "д",
1784
+ "п",
1785
+ "м",
1786
+ "з",
1787
+ "г",
1788
+ "я",
1789
+ "ъ",
1790
+ "у",
1791
+ "б",
1792
+ "ч",
1793
+ "ц",
1794
+ "й",
1795
+ "ж",
1796
+ "щ",
1797
+ "х",
1798
+ ],
1799
+ "Croatian": [
1800
+ "a",
1801
+ "i",
1802
+ "o",
1803
+ "e",
1804
+ "n",
1805
+ "r",
1806
+ "j",
1807
+ "s",
1808
+ "t",
1809
+ "u",
1810
+ "k",
1811
+ "l",
1812
+ "v",
1813
+ "d",
1814
+ "m",
1815
+ "p",
1816
+ "g",
1817
+ "z",
1818
+ "b",
1819
+ "c",
1820
+ "č",
1821
+ "h",
1822
+ "š",
1823
+ "ž",
1824
+ "ć",
1825
+ "f",
1826
+ ],
1827
+ "Hindi": [
1828
+ "क",
1829
+ "र",
1830
+ "स",
1831
+ "न",
1832
+ "त",
1833
+ "म",
1834
+ "ह",
1835
+ "प",
1836
+ "य",
1837
+ "ल",
1838
+ "व",
1839
+ "ज",
1840
+ "द",
1841
+ "ग",
1842
+ "ब",
1843
+ "श",
1844
+ "ट",
1845
+ "अ",
1846
+ "ए",
1847
+ "थ",
1848
+ "भ",
1849
+ "ड",
1850
+ "च",
1851
+ "ध",
1852
+ "ष",
1853
+ "इ",
1854
+ ],
1855
+ "Estonian": [
1856
+ "a",
1857
+ "i",
1858
+ "e",
1859
+ "s",
1860
+ "t",
1861
+ "l",
1862
+ "u",
1863
+ "n",
1864
+ "o",
1865
+ "k",
1866
+ "r",
1867
+ "d",
1868
+ "m",
1869
+ "v",
1870
+ "g",
1871
+ "p",
1872
+ "j",
1873
+ "h",
1874
+ "ä",
1875
+ "b",
1876
+ "õ",
1877
+ "ü",
1878
+ "f",
1879
+ "c",
1880
+ "ö",
1881
+ "y",
1882
+ ],
1883
+ "Thai": [
1884
+ "า",
1885
+ "น",
1886
+ "ร",
1887
+ "อ",
1888
+ "ก",
1889
+ "เ",
1890
+ "ง",
1891
+ "ม",
1892
+ "ย",
1893
+ "ล",
1894
+ "ว",
1895
+ "ด",
1896
+ "ท",
1897
+ "ส",
1898
+ "ต",
1899
+ "ะ",
1900
+ "ป",
1901
+ "บ",
1902
+ "ค",
1903
+ "ห",
1904
+ "แ",
1905
+ "จ",
1906
+ "พ",
1907
+ "ช",
1908
+ "ข",
1909
+ "ใ",
1910
+ ],
1911
+ "Greek": [
1912
+ "α",
1913
+ "τ",
1914
+ "ο",
1915
+ "ι",
1916
+ "ε",
1917
+ "ν",
1918
+ "ρ",
1919
+ "σ",
1920
+ "κ",
1921
+ "η",
1922
+ "π",
1923
+ "ς",
1924
+ "υ",
1925
+ "μ",
1926
+ "λ",
1927
+ "ί",
1928
+ "ό",
1929
+ "ά",
1930
+ "γ",
1931
+ "έ",
1932
+ "δ",
1933
+ "ή",
1934
+ "ω",
1935
+ "χ",
1936
+ "θ",
1937
+ "ύ",
1938
+ ],
1939
+ "Tamil": [
1940
+ "க",
1941
+ "த",
1942
+ "ப",
1943
+ "ட",
1944
+ "ர",
1945
+ "ம",
1946
+ "ல",
1947
+ "ன",
1948
+ "வ",
1949
+ "ற",
1950
+ "ய",
1951
+ "ள",
1952
+ "ச",
1953
+ "ந",
1954
+ "இ",
1955
+ "ண",
1956
+ "அ",
1957
+ "ஆ",
1958
+ "ழ",
1959
+ "ங",
1960
+ "எ",
1961
+ "உ",
1962
+ "ஒ",
1963
+ "ஸ",
1964
+ ],
1965
+ "Kazakh": [
1966
+ "а",
1967
+ "ы",
1968
+ "е",
1969
+ "н",
1970
+ "т",
1971
+ "р",
1972
+ "л",
1973
+ "і",
1974
+ "д",
1975
+ "с",
1976
+ "м",
1977
+ "қ",
1978
+ "к",
1979
+ "о",
1980
+ "б",
1981
+ "и",
1982
+ "у",
1983
+ "ғ",
1984
+ "ж",
1985
+ "ң",
1986
+ "з",
1987
+ "ш",
1988
+ "й",
1989
+ "п",
1990
+ "г",
1991
+ "ө",
1992
+ ],
1993
+ }
1994
+
1995
# Number of languages that have a frequency table in FREQUENCIES.
LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)
venv/lib/python3.10/site-packages/charset_normalizer/legacy.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Optional, Union
2
+ from warnings import warn
3
+
4
+ from .api import from_bytes
5
+ from .constant import CHARDET_CORRESPONDENCE
6
+
7
+
8
def detect(
    byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
) -> Dict[str, Optional[Union[str, float]]]:
    """
    chardet-compatible legacy entry point.

    Guess the encoding of the given byte string and return a dict shaped like
    chardet's output: ``{"encoding", "language", "confidence"}``. Encoding
    names follow chardet's own spelling whenever a correspondence is known.
    This function is deprecated and exists to ease migration from chardet;
    it is not planned for removal.

    :param byte_str: The byte sequence to examine.
    :param should_rename_legacy: Should we rename legacy encodings
        to their more modern equivalents?
    """
    # Extra chardet keyword arguments are accepted for signature
    # compatibility but have no effect here — warn the caller.
    if kwargs:
        ignored_arguments = ",".join(list(kwargs.keys()))
        warn(
            f"charset-normalizer disregard arguments '{ignored_arguments}' in legacy function detect()"
        )

    if not isinstance(byte_str, (bytearray, bytes)):
        raise TypeError(  # pragma: nocover
            "Expected object of type bytes or bytearray, got: "
            "{0}".format(type(byte_str))
        )

    if isinstance(byte_str, bytearray):
        byte_str = bytes(byte_str)

    best_guess = from_bytes(byte_str).best()

    if best_guess is None:
        encoding: Optional[str] = None
        language: str = ""
        confidence: Optional[float] = None
    else:
        encoding = best_guess.encoding
        language = best_guess.language if best_guess.language != "Unknown" else ""
        confidence = 1.0 - best_guess.chaos

        # charset-normalizer strips the BOM/SIG during detection, so re-append
        # the "_sig" suffix chardet would report for UTF-8 with a signature.
        if encoding == "utf_8" and best_guess.bom:
            encoding += "_sig"

    if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
        encoding = CHARDET_CORRESPONDENCE[encoding]

    return {
        "encoding": encoding,
        "language": language,
        "confidence": confidence,
    }
venv/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (16.1 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (268 kB). View file
 
venv/lib/python3.10/site-packages/charset_normalizer/models.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from encodings.aliases import aliases
2
+ from hashlib import sha256
3
+ from json import dumps
4
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
5
+
6
+ from .constant import TOO_BIG_SEQUENCE
7
+ from .utils import iana_name, is_multi_byte_encoding, unicode_range
8
+
9
+
10
+ class CharsetMatch:
11
    def __init__(
        self,
        payload: bytes,
        guessed_encoding: str,
        mean_mess_ratio: float,
        has_sig_or_bom: bool,
        languages: "CoherenceMatches",
        decoded_payload: Optional[str] = None,
    ):
        """
        One candidate decoding of ``payload``.

        :param payload: The original, untouched byte sequence.
        :param guessed_encoding: Codec name believed to decode the payload.
        :param mean_mess_ratio: Mean "mess" (chaos) ratio; lower is better.
        :param has_sig_or_bom: Whether a BOM/signature was seen for this codec.
        :param languages: Ranked (language, coherence ratio) matches.
        :param decoded_payload: The payload already decoded to str, if known.
        """
        self._payload: bytes = payload

        self._encoding: str = guessed_encoding
        self._mean_mess_ratio: float = mean_mess_ratio
        self._languages: CoherenceMatches = languages
        self._has_sig_or_bom: bool = has_sig_or_bom
        # Cache for the `alphabets` property; computed lazily.
        self._unicode_ranges: Optional[List[str]] = None

        # Submatches: other CharsetMatch instances yielding the same str output.
        self._leaves: List[CharsetMatch] = []
        self._mean_coherence_ratio: float = 0.0

        # NOTE(review): output fields look like a re-encoding cache; populated
        # outside this view — confirm before relying on them.
        self._output_payload: Optional[bytes] = None
        self._output_encoding: Optional[str] = None

        # Lazily-populated decoded form of the payload (see __str__).
        self._string: Optional[str] = decoded_payload
35
+
36
+ def __eq__(self, other: object) -> bool:
37
+ if not isinstance(other, CharsetMatch):
38
+ raise TypeError(
39
+ "__eq__ cannot be invoked on {} and {}.".format(
40
+ str(other.__class__), str(self.__class__)
41
+ )
42
+ )
43
+ return self.encoding == other.encoding and self.fingerprint == other.fingerprint
44
+
45
    def __lt__(self, other: object) -> bool:
        """
        Implemented to make sorted available upon CharsetMatches items.

        Ordering: primarily by chaos (lower wins); when chaos is within 1%,
        coherence decides (higher wins); when coherence is also close,
        prefer the match that decoded the most multi-byte sequences.
        """
        if not isinstance(other, CharsetMatch):
            raise ValueError

        chaos_difference: float = abs(self.chaos - other.chaos)
        coherence_difference: float = abs(self.coherence - other.coherence)

        # Below 1% difference --> Use Coherence
        if chaos_difference < 0.01 and coherence_difference > 0.02:
            return self.coherence > other.coherence
        elif chaos_difference < 0.01 and coherence_difference <= 0.02:
            # When having a difficult decision, use the result that decoded as many multi-byte as possible.
            # For very large payloads fall back to chaos to preserve RAM
            # (multi_byte_usage forces a full decode via str(self)).
            if len(self._payload) >= TOO_BIG_SEQUENCE:
                return self.chaos < other.chaos
            return self.multi_byte_usage > other.multi_byte_usage

        return self.chaos < other.chaos
66
+
67
    @property
    def multi_byte_usage(self) -> float:
        # Fraction of the raw bytes consumed by multi-byte code points:
        # 0.0 for pure single-byte content, approaching 1.0 otherwise.
        return 1.0 - (len(str(self)) / len(self.raw))

    def __str__(self) -> str:
        # Lazy Str Loading: decode only on first access, then cache the result.
        if self._string is None:
            self._string = str(self._payload, self._encoding, "strict")
        return self._string

    def __repr__(self) -> str:
        return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
79
+
80
    def add_submatch(self, other: "CharsetMatch") -> None:
        """
        Register ``other`` as a submatch: another codec that decodes the
        payload to the very same str result.

        :raises ValueError: when ``other`` is not a CharsetMatch or equals self.
        """
        if not isinstance(other, CharsetMatch) or other == self:
            raise ValueError(
                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
                    other.__class__
                )
            )

        other._string = None  # Unload RAM usage; dirty trick.
        self._leaves.append(other)
90
+
91
+ @property
92
+ def encoding(self) -> str:
93
+ return self._encoding
94
+
95
+ @property
96
+ def encoding_aliases(self) -> List[str]:
97
+ """
98
+ Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
99
+ """
100
+ also_known_as: List[str] = []
101
+ for u, p in aliases.items():
102
+ if self.encoding == u:
103
+ also_known_as.append(p)
104
+ elif self.encoding == p:
105
+ also_known_as.append(u)
106
+ return also_known_as
107
+
108
    @property
    def bom(self) -> bool:
        # Whether a BOM or encoding signature was detected for this match.
        return self._has_sig_or_bom

    @property
    def byte_order_mark(self) -> bool:
        # Explicit, spelled-out alias of `bom`.
        return self._has_sig_or_bom
115
+
116
    @property
    def languages(self) -> List[str]:
        """
        Return the complete list of possible languages found in decoded sequence.
        Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
        """
        return [e[0] for e in self._languages]

    @property
    def language(self) -> str:
        """
        Most probable language found in decoded sequence. If none were detected or inferred, the property will return
        "Unknown".
        """
        if not self._languages:
            # Trying to infer the language based on the given encoding
            # Its either English or we should not pronounce ourselves in certain cases.
            if "ascii" in self.could_be_from_charset:
                return "English"

            # doing it there to avoid circular import
            from charset_normalizer.cd import encoding_languages, mb_encoding_languages

            # Multi-byte codecs get their own language lookup.
            languages = (
                mb_encoding_languages(self.encoding)
                if is_multi_byte_encoding(self.encoding)
                else encoding_languages(self.encoding)
            )

            if len(languages) == 0 or "Latin Based" in languages:
                return "Unknown"

            return languages[0]

        # Coherence matches are ranked; the first entry is the most probable.
        return self._languages[0][0]
151
+
152
    @property
    def chaos(self) -> float:
        """Mean "mess" ratio of the decoded content; lower is better."""
        return self._mean_mess_ratio
155
+
156
    @property
    def coherence(self) -> float:
        """Coherence ratio of the most probable language; 0.0 when no language was detected."""
        if not self._languages:
            return 0.0
        return self._languages[0][1]
161
+
162
    @property
    def percent_chaos(self) -> float:
        """`chaos` expressed as a percentage, rounded to 3 decimals."""
        return round(self.chaos * 100, ndigits=3)
165
+
166
    @property
    def percent_coherence(self) -> float:
        """`coherence` expressed as a percentage, rounded to 3 decimals."""
        return round(self.coherence * 100, ndigits=3)
169
+
170
    @property
    def raw(self) -> bytes:
        """
        Original untouched bytes.
        """
        return self._payload
176
+
177
    @property
    def submatch(self) -> List["CharsetMatch"]:
        """Equivalent matches registered via add_submatch()."""
        return self._leaves
180
+
181
    @property
    def has_submatch(self) -> bool:
        """True when at least one equivalent match was recorded."""
        return len(self._leaves) > 0
184
+
185
+ @property
186
+ def alphabets(self) -> List[str]:
187
+ if self._unicode_ranges is not None:
188
+ return self._unicode_ranges
189
+ # list detected ranges
190
+ detected_ranges: List[Optional[str]] = [
191
+ unicode_range(char) for char in str(self)
192
+ ]
193
+ # filter and sort
194
+ self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
195
+ return self._unicode_ranges
196
+
197
    @property
    def could_be_from_charset(self) -> List[str]:
        """
        The complete list of encodings that output the exact SAME str result and therefore could be the originating
        encoding.
        This list does include the encoding available in property 'encoding'.
        """
        return [self._encoding] + [m.encoding for m in self._leaves]
205
+
206
+ def output(self, encoding: str = "utf_8") -> bytes:
207
+ """
208
+ Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
209
+ Any errors will be simply ignored by the encoder NOT replaced.
210
+ """
211
+ if self._output_encoding is None or self._output_encoding != encoding:
212
+ self._output_encoding = encoding
213
+ self._output_payload = str(self).encode(encoding, "replace")
214
+
215
+ return self._output_payload # type: ignore
216
+
217
    @property
    def fingerprint(self) -> str:
        """
        Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
        """
        return sha256(self.output()).hexdigest()
223
+
224
+
225
class CharsetMatches:
    """
    Container holding every CharsetMatch, ordered from most probable to least.
    Acts like a list (iterable) but does not implement every related method.
    """

    def __init__(self, results: Optional[List[CharsetMatch]] = None):
        self._results: List[CharsetMatch] = sorted(results) if results else []

    def __iter__(self) -> Iterator[CharsetMatch]:
        yield from self._results

    def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
        """
        Retrieve a single item either by its position or encoding name (alias may be used here).
        Raise KeyError upon invalid index or encoding not present in results.
        """
        if isinstance(item, int):
            return self._results[item]
        if isinstance(item, str):
            wanted = iana_name(item, False)
            for candidate in self._results:
                if wanted in candidate.could_be_from_charset:
                    return candidate
        raise KeyError

    def __len__(self) -> int:
        return len(self._results)

    def __bool__(self) -> bool:
        return bool(self._results)

    def append(self, item: CharsetMatch) -> None:
        """
        Insert a single match, keeping the container sorted.
        An equivalent match (same fingerprint and chaos) is folded in as a submatch.
        """
        if not isinstance(item, CharsetMatch):
            raise ValueError(
                "Cannot append instance '{}' to CharsetMatches".format(
                    str(item.__class__)
                )
            )
        # Submatch factoring is disabled for very large payloads to conserve RAM.
        if len(item.raw) <= TOO_BIG_SEQUENCE:
            for existing in self._results:
                if (
                    existing.fingerprint == item.fingerprint
                    and existing.chaos == item.chaos
                ):
                    existing.add_submatch(item)
                    return
        self._results.append(item)
        self._results.sort()

    def best(self) -> Optional["CharsetMatch"]:
        """
        Simply return the first match. Strict equivalent of matches[0].
        """
        return self._results[0] if self._results else None

    def first(self) -> Optional["CharsetMatch"]:
        """
        Redundant method, calls best(). Kept for backward-compatibility reasons.
        """
        return self.best()
290
+
291
+
292
# A single coherence probe result: (language name, ratio of matching characters).
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
294
+
295
+
296
class CliDetectionResult:
    """Plain result record for the CLI; serializable to JSON via to_json()."""

    def __init__(
        self,
        path: str,
        encoding: Optional[str],
        encoding_aliases: List[str],
        alternative_encodings: List[str],
        language: str,
        alphabets: List[str],
        has_sig_or_bom: bool,
        chaos: float,
        coherence: float,
        unicode_path: Optional[str],
        is_preferred: bool,
    ):
        self.path: str = path
        self.encoding: Optional[str] = encoding
        self.encoding_aliases: List[str] = encoding_aliases
        self.alternative_encodings: List[str] = alternative_encodings
        self.language: str = language
        self.alphabets: List[str] = alphabets
        self.has_sig_or_bom: bool = has_sig_or_bom
        self.chaos: float = chaos
        self.coherence: float = coherence
        self.unicode_path: Optional[str] = unicode_path
        self.is_preferred: bool = is_preferred

    @property
    def __dict__(self) -> Dict[str, Any]:  # type: ignore
        # Explicit mapping (instead of instance __dict__) pins the key order
        # and the exact set of serialized fields.
        return dict(
            path=self.path,
            encoding=self.encoding,
            encoding_aliases=self.encoding_aliases,
            alternative_encodings=self.alternative_encodings,
            language=self.language,
            alphabets=self.alphabets,
            has_sig_or_bom=self.has_sig_or_bom,
            chaos=self.chaos,
            coherence=self.coherence,
            unicode_path=self.unicode_path,
            is_preferred=self.is_preferred,
        )

    def to_json(self) -> str:
        """Serialize this result as pretty-printed, ASCII-safe JSON."""
        return dumps(self.__dict__, ensure_ascii=True, indent=4)
venv/lib/python3.10/site-packages/charset_normalizer/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/charset_normalizer/utils.py ADDED
@@ -0,0 +1,421 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ import logging
3
+ import unicodedata
4
+ from codecs import IncrementalDecoder
5
+ from encodings.aliases import aliases
6
+ from functools import lru_cache
7
+ from re import findall
8
+ from typing import Generator, List, Optional, Set, Tuple, Union
9
+
10
+ from _multibytecodec import MultibyteIncrementalDecoder
11
+
12
+ from .constant import (
13
+ ENCODING_MARKS,
14
+ IANA_SUPPORTED_SIMILAR,
15
+ RE_POSSIBLE_ENCODING_INDICATION,
16
+ UNICODE_RANGES_COMBINED,
17
+ UNICODE_SECONDARY_RANGE_KEYWORD,
18
+ UTF8_MAXIMAL_ALLOCATION,
19
+ )
20
+
21
+
22
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
    """True when the character's Unicode name marks it as carrying a diacritic."""
    try:
        description: str = unicodedata.name(character)
    except ValueError:
        # Unnamed code point.
        return False
    return any(
        marker in description
        for marker in (
            "WITH GRAVE",
            "WITH ACUTE",
            "WITH CEDILLA",
            "WITH DIAERESIS",
            "WITH CIRCUMFLEX",
            "WITH TILDE",
            "WITH MACRON",
            "WITH RING ABOVE",
        )
    )
38
+
39
+
40
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
    """Return the base character of *character* when it canonically decomposes; otherwise the character itself."""
    decomposition: str = unicodedata.decomposition(character)
    if not decomposition:
        return character

    # The first code point of the decomposition is the base character.
    base_hex = decomposition.split(" ")[0]
    return chr(int(base_hex, 16))
49
+
50
+
51
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def unicode_range(character: str) -> Optional[str]:
    """
    Retrieve the Unicode range official name from a single character.
    """
    code_point: int = ord(character)

    return next(
        (
            range_name
            for range_name, code_points in UNICODE_RANGES_COMBINED.items()
            if code_point in code_points
        ),
        None,
    )
63
+
64
+
65
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
    """True when the character's Unicode name contains LATIN."""
    try:
        return "LATIN" in unicodedata.name(character)
    except ValueError:
        # Unnamed code point.
        return False
72
+
73
+
74
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
    """True for characters in a 'P*' Unicode category or a 'Punctuation' Unicode range."""
    if "P" in unicodedata.category(character):
        return True

    block: Optional[str] = unicode_range(character)
    return block is not None and "Punctuation" in block
87
+
88
+
89
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
    """True for symbol/number categories, or non-letter characters inside a 'Forms' range."""
    category: str = unicodedata.category(character)
    if "S" in category or "N" in category:
        return True

    block: Optional[str] = unicode_range(character)
    return block is not None and "Forms" in block and category != "Lo"
102
+
103
+
104
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_emoticon(character: str) -> bool:
    """True for characters in an 'Emoticons' or 'Pictographs' Unicode range."""
    block: Optional[str] = unicode_range(character)
    return block is not None and ("Emoticons" in block or "Pictographs" in block)
112
+
113
+
114
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_separator(character: str) -> bool:
    """True for whitespace, a few explicit delimiters, and separator-like Unicode categories."""
    if character.isspace() or character in {"|", "+", "<", ">"}:
        return True

    category: str = unicodedata.category(character)
    return "Z" in category or category in {"Po", "Pd", "Pc"}
122
+
123
+
124
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_case_variable(character: str) -> bool:
    # True when the character is cased (islower() and isupper() disagree for it).
    return character.islower() != character.isupper()
127
+
128
+
129
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk(character: str) -> bool:
    """True when the character's Unicode name contains CJK."""
    try:
        return "CJK" in unicodedata.name(character)
    except ValueError:
        # Unnamed code point.
        return False
137
+
138
+
139
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hiragana(character: str) -> bool:
    """True when the character's Unicode name contains HIRAGANA."""
    try:
        return "HIRAGANA" in unicodedata.name(character)
    except ValueError:
        return False
147
+
148
+
149
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_katakana(character: str) -> bool:
    """True when the character's Unicode name contains KATAKANA."""
    try:
        return "KATAKANA" in unicodedata.name(character)
    except ValueError:
        return False
157
+
158
+
159
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hangul(character: str) -> bool:
    """True when the character's Unicode name contains HANGUL."""
    try:
        return "HANGUL" in unicodedata.name(character)
    except ValueError:
        return False
167
+
168
+
169
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_thai(character: str) -> bool:
    """True when the character's Unicode name contains THAI."""
    try:
        return "THAI" in unicodedata.name(character)
    except ValueError:
        return False
177
+
178
+
179
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_arabic(character: str) -> bool:
    """True when the character's Unicode name contains ARABIC."""
    try:
        return "ARABIC" in unicodedata.name(character)
    except ValueError:
        return False
187
+
188
+
189
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_arabic_isolated_form(character: str) -> bool:
    """True for Arabic presentation characters whose Unicode name flags the ISOLATED FORM."""
    try:
        name = unicodedata.name(character)
    except ValueError:
        return False
    return "ARABIC" in name and "ISOLATED FORM" in name
197
+
198
+
199
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
def is_unicode_range_secondary(range_name: str) -> bool:
    # A range counts as "secondary" when its name contains any keyword from
    # UNICODE_SECONDARY_RANGE_KEYWORD.
    return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
202
+
203
+
204
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_unprintable(character: str) -> bool:
    """True for characters that are neither printable nor whitespace, with two deliberate exceptions."""
    return (
        character.isspace() is False  # includes \n \t \r \v
        and character.isprintable() is False
        and character != "\x1A"  # Why? Its the ASCII substitute character.
        and character != "\ufeff"  # bug discovered in Python,
        # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
    )
213
+
214
+
215
def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> Optional[str]:
    """
    Extract using ASCII-only decoder any specified encoding in the first n-bytes.
    """
    if not isinstance(sequence, bytes):
        raise TypeError

    # Only probe the head of the payload; decode lossily so non-ASCII bytes vanish.
    prefix = sequence[: min(len(sequence), search_zone)].decode(
        "ascii", errors="ignore"
    )

    for candidate in findall(RE_POSSIBLE_ENCODING_INDICATION, prefix):
        candidate = candidate.lower().replace("-", "_")

        for encoding_alias, encoding_iana in aliases.items():
            if candidate in (encoding_alias, encoding_iana):
                return encoding_iana

    return None
245
+
246
+
247
@lru_cache(maxsize=128)
def is_multi_byte_encoding(name: str) -> bool:
    """
    Verify whether a specific encoding is a multi-byte one, based on its IANA name.
    """
    multi_byte_utf = {
        "utf_8",
        "utf_8_sig",
        "utf_16",
        "utf_16_be",
        "utf_16_le",
        "utf_32",
        "utf_32_le",
        "utf_32_be",
        "utf_7",
    }
    if name in multi_byte_utf:
        return True

    # Anything whose codec uses the multibyte incremental decoder machinery
    # (e.g. CJK code pages) also qualifies.
    decoder = importlib.import_module("encodings.{}".format(name)).IncrementalDecoder
    return issubclass(decoder, MultibyteIncrementalDecoder)
266
+
267
+
268
def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
    """
    Identify and extract SIG/BOM in given sequence.
    Returns (encoding name, matched mark) or (None, b"") when nothing matches.
    """
    for iana_encoding, marks in ENCODING_MARKS.items():
        # A mark entry is either a single bytes value or a list of them.
        candidate_marks: List[bytes] = [marks] if isinstance(marks, bytes) else marks

        for mark in candidate_marks:
            if sequence.startswith(mark):
                return iana_encoding, mark

    return None, b""
284
+
285
+
286
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
    """Whether the BOM/signature should be removed before decoding with this codec."""
    return iana_encoding not in ("utf_16", "utf_32")
288
+
289
+
290
def iana_name(cp_name: str, strict: bool = True) -> str:
    """
    Normalize a code-page name to the canonical name listed in encodings.aliases.

    When no match is found, raises ValueError if *strict*, otherwise returns the
    normalized input unchanged.
    """
    cp_name = cp_name.lower().replace("-", "_")

    for encoding_alias, encoding_iana in aliases.items():
        if cp_name in (encoding_alias, encoding_iana):
            return encoding_iana

    if strict:
        raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))

    return cp_name
304
+
305
+
306
def range_scan(decoded_sequence: str) -> List[str]:
    """Unordered list of unique Unicode range names present in *decoded_sequence*."""
    ranges: Set[str] = {
        block
        for block in (unicode_range(character) for character in decoded_sequence)
        if block is not None
    }
    return list(ranges)
318
+
319
+
320
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
    """
    Ratio of single-byte values that both code pages decode to the same character.
    Multi-byte encodings always score 0.0.
    """
    if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
        return 0.0

    # Instantiate each codec's incremental decoder directly from its module.
    decoder_a = importlib.import_module(
        "encodings.{}".format(iana_name_a)
    ).IncrementalDecoder
    decoder_b = importlib.import_module(
        "encodings.{}".format(iana_name_b)
    ).IncrementalDecoder

    id_a: IncrementalDecoder = decoder_a(errors="ignore")
    id_b: IncrementalDecoder = decoder_b(errors="ignore")

    character_match_count: int = 0

    # NOTE(review): iterates bytes 0..254 (range(255)) yet divides by 254 —
    # looks off-by-one; presumably intentional upstream, confirm before changing.
    for i in range(255):
        to_be_decoded: bytes = bytes([i])
        if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
            character_match_count += 1

    return character_match_count / 254
342
+
343
+
344
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
    """
    Determine if two code pages are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
    the function cp_similarity.
    """
    # NOTE(review): lookup is one-directional (a -> b); presumably the generated
    # table is symmetric — verify against its generator.
    return (
        iana_name_a in IANA_SUPPORTED_SIMILAR
        and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
    )
353
+
354
+
355
def set_logging_handler(
    name: str = "charset_normalizer",
    level: int = logging.INFO,
    format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
) -> None:
    """
    Attach a StreamHandler using *format_string* to the named logger and set its level.

    NOTE: each call attaches one more handler; calling twice duplicates output.
    """
    target_logger = logging.getLogger(name)
    target_logger.setLevel(level)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(format_string))
    target_logger.addHandler(stream_handler)
366
+
367
+
368
def cut_sequence_chunks(
    sequences: bytes,
    encoding_iana: str,
    offsets: range,
    chunk_size: int,
    bom_or_sig_available: bool,
    strip_sig_or_bom: bool,
    sig_payload: bytes,
    is_multi_byte_decoder: bool,
    decoded_payload: Optional[str] = None,
) -> Generator[str, None, None]:
    """
    Yield decoded chunks of *sequences*, one per offset in *offsets*.

    With a pre-decoded payload and a single-byte codec, chunks are sliced
    straight from that string. Otherwise each chunk is decoded independently,
    re-prefixing the BOM/signature when it was detected but not stripped.
    """
    if decoded_payload and is_multi_byte_decoder is False:
        # Fast path: slice the already-decoded string directly.
        for i in offsets:
            chunk = decoded_payload[i : i + chunk_size]
            if not chunk:
                break
            yield chunk
    else:
        for i in offsets:
            chunk_end = i + chunk_size
            # Skip offsets too close to (or past) the end of the payload.
            if chunk_end > len(sequences) + 8:
                continue

            cut_sequence = sequences[i : i + chunk_size]

            if bom_or_sig_available and strip_sig_or_bom is False:
                # Re-attach the signature so the decoder sees a valid stream start.
                cut_sequence = sig_payload + cut_sequence

            chunk = cut_sequence.decode(
                encoding_iana,
                errors="ignore" if is_multi_byte_decoder else "strict",
            )

            # multi-byte bad cutting detector and adjustment
            # not the cleanest way to perform that fix but clever enough for now.
            if is_multi_byte_decoder and i > 0:
                chunk_partial_size_chk: int = min(chunk_size, 16)

                if (
                    decoded_payload
                    and chunk[:chunk_partial_size_chk] not in decoded_payload
                ):
                    # The cut likely fell inside a multi-byte sequence: walk the
                    # start offset back up to 3 bytes until the decoded prefix
                    # lines up with the known-good decoded payload.
                    for j in range(i, i - 4, -1):
                        cut_sequence = sequences[j:chunk_end]

                        if bom_or_sig_available and strip_sig_or_bom is False:
                            cut_sequence = sig_payload + cut_sequence

                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")

                        if chunk[:chunk_partial_size_chk] in decoded_payload:
                            break

            yield chunk
venv/lib/python3.10/site-packages/charset_normalizer/version.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
"""
Expose version
"""

# Bump per release; VERSION is the split form, e.g. ["3", "3", "2"].
__version__ = "3.3.2"
VERSION = __version__.split(".")
venv/lib/python3.10/site-packages/functorch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (687 Bytes). View file
 
venv/lib/python3.10/site-packages/functorch/_src/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/functorch/_src/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
venv/lib/python3.10/site-packages/functorch/_src/aot_autograd/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # This file has moved to under torch/_functorch. It is not public API.
2
+ # If you are not a PyTorch developer and you are relying on the following
3
+ # imports, please file an issue.
4
+ from torch._functorch.aot_autograd import (
5
+ aot_autograd_decompositions,
6
+ KNOWN_TYPES,
7
+ PytreeThunk,
8
+ )
venv/lib/python3.10/site-packages/functorch/_src/aot_autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (320 Bytes). View file
 
venv/lib/python3.10/site-packages/functorch/_src/eager_transforms/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # This file has moved to under torch/_functorch. It is not public API.
2
+ # If you are not a PyTorch developer and you are relying on the following
3
+ # imports, please file an issue.
4
+ from torch._functorch.eager_transforms import (
5
+ _assert_wrapped_functional,
6
+ _unwrap_functional_tensor,
7
+ )
venv/lib/python3.10/site-packages/functorch/_src/eager_transforms/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (319 Bytes). View file
 
venv/lib/python3.10/site-packages/functorch/_src/make_functional/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # This file has moved to under torch/_functorch. It is not public API.
2
+ # If you are not a PyTorch developer and you are relying on the following
3
+ # imports, please file an issue.
4
+ from torch._functorch.make_functional import _swap_state
venv/lib/python3.10/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (266 Bytes). View file
 
venv/lib/python3.10/site-packages/functorch/_src/vmap/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file has moved to under torch/_functorch. It is not public API.
2
+ # If you are not a PyTorch developer and you are relying on the following
3
+ # imports, please file an issue.
4
+ from torch._functorch.vmap import (
5
+ _add_batch_dim,
6
+ _broadcast_to_and_flatten,
7
+ _create_batched_inputs,
8
+ _get_name,
9
+ _process_batched_inputs,
10
+ _remove_batch_dim,
11
+ _unwrap_batched,
12
+ _validate_and_get_batch_size,
13
+ Tensor,
14
+ tree_flatten,
15
+ tree_unflatten,
16
+ )
venv/lib/python3.10/site-packages/functorch/_src/vmap/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (528 Bytes). View file
 
venv/lib/python3.10/site-packages/functorch/compile/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
venv/lib/python3.10/site-packages/functorch/dim/dim.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ import dis
7
+ import inspect
8
+
9
+ from dataclasses import dataclass
10
+ from typing import Union
11
+
12
+ from . import DimList
13
+
14
+ _vmap_levels = []
15
+
16
+
17
@dataclass
class LevelInfo:
    # vmap nesting level tracked by this entry.
    level: int
    # Flipped to False when the owning Dim is collected, so the level can be
    # popped lazily from _vmap_levels.
    alive: bool = True
21
+
22
+
23
class Dim:
    """
    A first-class dimension: created unbound, then bound to a concrete size at
    most once. Binding opens a vmap nesting level; collection unwinds it lazily.
    """

    def __init__(self, name: str, size: Union[None, int] = None):
        self.name = name
        self._size = None        # bound size, or None while unbound
        self._vmap_level = None  # level handle from _vmap_increment_nesting
        if size is not None:
            self.size = size  # route through the setter to bind immediately

    def __del__(self):
        # NOTE(review): _vmap_active_levels, current_level and
        # _vmap_decrement_nesting are not defined in this module (hence the
        # noqa: F821 markers) — presumably injected at runtime; confirm.
        if self._vmap_level is not None:
            _vmap_active_levels[self._vmap_stack].alive = False  # noqa: F821
            # Pop every trailing dead level that is also the current one.
            while (
                not _vmap_levels[-1].alive
                and current_level() == _vmap_levels[-1].level  # noqa: F821
            ):
                _vmap_decrement_nesting()  # noqa: F821
                _vmap_levels.pop()

    @property
    def size(self):
        """Bound size of this dimension; asserts the dimension has been bound."""
        assert self.is_bound
        return self._size

    @size.setter
    def size(self, size: int):
        from . import DimensionBindError

        if self._size is None:
            # First bind: record the size and open a matching vmap level.
            self._size = size
            self._vmap_level = _vmap_increment_nesting(size, "same")  # noqa: F821
            self._vmap_stack = len(_vmap_levels)
            _vmap_levels.append(LevelInfo(self._vmap_level))

        elif self._size != size:
            # Rebinding to a different size is an error; rebinding to the same
            # size is a silent no-op.
            raise DimensionBindError(
                f"Dim '{self}' previously bound to a dimension of size {self._size} cannot bind to a dimension of size {size}"
            )

    @property
    def is_bound(self):
        # True once a size has been assigned.
        return self._size is not None

    def __repr__(self):
        return self.name
67
+
68
+
69
def extract_name(inst):
    """Return the variable name targeted by a STORE_FAST / STORE_NAME instruction."""
    assert inst.opname in ("STORE_FAST", "STORE_NAME")
    return inst.argval
72
+
73
+
74
+ _cache = {}
75
+
76
+
77
def dims(lists=0):
    """
    Create Dim objects named after the assignment targets at the call site,
    e.g. ``b, c = dims()``. The last *lists* targets become DimList objects.

    NOTE(review): relies on bytecode introspection of the caller; the
    ``lasti // 2 + 1`` arithmetic assumes 2-byte wordcode instructions and is
    fragile across CPython versions — confirm on upgrade.
    """
    frame = inspect.currentframe()
    assert frame is not None
    calling_frame = frame.f_back
    assert calling_frame is not None
    code, lasti = calling_frame.f_code, calling_frame.f_lasti
    key = (code, lasti)
    if key not in _cache:
        # Instruction right after the call: a single store, or an
        # UNPACK_SEQUENCE followed by one store per target.
        first = lasti // 2 + 1
        instructions = list(dis.get_instructions(calling_frame.f_code))
        unpack = instructions[first]

        if unpack.opname == "STORE_FAST" or unpack.opname == "STORE_NAME":
            # just a single dim, not a list
            name = unpack.argval
            ctor = Dim if lists == 0 else DimList
            _cache[key] = lambda: ctor(name=name)
        else:
            assert unpack.opname == "UNPACK_SEQUENCE"
            ndims = unpack.argval
            names = tuple(
                extract_name(instructions[first + 1 + i]) for i in range(ndims)
            )
            first_list = len(names) - lists
            _cache[key] = lambda: tuple(
                Dim(n) if i < first_list else DimList(name=n)
                for i, n in enumerate(names)
            )
    return _cache[key]()
106
+
107
+
108
+ def _dim_set(positional, arg):
109
+ def convert(a):
110
+ if isinstance(a, Dim):
111
+ return a
112
+ else:
113
+ assert isinstance(a, int)
114
+ return positional[a]
115
+
116
+ if arg is None:
117
+ return positional
118
+ elif not isinstance(arg, (Dim, int)):
119
+ return tuple(convert(a) for a in arg)
120
+ else:
121
+ return (convert(arg),)
venv/lib/python3.10/site-packages/functorch/dim/reference.py ADDED
@@ -0,0 +1,645 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # reference python implementations for C ops
8
+ import torch
9
+
10
+ from functorch._C import dim as _C
11
+ from . import op_properties
12
+ from .batch_tensor import _enable_layers
13
+ from .tree_map import tree_flatten, tree_map
14
+
15
+ DimList = _C.DimList
16
+ import operator
17
+ from functools import reduce
18
+
19
+
20
# Set of ops treated as pointwise during dispatch; populated from op_properties.
# (Original comment mentioned a dict workaround for C++ bindings; the code uses a set.)
pointwise = set(op_properties.pointwise)
22
+
23
+
24
def prod(x):
    """Product of the elements of an iterable; 1 for an empty iterable."""
    return reduce(operator.mul, x, 1)
26
+
27
+
28
def _wrap_dim(d, N, keepdim):
    """Convert a non-negative positional dim to negative indexing (given *N* dims); Dims pass through."""
    from . import Dim

    if isinstance(d, Dim):
        assert not keepdim, "cannot preserve first-class dimensions with keepdim=True"
        return d
    return d - N if d >= 0 else d
38
+
39
+
40
def _dims(d, N, keepdim, single_dim):
    # Normalize *d* (a single Dim/int or an iterable of them) into an ltuple of
    # wrapped dims; *single_dim* forbids passing a collection.
    from . import Dim

    if isinstance(d, (Dim, int)):
        return ltuple((_wrap_dim(d, N, keepdim),))
    assert not single_dim, f"expected a single dimension or int but found: {d}"
    return ltuple(_wrap_dim(x, N, keepdim) for x in d)
47
+
48
+
49
def _bind_dims_to_size(lhs_size, rhs, lhs_debug):
    """
    Bind at most one unbound Dim in *rhs* so the product of rhs sizes equals
    *lhs_size*; when all are bound, just validate the product.

    Raises DimensionMismatchError when the inferred size leaves a remainder,
    more than one dim is unbound, or the bound product differs from lhs_size.
    """
    from . import DimensionMismatchError

    not_bound = tuple((i, r) for i, r in enumerate(rhs) if not r.is_bound)
    if len(not_bound) == 1:
        # Exactly one unknown: infer it by dividing out the known sizes.
        idx, d = not_bound[0]
        rhs_so_far = prod(r.size for r in rhs if r.is_bound)
        if lhs_size % rhs_so_far != 0:
            rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
            raise DimensionMismatchError(
                f"inferred dimension does not evenly fit into larger dimension: {lhs_size} vs {rhs_s}"
            )
        new_size = lhs_size // rhs_so_far
        d.size = new_size
    elif len(not_bound) > 1:
        rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
        raise DimensionMismatchError(
            f"cannot infer the size of two dimensions at once: {rhs} with sizes {rhs_s}"
        )
    else:
        # Everything bound: sizes must agree exactly.
        rhs_size = prod(r.size for r in rhs)
        if lhs_size != rhs_size:
            raise DimensionMismatchError(
                f"Dimension sizes to do not match ({lhs_size} != {rhs_size}) when matching {lhs_debug} to {rhs}"
            )
74
+
75
+
76
def _tensor_levels(inp):
    # Normalize any tensor-like into (plain tensor, mutable level list, has_device flag).
    from . import _Tensor

    if isinstance(inp, _Tensor):
        return inp._tensor, llist(inp._levels), inp._has_device
    else:
        # Plain tensor: its dims are the trailing positional levels -ndim..-1.
        return inp, llist(range(-inp.ndim, 0)), True
83
+
84
+
85
+ def _match_levels(v, from_levels, to_levels):
86
+ view = []
87
+ permute = []
88
+ requires_view = False
89
+ size = v.size()
90
+ for t in to_levels:
91
+ try:
92
+ idx = from_levels.index(t)
93
+ permute.append(idx)
94
+ view.append(size[idx])
95
+ except ValueError:
96
+ view.append(1)
97
+ requires_view = True
98
+ if permute != list(range(len(permute))):
99
+ v = v.permute(*permute)
100
+ if requires_view:
101
+ v = v.view(*view)
102
+ return v
103
+
104
+
105
# make a single dimension positional but do not permute it,
# used to do multi-tensor operators where the dim being acted on
# should not physically move if possible
def _positional_no_permute(self, dim, expand_dim=False):
    from . import Tensor

    ptensor, levels = self._tensor, llist(self._levels)
    try:
        idx = levels.index(dim)
    except ValueError:
        if not expand_dim:
            raise
        # Dim not present: materialize it as a new leading axis of dim.size.
        idx = 0
        ptensor = ptensor.expand(dim.size, *ptensor.size())
        levels.insert(0, 0)
    idx_batched = 0
    # Renumber the int-coded (batch) levels that precede idx to make room for
    # the newly-positional dimension, counting them as we go.
    for i in range(idx):
        if isinstance(levels[i], int):
            levels[i] -= 1
            idx_batched += 1
    levels[idx] = -idx_batched - 1
    return Tensor.from_positional(ptensor, levels, self._has_device), idx_batched
127
+
128
+
129
def seq(a, b):
    """Equality test that compares first-class Dim objects by identity.

    Two Dims are equal only if they are the same object; a Dim never equals
    a non-Dim; everything else falls back to ordinary ``==``.
    """
    from . import Dim

    a_is_dim = isinstance(a, Dim)
    if a_is_dim != isinstance(b, Dim):
        return False
    return (a is b) if a_is_dim else (a == b)
138
+
139
+
140
class isin:
    """Mixin giving containers Dim-identity-aware ``in`` and ``index``.

    Both operations compare elements with :func:`seq` instead of ``==``.
    """

    def __contains__(self, item):
        return any(seq(item, x) for x in self)

    def index(self, item):
        for pos, x in enumerate(self):
            if seq(item, x):
                return pos
        raise ValueError
152
+
153
+
154
# list whose membership/index use Dim-identity comparison (via isin)
class llist(isin, list):
    pass
156
+
157
+
158
# tuple whose membership/index use Dim-identity comparison (via isin)
class ltuple(isin, tuple):
    pass
160
+
161
+
162
# shared default for `kwargs` parameters; read-only by convention — never mutated
empty_dict = {}
163
+
164
+
165
@classmethod
def __torch_function__(self, orig, cls, args, kwargs=empty_dict):
    """Dispatch a torch operation over first-class-dim tensors.

    Pointwise ops take a fast path: every tensor argument is aligned to a
    single shared level layout and the op runs on the plain tensors.
    Everything else runs on the batched tensors under functorch layers
    enabled for all dims involved.

    NOTE(review): defined at module level and attached to a class elsewhere,
    hence the unusual `self` name for the first parameter of a classmethod.
    """
    from . import _Tensor, Tensor, TensorLike
    from .delayed_mul_tensor import DelayedMulTensor

    # 0-dim * 0-dim is deferred (DelayedMulTensor) so that a subsequent
    # reduction can be fused instead of materializing the product.
    if orig is torch.Tensor.__mul__:
        lhs, rhs = args
        if (
            isinstance(lhs, _Tensor)
            and isinstance(rhs, _Tensor)
            and lhs.ndim == 0
            and rhs.ndim == 0
        ):
            return DelayedMulTensor(lhs, rhs)
    # collect all first-class dims appearing anywhere in the arguments,
    # and remember one device-holding batch tensor for device propagation
    all_dims = llist()
    flat_args, unflatten = tree_flatten((args, kwargs))
    device_holding_tensor = None
    for f in flat_args:
        if isinstance(f, _Tensor):
            if f._has_device:
                device_holding_tensor = f._batchtensor
            for d in f.dims:
                if d not in all_dims:
                    all_dims.append(d)

    def unwrap(t):
        # strip first-class dims; move device-less tensors onto the device
        # of a device-holding argument so the op sees consistent devices
        if isinstance(t, _Tensor):
            r = t._batchtensor
            if device_holding_tensor is not None and not t._has_device:
                r = r.to(device=device_holding_tensor.device)
            return r
        return t

    if orig in pointwise:
        # fast path: align every tensor argument to one shared level layout
        result_levels = llist()
        arg_levels = llist()  # NOTE(review): appears unused — confirm before removing
        to_expand = []
        for i, f in enumerate(flat_args):
            if isinstance(f, TensorLike):
                ptensor, levels, _ = _tensor_levels(f)
                if (
                    isinstance(f, _Tensor)
                    and not f._has_device
                    and device_holding_tensor is not None
                ):
                    ptensor = ptensor.to(device=device_holding_tensor.device)
                flat_args[i] = ptensor
                for l in levels:
                    if l not in result_levels:
                        result_levels.append(l)
                to_expand.append((i, levels))

        for i, levels in to_expand:
            flat_args[i] = _match_levels(flat_args[i], levels, result_levels)
        args, kwargs = unflatten(flat_args)
        result = orig(*args, **kwargs)

        def wrap(t):
            # re-attach the shared level layout to every tensor result
            if isinstance(t, TensorLike):
                return Tensor.from_positional(
                    t, result_levels, device_holding_tensor is not None
                )
            return t

        return tree_map(wrap, result)
    else:
        # general path: run under vmap layers for all dims involved

        def wrap(t):
            if isinstance(t, TensorLike):
                return Tensor.from_batched(t, device_holding_tensor is not None)
            return t

        with _enable_layers(all_dims):
            print(f"batch_tensor for {orig}")
            args, kwargs = unflatten(unwrap(f) for f in flat_args)
            result = orig(*args, **kwargs)
            # print("END", orig)
            return tree_map(wrap, result)
243
+
244
+
245
def positional(self, *dims):
    """Convert the given first-class dims of *self* into leading positional dims.

    *dims* may contain Dim objects, bound DimLists, plain ints (existing
    positional dims), or sequences of Dims — a sequence collapses back into a
    single positional dim via a final reshape. Remaining dims keep their
    relative order; positional levels are renumbered from the back.
    """
    from . import Dim, DimensionBindError, Tensor

    ptensor, levels = self._tensor, llist(self._levels)
    flat_dims = llist()
    view = []
    needs_view = False
    ndim = self.ndim
    for d in dims:
        if isinstance(d, DimList):
            flat_dims.extend(d)
            view.extend(e.size for e in d)
        elif isinstance(d, Dim):
            flat_dims.append(d)
            view.append(d.size)
        elif isinstance(d, int):
            d = _wrap_dim(d, ndim, False)
            flat_dims.append(d)
            view.append(ptensor.size(d))
        else:
            # sequence of Dims: they flatten into one positional dim
            flat_dims.extend(d)
            view.append(prod(e.size for e in d))
            needs_view = True

    permute = list(range(len(levels)))
    nflat = len(flat_dims)  # NOTE(review): appears unused — confirm before removing
    for i, d in enumerate(flat_dims):
        try:
            idx = levels.index(d)
        except ValueError as e:
            raise DimensionBindError(
                f"tensor of dimensions {self.dims} does not contain dim {d}"
            ) from e
        # move the requested dim to position i (argument order)
        p = permute[idx]
        del levels[idx]
        del permute[idx]
        levels.insert(i, 0)
        permute.insert(i, p)
    ptensor = ptensor.permute(*permute)
    # renumber positional levels from the back: ..., -2, -1
    seen = 0
    for i in range(len(levels) - 1, -1, -1):
        if isinstance(levels[i], int):
            seen += 1
            levels[i] = -seen
    result = Tensor.from_positional(ptensor, levels, self._has_device)
    if needs_view:
        result = result.reshape(*view, *result.size()[len(flat_dims) :])
    return result
293
+
294
+
295
def _contains_dim(input):
    """Return True if any element of *input* is a first-class Dim.

    The original fell off the end and implicitly returned None for the
    no-Dim case; an explicit bool is returned now (still falsy, so all
    truthiness-based callers behave identically).
    """
    from . import Dim

    return any(isinstance(i, Dim) for i in input)
301
+
302
+
303
def expand(self, *sizes):
    """Tensor.expand that also accepts first-class Dim objects as sizes.

    With only integer sizes, defers to torch.Tensor.expand through
    __torch_function__. With Dim arguments, the Dims are prepended as new
    broadcast dimensions and then bound by indexing with them.
    """
    if not _contains_dim(sizes):
        return self.__torch_function__(torch.Tensor.expand, None, (self, *sizes))
    dims = sizes
    # -1 keeps each existing positional size; re-entry below hits the int path
    sizes = [d.size for d in dims] + [-1] * self.ndim
    self = self.expand(*sizes)
    return self[dims]
310
+
311
+
312
# sentinel distinguishing "argument omitted" from an explicit None
_not_present = object()
313
+
314
+
315
+ def _getarg(name, offset, args, kwargs, default):
316
+ if len(args) > offset:
317
+ return args[offset]
318
+ return kwargs.get(name, default)
319
+
320
+
321
+ def _patcharg(name, offset, args, kwargs, value):
322
+ if len(args) > offset:
323
+ args[offset] = value
324
+ else:
325
+ kwargs[name] = value
326
+
327
+
328
def _wrap(
    orig, dim_offset=0, keepdim_offset=1, dim_name="dim", single_dim=False, reduce=True
):
    """Wrap torch.Tensor method *orig* so its dim argument accepts Dim objects.

    dim_offset / keepdim_offset: positional offsets of the dim / keepdim
    arguments in *orig*'s signature. single_dim: *orig* takes one dim only.
    reduce: *orig* is a reduction that drops dims unless keepdim is set.
    Returns the wrapping function to be installed on _Tensor.
    """
    from . import Dim, Tensor, TensorLike

    def fn(self, *args, **kwargs):
        dim = _getarg(dim_name, dim_offset, args, kwargs, _not_present)
        # no usable Dim argument: run on the batched tensor under layers
        if dim is _not_present or (single_dim and not isinstance(dim, Dim)):
            with _enable_layers(self.dims):
                print(f"dim fallback batch_tensor for {orig}")
                return Tensor.from_batched(
                    orig(self._batchtensor, *args, **kwargs), self._has_device
                )
        keepdim = (
            _getarg("keepdim", keepdim_offset, args, kwargs, False) if reduce else False
        )
        t, levels = self._tensor, llist(self._levels)
        dims = _dims(dim, self._batchtensor.ndim, keepdim, single_dim)
        # translate Dim objects into positional indices of the plain tensor
        dim_indices = tuple(levels.index(d) for d in dims)
        # reductions without keepdim drop the reduced levels from the result
        if reduce and not keepdim:
            new_levels = [l for i, l in enumerate(levels) if i not in dim_indices]
        else:
            new_levels = levels

        if len(dim_indices) == 1:
            dim_indices = dim_indices[
                0
            ]  # so that dims that really only take a single argument work...
        args = list(args)
        _patcharg(dim_name, dim_offset, args, kwargs, dim_indices)

        def wrap(t):
            # re-attach the surviving levels to every tensor result
            if isinstance(t, TensorLike):
                return Tensor.from_positional(t, new_levels, self._has_device)
            return t

        with _enable_layers(new_levels):
            print(f"dim used batch_tensor for {orig}")
            r = orig(t, *args, **kwargs)
            return tree_map(wrap, r)

    return fn
370
+
371
+
372
def _def(name, *args, **kwargs):
    """Install a dim-aware wrapper of torch.Tensor.<name> onto _Tensor.

    Extra arguments are forwarded to _wrap (dim/keepdim offsets, etc.).
    """
    from . import _Tensor

    orig = getattr(torch.Tensor, name)
    setattr(_Tensor, name, _wrap(orig, *args, **kwargs))
377
+
378
+
379
# the ":" of standard indexing, shared as a singleton
no_slice = slice(None)

# keep the un-patched __getitem__ so the dim-aware version can delegate to it
_orig_getitem = torch.Tensor.__getitem__
382
+
383
+
384
class dim_tracker:
    """Track first-class dims and how many times each has been seen.

    t__getitem__ uses the count to tell single-use dims (which become plain
    slices) from repeated dims (which must index as tensors, giving diagonal
    semantics).
    """

    def __init__(self):
        self.dims = llist()
        self.count = []

    def record(self, d):
        """Note one sighting of dim *d*."""
        if d not in self.dims:
            self.dims.append(d)
            self.count.append(1)
        else:
            # Fix: repeated sightings were previously never counted, so every
            # dim looked single-use and repeated-dim (diagonal) indexing
            # took the wrong fast path.
            self.count[self.dims.index(d)] += 1

    def __getitem__(self, d):
        # number of times d was recorded
        return self.count[self.dims.index(d)]
396
+
397
+
398
def t__getitem__(self, input):
    """Dim-aware __getitem__: standard indexing plus first-class Dim binding.

    Steps:
    * bail to the original __getitem__ for a single non-Dim, non-sequence index
    * locate ... or an unbound dim list and bind it to the remaining dims
      (None entries do not count toward the total)
    * bind simple dims and dim-packs to their sizes, counting uses of each dim,
      producing a re-view when needed
    * replace each single-use dim index with `:` (it becomes a named result dim)
    * call the original __getitem__ if any real indexing remains
    * rebuild a Tensor with the combined named + positional levels
    """
    from . import _Tensor, Dim, DimensionBindError, DimList, Tensor, TensorLike

    # this handles bool indexing, as well as some other simple cases
    is_simple = (
        not isinstance(input, Dim)
        and not isinstance(input, (tuple, list))
        and
        # WAR for functorch bug where zero-dim tensors in getitem are not handled correctly
        not (isinstance(input, TensorLike) and input.ndim == 0)
    )

    if is_simple:
        if isinstance(self, _Tensor):
            return _Tensor.__torch_function__(_orig_getitem, None, (self, input))
        else:
            return _orig_getitem(self, input)

    # can further optimize this case
    if not isinstance(input, tuple):
        input = [input]
    else:
        input = list(input)

    # find the single ... / unbound DimList and count explicitly indexed dims
    dims_indexed = 0
    expanding_object = None
    dimlists = []
    for i, s in enumerate(input):
        if s is ... or isinstance(s, DimList) and not s.is_bound:
            if expanding_object is not None:
                msg = (
                    "at most one ... or unbound dimension list can exist in indexing list but"
                    f" found 2 at offsets {i} and {expanding_object}"
                )
                raise DimensionBindError(msg)
            expanding_object = i

        if isinstance(s, DimList):
            dims_indexed += len(s) if s.is_bound else 0
            dimlists.append(i)
        elif s is not None and s is not ...:
            dims_indexed += 1

    ndim = self.ndim
    if dims_indexed > ndim:
        raise IndexError(
            f"at least {dims_indexed} indices were supplied but the tensor only has {ndim} dimensions."
        )
    if expanding_object is not None:
        expanding_ndims = ndim - dims_indexed
        obj = input[expanding_object]
        if obj is ...:
            input[expanding_object : expanding_object + 1] = [
                no_slice
            ] * expanding_ndims
        else:
            obj.bind_len(expanding_ndims)
    # flatten the dimlists into the indexing
    for i in reversed(dimlists):
        input[i : i + 1] = input[i]

    dims_indexed = 0
    requires_view = False
    size = self.size()
    view_sizes = []
    dims_seen = dim_tracker()

    def add_dims(t):
        # record every first-class dim carried by an indexing tensor
        if not isinstance(t, _Tensor):
            return
        for d in t.dims:
            dims_seen.record(d)

    add_dims(self)
    dim_packs = []
    for i, idx in enumerate(input):
        if idx is None:
            # None inserts a new size-1 dim
            input[i] = no_slice
            view_sizes.append(1)
            requires_view = True
        else:
            sz = size[dims_indexed]
            if isinstance(idx, Dim):
                idx.size = sz
                dims_seen.record(idx)
                view_sizes.append(sz)
            elif isinstance(idx, (tuple, list)) and idx and isinstance(idx[0], Dim):
                for d in idx:
                    # Fix: was dims_seen.record(idx) — recorded the pack list
                    # itself instead of each dim, leaving the loop variable
                    # unused and the per-dim use counts wrong.
                    dims_seen.record(d)
                _bind_dims_to_size(sz, idx, f"offset {i}")
                view_sizes.extend(d.size for d in idx)
                requires_view = True
                dim_packs.append(i)
            else:
                add_dims(idx)
                view_sizes.append(sz)
            dims_indexed += 1
    if requires_view:
        self = self.view(*view_sizes)
    for i in reversed(dim_packs):
        input[i : i + 1] = input[i]

    # currently:
    # input is flat, containing either Dim, or Tensor, or something valid for standard indexing
    # self may have first-class dims as well.

    # to index:
    # drop the first-class dims from self, they just become direct indices of their positions

    # figure out the dimensions of the indexing tensors: union of all the dims in the tensors
    # in the index. these dimensions will appear and need to be bound at the first place a
    # tensor occurs

    if isinstance(self, _Tensor):
        ptensor_self, levels = self._tensor, list(self._levels)
        # indices to ptensor rather than self which has first-class dimensions
        input_it = iter(input)
        flat_inputs = [next(input_it) if isinstance(l, int) else l for l in levels]
        has_device = self._has_device
        to_pad = 0
    else:
        ptensor_self, flat_inputs = self, input
        to_pad = ptensor_self.ndim - len(flat_inputs)
        has_device = True

    result_levels = []
    index_levels = []
    tensor_insert_point = None
    to_expand = {}
    requires_getindex = False
    for i, inp in enumerate(flat_inputs):
        if isinstance(inp, Dim) and dims_seen[inp] == 1:
            # single-use dim: no real indexing needed, it becomes a named dim
            flat_inputs[i] = no_slice
            result_levels.append(inp)
        elif isinstance(inp, TensorLike):
            requires_getindex = True
            if tensor_insert_point is None:
                tensor_insert_point = len(result_levels)
            ptensor, levels, _ = _tensor_levels(inp)
            to_expand[i] = levels
            flat_inputs[i] = ptensor
            for l in levels:
                if l not in index_levels:
                    index_levels.append(l)
        else:
            requires_getindex = True
            result_levels.append(0)

    # advanced-indexing result dims appear where the first index tensor was
    if tensor_insert_point is not None:
        result_levels[tensor_insert_point:tensor_insert_point] = index_levels

    for i, levels in to_expand.items():
        flat_inputs[i] = _match_levels(flat_inputs[i], levels, index_levels)

    if requires_getindex:
        result = _orig_getitem(ptensor_self, flat_inputs)
    else:
        result = ptensor_self

    # renumber the positional (int) levels from the back: ..., -2, -1
    next_positional = -1
    if to_pad > 0:
        result_levels.extend([0] * to_pad)
    for i, r in enumerate(reversed(result_levels)):
        if isinstance(r, int):
            result_levels[-1 - i] = next_positional
            next_positional -= 1

    return Tensor.from_positional(result, result_levels, has_device)
575
+
576
+
577
# XXX - dim is optional and can be the outer-most dimension...
def stack(tensors, new_dim, dim=0, out=None):
    """torch.stack for first-class dims: stack *tensors* along *new_dim*.

    An int *dim* defers to torch.stack and names the resulting positional
    dim; a Dim *dim* is made positional (without permuting) on every input
    first, and the stacked pair of dims is rebound to (new_dim, dim).
    """
    if isinstance(dim, int):
        return torch.stack(tensors, dim, out).index(dim, new_dim)
    index = None
    if out is not None:
        out, index = _positional_no_permute(out, dim, expand_dim=True)
    ptensors = []
    for t in tensors:
        pt, pi = _positional_no_permute(t, dim, expand_dim=True)
        # keep every input's target dim at the same positional index
        if index is not None and pi != index:
            pt = pt.move_dim(pi, index)
        else:
            index = pi
        ptensors.append(pt)
    pr = torch.stack(ptensors, index, out=out)
    # the stack created two adjacent positional dims; rebind them by name
    return pr.index((index, index + 1), (new_dim, dim))
594
+
595
+
596
# keep the un-patched split so the dim-aware version can delegate to it
_orig_split = torch.Tensor.split
597
+
598
+
599
def split(self, split_size_or_sections, dim=0):
    """torch.Tensor.split that also accepts Dim objects as split sizes.

    With int sizes, defers to the original split (a Dim *dim* is rejected).
    With Dim sizes: bound dims fix their chunk sizes; any unbound dims share
    the remaining length (and are bound on return); each resulting chunk is
    indexed by its corresponding Dim.
    """
    from . import _Tensor, Dim

    if isinstance(split_size_or_sections, int) or any(
        isinstance(t, int) for t in split_size_or_sections
    ):
        if isinstance(dim, Dim):
            raise ValueError(
                "when dim is specified as a Dim object, split sizes must also be dimensions."
            )
        return _orig_split(self, split_size_or_sections, dim=dim)

    if isinstance(dim, Dim):
        assert isinstance(self, _Tensor), f"Tensor does not have dimension {dim}"
        self, dim = _positional_no_permute(self, dim)

    size = self.size(dim)
    total_bound_size = 0
    unbound = []
    sizes = []
    for i, d in enumerate(split_size_or_sections):
        if d.is_bound:
            sizes.append(d.size)
            total_bound_size += d.size
        else:
            sizes.append(0)  # placeholder, filled in below
            unbound.append(i)

    if unbound:
        assert (
            total_bound_size <= size
        ), f"result dimensions are larger than original: {total_bound_size} vs {size} ({split_size_or_sections})"
        remaining_size = size - total_bound_size
        # ceiling division: spread the remainder over the unbound dims
        chunk_size = -(-remaining_size // len(unbound))
        for u in unbound:
            sz = min(chunk_size, remaining_size)
            split_size_or_sections[u].size = sz
            sizes[u] = sz
            remaining_size -= sz
    else:
        assert (
            total_bound_size == size
        ), f"result dimensions do not match original: {total_bound_size} vs {size} ({split_size_or_sections})"
    return tuple(
        t.index(dim, d)
        for d, t in zip(split_size_or_sections, _orig_split(self, sizes, dim=dim))
    )
venv/lib/python3.10/site-packages/functorch/dim/tree_map.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from functorch._C import dim
8
+
9
+ tree_flatten = dim.tree_flatten
10
+
11
+
12
def tree_map(fn, tree):
    """Apply *fn* to every leaf of *tree*, rebuilding the same structure."""
    leaves, unflatten = tree_flatten(tree)
    return unflatten(map(fn, leaves))
venv/lib/python3.10/site-packages/functorch/dim/wrap_type.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from types import (
8
+ BuiltinMethodType,
9
+ FunctionType,
10
+ GetSetDescriptorType,
11
+ MethodDescriptorType,
12
+ WrapperDescriptorType,
13
+ )
14
+
15
+ from functorch._C import dim as _C
16
+
17
# C implementation of the method wrapper (see _py_wrap_method for the fallback)
_wrap_method = _C._wrap_method

# callable descriptor types whose entries get wrapped as methods
FUNC_TYPES = (
    FunctionType,
    MethodDescriptorType,
    BuiltinMethodType,
    WrapperDescriptorType,
)
# descriptor types whose entries get wrapped as read-only properties
PROPERTY_TYPES = (GetSetDescriptorType, property)
26
+
27
+
28
+ def _py_wrap_method(orig, __torch_function__):
29
+ def impl(*args, **kwargs):
30
+ return __torch_function__(orig, None, args, kwargs)
31
+
32
+ return impl
33
+
34
+
35
def wrap_type(use_c, to_patch, pattern, __torch_function__):
    """Copy methods/properties from *pattern*'s MRO onto *to_patch*, each
    wrapped so calls dispatch through *__torch_function__*.

    use_c selects the C wrapper implementation; otherwise the pure-Python
    fallback is used. Names already overloaded on *to_patch* (beyond what
    `object` provides) are left untouched.
    """
    if use_c:
        wrap_method = _wrap_method
    else:
        wrap_method = _py_wrap_method

    # gather attributes base-classes-first so subclass entries win
    # NOTE(review): `all` shadows the builtin of the same name
    all = {}
    for t in reversed(pattern.mro()[:-1]):  # skip object
        all.update(t.__dict__)

    def wrap_attr(orig):
        # wrap a descriptor's getter as a read-only property
        return property(wrap_method(orig.__get__, __torch_function__))

    for name, obj in all.items():
        if name in (
            "__dict__",
            "__new__",
            "__init__",
            "__repr__",
            "__weakref__",
            "__doc__",
            "__module__",
            "__dir__",
        ):
            continue

        # skip things that have been overloaded
        # things that come from object like `__eq__` still need to be patched, however.
        if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr(
            object, name, None
        ):
            continue

        if isinstance(obj, FUNC_TYPES):
            setattr(to_patch, name, wrap_method(obj, __torch_function__))
        elif isinstance(obj, PROPERTY_TYPES):
            setattr(to_patch, name, wrap_attr(obj))
+ setattr(to_patch, name, wrap_attr(obj))
venv/lib/python3.10/site-packages/functorch/einops/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .rearrange import rearrange
2
+
3
+ __all__ = ["rearrange"]