Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. Β
See raw diff
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__init__.py +35 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_ihatexml.py +289 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_inputstream.py +918 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_tokenizer.py +1735 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__init__.py +5 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/_base.py +40 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/py.py +67 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_utils.py +159 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/constants.py +2946 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/html5parser.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/serializer.py +409 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py +30 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py +54 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/sax.py +50 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py +154 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/base.py +252 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/dom.py +43 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/etree.py +131 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py +215 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py +69 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/colors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/counter.py +47 -0
- env-llmeval/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py +83 -0
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__init__.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
HTML parsing library based on the `WHATWG HTML specification
|
3 |
+
<https://whatwg.org/html>`_. The parser is designed to be compatible with
|
4 |
+
existing HTML found in the wild and implements well-defined error recovery that
|
5 |
+
is largely compatible with modern desktop web browsers.
|
6 |
+
|
7 |
+
Example usage::
|
8 |
+
|
9 |
+
from pip._vendor import html5lib
|
10 |
+
with open("my_document.html", "rb") as f:
|
11 |
+
tree = html5lib.parse(f)
|
12 |
+
|
13 |
+
For convenience, this module re-exports the following names:
|
14 |
+
|
15 |
+
* :func:`~.html5parser.parse`
|
16 |
+
* :func:`~.html5parser.parseFragment`
|
17 |
+
* :class:`~.html5parser.HTMLParser`
|
18 |
+
* :func:`~.treebuilders.getTreeBuilder`
|
19 |
+
* :func:`~.treewalkers.getTreeWalker`
|
20 |
+
* :func:`~.serializer.serialize`
|
21 |
+
"""
|
22 |
+
|
23 |
+
from __future__ import absolute_import, division, unicode_literals
|
24 |
+
|
25 |
+
from .html5parser import HTMLParser, parse, parseFragment
|
26 |
+
from .treebuilders import getTreeBuilder
|
27 |
+
from .treewalkers import getTreeWalker
|
28 |
+
from .serializer import serialize
|
29 |
+
|
30 |
+
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
|
31 |
+
"getTreeWalker", "serialize"]
|
32 |
+
|
33 |
+
# this has to be at the top level, see how setup.py parses this
|
34 |
+
#: Distribution version number.
|
35 |
+
__version__ = "1.1"
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-310.pyc
ADDED
Binary file (13.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-310.pyc
ADDED
Binary file (21.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-310.pyc
ADDED
Binary file (37.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-310.pyc
ADDED
Binary file (4.79 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-310.pyc
ADDED
Binary file (88.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-310.pyc
ADDED
Binary file (10.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_ihatexml.py
ADDED
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
import re
|
4 |
+
import warnings
|
5 |
+
|
6 |
+
from .constants import DataLossWarning
|
7 |
+
|
8 |
+
baseChar = """
|
9 |
+
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
|
10 |
+
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
|
11 |
+
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
|
12 |
+
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
|
13 |
+
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
|
14 |
+
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
|
15 |
+
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
|
16 |
+
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
|
17 |
+
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
|
18 |
+
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
|
19 |
+
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
|
20 |
+
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
|
21 |
+
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
|
22 |
+
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
|
23 |
+
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
|
24 |
+
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
|
25 |
+
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
|
26 |
+
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
|
27 |
+
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
|
28 |
+
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
|
29 |
+
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
|
30 |
+
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
|
31 |
+
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
|
32 |
+
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
|
33 |
+
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
|
34 |
+
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
|
35 |
+
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
|
36 |
+
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
|
37 |
+
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
|
38 |
+
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
|
39 |
+
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
|
40 |
+
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
|
41 |
+
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
|
42 |
+
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
|
43 |
+
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
|
44 |
+
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
|
45 |
+
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
|
46 |
+
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
|
47 |
+
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
|
48 |
+
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
|
49 |
+
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
|
50 |
+
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
|
51 |
+
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
|
52 |
+
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
|
53 |
+
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
|
54 |
+
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
|
55 |
+
|
56 |
+
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
|
57 |
+
|
58 |
+
combiningCharacter = """
|
59 |
+
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
|
60 |
+
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
|
61 |
+
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
|
62 |
+
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
|
63 |
+
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
|
64 |
+
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
|
65 |
+
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
|
66 |
+
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
|
67 |
+
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
|
68 |
+
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
|
69 |
+
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
|
70 |
+
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
|
71 |
+
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
|
72 |
+
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
|
73 |
+
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
|
74 |
+
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
|
75 |
+
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
|
76 |
+
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
|
77 |
+
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
|
78 |
+
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
|
79 |
+
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
|
80 |
+
#x3099 | #x309A"""
|
81 |
+
|
82 |
+
digit = """
|
83 |
+
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
|
84 |
+
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
|
85 |
+
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
|
86 |
+
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
|
87 |
+
|
88 |
+
extender = """
|
89 |
+
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
|
90 |
+
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
|
91 |
+
|
92 |
+
letter = " | ".join([baseChar, ideographic])
|
93 |
+
|
94 |
+
# Without the
|
95 |
+
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
|
96 |
+
extender])
|
97 |
+
nameFirst = " | ".join([letter, "_"])
|
98 |
+
|
99 |
+
reChar = re.compile(r"#x([\d|A-F]{4,4})")
|
100 |
+
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
|
101 |
+
|
102 |
+
|
103 |
+
def charStringToList(chars):
|
104 |
+
charRanges = [item.strip() for item in chars.split(" | ")]
|
105 |
+
rv = []
|
106 |
+
for item in charRanges:
|
107 |
+
foundMatch = False
|
108 |
+
for regexp in (reChar, reCharRange):
|
109 |
+
match = regexp.match(item)
|
110 |
+
if match is not None:
|
111 |
+
rv.append([hexToInt(item) for item in match.groups()])
|
112 |
+
if len(rv[-1]) == 1:
|
113 |
+
rv[-1] = rv[-1] * 2
|
114 |
+
foundMatch = True
|
115 |
+
break
|
116 |
+
if not foundMatch:
|
117 |
+
assert len(item) == 1
|
118 |
+
|
119 |
+
rv.append([ord(item)] * 2)
|
120 |
+
rv = normaliseCharList(rv)
|
121 |
+
return rv
|
122 |
+
|
123 |
+
|
124 |
+
def normaliseCharList(charList):
|
125 |
+
charList = sorted(charList)
|
126 |
+
for item in charList:
|
127 |
+
assert item[1] >= item[0]
|
128 |
+
rv = []
|
129 |
+
i = 0
|
130 |
+
while i < len(charList):
|
131 |
+
j = 1
|
132 |
+
rv.append(charList[i])
|
133 |
+
while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
|
134 |
+
rv[-1][1] = charList[i + j][1]
|
135 |
+
j += 1
|
136 |
+
i += j
|
137 |
+
return rv
|
138 |
+
|
139 |
+
|
140 |
+
# We don't really support characters above the BMP :(
|
141 |
+
max_unicode = int("FFFF", 16)
|
142 |
+
|
143 |
+
|
144 |
+
def missingRanges(charList):
|
145 |
+
rv = []
|
146 |
+
if charList[0] != 0:
|
147 |
+
rv.append([0, charList[0][0] - 1])
|
148 |
+
for i, item in enumerate(charList[:-1]):
|
149 |
+
rv.append([item[1] + 1, charList[i + 1][0] - 1])
|
150 |
+
if charList[-1][1] != max_unicode:
|
151 |
+
rv.append([charList[-1][1] + 1, max_unicode])
|
152 |
+
return rv
|
153 |
+
|
154 |
+
|
155 |
+
def listToRegexpStr(charList):
|
156 |
+
rv = []
|
157 |
+
for item in charList:
|
158 |
+
if item[0] == item[1]:
|
159 |
+
rv.append(escapeRegexp(chr(item[0])))
|
160 |
+
else:
|
161 |
+
rv.append(escapeRegexp(chr(item[0])) + "-" +
|
162 |
+
escapeRegexp(chr(item[1])))
|
163 |
+
return "[%s]" % "".join(rv)
|
164 |
+
|
165 |
+
|
166 |
+
def hexToInt(hex_str):
|
167 |
+
return int(hex_str, 16)
|
168 |
+
|
169 |
+
|
170 |
+
def escapeRegexp(string):
|
171 |
+
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
|
172 |
+
"[", "]", "|", "(", ")", "-")
|
173 |
+
for char in specialCharacters:
|
174 |
+
string = string.replace(char, "\\" + char)
|
175 |
+
|
176 |
+
return string
|
177 |
+
|
178 |
+
# output from the above
|
179 |
+
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa
|
180 |
+
|
181 |
+
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa
|
182 |
+
|
183 |
+
# Simpler things
|
184 |
+
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]")
|
185 |
+
|
186 |
+
|
187 |
+
class InfosetFilter(object):
|
188 |
+
replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
|
189 |
+
|
190 |
+
def __init__(self,
|
191 |
+
dropXmlnsLocalName=False,
|
192 |
+
dropXmlnsAttrNs=False,
|
193 |
+
preventDoubleDashComments=False,
|
194 |
+
preventDashAtCommentEnd=False,
|
195 |
+
replaceFormFeedCharacters=True,
|
196 |
+
preventSingleQuotePubid=False):
|
197 |
+
|
198 |
+
self.dropXmlnsLocalName = dropXmlnsLocalName
|
199 |
+
self.dropXmlnsAttrNs = dropXmlnsAttrNs
|
200 |
+
|
201 |
+
self.preventDoubleDashComments = preventDoubleDashComments
|
202 |
+
self.preventDashAtCommentEnd = preventDashAtCommentEnd
|
203 |
+
|
204 |
+
self.replaceFormFeedCharacters = replaceFormFeedCharacters
|
205 |
+
|
206 |
+
self.preventSingleQuotePubid = preventSingleQuotePubid
|
207 |
+
|
208 |
+
self.replaceCache = {}
|
209 |
+
|
210 |
+
def coerceAttribute(self, name, namespace=None):
|
211 |
+
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
|
212 |
+
warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
|
213 |
+
return None
|
214 |
+
elif (self.dropXmlnsAttrNs and
|
215 |
+
namespace == "http://www.w3.org/2000/xmlns/"):
|
216 |
+
warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
|
217 |
+
return None
|
218 |
+
else:
|
219 |
+
return self.toXmlName(name)
|
220 |
+
|
221 |
+
def coerceElement(self, name):
|
222 |
+
return self.toXmlName(name)
|
223 |
+
|
224 |
+
def coerceComment(self, data):
|
225 |
+
if self.preventDoubleDashComments:
|
226 |
+
while "--" in data:
|
227 |
+
warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
|
228 |
+
data = data.replace("--", "- -")
|
229 |
+
if data.endswith("-"):
|
230 |
+
warnings.warn("Comments cannot end in a dash", DataLossWarning)
|
231 |
+
data += " "
|
232 |
+
return data
|
233 |
+
|
234 |
+
def coerceCharacters(self, data):
|
235 |
+
if self.replaceFormFeedCharacters:
|
236 |
+
for _ in range(data.count("\x0C")):
|
237 |
+
warnings.warn("Text cannot contain U+000C", DataLossWarning)
|
238 |
+
data = data.replace("\x0C", " ")
|
239 |
+
# Other non-xml characters
|
240 |
+
return data
|
241 |
+
|
242 |
+
def coercePubid(self, data):
|
243 |
+
dataOutput = data
|
244 |
+
for char in nonPubidCharRegexp.findall(data):
|
245 |
+
warnings.warn("Coercing non-XML pubid", DataLossWarning)
|
246 |
+
replacement = self.getReplacementCharacter(char)
|
247 |
+
dataOutput = dataOutput.replace(char, replacement)
|
248 |
+
if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
|
249 |
+
warnings.warn("Pubid cannot contain single quote", DataLossWarning)
|
250 |
+
dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
|
251 |
+
return dataOutput
|
252 |
+
|
253 |
+
def toXmlName(self, name):
|
254 |
+
nameFirst = name[0]
|
255 |
+
nameRest = name[1:]
|
256 |
+
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
|
257 |
+
if m:
|
258 |
+
warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
|
259 |
+
nameFirstOutput = self.getReplacementCharacter(nameFirst)
|
260 |
+
else:
|
261 |
+
nameFirstOutput = nameFirst
|
262 |
+
|
263 |
+
nameRestOutput = nameRest
|
264 |
+
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
|
265 |
+
for char in replaceChars:
|
266 |
+
warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
|
267 |
+
replacement = self.getReplacementCharacter(char)
|
268 |
+
nameRestOutput = nameRestOutput.replace(char, replacement)
|
269 |
+
return nameFirstOutput + nameRestOutput
|
270 |
+
|
271 |
+
def getReplacementCharacter(self, char):
|
272 |
+
if char in self.replaceCache:
|
273 |
+
replacement = self.replaceCache[char]
|
274 |
+
else:
|
275 |
+
replacement = self.escapeChar(char)
|
276 |
+
return replacement
|
277 |
+
|
278 |
+
def fromXmlName(self, name):
|
279 |
+
for item in set(self.replacementRegexp.findall(name)):
|
280 |
+
name = name.replace(item, self.unescapeChar(item))
|
281 |
+
return name
|
282 |
+
|
283 |
+
def escapeChar(self, char):
|
284 |
+
replacement = "U%05X" % ord(char)
|
285 |
+
self.replaceCache[char] = replacement
|
286 |
+
return replacement
|
287 |
+
|
288 |
+
def unescapeChar(self, charcode):
|
289 |
+
return chr(int(charcode[1:], 16))
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_inputstream.py
ADDED
@@ -0,0 +1,918 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from pip._vendor.six import text_type
|
4 |
+
from pip._vendor.six.moves import http_client, urllib
|
5 |
+
|
6 |
+
import codecs
|
7 |
+
import re
|
8 |
+
from io import BytesIO, StringIO
|
9 |
+
|
10 |
+
from pip._vendor import webencodings
|
11 |
+
|
12 |
+
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
|
13 |
+
from .constants import _ReparseException
|
14 |
+
from . import _utils
|
15 |
+
|
16 |
+
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
# Bytes that end an unquoted attribute value in the meta pre-parser
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])


# Regex character class (as source text) of code points that are invalid in
# an HTML document: C0/C1 controls (minus whitespace) plus the Unicode
# non-characters.  The lone-surrogate range is appended separately below,
# because it is only expressible on platforms that support lone surrogates.
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]"  # noqa

if _utils.supports_lone_surrogates:
    # Use one extra step of indirection and create surrogates with
    # eval. Not using this indirection would introduce an illegal
    # unicode literal on platforms not supporting such lone
    # surrogates.
    assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
                                    eval('"\\uD800-\\uDFFF"') +  # pylint:disable=eval-used
                                    "]")
else:
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)

# Non-character code points outside the BMP.  Used by the UCS-2 error path
# (characterErrorsUCS2), which must reassemble them from surrogate pairs.
non_bmp_invalid_codepoints = {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                              0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                              0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                              0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                              0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                              0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                              0x10FFFE, 0x10FFFF}

# ASCII whitespace and punctuation ranges.
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")

# Cache for charsUntil()
charsUntilRegEx = {}
|
49 |
+
|
50 |
+
|
51 |
+
class BufferedStream(object):
    """Buffering wrapper for streams that lack buffering of their own.

    Data already read from the underlying stream is retained as a list of
    chunks (joining many strings repeatedly would be O(n**2)), which makes
    the wrapped stream re-readable via ``seek``/``tell`` within the portion
    consumed so far.
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset

    def tell(self):
        """Absolute offset of the current position within buffered data."""
        chunk_idx, offset = self.position
        return sum(len(chunk) for chunk in self.buffer[:chunk_idx]) + offset

    def seek(self, pos):
        """Reposition within already-buffered data (forward seeks past the
        buffer are not supported)."""
        assert pos <= self._bufferedBytes()
        remaining = pos
        chunk_idx = 0
        # Walk chunks until the target offset falls inside the current one.
        while len(self.buffer[chunk_idx]) < remaining:
            remaining -= len(self.buffer[chunk_idx])
            chunk_idx += 1
        self.position = [chunk_idx, remaining]

    def read(self, bytes):
        """Read up to *bytes* bytes, serving from the buffer first."""
        if not self.buffer:
            return self._readStream(bytes)
        at_buffer_end = (self.position[0] == len(self.buffer) and
                         self.position[1] == len(self.buffer[-1]))
        if at_buffer_end:
            return self._readStream(bytes)
        return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes retained across all chunks.
        return sum([len(item) for item in self.buffer])

    def _readStream(self, bytes):
        # Pull fresh data from the wrapped stream and remember it.
        chunk = self.stream.read(bytes)
        self.buffer.append(chunk)
        self.position = [self.position[0] + 1, len(chunk)]
        return chunk

    def _readFromBuffer(self, bytes):
        # Satisfy the read from retained chunks, falling back to the
        # underlying stream for whatever is still missing.
        wanted = bytes
        pieces = []
        chunk_idx, offset = self.position
        while chunk_idx < len(self.buffer) and wanted != 0:
            assert wanted > 0
            chunk = self.buffer[chunk_idx]
            available = len(chunk) - offset
            if wanted <= available:
                take = wanted
                self.position = [chunk_idx, offset + take]
            else:
                take = available
                self.position = [chunk_idx, len(chunk)]
                chunk_idx += 1
            pieces.append(chunk[offset:offset + take])
            wanted -= take
            # Every chunk after the first is consumed from its start.
            offset = 0

        if wanted:
            pieces.append(self._readStream(wanted))

        return b"".join(pieces)
|
123 |
+
|
124 |
+
|
125 |
+
def HTMLInputStream(source, **kwargs):
    """Return an input stream for *source*.

    Dispatches to HTMLUnicodeInputStream when the source yields text and to
    HTMLBinaryInputStream when it yields bytes.
    """
    # Work around Python bug #20007: read(0) closes the connection.
    # http://bugs.python.org/issue20007
    is_http_response = isinstance(source, http_client.HTTPResponse)
    # Also check for addinfourl wrapping HTTPResponse
    wraps_http_response = (isinstance(source, urllib.response.addbase) and
                           isinstance(source.fp, http_client.HTTPResponse))
    if is_http_response or wraps_http_response:
        isUnicode = False
    elif hasattr(source, "read"):
        # Probe a file-like object with a zero-length read.
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)

    if not isUnicode:
        return HTMLBinaryInputStream(source, **kwargs)

    # Text input is already decoded, so encoding overrides make no sense.
    encodings = [x for x in kwargs if x.endswith("_encoding")]
    if encodings:
        raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
    return HTMLUnicodeInputStream(source, **kwargs)
|
146 |
+
|
147 |
+
|
148 |
+
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.

    """

    # Number of characters fetched from the underlying stream per readChunk()
    _defaultChunkSize = 10240

    def __init__(self, source):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        """

        if not _utils.supports_lone_surrogates:
            # Such platforms will have already checked for such
            # surrogate errors, so no need to do this checking.
            self.reportCharacterErrors = None
        elif len("\U0010FFFF") == 1:
            # Wide (UCS-4) build: astral characters are single code points.
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            # Narrow (UCS-2) build: astral characters are surrogate pairs.
            self.reportCharacterErrors = self.characterErrorsUCS2

        # List of where new lines occur
        self.newLines = [0]

        self.charEncoding = (lookupEncoding("utf-8"), "certain")
        self.dataStream = self.openStream(source)

        self.reset()

    def reset(self):
        # Current chunk of decoded text and the read cursor within it.
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        # Parse errors ("invalid-codepoint") collected while reading.
        self.errors = []

        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0

        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.

        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)

        return stream

    def _position(self, offset):
        # Translate an offset inside the current chunk into an absolute
        # (line, column) pair using the totals carried over from chunks
        # already consumed.
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            # Still on the line started in a previous chunk.
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)

    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based; columns stay 0-based.
        return (line + 1, col)

    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF

        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1

        return char

    def readChunk(self, chunkSize=None):
        """Fetch and normalize the next chunk; return False at EOF."""
        if chunkSize is None:
            chunkSize = self._defaultChunkSize

        # Fold the chunk being discarded into the running line/column totals.
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)

        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0

        data = self.dataStream.read(chunkSize)

        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False

        if len(data) > 1:
            # Hold back a trailing CR or high surrogate so it can be joined
            # with the start of the next chunk before normalization.
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]

        if self.reportCharacterErrors:
            self.reportCharacterErrors(data)

        # Replace invalid characters
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")

        self.chunk = data
        self.chunkSize = len(data)

        return True

    def characterErrorsUCS4(self, data):
        # One "invalid-codepoint" error per invalid character found.
        for _ in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")

    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                # This match is the low surrogate of a pair that was already
                # reported as a single invalid codepoint: consume it and
                # resume normal reporting.  (Bug fix: ``skip`` was previously
                # never reset here, so every invalid codepoint after the
                # first surrogate pair was silently dropped.)
                skip = False
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if _utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone high/low surrogate at the end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")

    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """

        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)

        rv = []

        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = "".join(rv)
        return r

    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not EOF:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
|
376 |
+
|
377 |
+
|
378 |
+
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.

    """

    def __init__(self, source, override_encoding=None, transport_encoding=None,
                 same_origin_parent_encoding=None, likely_encoding=None,
                 default_encoding="windows-1252", useChardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)

        HTMLUnicodeInputStream.__init__(self, self.rawStream)

        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 1024
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Things from args
        self.override_encoding = override_encoding
        self.transport_encoding = transport_encoding
        self.same_origin_parent_encoding = same_origin_parent_encoding
        self.likely_encoding = likely_encoding
        self.default_encoding = default_encoding

        # Determine encoding
        self.charEncoding = self.determineEncoding(useChardet)
        assert self.charEncoding[0] is not None

        # Call superclass
        self.reset()

    def reset(self):
        # Wrap the raw byte stream in a stream reader for the detected
        # encoding; undecodable bytes are replaced (U+FFFD).
        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.

        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)

        try:
            # Encoding detection needs a seekable stream; probe for support.
            stream.seek(stream.tell())
        except Exception:
            stream = BufferedStream(stream)

        return stream

    def determineEncoding(self, chardet=True):
        """Return (encoding, confidence), trying each source of encoding
        information in order of decreasing authority."""
        # BOMs take precedence over everything
        # This will also read past the BOM if present
        charEncoding = self.detectBOM(), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # If we've been overridden, we've been overridden
        charEncoding = lookupEncoding(self.override_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # Now check the transport layer
        charEncoding = lookupEncoding(self.transport_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # Look for meta elements with encoding information
        charEncoding = self.detectEncodingMeta(), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Parent document encoding
        charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
        if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
            return charEncoding

        # "likely" encoding
        charEncoding = lookupEncoding(self.likely_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Guess with chardet, if available
        if chardet:
            try:
                from pip._vendor.chardet.universaldetector import UniversalDetector
            except ImportError:
                pass
            else:
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = lookupEncoding(detector.result['encoding'])
                # Rewind so the document is re-read from the start.
                self.rawStream.seek(0)
                if encoding is not None:
                    return encoding, "tentative"

        # Try the default encoding
        charEncoding = lookupEncoding(self.default_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Fallback to html5lib's default if even that hasn't worked
        return lookupEncoding("windows-1252"), "tentative"

    def changeEncoding(self, newEncoding):
        # Only a tentative encoding may be replaced mid-parse.
        assert self.charEncoding[1] != "certain"
        newEncoding = lookupEncoding(newEncoding)
        if newEncoding is None:
            return
        if newEncoding.name in ("utf-16be", "utf-16le"):
            newEncoding = lookupEncoding("utf-8")
            assert newEncoding is not None
        elif newEncoding == self.charEncoding[0]:
            # Same encoding: just upgrade the confidence.
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            # Restart decoding from the beginning with the new encoding and
            # signal the parser to reparse.
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
            codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])   # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)   # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        if encoding:
            self.rawStream.seek(seek)
            return lookupEncoding(encoding)
        else:
            self.rawStream.seek(0)
            return None

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        # Pre-scan only the first numBytesMeta bytes of the document.
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
            encoding = lookupEncoding("utf-8")

        return encoding
|
576 |
+
|
577 |
+
|
578 |
+
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        assert isinstance(value, bytes)
        # Lower-case once at construction so that all matching done through
        # this object is case-insensitive.
        return bytes.__new__(self, value.lower())

    def __init__(self, value):
        # pylint:disable=unused-argument
        # Position starts before the first byte; the first next() lands on 0.
        self._position = -1

    def __iter__(self):
        return self

    def __next__(self):
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        # Slice rather than index so a bytes object (not an int) is returned
        # on Python 3.
        return self[p:p + 1]

    def next(self):
        # Py2 compat
        return self.__next__()

    def previous(self):
        # Step the position back one byte and return the byte at the old
        # position; raises once the end has already been passed.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]

    def setPosition(self, position):
        # Setting the position after the end is how iteration exhaustion is
        # surfaced to callers using the property.
        if self._position >= len(self):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            # Before the first byte there is no meaningful position.
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        return self[self.position:self.position + 1]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        # Ran off the end without finding a non-skipped byte.
        return None

    def skipUntil(self, chars):
        # Advance until a byte in *chars* is found; return it, or None at end.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        rv = self.startswith(bytes, self.position)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        try:
            self._position = self.index(bytes, self.position) + len(bytes) - 1
        except ValueError:
            # No match anywhere ahead: signal exhaustion to the caller.
            raise StopIteration
        return True
|
674 |
+
|
675 |
+
|
676 |
+
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        # Set by handleMeta() when a declaration is found.
        self.encoding = None

    def getEncoding(self):
        """Scan the buffer for a meta-declared encoding; return it or None."""
        # Quick rejection: without b"<meta" there is nothing to find.
        if b"<meta" not in self.data:
            return None

        # Longest prefixes first, so e.g. b"<!--" wins over b"<!".
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for _ in self.data:
            keepParsing = True
            try:
                self.data.jumpTo(b"<")
            except StopIteration:
                break
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        # Handlers return False once an encoding is settled.
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Handler ran off the end of the buffer.
                        keepParsing = False
                        break
            if not keepParsing:
                break

        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")

    def handleMeta(self):
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # A content attribute seen earlier now becomes valid.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = lookupEncoding(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = lookupEncoding(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Hold until an http-equiv pragma confirms it.
                                pendingEncoding = codec

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        # Consume the byte after b"</" before inspecting the tag name.
        next(self.data)
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True

        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        # Skip to the end of the current markup construct.
        return self.data.jumpTo(b">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # Step numbers refer to the attribute-parsing steps of the HTML
        # encoding-sniffing algorithm this parser implements.
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
|
862 |
+
|
863 |
+
|
864 |
+
class ContentAttrParser(object):
    """Extract the charset value from a meta ``content`` attribute value,
    e.g. b"text/html; charset=utf-8" -> b"utf-8"."""

    def __init__(self, data):
        # data is expected to be an EncodingBytes instance (jumpTo/skip and
        # the position property are used below) -- assert only checks the
        # bytes base class.
        assert isinstance(data, bytes)
        self.data = data

    def parse(self):
        # The outer try turns any run-off-the-end StopIteration from the
        # position manipulation below into a None result.
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
|
901 |
+
|
902 |
+
|
903 |
+
def lookupEncoding(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    if isinstance(encoding, bytes):
        # Encoding labels are required to be ASCII; anything else cannot
        # possibly name a codec.
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None

    if encoding is None:
        return None
    try:
        return webencodings.lookup(encoding)
    except AttributeError:
        # Non-string input (no .strip()/.lower() on it) — treat as unknown.
        return None
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_tokenizer.py
ADDED
@@ -0,0 +1,1735 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from pip._vendor.six import unichr as chr
|
4 |
+
|
5 |
+
from collections import deque, OrderedDict
|
6 |
+
from sys import version_info
|
7 |
+
|
8 |
+
from .constants import spaceCharacters
|
9 |
+
from .constants import entities
|
10 |
+
from .constants import asciiLetters, asciiUpper2Lower
|
11 |
+
from .constants import digits, hexDigits, EOF
|
12 |
+
from .constants import tokenTypes, tagTokenTypes
|
13 |
+
from .constants import replacementCharacters
|
14 |
+
|
15 |
+
from ._inputstream import HTMLInputStream
|
16 |
+
|
17 |
+
from ._trie import Trie
|
18 |
+
|
19 |
+
# Trie over all named character references; used for longest-prefix matching
# when consuming entities (e.g. telling "&not" apart from "&notin;").
entitiesTrie = Trie(entities)

# From Python 3.7 the built-in dict preserves insertion order, which the
# tokenizer relies on for attribute ordering; older versions need OrderedDict.
if version_info >= (3, 7):
    attributeMap = dict
else:
    attributeMap = OrderedDict
|
25 |
+
|
26 |
+
|
27 |
+
class HTMLTokenizer(object):
|
28 |
+
""" This class takes care of tokenizing HTML.
|
29 |
+
|
30 |
+
* self.currentToken
|
31 |
+
Holds the token that is currently being processed.
|
32 |
+
|
33 |
+
* self.state
|
34 |
+
Holds a reference to the method to be invoked... XXX
|
35 |
+
|
36 |
+
* self.stream
|
37 |
+
Points to HTMLInputStream object.
|
38 |
+
"""
|
39 |
+
|
40 |
+
    def __init__(self, stream, parser=None, **kwargs):
        """Create a tokenizer over *stream*.

        :arg stream: anything HTMLInputStream accepts; remaining keyword
            arguments are forwarded to it unchanged.
        :arg parser: optional back-reference to the owning parser.
        """
        self.stream = HTMLInputStream(stream, **kwargs)
        self.parser = parser

        # Setup the initial tokenizer state
        self.escapeFlag = False
        self.lastFourChars = []
        self.state = self.dataState  # bound method invoked once per step
        self.escape = False

        # The current token being created
        self.currentToken = None
        super(HTMLTokenizer, self).__init__()
|
54 |
+
|
55 |
+
    def __iter__(self):
        """ This is where the magic happens.

        We do our usually processing through the states and when we have a token
        to return we yield the token which pauses processing until the next token
        is requested.
        """
        self.tokenQueue = deque([])
        # Start processing. When EOF is reached self.state will return False
        # instead of True and the loop will terminate.
        while self.state():
            # Surface stream-level errors before the tokens produced in the
            # same step, preserving reporting order.
            while self.stream.errors:
                yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
            while self.tokenQueue:
                yield self.tokenQueue.popleft()
|
70 |
+
|
71 |
+
    def consumeNumberEntity(self, isHex):
        """This function returns either U+FFFD or the character based on the
        decimal or hexadecimal representation. It also discards ";" if present.
        If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
        """

        allowed = digits
        radix = 10
        if isHex:
            allowed = hexDigits
            radix = 16

        charStack = []

        # Consume all the characters that are in range while making sure we
        # don't hit an EOF.
        c = self.stream.char()
        while c in allowed and c is not EOF:
            charStack.append(c)
            c = self.stream.char()

        # Convert the set of characters consumed to an int.
        charAsInt = int("".join(charStack), radix)

        # Certain characters get replaced with others
        if charAsInt in replacementCharacters:
            char = replacementCharacters[charAsInt]
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        elif ((0xD800 <= charAsInt <= 0xDFFF) or
              (charAsInt > 0x10FFFF)):
            # Surrogates and out-of-range code points become U+FFFD.
            char = "\uFFFD"
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        else:
            # Control characters and non-characters are kept, but flagged
            # as parse errors.
            # Should speed up this check somehow (e.g. move the set to a constant)
            if ((0x0001 <= charAsInt <= 0x0008) or
                (0x000E <= charAsInt <= 0x001F) or
                (0x007F <= charAsInt <= 0x009F) or
                (0xFDD0 <= charAsInt <= 0xFDEF) or
                charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
                                        0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                        0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
                                        0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
                                        0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
                                        0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
                                        0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                        0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
                                        0xFFFFF, 0x10FFFE, 0x10FFFF])):
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data":
                                        "illegal-codepoint-for-numeric-entity",
                                        "datavars": {"charAsInt": charAsInt}})
            try:
                # Try/except needed as UCS-2 Python builds' unichar only works
                # within the BMP.
                char = chr(charAsInt)
            except ValueError:
                v = charAsInt - 0x10000
                char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))

        # Discard the ; if present. Otherwise, put it back on the queue and
        # invoke parseError on parser.
        if c != ";":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "numeric-entity-without-semicolon"})
            self.stream.unget(c)

        return char
|
142 |
+
|
143 |
+
    def consumeEntity(self, allowedChar=None, fromAttribute=False):
        """Consume one character reference starting just after an "&".

        The resolved text is either appended to the current attribute value
        (*fromAttribute*) or emitted as a Characters/SpaceCharacters token.
        *allowedChar* is the extra character (the attribute quote/delimiter)
        after which no entity is attempted.
        """
        # Initialise to the default output for when no entity is matched
        output = "&"

        charStack = [self.stream.char()]
        if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
                (allowedChar is not None and allowedChar == charStack[0])):
            # Not an entity at all: put the character back and emit a bare "&".
            self.stream.unget(charStack[0])

        elif charStack[0] == "#":
            # Read the next character to see if it's hex or decimal
            hex = False
            charStack.append(self.stream.char())
            if charStack[-1] in ("x", "X"):
                hex = True
                charStack.append(self.stream.char())

            # charStack[-1] should be the first digit
            if (hex and charStack[-1] in hexDigits) \
                    or (not hex and charStack[-1] in digits):
                # At least one digit found, so consume the whole number
                self.stream.unget(charStack[-1])
                output = self.consumeNumberEntity(hex)
            else:
                # No digits found
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "expected-numeric-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        else:
            # At this point in the process might have named entity. Entities
            # are stored in the global variable "entities".
            #
            # Consume characters and compare to these to a substring of the
            # entity names in the list until the substring no longer matches.
            while (charStack[-1] is not EOF):
                if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
                    break
                charStack.append(self.stream.char())

            # At this point we have a string that starts with some characters
            # that may match an entity
            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
            try:
                entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
                entityLength = len(entityName)
            except KeyError:
                entityName = None

            if entityName is not None:
                if entityName[-1] != ";":
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "named-entity-without-semicolon"})
                # In attributes, a legacy (no-semicolon) entity followed by an
                # alphanumeric or "=" is NOT treated as an entity.
                if (entityName[-1] != ";" and fromAttribute and
                    (charStack[entityLength] in asciiLetters or
                     charStack[entityLength] in digits or
                     charStack[entityLength] == "=")):
                    self.stream.unget(charStack.pop())
                    output = "&" + "".join(charStack)
                else:
                    output = entities[entityName]
                    self.stream.unget(charStack.pop())
                    # Any consumed characters past the entity name are kept.
                    output += "".join(charStack[entityLength:])
            else:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "expected-named-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        if fromAttribute:
            self.currentToken["data"][-1][1] += output
        else:
            if output in spaceCharacters:
                tokenType = "SpaceCharacters"
            else:
                tokenType = "Characters"
            self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
|
222 |
+
|
223 |
+
def processEntityInAttribute(self, allowedChar):
|
224 |
+
"""This method replaces the need for "entityInAttributeValueState".
|
225 |
+
"""
|
226 |
+
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
|
227 |
+
|
228 |
+
    def emitCurrentToken(self):
        """This method is a generic handler for emitting the tags. It also sets
        the state to "data" because that's what's needed after a token has been
        emitted.
        """
        token = self.currentToken
        # Add token to the queue to be yielded
        if (token["type"] in tagTokenTypes):
            # Tag names are ASCII case-insensitive; normalise to lowercase.
            token["name"] = token["name"].translate(asciiUpper2Lower)
            if token["type"] == tokenTypes["StartTag"]:
                raw = token["data"]
                data = attributeMap(raw)
                if len(raw) > len(data):
                    # we had some duplicated attribute, fix so first wins
                    # (building from the reversed list makes earlier pairs
                    # overwrite later duplicates)
                    data.update(raw[::-1])
                token["data"] = data

            if token["type"] == tokenTypes["EndTag"]:
                if token["data"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "attributes-in-end-tag"})
                if token["selfClosing"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "self-closing-flag-on-end-tag"})
        self.tokenQueue.append(token)
        self.state = self.dataState
|
254 |
+
|
255 |
+
# Below are the various tokenizer states worked out.
|
256 |
+
    def dataState(self):
        """Tokenizer "data" state: plain text between tags.

        Returns False at EOF to stop the tokenizer loop, True otherwise.
        """
        data = self.stream.char()
        if data == "&":
            self.state = self.entityDataState
        elif data == "<":
            self.state = self.tagOpenState
        elif data == "\u0000":
            # NULs are a parse error but are passed through in this state.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\u0000"})
        elif data is EOF:
            # Tokenization ends.
            return False
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                    data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            chars = self.stream.charsUntil(("&", "<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
|
284 |
+
|
285 |
+
def entityDataState(self):
|
286 |
+
self.consumeEntity()
|
287 |
+
self.state = self.dataState
|
288 |
+
return True
|
289 |
+
|
290 |
+
def rcdataState(self):
|
291 |
+
data = self.stream.char()
|
292 |
+
if data == "&":
|
293 |
+
self.state = self.characterReferenceInRcdata
|
294 |
+
elif data == "<":
|
295 |
+
self.state = self.rcdataLessThanSignState
|
296 |
+
elif data == EOF:
|
297 |
+
# Tokenization ends.
|
298 |
+
return False
|
299 |
+
elif data == "\u0000":
|
300 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"],
|
301 |
+
"data": "invalid-codepoint"})
|
302 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"],
|
303 |
+
"data": "\uFFFD"})
|
304 |
+
elif data in spaceCharacters:
|
305 |
+
# Directly after emitting a token you switch back to the "data
|
306 |
+
# state". At that point spaceCharacters are important so they are
|
307 |
+
# emitted separately.
|
308 |
+
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
|
309 |
+
data + self.stream.charsUntil(spaceCharacters, True)})
|
310 |
+
# No need to update lastFourChars here, since the first space will
|
311 |
+
# have already been appended to lastFourChars and will have broken
|
312 |
+
# any <!-- or --> sequences
|
313 |
+
else:
|
314 |
+
chars = self.stream.charsUntil(("&", "<", "\u0000"))
|
315 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
|
316 |
+
data + chars})
|
317 |
+
return True
|
318 |
+
|
319 |
+
def characterReferenceInRcdata(self):
|
320 |
+
self.consumeEntity()
|
321 |
+
self.state = self.rcdataState
|
322 |
+
return True
|
323 |
+
|
324 |
+
def rawtextState(self):
|
325 |
+
data = self.stream.char()
|
326 |
+
if data == "<":
|
327 |
+
self.state = self.rawtextLessThanSignState
|
328 |
+
elif data == "\u0000":
|
329 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"],
|
330 |
+
"data": "invalid-codepoint"})
|
331 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"],
|
332 |
+
"data": "\uFFFD"})
|
333 |
+
elif data == EOF:
|
334 |
+
# Tokenization ends.
|
335 |
+
return False
|
336 |
+
else:
|
337 |
+
chars = self.stream.charsUntil(("<", "\u0000"))
|
338 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
|
339 |
+
data + chars})
|
340 |
+
return True
|
341 |
+
|
342 |
+
def scriptDataState(self):
|
343 |
+
data = self.stream.char()
|
344 |
+
if data == "<":
|
345 |
+
self.state = self.scriptDataLessThanSignState
|
346 |
+
elif data == "\u0000":
|
347 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"],
|
348 |
+
"data": "invalid-codepoint"})
|
349 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"],
|
350 |
+
"data": "\uFFFD"})
|
351 |
+
elif data == EOF:
|
352 |
+
# Tokenization ends.
|
353 |
+
return False
|
354 |
+
else:
|
355 |
+
chars = self.stream.charsUntil(("<", "\u0000"))
|
356 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
|
357 |
+
data + chars})
|
358 |
+
return True
|
359 |
+
|
360 |
+
def plaintextState(self):
|
361 |
+
data = self.stream.char()
|
362 |
+
if data == EOF:
|
363 |
+
# Tokenization ends.
|
364 |
+
return False
|
365 |
+
elif data == "\u0000":
|
366 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"],
|
367 |
+
"data": "invalid-codepoint"})
|
368 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"],
|
369 |
+
"data": "\uFFFD"})
|
370 |
+
else:
|
371 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
|
372 |
+
data + self.stream.charsUntil("\u0000")})
|
373 |
+
return True
|
374 |
+
|
375 |
+
    def tagOpenState(self):
        """Handle the character after "<": dispatch to markup declaration,
        end tag, start tag, bogus comment, or recover by emitting "<"."""
        data = self.stream.char()
        if data == "!":
            self.state = self.markupDeclarationOpenState
        elif data == "/":
            self.state = self.closeTagOpenState
        elif data in asciiLetters:
            # A letter starts a start tag; attributes accumulate in "data".
            self.currentToken = {"type": tokenTypes["StartTag"],
                                 "name": data, "data": [],
                                 "selfClosing": False,
                                 "selfClosingAcknowledged": False}
            self.state = self.tagNameState
        elif data == ">":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-right-bracket"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
            self.state = self.dataState
        elif data == "?":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-question-mark"})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.dataState
        return True
|
409 |
+
|
410 |
+
    def closeTagOpenState(self):
        """Handle the character after "</": start an end tag, or recover
        from ">"/EOF/other with the appropriate parse error."""
        data = self.stream.char()
        if data in asciiLetters:
            self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                                 "data": [], "selfClosing": False}
            self.state = self.tagNameState
        elif data == ">":
            # "</>" is dropped entirely (with a parse error).
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-right-bracket"})
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-eof"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.state = self.dataState
        else:
            # XXX data can be _'_...
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-char",
                                    "datavars": {"data": data}})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        return True
|
433 |
+
|
434 |
+
    def tagNameState(self):
        """Accumulate the tag name character by character until whitespace,
        ">", "/", or EOF."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-tag-name"})
            self.state = self.dataState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            # NUL in a tag name becomes U+FFFD with a parse error.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
        else:
            self.currentToken["name"] += data
            # (Don't use charsUntil here, because tag names are
            # very short and it's faster to not do anything fancy)
        return True
|
455 |
+
|
456 |
+
def rcdataLessThanSignState(self):
|
457 |
+
data = self.stream.char()
|
458 |
+
if data == "/":
|
459 |
+
self.temporaryBuffer = ""
|
460 |
+
self.state = self.rcdataEndTagOpenState
|
461 |
+
else:
|
462 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
|
463 |
+
self.stream.unget(data)
|
464 |
+
self.state = self.rcdataState
|
465 |
+
return True
|
466 |
+
|
467 |
+
def rcdataEndTagOpenState(self):
|
468 |
+
data = self.stream.char()
|
469 |
+
if data in asciiLetters:
|
470 |
+
self.temporaryBuffer += data
|
471 |
+
self.state = self.rcdataEndTagNameState
|
472 |
+
else:
|
473 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
|
474 |
+
self.stream.unget(data)
|
475 |
+
self.state = self.rcdataState
|
476 |
+
return True
|
477 |
+
|
478 |
+
    def rcdataEndTagNameState(self):
        """Accumulate a candidate RCDATA end-tag name; it only counts as an
        end tag if it matches the open element ("appropriate" end tag)."""
        # "Appropriate" = matches the current open tag, case-insensitively.
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            # Not an appropriate end tag after all: flush "</" + buffer as
            # character data and reprocess in RCDATA.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rcdataState
        return True
|
505 |
+
|
506 |
+
def rawtextLessThanSignState(self):
|
507 |
+
data = self.stream.char()
|
508 |
+
if data == "/":
|
509 |
+
self.temporaryBuffer = ""
|
510 |
+
self.state = self.rawtextEndTagOpenState
|
511 |
+
else:
|
512 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
|
513 |
+
self.stream.unget(data)
|
514 |
+
self.state = self.rawtextState
|
515 |
+
return True
|
516 |
+
|
517 |
+
def rawtextEndTagOpenState(self):
|
518 |
+
data = self.stream.char()
|
519 |
+
if data in asciiLetters:
|
520 |
+
self.temporaryBuffer += data
|
521 |
+
self.state = self.rawtextEndTagNameState
|
522 |
+
else:
|
523 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
|
524 |
+
self.stream.unget(data)
|
525 |
+
self.state = self.rawtextState
|
526 |
+
return True
|
527 |
+
|
528 |
+
    def rawtextEndTagNameState(self):
        """Accumulate a candidate RAWTEXT end-tag name; it only counts as an
        end tag if it matches the open element ("appropriate" end tag)."""
        # "Appropriate" = matches the current open tag, case-insensitively.
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            # Not an appropriate end tag after all: flush "</" + buffer as
            # character data and reprocess in RAWTEXT.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True
|
555 |
+
|
556 |
+
def scriptDataLessThanSignState(self):
|
557 |
+
data = self.stream.char()
|
558 |
+
if data == "/":
|
559 |
+
self.temporaryBuffer = ""
|
560 |
+
self.state = self.scriptDataEndTagOpenState
|
561 |
+
elif data == "!":
|
562 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
|
563 |
+
self.state = self.scriptDataEscapeStartState
|
564 |
+
else:
|
565 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
|
566 |
+
self.stream.unget(data)
|
567 |
+
self.state = self.scriptDataState
|
568 |
+
return True
|
569 |
+
|
570 |
+
def scriptDataEndTagOpenState(self):
|
571 |
+
data = self.stream.char()
|
572 |
+
if data in asciiLetters:
|
573 |
+
self.temporaryBuffer += data
|
574 |
+
self.state = self.scriptDataEndTagNameState
|
575 |
+
else:
|
576 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
|
577 |
+
self.stream.unget(data)
|
578 |
+
self.state = self.scriptDataState
|
579 |
+
return True
|
580 |
+
|
581 |
+
def scriptDataEndTagNameState(self):
    """Collect the name of a potential "</...>" end tag in script data.

    ``appropriate`` is True only when the buffered name matches the last
    start tag's name case-insensitively; only then is a real EndTag token
    built. Otherwise the buffered text is re-emitted as characters.
    """
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        # Still reading the candidate tag name.
        self.temporaryBuffer += data
    else:
        # Not an appropriate end tag: emit "</name" as plain text and
        # reprocess the current character as script data.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.scriptDataState
    return True
|
608 |
+
|
609 |
+
def scriptDataEscapeStartState(self):
    """After "<!" in script data: look for the first "-" of "<!--"."""
    char = self.stream.char()
    if char != "-":
        # No escape start; put the character back and resume script data.
        self.stream.unget(char)
        self.state = self.scriptDataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapeStartDashState
    return True
|
618 |
+
|
619 |
+
def scriptDataEscapeStartDashState(self):
    """After "<!-" in script data: look for the second "-" of "<!--"."""
    char = self.stream.char()
    if char != "-":
        # Not an escaped section after all; reprocess as script data.
        self.stream.unget(char)
        self.state = self.scriptDataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashDashState
    return True
|
628 |
+
|
629 |
+
def scriptDataEscapedState(self):
    """Tokenize script data inside an escaped ("<!--") section."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashState
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == "\u0000":
        # NUL is a parse error; it is emitted as U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        self.state = self.dataState
    else:
        # Emit a whole run of ordinary characters in a single token.
        chars = self.stream.charsUntil(("<", "-", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
|
648 |
+
|
649 |
+
def scriptDataEscapedDashState(self):
    """Escaped script data after one "-": watch for "--" or "<"."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashDashState
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == "\u0000":
        # NUL is a parse error; it is emitted as U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataEscapedState
    elif data == EOF:
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataEscapedState
    return True
|
668 |
+
|
669 |
+
def scriptDataEscapedDashDashState(self):
    """Escaped script data after "--": ">" ends the escaped section."""
    data = self.stream.char()
    if data == "-":
        # Additional dashes stay in this state.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == ">":
        # "-->" terminates the escaped section.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
        self.state = self.scriptDataState
    elif data == "\u0000":
        # NUL is a parse error; it is emitted as U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataEscapedState
    elif data == EOF:
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataEscapedState
    return True
|
690 |
+
|
691 |
+
def scriptDataEscapedLessThanSignState(self):
    """Handle "<" seen while in escaped script data."""
    char = self.stream.char()
    if char == "/":
        # Possible end tag: start a fresh name buffer.
        self.temporaryBuffer = ""
        self.state = self.scriptDataEscapedEndTagOpenState
    elif char in asciiLetters:
        # Possible nested "<script": begin double-escape detection.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + char})
        self.temporaryBuffer = char
        self.state = self.scriptDataDoubleEscapeStartState
    else:
        # Plain "<": emit it and reprocess the current character.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(char)
        self.state = self.scriptDataEscapedState
    return True
|
705 |
+
|
706 |
+
def scriptDataEscapedEndTagOpenState(self):
    """Handle the first character after "</" in escaped script data."""
    char = self.stream.char()
    if char not in asciiLetters:
        # Not a tag name: "</" was literal text; reprocess the character.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(char)
        self.state = self.scriptDataEscapedState
    else:
        # Begin collecting the candidate end-tag name.
        self.temporaryBuffer = char
        self.state = self.scriptDataEscapedEndTagNameState
    return True
|
716 |
+
|
717 |
+
def scriptDataEscapedEndTagNameState(self):
    """Collect a potential end-tag name inside escaped script data.

    Mirrors scriptDataEndTagNameState, but the fallback state is the
    escaped script data state rather than plain script data.
    """
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        # Still reading the candidate tag name.
        self.temporaryBuffer += data
    else:
        # Not an appropriate end tag: emit "</name" as plain text and
        # reprocess the current character as escaped script data.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
|
744 |
+
|
745 |
+
def scriptDataDoubleEscapeStartState(self):
    """Decide whether a "<script" seen inside escaped script data starts
    a double-escaped section (buffered name compared to "script")."""
    data = self.stream.char()
    if data in (spaceCharacters | frozenset(("/", ">"))):
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        if self.temporaryBuffer.lower() == "script":
            self.state = self.scriptDataDoubleEscapedState
        else:
            self.state = self.scriptDataEscapedState
    elif data in asciiLetters:
        # Keep echoing and buffering the candidate tag name.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.temporaryBuffer += data
    else:
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
|
760 |
+
|
761 |
+
def scriptDataDoubleEscapedState(self):
    """Tokenize double-escaped script data (a nested <script> inside a
    "<!--" section); all characters are emitted as text."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataDoubleEscapedDashState
    elif data == "<":
        # Unlike the escaped state, "<" is emitted immediately here.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == "\u0000":
        # NUL is a parse error; it is emitted as U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
    return True
|
781 |
+
|
782 |
+
def scriptDataDoubleEscapedDashState(self):
    """Double-escaped script data after one "-"."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataDoubleEscapedDashDashState
    elif data == "<":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == "\u0000":
        # NUL is a parse error; it is emitted as U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataDoubleEscapedState
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataDoubleEscapedState
    return True
|
804 |
+
|
805 |
+
def scriptDataDoubleEscapedDashDashState(self):
    """Double-escaped script data after "--": ">" returns to script data."""
    data = self.stream.char()
    if data == "-":
        # Additional dashes stay in this state.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    elif data == "<":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == ">":
        # "-->" ends the (double-)escaped section entirely.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
        self.state = self.scriptDataState
    elif data == "\u0000":
        # NUL is a parse error; it is emitted as U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataDoubleEscapedState
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataDoubleEscapedState
    return True
|
829 |
+
|
830 |
+
def scriptDataDoubleEscapedLessThanSignState(self):
    """Handle the character after "<" in double-escaped script data."""
    char = self.stream.char()
    if char != "/":
        # Not an end tag; reprocess in the double-escaped state.
        self.stream.unget(char)
        self.state = self.scriptDataDoubleEscapedState
    else:
        # "</" may end the double-escaped section; echo the slash and
        # start buffering the candidate tag name.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
        self.temporaryBuffer = ""
        self.state = self.scriptDataDoubleEscapeEndState
    return True
|
840 |
+
|
841 |
+
def scriptDataDoubleEscapeEndState(self):
    """Decide whether "</script" ends the double-escaped section.

    Inverse of scriptDataDoubleEscapeStartState: a "script" match drops
    back to the (singly) escaped state.
    """
    data = self.stream.char()
    if data in (spaceCharacters | frozenset(("/", ">"))):
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        if self.temporaryBuffer.lower() == "script":
            self.state = self.scriptDataEscapedState
        else:
            self.state = self.scriptDataDoubleEscapedState
    elif data in asciiLetters:
        # Keep echoing and buffering the candidate tag name.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.temporaryBuffer += data
    else:
        self.stream.unget(data)
        self.state = self.scriptDataDoubleEscapedState
    return True
|
856 |
+
|
857 |
+
def beforeAttributeNameState(self):
    """Look for the start of an attribute name inside a tag.

    New attributes are appended to ``currentToken["data"]`` as
    ``[name, value]`` pairs.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        # Skip any further whitespace in one go.
        self.stream.charsUntil(spaceCharacters, True)
    elif data in asciiLetters:
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == ">":
        self.emitCurrentToken()
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data in ("'", '"', "=", "<"):
        # Parse error, but the character still starts an attribute name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "invalid-character-in-attribute-name"})
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"].append(["\uFFFD", ""])
        self.state = self.attributeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-attribute-name-but-got-eof"})
        self.state = self.dataState
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    return True
|
886 |
+
|
887 |
+
def attributeNameState(self):
    """Accumulate the current attribute's name.

    When the name is complete (``leavingThisState``), it is lowercased
    and checked for duplicates against earlier attributes; a duplicate
    only raises a parse error here (it is dropped later, at emit time).
    """
    data = self.stream.char()
    leavingThisState = True
    emitToken = False
    if data == "=":
        self.state = self.beforeAttributeValueState
    elif data in asciiLetters:
        # Consume a whole run of letters at once.
        self.currentToken["data"][-1][0] += data +\
            self.stream.charsUntil(asciiLetters, True)
        leavingThisState = False
    elif data == ">":
        # XXX If we emit here the attributes are converted to a dict
        # without being checked and when the code below runs we error
        # because data is a dict not a list
        emitToken = True
    elif data in spaceCharacters:
        self.state = self.afterAttributeNameState
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][0] += "\uFFFD"
        leavingThisState = False
    elif data in ("'", '"', "<"):
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data":
                                "invalid-character-in-attribute-name"})
        self.currentToken["data"][-1][0] += data
        leavingThisState = False
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-attribute-name"})
        self.state = self.dataState
    else:
        self.currentToken["data"][-1][0] += data
        leavingThisState = False

    if leavingThisState:
        # Attributes are not dropped at this stage. That happens when the
        # start tag token is emitted so values can still be safely appended
        # to attributes, but we do want to report the parse error in time.
        self.currentToken["data"][-1][0] = (
            self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
        for name, _ in self.currentToken["data"][:-1]:
            if self.currentToken["data"][-1][0] == name:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "duplicate-attribute"})
                break
        # XXX Fix for above XXX
        if emitToken:
            self.emitCurrentToken()
    return True
|
940 |
+
|
941 |
+
def afterAttributeNameState(self):
    """After an attribute name: expect "=", ">", "/", or a new attribute."""
    data = self.stream.char()
    if data in spaceCharacters:
        # Skip any further whitespace in one go.
        self.stream.charsUntil(spaceCharacters, True)
    elif data == "=":
        self.state = self.beforeAttributeValueState
    elif data == ">":
        self.emitCurrentToken()
    elif data in asciiLetters:
        # A new attribute starts without the previous one having a value.
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"].append(["\uFFFD", ""])
        self.state = self.attributeNameState
    elif data in ("'", '"', "<"):
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "invalid-character-after-attribute-name"})
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-end-of-tag-but-got-eof"})
        self.state = self.dataState
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    return True
|
972 |
+
|
973 |
+
def beforeAttributeValueState(self):
    """After "=": determine how the attribute value is quoted."""
    data = self.stream.char()
    if data in spaceCharacters:
        # Skip any further whitespace in one go.
        self.stream.charsUntil(spaceCharacters, True)
    elif data == "\"":
        self.state = self.attributeValueDoubleQuotedState
    elif data == "&":
        # "&" belongs to the unquoted value; reprocess it there.
        self.state = self.attributeValueUnQuotedState
        self.stream.unget(data)
    elif data == "'":
        self.state = self.attributeValueSingleQuotedState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-attribute-value-but-got-right-bracket"})
        self.emitCurrentToken()
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
        self.state = self.attributeValueUnQuotedState
    elif data in ("=", "<", "`"):
        # Parse error, but the character still starts an unquoted value.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "equals-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
        self.state = self.attributeValueUnQuotedState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-attribute-value-but-got-eof"})
        self.state = self.dataState
    else:
        self.currentToken["data"][-1][1] += data
        self.state = self.attributeValueUnQuotedState
    return True
|
1006 |
+
|
1007 |
+
def attributeValueDoubleQuotedState(self):
    """Accumulate a double-quoted attribute value."""
    data = self.stream.char()
    if data == "\"":
        self.state = self.afterAttributeValueState
    elif data == "&":
        # Character references are resolved with '"' as the allowed
        # additional terminating character.
        self.processEntityInAttribute('"')
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-attribute-value-double-quote"})
        self.state = self.dataState
    else:
        # Consume a whole run of ordinary characters at once.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("\"", "&", "\u0000"))
    return True
|
1025 |
+
|
1026 |
+
def attributeValueSingleQuotedState(self):
    """Accumulate a single-quoted attribute value."""
    data = self.stream.char()
    if data == "'":
        self.state = self.afterAttributeValueState
    elif data == "&":
        # Character references are resolved with "'" as the allowed
        # additional terminating character.
        self.processEntityInAttribute("'")
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-attribute-value-single-quote"})
        self.state = self.dataState
    else:
        # Consume a whole run of ordinary characters at once.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("'", "&", "\u0000"))
    return True
|
1044 |
+
|
1045 |
+
def attributeValueUnQuotedState(self):
    """Accumulate an unquoted attribute value (terminated by whitespace
    or ">")."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeAttributeNameState
    elif data == "&":
        # Character references are resolved with ">" as the allowed
        # additional terminating character.
        self.processEntityInAttribute(">")
    elif data == ">":
        self.emitCurrentToken()
    elif data in ('"', "'", "=", "<", "`"):
        # Parse error, but the character is still part of the value.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-character-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-attribute-value-no-quotes"})
        self.state = self.dataState
    else:
        # Consume a whole run of ordinary characters at once.
        self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
            frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
    return True
|
1069 |
+
|
1070 |
+
def afterAttributeValueState(self):
    """After a quoted attribute value: expect whitespace, ">", or "/"."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeAttributeNameState
    elif data == ">":
        self.emitCurrentToken()
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-EOF-after-attribute-value"})
        self.stream.unget(data)
        self.state = self.dataState
    else:
        # Anything else is a parse error; reprocess it as the start of
        # another attribute.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-character-after-attribute-value"})
        self.stream.unget(data)
        self.state = self.beforeAttributeNameState
    return True
|
1089 |
+
|
1090 |
+
def selfClosingStartTagState(self):
    """Handle the character after a "/" at the end of a tag."""
    char = self.stream.char()
    if char == ">":
        # "/>" closes the tag and marks it self-closing.
        self.currentToken["selfClosing"] = True
        self.emitCurrentToken()
    elif char is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data":
                                "unexpected-EOF-after-solidus-in-tag"})
        self.stream.unget(char)
        self.state = self.dataState
    else:
        # Stray "/": parse error; reprocess the character as a
        # potential attribute.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-character-after-solidus-in-tag"})
        self.stream.unget(char)
        self.state = self.beforeAttributeNameState
    return True
|
1107 |
+
|
1108 |
+
def bogusCommentState(self):
    """Emit everything up to the next ">" (or EOF) as a comment token.

    charsUntil stops at EOF by itself, so no explicit EOF check is
    needed here.
    """
    comment = self.stream.charsUntil(">").replace("\u0000", "\uFFFD")
    self.tokenQueue.append(
        {"type": tokenTypes["Comment"], "data": comment})

    # Consume the character that ended the comment — either the ">"
    # itself or the EOF marker.
    self.stream.char()
    self.state = self.dataState
    return True
|
1122 |
+
|
1123 |
+
def markupDeclarationOpenState(self):
    """After "<!": dispatch to comment, DOCTYPE, or CDATA handling.

    Consumed characters are kept on ``charStack`` so that, on a failed
    match, they can all be pushed back and reparsed as a bogus comment.
    """
    charStack = [self.stream.char()]
    if charStack[-1] == "-":
        charStack.append(self.stream.char())
        if charStack[-1] == "-":
            # "<!--": start a comment token.
            self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
            self.state = self.commentStartState
            return True
    elif charStack[-1] in ('d', 'D'):
        # Case-insensitive match of "DOCTYPE", one character at a time.
        matched = True
        for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                         ('y', 'Y'), ('p', 'P'), ('e', 'E')):
            charStack.append(self.stream.char())
            if charStack[-1] not in expected:
                matched = False
                break
        if matched:
            self.currentToken = {"type": tokenTypes["Doctype"],
                                 "name": "",
                                 "publicId": None, "systemId": None,
                                 "correct": True}
            self.state = self.doctypeState
            return True
    elif (charStack[-1] == "[" and
          self.parser is not None and
          self.parser.tree.openElements and
          self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
        # "<![CDATA[" is only honoured in foreign (non-HTML) content.
        matched = True
        for expected in ["C", "D", "A", "T", "A", "["]:
            charStack.append(self.stream.char())
            if charStack[-1] != expected:
                matched = False
                break
        if matched:
            self.state = self.cdataSectionState
            return True

    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                            "expected-dashes-or-doctype"})

    # Push everything back and let the bogus-comment state re-read it.
    while charStack:
        self.stream.unget(charStack.pop())
    self.state = self.bogusCommentState
    return True
|
1167 |
+
|
1168 |
+
def commentStartState(self):
    """First character after "<!--"."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentStartDashState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "\uFFFD"
    elif data == ">":
        # "<!-->" is an (incorrect) empty comment.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "incorrect-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["data"] += data
        self.state = self.commentState
    return True
|
1190 |
+
|
1191 |
+
def commentStartDashState(self):
    """After "<!---" (one dash into a just-started comment)."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentEndState
    elif data == "\u0000":
        # The pending "-" is flushed along with the replacement char.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "-\uFFFD"
    elif data == ">":
        # "<!--->" is an (incorrect) empty comment.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "incorrect-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # The pending "-" turns out to be comment content.
        self.currentToken["data"] += "-" + data
        self.state = self.commentState
    return True
|
1213 |
+
|
1214 |
+
def commentState(self):
    """Accumulate comment text until a "-" (possible "-->") or EOF."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentEndDashState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Consume a whole run of ordinary characters at once.
        self.currentToken["data"] += data + \
            self.stream.charsUntil(("-", "\u0000"))
    return True
|
1231 |
+
|
1232 |
+
def commentEndDashState(self):
    """Inside a comment after one "-"."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentEndState
    elif data == "\u0000":
        # The pending "-" is flushed along with the replacement char.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "-\uFFFD"
        self.state = self.commentState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment-end-dash"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # The pending "-" turns out to be comment content.
        self.currentToken["data"] += "-" + data
        self.state = self.commentState
    return True
|
1250 |
+
|
1251 |
+
def commentEndState(self):
    """Inside a comment after "--": ">" closes the comment."""
    data = self.stream.char()
    if data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "\u0000":
        # The pending "--" is flushed along with the replacement char.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "--\uFFFD"
        self.state = self.commentState
    elif data == "!":
        # "--!" gets its own state so "--!>" can be handled.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-bang-after-double-dash-in-comment"})
        self.state = self.commentEndBangState
    elif data == "-":
        # Extra dashes: only the newest one stays pending.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-dash-after-double-dash-in-comment"})
        self.currentToken["data"] += data
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment-double-dash"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # XXX
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-comment"})
        self.currentToken["data"] += "--" + data
        self.state = self.commentState
    return True
|
1281 |
+
|
1282 |
+
def commentEndBangState(self):
    """Inside a comment after "--!": ">" closes the comment."""
    data = self.stream.char()
    if data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "-":
        # "--!-": flush "--!" as content, keep one dash pending.
        self.currentToken["data"] += "--!"
        self.state = self.commentEndDashState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "--!\uFFFD"
        self.state = self.commentState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment-end-bang-state"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # "--!" turns out to be ordinary comment content.
        self.currentToken["data"] += "--!" + data
        self.state = self.commentState
    return True
|
1304 |
+
|
1305 |
+
def doctypeState(self):
    """Immediately after "<!DOCTYPE": expect whitespace before the name."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeDoctypeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-doctype-name-but-got-eof"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Missing whitespace is a parse error but parsing continues;
        # reprocess the character as the start of the name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "need-space-after-doctype"})
        self.stream.unget(data)
        self.state = self.beforeDoctypeNameState
    return True
|
1321 |
+
|
1322 |
+
def beforeDoctypeNameState(self):
|
1323 |
+
data = self.stream.char()
|
1324 |
+
if data in spaceCharacters:
|
1325 |
+
pass
|
1326 |
+
elif data == ">":
|
1327 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1328 |
+
"expected-doctype-name-but-got-right-bracket"})
|
1329 |
+
self.currentToken["correct"] = False
|
1330 |
+
self.tokenQueue.append(self.currentToken)
|
1331 |
+
self.state = self.dataState
|
1332 |
+
elif data == "\u0000":
|
1333 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"],
|
1334 |
+
"data": "invalid-codepoint"})
|
1335 |
+
self.currentToken["name"] = "\uFFFD"
|
1336 |
+
self.state = self.doctypeNameState
|
1337 |
+
elif data is EOF:
|
1338 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1339 |
+
"expected-doctype-name-but-got-eof"})
|
1340 |
+
self.currentToken["correct"] = False
|
1341 |
+
self.tokenQueue.append(self.currentToken)
|
1342 |
+
self.state = self.dataState
|
1343 |
+
else:
|
1344 |
+
self.currentToken["name"] = data
|
1345 |
+
self.state = self.doctypeNameState
|
1346 |
+
return True
|
1347 |
+
|
1348 |
+
    def doctypeNameState(self):
        """Doctype-name state: accumulate the DOCTYPE name character by character."""
        data = self.stream.char()
        if data in spaceCharacters:
            # Name finished; lowercase it (ASCII only) before moving on.
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.state = self.afterDoctypeNameState
        elif data == ">":
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            # Null characters are reported and replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype-name"})
            self.currentToken["correct"] = False
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] += data
        return True
|
1372 |
+
|
1373 |
+
    def afterDoctypeNameState(self):
        """After-doctype-name state.

        Looks ahead for the case-insensitive keywords PUBLIC or SYSTEM;
        anything else after the name makes the doctype bogus.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.currentToken["correct"] = False
            # Push the EOF back so the data state sees it as well.
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            if data in ("p", "P"):
                # Try to match the rest of "PUBLIC", one character at a time.
                matched = True
                for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                                 ("i", "I"), ("c", "C")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypePublicKeywordState
                    return True
            elif data in ("s", "S"):
                # Try to match the rest of "SYSTEM".
                matched = True
                for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                                 ("e", "E"), ("m", "M")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypeSystemKeywordState
                    return True

            # All the characters read before the current 'data' will be
            # [a-zA-Z], so they're garbage in the bogus doctype and can be
            # discarded; only the latest character might be '>' or EOF
            # and needs to be ungetted
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-space-or-right-bracket-in-doctype", "datavars":
                                    {"data": data}})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState

        return True
|
1423 |
+
|
1424 |
+
def afterDoctypePublicKeywordState(self):
|
1425 |
+
data = self.stream.char()
|
1426 |
+
if data in spaceCharacters:
|
1427 |
+
self.state = self.beforeDoctypePublicIdentifierState
|
1428 |
+
elif data in ("'", '"'):
|
1429 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1430 |
+
"unexpected-char-in-doctype"})
|
1431 |
+
self.stream.unget(data)
|
1432 |
+
self.state = self.beforeDoctypePublicIdentifierState
|
1433 |
+
elif data is EOF:
|
1434 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1435 |
+
"eof-in-doctype"})
|
1436 |
+
self.currentToken["correct"] = False
|
1437 |
+
self.tokenQueue.append(self.currentToken)
|
1438 |
+
self.state = self.dataState
|
1439 |
+
else:
|
1440 |
+
self.stream.unget(data)
|
1441 |
+
self.state = self.beforeDoctypePublicIdentifierState
|
1442 |
+
return True
|
1443 |
+
|
1444 |
+
def beforeDoctypePublicIdentifierState(self):
|
1445 |
+
data = self.stream.char()
|
1446 |
+
if data in spaceCharacters:
|
1447 |
+
pass
|
1448 |
+
elif data == "\"":
|
1449 |
+
self.currentToken["publicId"] = ""
|
1450 |
+
self.state = self.doctypePublicIdentifierDoubleQuotedState
|
1451 |
+
elif data == "'":
|
1452 |
+
self.currentToken["publicId"] = ""
|
1453 |
+
self.state = self.doctypePublicIdentifierSingleQuotedState
|
1454 |
+
elif data == ">":
|
1455 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1456 |
+
"unexpected-end-of-doctype"})
|
1457 |
+
self.currentToken["correct"] = False
|
1458 |
+
self.tokenQueue.append(self.currentToken)
|
1459 |
+
self.state = self.dataState
|
1460 |
+
elif data is EOF:
|
1461 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1462 |
+
"eof-in-doctype"})
|
1463 |
+
self.currentToken["correct"] = False
|
1464 |
+
self.tokenQueue.append(self.currentToken)
|
1465 |
+
self.state = self.dataState
|
1466 |
+
else:
|
1467 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1468 |
+
"unexpected-char-in-doctype"})
|
1469 |
+
self.currentToken["correct"] = False
|
1470 |
+
self.state = self.bogusDoctypeState
|
1471 |
+
return True
|
1472 |
+
|
1473 |
+
def doctypePublicIdentifierDoubleQuotedState(self):
|
1474 |
+
data = self.stream.char()
|
1475 |
+
if data == "\"":
|
1476 |
+
self.state = self.afterDoctypePublicIdentifierState
|
1477 |
+
elif data == "\u0000":
|
1478 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"],
|
1479 |
+
"data": "invalid-codepoint"})
|
1480 |
+
self.currentToken["publicId"] += "\uFFFD"
|
1481 |
+
elif data == ">":
|
1482 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1483 |
+
"unexpected-end-of-doctype"})
|
1484 |
+
self.currentToken["correct"] = False
|
1485 |
+
self.tokenQueue.append(self.currentToken)
|
1486 |
+
self.state = self.dataState
|
1487 |
+
elif data is EOF:
|
1488 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1489 |
+
"eof-in-doctype"})
|
1490 |
+
self.currentToken["correct"] = False
|
1491 |
+
self.tokenQueue.append(self.currentToken)
|
1492 |
+
self.state = self.dataState
|
1493 |
+
else:
|
1494 |
+
self.currentToken["publicId"] += data
|
1495 |
+
return True
|
1496 |
+
|
1497 |
+
    def doctypePublicIdentifierSingleQuotedState(self):
        """Accumulate a single-quoted DOCTYPE public identifier.

        Ends on the closing quote; '>' or EOF abort the doctype (marking it
        incorrect); U+0000 becomes U+FFFD with a parse error.
        """
        data = self.stream.char()
        if data == "'":
            self.state = self.afterDoctypePublicIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += data
        return True
|
1520 |
+
|
1521 |
+
    def afterDoctypePublicIdentifierState(self):
        """After the public identifier: expect space, '>', or a system id.

        A quote here (without intervening whitespace) is a parse error but
        still opens the system identifier.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.betweenDoctypePublicAndSystemIdentifiersState
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == '"':
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
|
1550 |
+
|
1551 |
+
def betweenDoctypePublicAndSystemIdentifiersState(self):
|
1552 |
+
data = self.stream.char()
|
1553 |
+
if data in spaceCharacters:
|
1554 |
+
pass
|
1555 |
+
elif data == ">":
|
1556 |
+
self.tokenQueue.append(self.currentToken)
|
1557 |
+
self.state = self.dataState
|
1558 |
+
elif data == '"':
|
1559 |
+
self.currentToken["systemId"] = ""
|
1560 |
+
self.state = self.doctypeSystemIdentifierDoubleQuotedState
|
1561 |
+
elif data == "'":
|
1562 |
+
self.currentToken["systemId"] = ""
|
1563 |
+
self.state = self.doctypeSystemIdentifierSingleQuotedState
|
1564 |
+
elif data == EOF:
|
1565 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1566 |
+
"eof-in-doctype"})
|
1567 |
+
self.currentToken["correct"] = False
|
1568 |
+
self.tokenQueue.append(self.currentToken)
|
1569 |
+
self.state = self.dataState
|
1570 |
+
else:
|
1571 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1572 |
+
"unexpected-char-in-doctype"})
|
1573 |
+
self.currentToken["correct"] = False
|
1574 |
+
self.state = self.bogusDoctypeState
|
1575 |
+
return True
|
1576 |
+
|
1577 |
+
def afterDoctypeSystemKeywordState(self):
|
1578 |
+
data = self.stream.char()
|
1579 |
+
if data in spaceCharacters:
|
1580 |
+
self.state = self.beforeDoctypeSystemIdentifierState
|
1581 |
+
elif data in ("'", '"'):
|
1582 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1583 |
+
"unexpected-char-in-doctype"})
|
1584 |
+
self.stream.unget(data)
|
1585 |
+
self.state = self.beforeDoctypeSystemIdentifierState
|
1586 |
+
elif data is EOF:
|
1587 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1588 |
+
"eof-in-doctype"})
|
1589 |
+
self.currentToken["correct"] = False
|
1590 |
+
self.tokenQueue.append(self.currentToken)
|
1591 |
+
self.state = self.dataState
|
1592 |
+
else:
|
1593 |
+
self.stream.unget(data)
|
1594 |
+
self.state = self.beforeDoctypeSystemIdentifierState
|
1595 |
+
return True
|
1596 |
+
|
1597 |
+
    def beforeDoctypeSystemIdentifierState(self):
        """Skip whitespace, then open the quoted DOCTYPE system identifier."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data == ">":
            # '>' before any identifier: emit the (incorrect) doctype.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
|
1625 |
+
|
1626 |
+
def doctypeSystemIdentifierDoubleQuotedState(self):
|
1627 |
+
data = self.stream.char()
|
1628 |
+
if data == "\"":
|
1629 |
+
self.state = self.afterDoctypeSystemIdentifierState
|
1630 |
+
elif data == "\u0000":
|
1631 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"],
|
1632 |
+
"data": "invalid-codepoint"})
|
1633 |
+
self.currentToken["systemId"] += "\uFFFD"
|
1634 |
+
elif data == ">":
|
1635 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1636 |
+
"unexpected-end-of-doctype"})
|
1637 |
+
self.currentToken["correct"] = False
|
1638 |
+
self.tokenQueue.append(self.currentToken)
|
1639 |
+
self.state = self.dataState
|
1640 |
+
elif data is EOF:
|
1641 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1642 |
+
"eof-in-doctype"})
|
1643 |
+
self.currentToken["correct"] = False
|
1644 |
+
self.tokenQueue.append(self.currentToken)
|
1645 |
+
self.state = self.dataState
|
1646 |
+
else:
|
1647 |
+
self.currentToken["systemId"] += data
|
1648 |
+
return True
|
1649 |
+
|
1650 |
+
    def doctypeSystemIdentifierSingleQuotedState(self):
        """Accumulate a single-quoted DOCTYPE system identifier.

        Ends on the closing quote; '>' or EOF abort the doctype (marking it
        incorrect); U+0000 becomes U+FFFD with a parse error.
        """
        data = self.stream.char()
        if data == "'":
            self.state = self.afterDoctypeSystemIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["systemId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["systemId"] += data
        return True
|
1673 |
+
|
1674 |
+
def afterDoctypeSystemIdentifierState(self):
|
1675 |
+
data = self.stream.char()
|
1676 |
+
if data in spaceCharacters:
|
1677 |
+
pass
|
1678 |
+
elif data == ">":
|
1679 |
+
self.tokenQueue.append(self.currentToken)
|
1680 |
+
self.state = self.dataState
|
1681 |
+
elif data is EOF:
|
1682 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1683 |
+
"eof-in-doctype"})
|
1684 |
+
self.currentToken["correct"] = False
|
1685 |
+
self.tokenQueue.append(self.currentToken)
|
1686 |
+
self.state = self.dataState
|
1687 |
+
else:
|
1688 |
+
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
|
1689 |
+
"unexpected-char-in-doctype"})
|
1690 |
+
self.state = self.bogusDoctypeState
|
1691 |
+
return True
|
1692 |
+
|
1693 |
+
def bogusDoctypeState(self):
|
1694 |
+
data = self.stream.char()
|
1695 |
+
if data == ">":
|
1696 |
+
self.tokenQueue.append(self.currentToken)
|
1697 |
+
self.state = self.dataState
|
1698 |
+
elif data is EOF:
|
1699 |
+
# XXX EMIT
|
1700 |
+
self.stream.unget(data)
|
1701 |
+
self.tokenQueue.append(self.currentToken)
|
1702 |
+
self.state = self.dataState
|
1703 |
+
else:
|
1704 |
+
pass
|
1705 |
+
return True
|
1706 |
+
|
1707 |
+
    def cdataSectionState(self):
        """Consume a CDATA section and emit its contents as a Characters token."""
        data = []
        while True:
            # charsUntil("]") then charsUntil(">") advances to a candidate
            # "]]>" terminator (or EOF) with bulk reads instead of per-char.
            data.append(self.stream.charsUntil("]"))
            data.append(self.stream.charsUntil(">"))
            char = self.stream.char()
            if char == EOF:
                break
            else:
                assert char == ">"
                if data[-1][-2:] == "]]":
                    # Found the "]]>" terminator; drop the trailing "]]".
                    data[-1] = data[-1][:-2]
                    break
                else:
                    # A '>' not preceded by "]]" is ordinary content.
                    data.append(char)

        data = "".join(data)  # pylint:disable=redefined-variable-type
        # Deal with null here rather than in the parser
        nullCount = data.count("\u0000")
        if nullCount > 0:
            # One parse error per null character, then replace them all.
            for _ in range(nullCount):
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "invalid-codepoint"})
            data = data.replace("\u0000", "\uFFFD")
        if data:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": data})
        self.state = self.dataState
        return True
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__init__.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from .py import Trie
|
4 |
+
|
5 |
+
__all__ = ["Trie"]
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (345 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (1.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-310.pyc
ADDED
Binary file (2.26 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/_base.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
try:
|
4 |
+
from collections.abc import Mapping
|
5 |
+
except ImportError: # Python 2.7
|
6 |
+
from collections import Mapping
|
7 |
+
|
8 |
+
|
9 |
+
class Trie(Mapping):
    """Abstract base class for tries.

    Subclasses provide the Mapping protocol; this class layers prefix
    queries (``keys(prefix)``, ``has_keys_with_prefix``, ``longest_prefix``)
    on top of it.
    """

    def keys(self, prefix=None):
        # pylint:disable=arguments-differ
        all_keys = set(super(Trie, self).keys())
        if prefix is None:
            return all_keys
        return {key for key in all_keys if key.startswith(prefix)}

    def has_keys_with_prefix(self, prefix):
        """Return True if any key starts with *prefix*."""
        return any(key.startswith(prefix) for key in self.keys())

    def longest_prefix(self, prefix):
        """Return the longest key that is a prefix of *prefix*.

        Raises KeyError when no key (not even the empty string) matches.
        """
        # Try the full string first, then progressively shorter prefixes
        # down to the empty string.
        for end in range(len(prefix), -1, -1):
            candidate = prefix[:end]
            if candidate in self:
                return candidate
        raise KeyError(prefix)

    def longest_prefix_item(self, prefix):
        """Return ``(key, value)`` for the longest matching prefix key."""
        match = self.longest_prefix(prefix)
        return (match, self[match])
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/py.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
from pip._vendor.six import text_type
|
3 |
+
|
4 |
+
from bisect import bisect_left
|
5 |
+
|
6 |
+
from ._base import Trie as ABCTrie
|
7 |
+
|
8 |
+
|
9 |
+
class Trie(ABCTrie):
    """Trie implementation backed by a plain dict plus a sorted key list.

    Prefix queries binary-search the sorted key list; the window of the
    last query is cached and reused when the next prefix extends it.
    """

    def __init__(self, data):
        # Keys must be text; byte keys would break ordering and caching.
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")

        self._data = data
        self._keys = sorted(data.keys())
        # (_cachestr, _cachepoints) remember the last prefix queried and
        # the (lo, hi) slice of self._keys that matched it.
        self._cachestr = ""
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return the set of keys, optionally restricted to *prefix*."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)

        if prefix.startswith(self._cachestr):
            # Any key matching an extension of the cached prefix lies
            # inside the cached window, so narrow the binary search.
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)

        keys = set()
        if start == len(self._keys):
            return keys

        # Fixed: bound the scan so it cannot index past the end of
        # self._keys when every remaining key matches the prefix
        # (previously raised IndexError in that case).
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1

        self._cachestr = prefix
        self._cachepoints = (start, i)

        return keys

    def has_keys_with_prefix(self, prefix):
        """Return True if any key starts with *prefix*."""
        if prefix in self._data:
            return True

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)

        if i == len(self._keys):
            return False

        return self._keys[i].startswith(prefix)
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/_utils.py
ADDED
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from types import ModuleType
|
4 |
+
|
5 |
+
try:
|
6 |
+
from collections.abc import Mapping
|
7 |
+
except ImportError:
|
8 |
+
from collections import Mapping
|
9 |
+
|
10 |
+
from pip._vendor.six import text_type, PY3
|
11 |
+
|
12 |
+
if PY3:
|
13 |
+
import xml.etree.ElementTree as default_etree
|
14 |
+
else:
|
15 |
+
try:
|
16 |
+
import xml.etree.cElementTree as default_etree
|
17 |
+
except ImportError:
|
18 |
+
import xml.etree.ElementTree as default_etree
|
19 |
+
|
20 |
+
|
21 |
+
__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
|
22 |
+
"surrogatePairToCodepoint", "moduleFactoryFactory",
|
23 |
+
"supports_lone_surrogates"]
|
24 |
+
|
25 |
+
|
26 |
+
# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be
|
27 |
+
# caught by the below test. In general this would be any platform
|
28 |
+
# using UTF-16 as its encoding of unicode strings, such as
|
29 |
+
# Jython. This is because UTF-16 itself is based on the use of such
|
30 |
+
# surrogates, and there is no mechanism to further escape such
|
31 |
+
# escapes.
|
32 |
+
try:
    # If the interpreter can represent a lone surrogate as a one-character
    # text string, lone surrogates are supported (see comment above).
    _x = eval('"\\uD800"')  # pylint:disable=eval-used
    if not isinstance(_x, text_type):
        # We need this with u"" because of http://bugs.jython.org/issue2039
        _x = eval('u"\\uD800"')  # pylint:disable=eval-used
        assert isinstance(_x, text_type)
except Exception:
    supports_lone_surrogates = False
else:
    supports_lone_surrogates = True
|
42 |
+
|
43 |
+
|
44 |
+
class MethodDispatcher(dict):
    """A dict with two extra behaviours.

    At construction time, any key that is a list, tuple, set or frozenset
    is expanded so each of its members maps to the same value:

        md = MethodDispatcher({("foo", "bar"): "baz"})
        md["foo"] == "baz"

    Missing keys return the ``default`` attribute (initially None) instead
    of raising.
    """

    def __init__(self, items=()):
        entries = []
        for name, value in items:
            if isinstance(name, (list, tuple, frozenset, set)):
                # Fan out the collection key to one entry per member.
                entries.extend((member, value) for member in name)
            else:
                entries.append((name, value))
        dict.__init__(self, entries)
        assert len(self) == len(entries)
        self.default = None

    def __getitem__(self, key):
        return dict.get(self, key, self.default)

    def __get__(self, instance, owner=None):
        return BoundMethodDispatcher(instance, self)
|
74 |
+
|
75 |
+
|
76 |
+
class BoundMethodDispatcher(Mapping):
    """Wraps a MethodDispatcher, binding its return values to `instance`"""

    def __init__(self, instance, dispatcher):
        self.instance = instance
        self.dispatcher = dispatcher

    def __getitem__(self, key):
        # Functions are descriptors: calling __get__ binds the looked-up
        # function to self.instance, yielding a bound method (see the
        # datamodel docs on object.__get__).
        return self.dispatcher[key].__get__(self.instance)

    def get(self, key, default):
        if key not in self.dispatcher:
            return default
        return self[key]

    def __iter__(self):
        return iter(self.dispatcher)

    def __len__(self):
        return len(self.dispatcher)

    def __contains__(self, key):
        return key in self.dispatcher
|
101 |
+
|
102 |
+
|
103 |
+
# Some utility functions to deal with weirdness around UCS2 vs UCS4
|
104 |
+
# python builds
|
105 |
+
|
106 |
+
def isSurrogatePair(data):
    """Return True if *data* is exactly a UTF-16 high+low surrogate pair."""
    if len(data) != 2:
        return False
    high, low = ord(data[0]), ord(data[1])
    return 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF
|
110 |
+
|
111 |
+
|
112 |
+
def surrogatePairToCodepoint(data):
    """Combine a UTF-16 surrogate pair into its astral code point."""
    high = ord(data[0]) - 0xD800
    low = ord(data[1]) - 0xDC00
    return 0x10000 + high * 0x400 + low
|
116 |
+
|
117 |
+
# Module Factory Factory (no, this isn't Java, I know)
|
118 |
+
# Here to stop this being duplicated all over the place.
|
119 |
+
|
120 |
+
|
121 |
+
def moduleFactoryFactory(factory):
    """Return a memoizing wrapper around the module *factory*.

    The returned callable builds a fresh module from
    ``factory(baseModule, *args, **kwargs)`` the first time it is called
    for a given (module name, args, kwargs) combination, and returns the
    cached module on subsequent calls.
    """
    moduleCache = {}

    def moduleFactory(baseModule, *args, **kwargs):
        # Module names must be str on Python 3 and bytes on Python 2;
        # probe ModuleType.__name__ to pick the right type.
        if isinstance(ModuleType.__name__, type("")):
            name = "_%s_factory" % baseModule.__name__
        else:
            name = b"_%s_factory" % baseModule.__name__

        kwargs_tuple = tuple(kwargs.items())

        try:
            return moduleCache[name][args][kwargs_tuple]
        except KeyError:
            mod = ModuleType(name)
            objs = factory(baseModule, *args, **kwargs)
            mod.__dict__.update(objs)
            # Fixed: index the cache by the actual key values. The old code
            # tested the literal strings "name"/"args"/"kwargs" for
            # membership, which reset moduleCache[name] on every cache miss
            # and silently dropped previously cached sibling entries.
            moduleCache.setdefault(name, {}).setdefault(args, {})[kwargs_tuple] = mod
            return mod

    return moduleFactory
|
148 |
+
|
149 |
+
|
150 |
+
def memoize(func):
    """Cache *func*'s results keyed on its (hashable) arguments."""
    cache = {}

    def wrapped(*args, **kwargs):
        key = (tuple(args), tuple(kwargs.items()))
        try:
            return cache[key]
        except KeyError:
            result = func(*args, **kwargs)
            cache[key] = result
            return result

    return wrapped
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/constants.py
ADDED
@@ -0,0 +1,2946 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import absolute_import, division, unicode_literals

import string

# End-of-file sentinel: the input stream yields None when exhausted.
EOF = None
# E maps parse-error codes (as emitted by the tokenizer/tree builder) to
# human-readable message templates.  Many templates contain printf-style
# named placeholders such as %(name)s and are filled in with a dict of
# data variables at report time.
E = {
    "null-character":
        "Null character in input stream, replaced with U+FFFD.",
    "invalid-codepoint":
        "Invalid codepoint in stream.",
    "incorrectly-placed-solidus":
        "Solidus (/) incorrectly placed in tag.",
    "incorrect-cr-newline-entity":
        "Incorrect CR newline entity, replaced with LF.",
    "illegal-windows-1252-entity":
        "Entity used with illegal number (windows-1252 reference).",
    "cant-convert-numeric-entity":
        "Numeric entity couldn't be converted to character "
        "(codepoint U+%(charAsInt)08x).",
    "illegal-codepoint-for-numeric-entity":
        "Numeric entity represents an illegal codepoint: "
        "U+%(charAsInt)08x.",
    "numeric-entity-without-semicolon":
        "Numeric entity didn't end with ';'.",
    "expected-numeric-entity-but-got-eof":
        "Numeric entity expected. Got end of file instead.",
    "expected-numeric-entity":
        "Numeric entity expected but none found.",
    "named-entity-without-semicolon":
        "Named entity didn't end with ';'.",
    "expected-named-entity":
        "Named entity expected. Got none.",
    "attributes-in-end-tag":
        "End tag contains unexpected attributes.",
    'self-closing-flag-on-end-tag':
        "End tag contains unexpected self-closing flag.",
    "expected-tag-name-but-got-right-bracket":
        "Expected tag name. Got '>' instead.",
    "expected-tag-name-but-got-question-mark":
        "Expected tag name. Got '?' instead. (HTML doesn't "
        "support processing instructions.)",
    "expected-tag-name":
        "Expected tag name. Got something else instead",
    "expected-closing-tag-but-got-right-bracket":
        "Expected closing tag. Got '>' instead. Ignoring '</>'.",
    "expected-closing-tag-but-got-eof":
        "Expected closing tag. Unexpected end of file.",
    "expected-closing-tag-but-got-char":
        "Expected closing tag. Unexpected character '%(data)s' found.",
    "eof-in-tag-name":
        "Unexpected end of file in the tag name.",
    "expected-attribute-name-but-got-eof":
        "Unexpected end of file. Expected attribute name instead.",
    "eof-in-attribute-name":
        "Unexpected end of file in attribute name.",
    "invalid-character-in-attribute-name":
        "Invalid character in attribute name",
    "duplicate-attribute":
        "Dropped duplicate attribute on tag.",
    "expected-end-of-tag-name-but-got-eof":
        "Unexpected end of file. Expected = or end of tag.",
    "expected-attribute-value-but-got-eof":
        "Unexpected end of file. Expected attribute value.",
    "expected-attribute-value-but-got-right-bracket":
        "Expected attribute value. Got '>' instead.",
    'equals-in-unquoted-attribute-value':
        "Unexpected = in unquoted attribute",
    'unexpected-character-in-unquoted-attribute-value':
        "Unexpected character in unquoted attribute",
    "invalid-character-after-attribute-name":
        "Unexpected character after attribute name.",
    "unexpected-character-after-attribute-value":
        "Unexpected character after attribute value.",
    "eof-in-attribute-value-double-quote":
        "Unexpected end of file in attribute value (\").",
    "eof-in-attribute-value-single-quote":
        "Unexpected end of file in attribute value (').",
    "eof-in-attribute-value-no-quotes":
        "Unexpected end of file in attribute value.",
    "unexpected-EOF-after-solidus-in-tag":
        "Unexpected end of file in tag. Expected >",
    "unexpected-character-after-solidus-in-tag":
        "Unexpected character after / in tag. Expected >",
    "expected-dashes-or-doctype":
        "Expected '--' or 'DOCTYPE'. Not found.",
    "unexpected-bang-after-double-dash-in-comment":
        "Unexpected ! after -- in comment",
    "unexpected-space-after-double-dash-in-comment":
        "Unexpected space after -- in comment",
    "incorrect-comment":
        "Incorrect comment.",
    "eof-in-comment":
        "Unexpected end of file in comment.",
    "eof-in-comment-end-dash":
        "Unexpected end of file in comment (-)",
    "unexpected-dash-after-double-dash-in-comment":
        "Unexpected '-' after '--' found in comment.",
    "eof-in-comment-double-dash":
        "Unexpected end of file in comment (--).",
    "eof-in-comment-end-space-state":
        "Unexpected end of file in comment.",
    "eof-in-comment-end-bang-state":
        "Unexpected end of file in comment.",
    "unexpected-char-in-comment":
        "Unexpected character in comment found.",
    "need-space-after-doctype":
        "No space after literal string 'DOCTYPE'.",
    "expected-doctype-name-but-got-right-bracket":
        "Unexpected > character. Expected DOCTYPE name.",
    "expected-doctype-name-but-got-eof":
        "Unexpected end of file. Expected DOCTYPE name.",
    "eof-in-doctype-name":
        "Unexpected end of file in DOCTYPE name.",
    "eof-in-doctype":
        "Unexpected end of file in DOCTYPE.",
    "expected-space-or-right-bracket-in-doctype":
        "Expected space or '>'. Got '%(data)s'",
    "unexpected-end-of-doctype":
        "Unexpected end of DOCTYPE.",
    "unexpected-char-in-doctype":
        "Unexpected character in DOCTYPE.",
    "eof-in-innerhtml":
        "XXX innerHTML EOF",
    "unexpected-doctype":
        "Unexpected DOCTYPE. Ignored.",
    "non-html-root":
        "html needs to be the first start tag.",
    "expected-doctype-but-got-eof":
        "Unexpected End of file. Expected DOCTYPE.",
    "unknown-doctype":
        "Erroneous DOCTYPE.",
    "expected-doctype-but-got-chars":
        "Unexpected non-space characters. Expected DOCTYPE.",
    "expected-doctype-but-got-start-tag":
        "Unexpected start tag (%(name)s). Expected DOCTYPE.",
    "expected-doctype-but-got-end-tag":
        "Unexpected end tag (%(name)s). Expected DOCTYPE.",
    "end-tag-after-implied-root":
        "Unexpected end tag (%(name)s) after the (implied) root element.",
    "expected-named-closing-tag-but-got-eof":
        "Unexpected end of file. Expected end tag (%(name)s).",
    "two-heads-are-not-better-than-one":
        "Unexpected start tag head in existing head. Ignored.",
    "unexpected-end-tag":
        "Unexpected end tag (%(name)s). Ignored.",
    "unexpected-start-tag-out-of-my-head":
        "Unexpected start tag (%(name)s) that can be in head. Moved.",
    "unexpected-start-tag":
        "Unexpected start tag (%(name)s).",
    "missing-end-tag":
        "Missing end tag (%(name)s).",
    "missing-end-tags":
        "Missing end tags (%(name)s).",
    "unexpected-start-tag-implies-end-tag":
        "Unexpected start tag (%(startName)s) "
        "implies end tag (%(endName)s).",
    "unexpected-start-tag-treated-as":
        "Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
    "deprecated-tag":
        "Unexpected start tag %(name)s. Don't use it!",
    "unexpected-start-tag-ignored":
        "Unexpected start tag %(name)s. Ignored.",
    "expected-one-end-tag-but-got-another":
        "Unexpected end tag (%(gotName)s). "
        "Missing end tag (%(expectedName)s).",
    "end-tag-too-early":
        "End tag (%(name)s) seen too early. Expected other end tag.",
    "end-tag-too-early-named":
        "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
    "end-tag-too-early-ignored":
        "End tag (%(name)s) seen too early. Ignored.",
    "adoption-agency-1.1":
        "End tag (%(name)s) violates step 1, "
        "paragraph 1 of the adoption agency algorithm.",
    "adoption-agency-1.2":
        "End tag (%(name)s) violates step 1, "
        "paragraph 2 of the adoption agency algorithm.",
    "adoption-agency-1.3":
        "End tag (%(name)s) violates step 1, "
        "paragraph 3 of the adoption agency algorithm.",
    "adoption-agency-4.4":
        "End tag (%(name)s) violates step 4, "
        "paragraph 4 of the adoption agency algorithm.",
    "unexpected-end-tag-treated-as":
        "Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
    "no-end-tag":
        "This element (%(name)s) has no end tag.",
    "unexpected-implied-end-tag-in-table":
        "Unexpected implied end tag (%(name)s) in the table phase.",
    "unexpected-implied-end-tag-in-table-body":
        "Unexpected implied end tag (%(name)s) in the table body phase.",
    "unexpected-char-implies-table-voodoo":
        "Unexpected non-space characters in "
        "table context caused voodoo mode.",
    "unexpected-hidden-input-in-table":
        "Unexpected input with type hidden in table context.",
    "unexpected-form-in-table":
        "Unexpected form in table context.",
    "unexpected-start-tag-implies-table-voodoo":
        "Unexpected start tag (%(name)s) in "
        "table context caused voodoo mode.",
    "unexpected-end-tag-implies-table-voodoo":
        "Unexpected end tag (%(name)s) in "
        "table context caused voodoo mode.",
    "unexpected-cell-in-table-body":
        "Unexpected table cell start tag (%(name)s) "
        "in the table body phase.",
    "unexpected-cell-end-tag":
        "Got table cell end tag (%(name)s) "
        "while required end tags are missing.",
    "unexpected-end-tag-in-table-body":
        "Unexpected end tag (%(name)s) in the table body phase. Ignored.",
    "unexpected-implied-end-tag-in-table-row":
        "Unexpected implied end tag (%(name)s) in the table row phase.",
    "unexpected-end-tag-in-table-row":
        "Unexpected end tag (%(name)s) in the table row phase. Ignored.",
    "unexpected-select-in-select":
        "Unexpected select start tag in the select phase "
        "treated as select end tag.",
    "unexpected-input-in-select":
        "Unexpected input start tag in the select phase.",
    "unexpected-start-tag-in-select":
        "Unexpected start tag token (%(name)s in the select phase. "
        "Ignored.",
    "unexpected-end-tag-in-select":
        "Unexpected end tag (%(name)s) in the select phase. Ignored.",
    "unexpected-table-element-start-tag-in-select-in-table":
        "Unexpected table element start tag (%(name)s) in the select in table phase.",
    "unexpected-table-element-end-tag-in-select-in-table":
        "Unexpected table element end tag (%(name)s) in the select in table phase.",
    "unexpected-char-after-body":
        "Unexpected non-space characters in the after body phase.",
    "unexpected-start-tag-after-body":
        "Unexpected start tag token (%(name)s)"
        " in the after body phase.",
    "unexpected-end-tag-after-body":
        "Unexpected end tag token (%(name)s)"
        " in the after body phase.",
    "unexpected-char-in-frameset":
        "Unexpected characters in the frameset phase. Characters ignored.",
    "unexpected-start-tag-in-frameset":
        "Unexpected start tag token (%(name)s)"
        " in the frameset phase. Ignored.",
    "unexpected-frameset-in-frameset-innerhtml":
        "Unexpected end tag token (frameset) "
        "in the frameset phase (innerHTML).",
    "unexpected-end-tag-in-frameset":
        "Unexpected end tag token (%(name)s)"
        " in the frameset phase. Ignored.",
    "unexpected-char-after-frameset":
        "Unexpected non-space characters in the "
        "after frameset phase. Ignored.",
    "unexpected-start-tag-after-frameset":
        "Unexpected start tag (%(name)s)"
        " in the after frameset phase. Ignored.",
    "unexpected-end-tag-after-frameset":
        "Unexpected end tag (%(name)s)"
        " in the after frameset phase. Ignored.",
    "unexpected-end-tag-after-body-innerhtml":
        "Unexpected end tag after body(innerHtml)",
    "expected-eof-but-got-char":
        "Unexpected non-space characters. Expected end of file.",
    "expected-eof-but-got-start-tag":
        "Unexpected start tag (%(name)s)"
        ". Expected end of file.",
    "expected-eof-but-got-end-tag":
        "Unexpected end tag (%(name)s)"
        ". Expected end of file.",
    "eof-in-table":
        "Unexpected end of file. Expected table content.",
    "eof-in-select":
        "Unexpected end of file. Expected select content.",
    "eof-in-frameset":
        "Unexpected end of file. Expected frameset content.",
    "eof-in-script-in-script":
        "Unexpected end of file. Expected script content.",
    "eof-in-foreign-lands":
        "Unexpected end of file. Expected foreign content",
    "non-void-element-with-trailing-solidus":
        "Trailing solidus not allowed on element %(name)s",
    "unexpected-html-element-in-foreign-content":
        "Element %(name)s not allowed in a non-html context",
    "unexpected-end-tag-before-html":
        "Unexpected end tag (%(name)s) before html.",
    "unexpected-inhead-noscript-tag":
        "Element %(name)s not allowed in a inhead-noscript context",
    "eof-in-head-noscript":
        "Unexpected end of file. Expected inhead-noscript content",
    "char-in-head-noscript":
        "Unexpected non-space character. Expected inhead-noscript content",
    "XXX-undefined-error":
        "Undefined error (this sucks and should be fixed)",
}
# XML namespace URIs, keyed by the short prefix used throughout this module.
namespaces = {
    "html": "http://www.w3.org/1999/xhtml",
    "mathml": "http://www.w3.org/1998/Math/MathML",
    "svg": "http://www.w3.org/2000/svg",
    "xlink": "http://www.w3.org/1999/xlink",
    "xml": "http://www.w3.org/XML/1998/namespace",
    "xmlns": "http://www.w3.org/2000/xmlns/",
}
# (namespace, tag-name) pairs that establish a new scope boundary for the
# "have an element in scope" checks during tree construction.
scopingElements = frozenset([
    (namespaces["html"], "applet"),
    (namespaces["html"], "caption"),
    (namespaces["html"], "html"),
    (namespaces["html"], "marquee"),
    (namespaces["html"], "object"),
    (namespaces["html"], "table"),
    (namespaces["html"], "td"),
    (namespaces["html"], "th"),
    (namespaces["mathml"], "mi"),
    (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"),
    (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext"),
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title"),
])
# (namespace, tag-name) pairs treated as formatting elements (tracked on the
# list of active formatting elements during tree construction).
formattingElements = frozenset([
    (namespaces["html"], "a"),
    (namespaces["html"], "b"),
    (namespaces["html"], "big"),
    (namespaces["html"], "code"),
    (namespaces["html"], "em"),
    (namespaces["html"], "font"),
    (namespaces["html"], "i"),
    (namespaces["html"], "nobr"),
    (namespaces["html"], "s"),
    (namespaces["html"], "small"),
    (namespaces["html"], "strike"),
    (namespaces["html"], "strong"),
    (namespaces["html"], "tt"),
    (namespaces["html"], "u"),
])
# (namespace, tag-name) pairs in the "special" category of elements used by
# the tree-construction algorithm.
specialElements = frozenset([
    (namespaces["html"], "address"),
    (namespaces["html"], "applet"),
    (namespaces["html"], "area"),
    (namespaces["html"], "article"),
    (namespaces["html"], "aside"),
    (namespaces["html"], "base"),
    (namespaces["html"], "basefont"),
    (namespaces["html"], "bgsound"),
    (namespaces["html"], "blockquote"),
    (namespaces["html"], "body"),
    (namespaces["html"], "br"),
    (namespaces["html"], "button"),
    (namespaces["html"], "caption"),
    (namespaces["html"], "center"),
    (namespaces["html"], "col"),
    (namespaces["html"], "colgroup"),
    (namespaces["html"], "command"),
    (namespaces["html"], "dd"),
    (namespaces["html"], "details"),
    (namespaces["html"], "dir"),
    (namespaces["html"], "div"),
    (namespaces["html"], "dl"),
    (namespaces["html"], "dt"),
    (namespaces["html"], "embed"),
    (namespaces["html"], "fieldset"),
    (namespaces["html"], "figure"),
    (namespaces["html"], "footer"),
    (namespaces["html"], "form"),
    (namespaces["html"], "frame"),
    (namespaces["html"], "frameset"),
    (namespaces["html"], "h1"),
    (namespaces["html"], "h2"),
    (namespaces["html"], "h3"),
    (namespaces["html"], "h4"),
    (namespaces["html"], "h5"),
    (namespaces["html"], "h6"),
    (namespaces["html"], "head"),
    (namespaces["html"], "header"),
    (namespaces["html"], "hr"),
    (namespaces["html"], "html"),
    (namespaces["html"], "iframe"),
    # Note that image is commented out in the spec as "this isn't an
    # element that can end up on the stack, so it doesn't matter,"
    (namespaces["html"], "image"),
    (namespaces["html"], "img"),
    (namespaces["html"], "input"),
    (namespaces["html"], "isindex"),
    (namespaces["html"], "li"),
    (namespaces["html"], "link"),
    (namespaces["html"], "listing"),
    (namespaces["html"], "marquee"),
    (namespaces["html"], "menu"),
    (namespaces["html"], "meta"),
    (namespaces["html"], "nav"),
    (namespaces["html"], "noembed"),
    (namespaces["html"], "noframes"),
    (namespaces["html"], "noscript"),
    (namespaces["html"], "object"),
    (namespaces["html"], "ol"),
    (namespaces["html"], "p"),
    (namespaces["html"], "param"),
    (namespaces["html"], "plaintext"),
    (namespaces["html"], "pre"),
    (namespaces["html"], "script"),
    (namespaces["html"], "section"),
    (namespaces["html"], "select"),
    (namespaces["html"], "style"),
    (namespaces["html"], "table"),
    (namespaces["html"], "tbody"),
    (namespaces["html"], "td"),
    (namespaces["html"], "textarea"),
    (namespaces["html"], "tfoot"),
    (namespaces["html"], "th"),
    (namespaces["html"], "thead"),
    (namespaces["html"], "title"),
    (namespaces["html"], "tr"),
    (namespaces["html"], "ul"),
    (namespaces["html"], "wbr"),
    (namespaces["html"], "xmp"),
    (namespaces["svg"], "foreignObject"),
])
# Foreign (MathML/SVG) elements that act as HTML integration points: content
# inside them is parsed as HTML.
htmlIntegrationPointElements = frozenset([
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title"),
])
# MathML elements that act as text integration points.
mathmlTextIntegrationPointElements = frozenset([
    (namespaces["mathml"], "mi"),
    (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"),
    (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext"),
])
# Maps lowercased SVG attribute names (as produced by the tokenizer) to their
# correct mixed-case forms.
adjustSVGAttributes = {
    "attributename": "attributeName",
    "attributetype": "attributeType",
    "basefrequency": "baseFrequency",
    "baseprofile": "baseProfile",
    "calcmode": "calcMode",
    "clippathunits": "clipPathUnits",
    "contentscripttype": "contentScriptType",
    "contentstyletype": "contentStyleType",
    "diffuseconstant": "diffuseConstant",
    "edgemode": "edgeMode",
    "externalresourcesrequired": "externalResourcesRequired",
    "filterres": "filterRes",
    "filterunits": "filterUnits",
    "glyphref": "glyphRef",
    "gradienttransform": "gradientTransform",
    "gradientunits": "gradientUnits",
    "kernelmatrix": "kernelMatrix",
    "kernelunitlength": "kernelUnitLength",
    "keypoints": "keyPoints",
    "keysplines": "keySplines",
    "keytimes": "keyTimes",
    "lengthadjust": "lengthAdjust",
    "limitingconeangle": "limitingConeAngle",
    "markerheight": "markerHeight",
    "markerunits": "markerUnits",
    "markerwidth": "markerWidth",
    "maskcontentunits": "maskContentUnits",
    "maskunits": "maskUnits",
    "numoctaves": "numOctaves",
    "pathlength": "pathLength",
    "patterncontentunits": "patternContentUnits",
    "patterntransform": "patternTransform",
    "patternunits": "patternUnits",
    "pointsatx": "pointsAtX",
    "pointsaty": "pointsAtY",
    "pointsatz": "pointsAtZ",
    "preservealpha": "preserveAlpha",
    "preserveaspectratio": "preserveAspectRatio",
    "primitiveunits": "primitiveUnits",
    "refx": "refX",
    "refy": "refY",
    "repeatcount": "repeatCount",
    "repeatdur": "repeatDur",
    "requiredextensions": "requiredExtensions",
    "requiredfeatures": "requiredFeatures",
    "specularconstant": "specularConstant",
    "specularexponent": "specularExponent",
    "spreadmethod": "spreadMethod",
    "startoffset": "startOffset",
    "stddeviation": "stdDeviation",
    "stitchtiles": "stitchTiles",
    "surfacescale": "surfaceScale",
    "systemlanguage": "systemLanguage",
    "tablevalues": "tableValues",
    "targetx": "targetX",
    "targety": "targetY",
    "textlength": "textLength",
    "viewbox": "viewBox",
    "viewtarget": "viewTarget",
    "xchannelselector": "xChannelSelector",
    "ychannelselector": "yChannelSelector",
    "zoomandpan": "zoomAndPan",
}
# The single MathML attribute whose case must be restored after tokenizing.
adjustMathMLAttributes = {"definitionurl": "definitionURL"}
# Maps a serialized qualified attribute name to its
# (prefix, local name, namespace URI) triple for foreign content.
adjustForeignAttributes = {
    "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
    "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
    "xlink:href": ("xlink", "href", namespaces["xlink"]),
    "xlink:role": ("xlink", "role", namespaces["xlink"]),
    "xlink:show": ("xlink", "show", namespaces["xlink"]),
    "xlink:title": ("xlink", "title", namespaces["xlink"]),
    "xlink:type": ("xlink", "type", namespaces["xlink"]),
    "xml:base": ("xml", "base", namespaces["xml"]),
    "xml:lang": ("xml", "lang", namespaces["xml"]),
    "xml:space": ("xml", "space", namespaces["xml"]),
    "xmlns": (None, "xmlns", namespaces["xmlns"]),
    "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]),
}

# Inverse mapping: (namespace URI, local name) -> serialized qualified name.
unadjustForeignAttributes = {(ns, local): qname for qname, (prefix, local, ns) in
                             adjustForeignAttributes.items()}
# The five characters HTML treats as whitespace: tab, LF, FF, space, CR.
spaceCharacters = frozenset([
    "\t",
    "\n",
    "\u000C",
    " ",
    "\r",
])
tableInsertModeElements = frozenset([
|
534 |
+
"table",
|
535 |
+
"tbody",
|
536 |
+
"tfoot",
|
537 |
+
"thead",
|
538 |
+
"tr"
|
539 |
+
])
|
540 |
+
|
541 |
+
asciiLowercase = frozenset(string.ascii_lowercase)
|
542 |
+
asciiUppercase = frozenset(string.ascii_uppercase)
|
543 |
+
asciiLetters = frozenset(string.ascii_letters)
|
544 |
+
digits = frozenset(string.digits)
|
545 |
+
hexDigits = frozenset(string.hexdigits)
|
546 |
+
|
547 |
+
asciiUpper2Lower = {ord(c): ord(c.lower()) for c in string.ascii_uppercase}
|
548 |
+
|
549 |
+
# Heading elements need to be ordered
|
550 |
+
headingElements = (
|
551 |
+
"h1",
|
552 |
+
"h2",
|
553 |
+
"h3",
|
554 |
+
"h4",
|
555 |
+
"h5",
|
556 |
+
"h6"
|
557 |
+
)
|
558 |
+
|
559 |
+
voidElements = frozenset([
|
560 |
+
"base",
|
561 |
+
"command",
|
562 |
+
"event-source",
|
563 |
+
"link",
|
564 |
+
"meta",
|
565 |
+
"hr",
|
566 |
+
"br",
|
567 |
+
"img",
|
568 |
+
"embed",
|
569 |
+
"param",
|
570 |
+
"area",
|
571 |
+
"col",
|
572 |
+
"input",
|
573 |
+
"source",
|
574 |
+
"track"
|
575 |
+
])
|
576 |
+
|
577 |
+
cdataElements = frozenset(['title', 'textarea'])
|
578 |
+
|
579 |
+
rcdataElements = frozenset([
|
580 |
+
'style',
|
581 |
+
'script',
|
582 |
+
'xmp',
|
583 |
+
'iframe',
|
584 |
+
'noembed',
|
585 |
+
'noframes',
|
586 |
+
'noscript'
|
587 |
+
])
|
588 |
+
|
589 |
+
booleanAttributes = {
|
590 |
+
"": frozenset(["irrelevant", "itemscope"]),
|
591 |
+
"style": frozenset(["scoped"]),
|
592 |
+
"img": frozenset(["ismap"]),
|
593 |
+
"audio": frozenset(["autoplay", "controls"]),
|
594 |
+
"video": frozenset(["autoplay", "controls"]),
|
595 |
+
"script": frozenset(["defer", "async"]),
|
596 |
+
"details": frozenset(["open"]),
|
597 |
+
"datagrid": frozenset(["multiple", "disabled"]),
|
598 |
+
"command": frozenset(["hidden", "disabled", "checked", "default"]),
|
599 |
+
"hr": frozenset(["noshade"]),
|
600 |
+
"menu": frozenset(["autosubmit"]),
|
601 |
+
"fieldset": frozenset(["disabled", "readonly"]),
|
602 |
+
"option": frozenset(["disabled", "readonly", "selected"]),
|
603 |
+
"optgroup": frozenset(["disabled", "readonly"]),
|
604 |
+
"button": frozenset(["disabled", "autofocus"]),
|
605 |
+
"input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
|
606 |
+
"select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
|
607 |
+
"output": frozenset(["disabled", "readonly"]),
|
608 |
+
"iframe": frozenset(["seamless"]),
|
609 |
+
}
|
610 |
+
|
611 |
+
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
|
612 |
+
# therefore can't be a frozenset.
|
613 |
+
entitiesWindows1252 = (
|
614 |
+
8364, # 0x80 0x20AC EURO SIGN
|
615 |
+
65533, # 0x81 UNDEFINED
|
616 |
+
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
|
617 |
+
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
|
618 |
+
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
|
619 |
+
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
|
620 |
+
8224, # 0x86 0x2020 DAGGER
|
621 |
+
8225, # 0x87 0x2021 DOUBLE DAGGER
|
622 |
+
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
|
623 |
+
8240, # 0x89 0x2030 PER MILLE SIGN
|
624 |
+
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
|
625 |
+
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
|
626 |
+
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
|
627 |
+
65533, # 0x8D UNDEFINED
|
628 |
+
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
|
629 |
+
65533, # 0x8F UNDEFINED
|
630 |
+
65533, # 0x90 UNDEFINED
|
631 |
+
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
|
632 |
+
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
|
633 |
+
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
|
634 |
+
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
|
635 |
+
8226, # 0x95 0x2022 BULLET
|
636 |
+
8211, # 0x96 0x2013 EN DASH
|
637 |
+
8212, # 0x97 0x2014 EM DASH
|
638 |
+
732, # 0x98 0x02DC SMALL TILDE
|
639 |
+
8482, # 0x99 0x2122 TRADE MARK SIGN
|
640 |
+
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
|
641 |
+
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
|
642 |
+
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
|
643 |
+
65533, # 0x9D UNDEFINED
|
644 |
+
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
|
645 |
+
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
|
646 |
+
)
|
647 |
+
|
648 |
+
xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;'])
|
649 |
+
|
650 |
+
entities = {
|
651 |
+
"AElig": "\xc6",
|
652 |
+
"AElig;": "\xc6",
|
653 |
+
"AMP": "&",
|
654 |
+
"AMP;": "&",
|
655 |
+
"Aacute": "\xc1",
|
656 |
+
"Aacute;": "\xc1",
|
657 |
+
"Abreve;": "\u0102",
|
658 |
+
"Acirc": "\xc2",
|
659 |
+
"Acirc;": "\xc2",
|
660 |
+
"Acy;": "\u0410",
|
661 |
+
"Afr;": "\U0001d504",
|
662 |
+
"Agrave": "\xc0",
|
663 |
+
"Agrave;": "\xc0",
|
664 |
+
"Alpha;": "\u0391",
|
665 |
+
"Amacr;": "\u0100",
|
666 |
+
"And;": "\u2a53",
|
667 |
+
"Aogon;": "\u0104",
|
668 |
+
"Aopf;": "\U0001d538",
|
669 |
+
"ApplyFunction;": "\u2061",
|
670 |
+
"Aring": "\xc5",
|
671 |
+
"Aring;": "\xc5",
|
672 |
+
"Ascr;": "\U0001d49c",
|
673 |
+
"Assign;": "\u2254",
|
674 |
+
"Atilde": "\xc3",
|
675 |
+
"Atilde;": "\xc3",
|
676 |
+
"Auml": "\xc4",
|
677 |
+
"Auml;": "\xc4",
|
678 |
+
"Backslash;": "\u2216",
|
679 |
+
"Barv;": "\u2ae7",
|
680 |
+
"Barwed;": "\u2306",
|
681 |
+
"Bcy;": "\u0411",
|
682 |
+
"Because;": "\u2235",
|
683 |
+
"Bernoullis;": "\u212c",
|
684 |
+
"Beta;": "\u0392",
|
685 |
+
"Bfr;": "\U0001d505",
|
686 |
+
"Bopf;": "\U0001d539",
|
687 |
+
"Breve;": "\u02d8",
|
688 |
+
"Bscr;": "\u212c",
|
689 |
+
"Bumpeq;": "\u224e",
|
690 |
+
"CHcy;": "\u0427",
|
691 |
+
"COPY": "\xa9",
|
692 |
+
"COPY;": "\xa9",
|
693 |
+
"Cacute;": "\u0106",
|
694 |
+
"Cap;": "\u22d2",
|
695 |
+
"CapitalDifferentialD;": "\u2145",
|
696 |
+
"Cayleys;": "\u212d",
|
697 |
+
"Ccaron;": "\u010c",
|
698 |
+
"Ccedil": "\xc7",
|
699 |
+
"Ccedil;": "\xc7",
|
700 |
+
"Ccirc;": "\u0108",
|
701 |
+
"Cconint;": "\u2230",
|
702 |
+
"Cdot;": "\u010a",
|
703 |
+
"Cedilla;": "\xb8",
|
704 |
+
"CenterDot;": "\xb7",
|
705 |
+
"Cfr;": "\u212d",
|
706 |
+
"Chi;": "\u03a7",
|
707 |
+
"CircleDot;": "\u2299",
|
708 |
+
"CircleMinus;": "\u2296",
|
709 |
+
"CirclePlus;": "\u2295",
|
710 |
+
"CircleTimes;": "\u2297",
|
711 |
+
"ClockwiseContourIntegral;": "\u2232",
|
712 |
+
"CloseCurlyDoubleQuote;": "\u201d",
|
713 |
+
"CloseCurlyQuote;": "\u2019",
|
714 |
+
"Colon;": "\u2237",
|
715 |
+
"Colone;": "\u2a74",
|
716 |
+
"Congruent;": "\u2261",
|
717 |
+
"Conint;": "\u222f",
|
718 |
+
"ContourIntegral;": "\u222e",
|
719 |
+
"Copf;": "\u2102",
|
720 |
+
"Coproduct;": "\u2210",
|
721 |
+
"CounterClockwiseContourIntegral;": "\u2233",
|
722 |
+
"Cross;": "\u2a2f",
|
723 |
+
"Cscr;": "\U0001d49e",
|
724 |
+
"Cup;": "\u22d3",
|
725 |
+
"CupCap;": "\u224d",
|
726 |
+
"DD;": "\u2145",
|
727 |
+
"DDotrahd;": "\u2911",
|
728 |
+
"DJcy;": "\u0402",
|
729 |
+
"DScy;": "\u0405",
|
730 |
+
"DZcy;": "\u040f",
|
731 |
+
"Dagger;": "\u2021",
|
732 |
+
"Darr;": "\u21a1",
|
733 |
+
"Dashv;": "\u2ae4",
|
734 |
+
"Dcaron;": "\u010e",
|
735 |
+
"Dcy;": "\u0414",
|
736 |
+
"Del;": "\u2207",
|
737 |
+
"Delta;": "\u0394",
|
738 |
+
"Dfr;": "\U0001d507",
|
739 |
+
"DiacriticalAcute;": "\xb4",
|
740 |
+
"DiacriticalDot;": "\u02d9",
|
741 |
+
"DiacriticalDoubleAcute;": "\u02dd",
|
742 |
+
"DiacriticalGrave;": "`",
|
743 |
+
"DiacriticalTilde;": "\u02dc",
|
744 |
+
"Diamond;": "\u22c4",
|
745 |
+
"DifferentialD;": "\u2146",
|
746 |
+
"Dopf;": "\U0001d53b",
|
747 |
+
"Dot;": "\xa8",
|
748 |
+
"DotDot;": "\u20dc",
|
749 |
+
"DotEqual;": "\u2250",
|
750 |
+
"DoubleContourIntegral;": "\u222f",
|
751 |
+
"DoubleDot;": "\xa8",
|
752 |
+
"DoubleDownArrow;": "\u21d3",
|
753 |
+
"DoubleLeftArrow;": "\u21d0",
|
754 |
+
"DoubleLeftRightArrow;": "\u21d4",
|
755 |
+
"DoubleLeftTee;": "\u2ae4",
|
756 |
+
"DoubleLongLeftArrow;": "\u27f8",
|
757 |
+
"DoubleLongLeftRightArrow;": "\u27fa",
|
758 |
+
"DoubleLongRightArrow;": "\u27f9",
|
759 |
+
"DoubleRightArrow;": "\u21d2",
|
760 |
+
"DoubleRightTee;": "\u22a8",
|
761 |
+
"DoubleUpArrow;": "\u21d1",
|
762 |
+
"DoubleUpDownArrow;": "\u21d5",
|
763 |
+
"DoubleVerticalBar;": "\u2225",
|
764 |
+
"DownArrow;": "\u2193",
|
765 |
+
"DownArrowBar;": "\u2913",
|
766 |
+
"DownArrowUpArrow;": "\u21f5",
|
767 |
+
"DownBreve;": "\u0311",
|
768 |
+
"DownLeftRightVector;": "\u2950",
|
769 |
+
"DownLeftTeeVector;": "\u295e",
|
770 |
+
"DownLeftVector;": "\u21bd",
|
771 |
+
"DownLeftVectorBar;": "\u2956",
|
772 |
+
"DownRightTeeVector;": "\u295f",
|
773 |
+
"DownRightVector;": "\u21c1",
|
774 |
+
"DownRightVectorBar;": "\u2957",
|
775 |
+
"DownTee;": "\u22a4",
|
776 |
+
"DownTeeArrow;": "\u21a7",
|
777 |
+
"Downarrow;": "\u21d3",
|
778 |
+
"Dscr;": "\U0001d49f",
|
779 |
+
"Dstrok;": "\u0110",
|
780 |
+
"ENG;": "\u014a",
|
781 |
+
"ETH": "\xd0",
|
782 |
+
"ETH;": "\xd0",
|
783 |
+
"Eacute": "\xc9",
|
784 |
+
"Eacute;": "\xc9",
|
785 |
+
"Ecaron;": "\u011a",
|
786 |
+
"Ecirc": "\xca",
|
787 |
+
"Ecirc;": "\xca",
|
788 |
+
"Ecy;": "\u042d",
|
789 |
+
"Edot;": "\u0116",
|
790 |
+
"Efr;": "\U0001d508",
|
791 |
+
"Egrave": "\xc8",
|
792 |
+
"Egrave;": "\xc8",
|
793 |
+
"Element;": "\u2208",
|
794 |
+
"Emacr;": "\u0112",
|
795 |
+
"EmptySmallSquare;": "\u25fb",
|
796 |
+
"EmptyVerySmallSquare;": "\u25ab",
|
797 |
+
"Eogon;": "\u0118",
|
798 |
+
"Eopf;": "\U0001d53c",
|
799 |
+
"Epsilon;": "\u0395",
|
800 |
+
"Equal;": "\u2a75",
|
801 |
+
"EqualTilde;": "\u2242",
|
802 |
+
"Equilibrium;": "\u21cc",
|
803 |
+
"Escr;": "\u2130",
|
804 |
+
"Esim;": "\u2a73",
|
805 |
+
"Eta;": "\u0397",
|
806 |
+
"Euml": "\xcb",
|
807 |
+
"Euml;": "\xcb",
|
808 |
+
"Exists;": "\u2203",
|
809 |
+
"ExponentialE;": "\u2147",
|
810 |
+
"Fcy;": "\u0424",
|
811 |
+
"Ffr;": "\U0001d509",
|
812 |
+
"FilledSmallSquare;": "\u25fc",
|
813 |
+
"FilledVerySmallSquare;": "\u25aa",
|
814 |
+
"Fopf;": "\U0001d53d",
|
815 |
+
"ForAll;": "\u2200",
|
816 |
+
"Fouriertrf;": "\u2131",
|
817 |
+
"Fscr;": "\u2131",
|
818 |
+
"GJcy;": "\u0403",
|
819 |
+
"GT": ">",
|
820 |
+
"GT;": ">",
|
821 |
+
"Gamma;": "\u0393",
|
822 |
+
"Gammad;": "\u03dc",
|
823 |
+
"Gbreve;": "\u011e",
|
824 |
+
"Gcedil;": "\u0122",
|
825 |
+
"Gcirc;": "\u011c",
|
826 |
+
"Gcy;": "\u0413",
|
827 |
+
"Gdot;": "\u0120",
|
828 |
+
"Gfr;": "\U0001d50a",
|
829 |
+
"Gg;": "\u22d9",
|
830 |
+
"Gopf;": "\U0001d53e",
|
831 |
+
"GreaterEqual;": "\u2265",
|
832 |
+
"GreaterEqualLess;": "\u22db",
|
833 |
+
"GreaterFullEqual;": "\u2267",
|
834 |
+
"GreaterGreater;": "\u2aa2",
|
835 |
+
"GreaterLess;": "\u2277",
|
836 |
+
"GreaterSlantEqual;": "\u2a7e",
|
837 |
+
"GreaterTilde;": "\u2273",
|
838 |
+
"Gscr;": "\U0001d4a2",
|
839 |
+
"Gt;": "\u226b",
|
840 |
+
"HARDcy;": "\u042a",
|
841 |
+
"Hacek;": "\u02c7",
|
842 |
+
"Hat;": "^",
|
843 |
+
"Hcirc;": "\u0124",
|
844 |
+
"Hfr;": "\u210c",
|
845 |
+
"HilbertSpace;": "\u210b",
|
846 |
+
"Hopf;": "\u210d",
|
847 |
+
"HorizontalLine;": "\u2500",
|
848 |
+
"Hscr;": "\u210b",
|
849 |
+
"Hstrok;": "\u0126",
|
850 |
+
"HumpDownHump;": "\u224e",
|
851 |
+
"HumpEqual;": "\u224f",
|
852 |
+
"IEcy;": "\u0415",
|
853 |
+
"IJlig;": "\u0132",
|
854 |
+
"IOcy;": "\u0401",
|
855 |
+
"Iacute": "\xcd",
|
856 |
+
"Iacute;": "\xcd",
|
857 |
+
"Icirc": "\xce",
|
858 |
+
"Icirc;": "\xce",
|
859 |
+
"Icy;": "\u0418",
|
860 |
+
"Idot;": "\u0130",
|
861 |
+
"Ifr;": "\u2111",
|
862 |
+
"Igrave": "\xcc",
|
863 |
+
"Igrave;": "\xcc",
|
864 |
+
"Im;": "\u2111",
|
865 |
+
"Imacr;": "\u012a",
|
866 |
+
"ImaginaryI;": "\u2148",
|
867 |
+
"Implies;": "\u21d2",
|
868 |
+
"Int;": "\u222c",
|
869 |
+
"Integral;": "\u222b",
|
870 |
+
"Intersection;": "\u22c2",
|
871 |
+
"InvisibleComma;": "\u2063",
|
872 |
+
"InvisibleTimes;": "\u2062",
|
873 |
+
"Iogon;": "\u012e",
|
874 |
+
"Iopf;": "\U0001d540",
|
875 |
+
"Iota;": "\u0399",
|
876 |
+
"Iscr;": "\u2110",
|
877 |
+
"Itilde;": "\u0128",
|
878 |
+
"Iukcy;": "\u0406",
|
879 |
+
"Iuml": "\xcf",
|
880 |
+
"Iuml;": "\xcf",
|
881 |
+
"Jcirc;": "\u0134",
|
882 |
+
"Jcy;": "\u0419",
|
883 |
+
"Jfr;": "\U0001d50d",
|
884 |
+
"Jopf;": "\U0001d541",
|
885 |
+
"Jscr;": "\U0001d4a5",
|
886 |
+
"Jsercy;": "\u0408",
|
887 |
+
"Jukcy;": "\u0404",
|
888 |
+
"KHcy;": "\u0425",
|
889 |
+
"KJcy;": "\u040c",
|
890 |
+
"Kappa;": "\u039a",
|
891 |
+
"Kcedil;": "\u0136",
|
892 |
+
"Kcy;": "\u041a",
|
893 |
+
"Kfr;": "\U0001d50e",
|
894 |
+
"Kopf;": "\U0001d542",
|
895 |
+
"Kscr;": "\U0001d4a6",
|
896 |
+
"LJcy;": "\u0409",
|
897 |
+
"LT": "<",
|
898 |
+
"LT;": "<",
|
899 |
+
"Lacute;": "\u0139",
|
900 |
+
"Lambda;": "\u039b",
|
901 |
+
"Lang;": "\u27ea",
|
902 |
+
"Laplacetrf;": "\u2112",
|
903 |
+
"Larr;": "\u219e",
|
904 |
+
"Lcaron;": "\u013d",
|
905 |
+
"Lcedil;": "\u013b",
|
906 |
+
"Lcy;": "\u041b",
|
907 |
+
"LeftAngleBracket;": "\u27e8",
|
908 |
+
"LeftArrow;": "\u2190",
|
909 |
+
"LeftArrowBar;": "\u21e4",
|
910 |
+
"LeftArrowRightArrow;": "\u21c6",
|
911 |
+
"LeftCeiling;": "\u2308",
|
912 |
+
"LeftDoubleBracket;": "\u27e6",
|
913 |
+
"LeftDownTeeVector;": "\u2961",
|
914 |
+
"LeftDownVector;": "\u21c3",
|
915 |
+
"LeftDownVectorBar;": "\u2959",
|
916 |
+
"LeftFloor;": "\u230a",
|
917 |
+
"LeftRightArrow;": "\u2194",
|
918 |
+
"LeftRightVector;": "\u294e",
|
919 |
+
"LeftTee;": "\u22a3",
|
920 |
+
"LeftTeeArrow;": "\u21a4",
|
921 |
+
"LeftTeeVector;": "\u295a",
|
922 |
+
"LeftTriangle;": "\u22b2",
|
923 |
+
"LeftTriangleBar;": "\u29cf",
|
924 |
+
"LeftTriangleEqual;": "\u22b4",
|
925 |
+
"LeftUpDownVector;": "\u2951",
|
926 |
+
"LeftUpTeeVector;": "\u2960",
|
927 |
+
"LeftUpVector;": "\u21bf",
|
928 |
+
"LeftUpVectorBar;": "\u2958",
|
929 |
+
"LeftVector;": "\u21bc",
|
930 |
+
"LeftVectorBar;": "\u2952",
|
931 |
+
"Leftarrow;": "\u21d0",
|
932 |
+
"Leftrightarrow;": "\u21d4",
|
933 |
+
"LessEqualGreater;": "\u22da",
|
934 |
+
"LessFullEqual;": "\u2266",
|
935 |
+
"LessGreater;": "\u2276",
|
936 |
+
"LessLess;": "\u2aa1",
|
937 |
+
"LessSlantEqual;": "\u2a7d",
|
938 |
+
"LessTilde;": "\u2272",
|
939 |
+
"Lfr;": "\U0001d50f",
|
940 |
+
"Ll;": "\u22d8",
|
941 |
+
"Lleftarrow;": "\u21da",
|
942 |
+
"Lmidot;": "\u013f",
|
943 |
+
"LongLeftArrow;": "\u27f5",
|
944 |
+
"LongLeftRightArrow;": "\u27f7",
|
945 |
+
"LongRightArrow;": "\u27f6",
|
946 |
+
"Longleftarrow;": "\u27f8",
|
947 |
+
"Longleftrightarrow;": "\u27fa",
|
948 |
+
"Longrightarrow;": "\u27f9",
|
949 |
+
"Lopf;": "\U0001d543",
|
950 |
+
"LowerLeftArrow;": "\u2199",
|
951 |
+
"LowerRightArrow;": "\u2198",
|
952 |
+
"Lscr;": "\u2112",
|
953 |
+
"Lsh;": "\u21b0",
|
954 |
+
"Lstrok;": "\u0141",
|
955 |
+
"Lt;": "\u226a",
|
956 |
+
"Map;": "\u2905",
|
957 |
+
"Mcy;": "\u041c",
|
958 |
+
"MediumSpace;": "\u205f",
|
959 |
+
"Mellintrf;": "\u2133",
|
960 |
+
"Mfr;": "\U0001d510",
|
961 |
+
"MinusPlus;": "\u2213",
|
962 |
+
"Mopf;": "\U0001d544",
|
963 |
+
"Mscr;": "\u2133",
|
964 |
+
"Mu;": "\u039c",
|
965 |
+
"NJcy;": "\u040a",
|
966 |
+
"Nacute;": "\u0143",
|
967 |
+
"Ncaron;": "\u0147",
|
968 |
+
"Ncedil;": "\u0145",
|
969 |
+
"Ncy;": "\u041d",
|
970 |
+
"NegativeMediumSpace;": "\u200b",
|
971 |
+
"NegativeThickSpace;": "\u200b",
|
972 |
+
"NegativeThinSpace;": "\u200b",
|
973 |
+
"NegativeVeryThinSpace;": "\u200b",
|
974 |
+
"NestedGreaterGreater;": "\u226b",
|
975 |
+
"NestedLessLess;": "\u226a",
|
976 |
+
"NewLine;": "\n",
|
977 |
+
"Nfr;": "\U0001d511",
|
978 |
+
"NoBreak;": "\u2060",
|
979 |
+
"NonBreakingSpace;": "\xa0",
|
980 |
+
"Nopf;": "\u2115",
|
981 |
+
"Not;": "\u2aec",
|
982 |
+
"NotCongruent;": "\u2262",
|
983 |
+
"NotCupCap;": "\u226d",
|
984 |
+
"NotDoubleVerticalBar;": "\u2226",
|
985 |
+
"NotElement;": "\u2209",
|
986 |
+
"NotEqual;": "\u2260",
|
987 |
+
"NotEqualTilde;": "\u2242\u0338",
|
988 |
+
"NotExists;": "\u2204",
|
989 |
+
"NotGreater;": "\u226f",
|
990 |
+
"NotGreaterEqual;": "\u2271",
|
991 |
+
"NotGreaterFullEqual;": "\u2267\u0338",
|
992 |
+
"NotGreaterGreater;": "\u226b\u0338",
|
993 |
+
"NotGreaterLess;": "\u2279",
|
994 |
+
"NotGreaterSlantEqual;": "\u2a7e\u0338",
|
995 |
+
"NotGreaterTilde;": "\u2275",
|
996 |
+
"NotHumpDownHump;": "\u224e\u0338",
|
997 |
+
"NotHumpEqual;": "\u224f\u0338",
|
998 |
+
"NotLeftTriangle;": "\u22ea",
|
999 |
+
"NotLeftTriangleBar;": "\u29cf\u0338",
|
1000 |
+
"NotLeftTriangleEqual;": "\u22ec",
|
1001 |
+
"NotLess;": "\u226e",
|
1002 |
+
"NotLessEqual;": "\u2270",
|
1003 |
+
"NotLessGreater;": "\u2278",
|
1004 |
+
"NotLessLess;": "\u226a\u0338",
|
1005 |
+
"NotLessSlantEqual;": "\u2a7d\u0338",
|
1006 |
+
"NotLessTilde;": "\u2274",
|
1007 |
+
"NotNestedGreaterGreater;": "\u2aa2\u0338",
|
1008 |
+
"NotNestedLessLess;": "\u2aa1\u0338",
|
1009 |
+
"NotPrecedes;": "\u2280",
|
1010 |
+
"NotPrecedesEqual;": "\u2aaf\u0338",
|
1011 |
+
"NotPrecedesSlantEqual;": "\u22e0",
|
1012 |
+
"NotReverseElement;": "\u220c",
|
1013 |
+
"NotRightTriangle;": "\u22eb",
|
1014 |
+
"NotRightTriangleBar;": "\u29d0\u0338",
|
1015 |
+
"NotRightTriangleEqual;": "\u22ed",
|
1016 |
+
"NotSquareSubset;": "\u228f\u0338",
|
1017 |
+
"NotSquareSubsetEqual;": "\u22e2",
|
1018 |
+
"NotSquareSuperset;": "\u2290\u0338",
|
1019 |
+
"NotSquareSupersetEqual;": "\u22e3",
|
1020 |
+
"NotSubset;": "\u2282\u20d2",
|
1021 |
+
"NotSubsetEqual;": "\u2288",
|
1022 |
+
"NotSucceeds;": "\u2281",
|
1023 |
+
"NotSucceedsEqual;": "\u2ab0\u0338",
|
1024 |
+
"NotSucceedsSlantEqual;": "\u22e1",
|
1025 |
+
"NotSucceedsTilde;": "\u227f\u0338",
|
1026 |
+
"NotSuperset;": "\u2283\u20d2",
|
1027 |
+
"NotSupersetEqual;": "\u2289",
|
1028 |
+
"NotTilde;": "\u2241",
|
1029 |
+
"NotTildeEqual;": "\u2244",
|
1030 |
+
"NotTildeFullEqual;": "\u2247",
|
1031 |
+
"NotTildeTilde;": "\u2249",
|
1032 |
+
"NotVerticalBar;": "\u2224",
|
1033 |
+
"Nscr;": "\U0001d4a9",
|
1034 |
+
"Ntilde": "\xd1",
|
1035 |
+
"Ntilde;": "\xd1",
|
1036 |
+
"Nu;": "\u039d",
|
1037 |
+
"OElig;": "\u0152",
|
1038 |
+
"Oacute": "\xd3",
|
1039 |
+
"Oacute;": "\xd3",
|
1040 |
+
"Ocirc": "\xd4",
|
1041 |
+
"Ocirc;": "\xd4",
|
1042 |
+
"Ocy;": "\u041e",
|
1043 |
+
"Odblac;": "\u0150",
|
1044 |
+
"Ofr;": "\U0001d512",
|
1045 |
+
"Ograve": "\xd2",
|
1046 |
+
"Ograve;": "\xd2",
|
1047 |
+
"Omacr;": "\u014c",
|
1048 |
+
"Omega;": "\u03a9",
|
1049 |
+
"Omicron;": "\u039f",
|
1050 |
+
"Oopf;": "\U0001d546",
|
1051 |
+
"OpenCurlyDoubleQuote;": "\u201c",
|
1052 |
+
"OpenCurlyQuote;": "\u2018",
|
1053 |
+
"Or;": "\u2a54",
|
1054 |
+
"Oscr;": "\U0001d4aa",
|
1055 |
+
"Oslash": "\xd8",
|
1056 |
+
"Oslash;": "\xd8",
|
1057 |
+
"Otilde": "\xd5",
|
1058 |
+
"Otilde;": "\xd5",
|
1059 |
+
"Otimes;": "\u2a37",
|
1060 |
+
"Ouml": "\xd6",
|
1061 |
+
"Ouml;": "\xd6",
|
1062 |
+
"OverBar;": "\u203e",
|
1063 |
+
"OverBrace;": "\u23de",
|
1064 |
+
"OverBracket;": "\u23b4",
|
1065 |
+
"OverParenthesis;": "\u23dc",
|
1066 |
+
"PartialD;": "\u2202",
|
1067 |
+
"Pcy;": "\u041f",
|
1068 |
+
"Pfr;": "\U0001d513",
|
1069 |
+
"Phi;": "\u03a6",
|
1070 |
+
"Pi;": "\u03a0",
|
1071 |
+
"PlusMinus;": "\xb1",
|
1072 |
+
"Poincareplane;": "\u210c",
|
1073 |
+
"Popf;": "\u2119",
|
1074 |
+
"Pr;": "\u2abb",
|
1075 |
+
"Precedes;": "\u227a",
|
1076 |
+
"PrecedesEqual;": "\u2aaf",
|
1077 |
+
"PrecedesSlantEqual;": "\u227c",
|
1078 |
+
"PrecedesTilde;": "\u227e",
|
1079 |
+
"Prime;": "\u2033",
|
1080 |
+
"Product;": "\u220f",
|
1081 |
+
"Proportion;": "\u2237",
|
1082 |
+
"Proportional;": "\u221d",
|
1083 |
+
"Pscr;": "\U0001d4ab",
|
1084 |
+
"Psi;": "\u03a8",
|
1085 |
+
"QUOT": "\"",
|
1086 |
+
"QUOT;": "\"",
|
1087 |
+
"Qfr;": "\U0001d514",
|
1088 |
+
"Qopf;": "\u211a",
|
1089 |
+
"Qscr;": "\U0001d4ac",
|
1090 |
+
"RBarr;": "\u2910",
|
1091 |
+
"REG": "\xae",
|
1092 |
+
"REG;": "\xae",
|
1093 |
+
"Racute;": "\u0154",
|
1094 |
+
"Rang;": "\u27eb",
|
1095 |
+
"Rarr;": "\u21a0",
|
1096 |
+
"Rarrtl;": "\u2916",
|
1097 |
+
"Rcaron;": "\u0158",
|
1098 |
+
"Rcedil;": "\u0156",
|
1099 |
+
"Rcy;": "\u0420",
|
1100 |
+
"Re;": "\u211c",
|
1101 |
+
"ReverseElement;": "\u220b",
|
1102 |
+
"ReverseEquilibrium;": "\u21cb",
|
1103 |
+
"ReverseUpEquilibrium;": "\u296f",
|
1104 |
+
"Rfr;": "\u211c",
|
1105 |
+
"Rho;": "\u03a1",
|
1106 |
+
"RightAngleBracket;": "\u27e9",
|
1107 |
+
"RightArrow;": "\u2192",
|
1108 |
+
"RightArrowBar;": "\u21e5",
|
1109 |
+
"RightArrowLeftArrow;": "\u21c4",
|
1110 |
+
"RightCeiling;": "\u2309",
|
1111 |
+
"RightDoubleBracket;": "\u27e7",
|
1112 |
+
"RightDownTeeVector;": "\u295d",
|
1113 |
+
"RightDownVector;": "\u21c2",
|
1114 |
+
"RightDownVectorBar;": "\u2955",
|
1115 |
+
"RightFloor;": "\u230b",
|
1116 |
+
"RightTee;": "\u22a2",
|
1117 |
+
"RightTeeArrow;": "\u21a6",
|
1118 |
+
"RightTeeVector;": "\u295b",
|
1119 |
+
"RightTriangle;": "\u22b3",
|
1120 |
+
"RightTriangleBar;": "\u29d0",
|
1121 |
+
"RightTriangleEqual;": "\u22b5",
|
1122 |
+
"RightUpDownVector;": "\u294f",
|
1123 |
+
"RightUpTeeVector;": "\u295c",
|
1124 |
+
"RightUpVector;": "\u21be",
|
1125 |
+
"RightUpVectorBar;": "\u2954",
|
1126 |
+
"RightVector;": "\u21c0",
|
1127 |
+
"RightVectorBar;": "\u2953",
|
1128 |
+
"Rightarrow;": "\u21d2",
|
1129 |
+
"Ropf;": "\u211d",
|
1130 |
+
"RoundImplies;": "\u2970",
|
1131 |
+
"Rrightarrow;": "\u21db",
|
1132 |
+
"Rscr;": "\u211b",
|
1133 |
+
"Rsh;": "\u21b1",
|
1134 |
+
"RuleDelayed;": "\u29f4",
|
1135 |
+
"SHCHcy;": "\u0429",
|
1136 |
+
"SHcy;": "\u0428",
|
1137 |
+
"SOFTcy;": "\u042c",
|
1138 |
+
"Sacute;": "\u015a",
|
1139 |
+
"Sc;": "\u2abc",
|
1140 |
+
"Scaron;": "\u0160",
|
1141 |
+
"Scedil;": "\u015e",
|
1142 |
+
"Scirc;": "\u015c",
|
1143 |
+
"Scy;": "\u0421",
|
1144 |
+
"Sfr;": "\U0001d516",
|
1145 |
+
"ShortDownArrow;": "\u2193",
|
1146 |
+
"ShortLeftArrow;": "\u2190",
|
1147 |
+
"ShortRightArrow;": "\u2192",
|
1148 |
+
"ShortUpArrow;": "\u2191",
|
1149 |
+
"Sigma;": "\u03a3",
|
1150 |
+
"SmallCircle;": "\u2218",
|
1151 |
+
"Sopf;": "\U0001d54a",
|
1152 |
+
"Sqrt;": "\u221a",
|
1153 |
+
"Square;": "\u25a1",
|
1154 |
+
"SquareIntersection;": "\u2293",
|
1155 |
+
"SquareSubset;": "\u228f",
|
1156 |
+
"SquareSubsetEqual;": "\u2291",
|
1157 |
+
"SquareSuperset;": "\u2290",
|
1158 |
+
"SquareSupersetEqual;": "\u2292",
|
1159 |
+
"SquareUnion;": "\u2294",
|
1160 |
+
"Sscr;": "\U0001d4ae",
|
1161 |
+
"Star;": "\u22c6",
|
1162 |
+
"Sub;": "\u22d0",
|
1163 |
+
"Subset;": "\u22d0",
|
1164 |
+
"SubsetEqual;": "\u2286",
|
1165 |
+
"Succeeds;": "\u227b",
|
1166 |
+
"SucceedsEqual;": "\u2ab0",
|
1167 |
+
"SucceedsSlantEqual;": "\u227d",
|
1168 |
+
"SucceedsTilde;": "\u227f",
|
1169 |
+
"SuchThat;": "\u220b",
|
1170 |
+
"Sum;": "\u2211",
|
1171 |
+
"Sup;": "\u22d1",
|
1172 |
+
"Superset;": "\u2283",
|
1173 |
+
"SupersetEqual;": "\u2287",
|
1174 |
+
"Supset;": "\u22d1",
|
1175 |
+
"THORN": "\xde",
|
1176 |
+
"THORN;": "\xde",
|
1177 |
+
"TRADE;": "\u2122",
|
1178 |
+
"TSHcy;": "\u040b",
|
1179 |
+
"TScy;": "\u0426",
|
1180 |
+
"Tab;": "\t",
|
1181 |
+
"Tau;": "\u03a4",
|
1182 |
+
"Tcaron;": "\u0164",
|
1183 |
+
"Tcedil;": "\u0162",
|
1184 |
+
"Tcy;": "\u0422",
|
1185 |
+
"Tfr;": "\U0001d517",
|
1186 |
+
"Therefore;": "\u2234",
|
1187 |
+
"Theta;": "\u0398",
|
1188 |
+
"ThickSpace;": "\u205f\u200a",
|
1189 |
+
"ThinSpace;": "\u2009",
|
1190 |
+
"Tilde;": "\u223c",
|
1191 |
+
"TildeEqual;": "\u2243",
|
1192 |
+
"TildeFullEqual;": "\u2245",
|
1193 |
+
"TildeTilde;": "\u2248",
|
1194 |
+
"Topf;": "\U0001d54b",
|
1195 |
+
"TripleDot;": "\u20db",
|
1196 |
+
"Tscr;": "\U0001d4af",
|
1197 |
+
"Tstrok;": "\u0166",
|
1198 |
+
"Uacute": "\xda",
|
1199 |
+
"Uacute;": "\xda",
|
1200 |
+
"Uarr;": "\u219f",
|
1201 |
+
"Uarrocir;": "\u2949",
|
1202 |
+
"Ubrcy;": "\u040e",
|
1203 |
+
"Ubreve;": "\u016c",
|
1204 |
+
"Ucirc": "\xdb",
|
1205 |
+
"Ucirc;": "\xdb",
|
1206 |
+
"Ucy;": "\u0423",
|
1207 |
+
"Udblac;": "\u0170",
|
1208 |
+
"Ufr;": "\U0001d518",
|
1209 |
+
"Ugrave": "\xd9",
|
1210 |
+
"Ugrave;": "\xd9",
|
1211 |
+
"Umacr;": "\u016a",
|
1212 |
+
"UnderBar;": "_",
|
1213 |
+
"UnderBrace;": "\u23df",
|
1214 |
+
"UnderBracket;": "\u23b5",
|
1215 |
+
"UnderParenthesis;": "\u23dd",
|
1216 |
+
"Union;": "\u22c3",
|
1217 |
+
"UnionPlus;": "\u228e",
|
1218 |
+
"Uogon;": "\u0172",
|
1219 |
+
"Uopf;": "\U0001d54c",
|
1220 |
+
"UpArrow;": "\u2191",
|
1221 |
+
"UpArrowBar;": "\u2912",
|
1222 |
+
"UpArrowDownArrow;": "\u21c5",
|
1223 |
+
"UpDownArrow;": "\u2195",
|
1224 |
+
"UpEquilibrium;": "\u296e",
|
1225 |
+
"UpTee;": "\u22a5",
|
1226 |
+
"UpTeeArrow;": "\u21a5",
|
1227 |
+
"Uparrow;": "\u21d1",
|
1228 |
+
"Updownarrow;": "\u21d5",
|
1229 |
+
"UpperLeftArrow;": "\u2196",
|
1230 |
+
"UpperRightArrow;": "\u2197",
|
1231 |
+
"Upsi;": "\u03d2",
|
1232 |
+
"Upsilon;": "\u03a5",
|
1233 |
+
"Uring;": "\u016e",
|
1234 |
+
"Uscr;": "\U0001d4b0",
|
1235 |
+
"Utilde;": "\u0168",
|
1236 |
+
"Uuml": "\xdc",
|
1237 |
+
"Uuml;": "\xdc",
|
1238 |
+
"VDash;": "\u22ab",
|
1239 |
+
"Vbar;": "\u2aeb",
|
1240 |
+
"Vcy;": "\u0412",
|
1241 |
+
"Vdash;": "\u22a9",
|
1242 |
+
"Vdashl;": "\u2ae6",
|
1243 |
+
"Vee;": "\u22c1",
|
1244 |
+
"Verbar;": "\u2016",
|
1245 |
+
"Vert;": "\u2016",
|
1246 |
+
"VerticalBar;": "\u2223",
|
1247 |
+
"VerticalLine;": "|",
|
1248 |
+
"VerticalSeparator;": "\u2758",
|
1249 |
+
"VerticalTilde;": "\u2240",
|
1250 |
+
"VeryThinSpace;": "\u200a",
|
1251 |
+
"Vfr;": "\U0001d519",
|
1252 |
+
"Vopf;": "\U0001d54d",
|
1253 |
+
"Vscr;": "\U0001d4b1",
|
1254 |
+
"Vvdash;": "\u22aa",
|
1255 |
+
"Wcirc;": "\u0174",
|
1256 |
+
"Wedge;": "\u22c0",
|
1257 |
+
"Wfr;": "\U0001d51a",
|
1258 |
+
"Wopf;": "\U0001d54e",
|
1259 |
+
"Wscr;": "\U0001d4b2",
|
1260 |
+
"Xfr;": "\U0001d51b",
|
1261 |
+
"Xi;": "\u039e",
|
1262 |
+
"Xopf;": "\U0001d54f",
|
1263 |
+
"Xscr;": "\U0001d4b3",
|
1264 |
+
"YAcy;": "\u042f",
|
1265 |
+
"YIcy;": "\u0407",
|
1266 |
+
"YUcy;": "\u042e",
|
1267 |
+
"Yacute": "\xdd",
|
1268 |
+
"Yacute;": "\xdd",
|
1269 |
+
"Ycirc;": "\u0176",
|
1270 |
+
"Ycy;": "\u042b",
|
1271 |
+
"Yfr;": "\U0001d51c",
|
1272 |
+
"Yopf;": "\U0001d550",
|
1273 |
+
"Yscr;": "\U0001d4b4",
|
1274 |
+
"Yuml;": "\u0178",
|
1275 |
+
"ZHcy;": "\u0416",
|
1276 |
+
"Zacute;": "\u0179",
|
1277 |
+
"Zcaron;": "\u017d",
|
1278 |
+
"Zcy;": "\u0417",
|
1279 |
+
"Zdot;": "\u017b",
|
1280 |
+
"ZeroWidthSpace;": "\u200b",
|
1281 |
+
"Zeta;": "\u0396",
|
1282 |
+
"Zfr;": "\u2128",
|
1283 |
+
"Zopf;": "\u2124",
|
1284 |
+
"Zscr;": "\U0001d4b5",
|
1285 |
+
"aacute": "\xe1",
|
1286 |
+
"aacute;": "\xe1",
|
1287 |
+
"abreve;": "\u0103",
|
1288 |
+
"ac;": "\u223e",
|
1289 |
+
"acE;": "\u223e\u0333",
|
1290 |
+
"acd;": "\u223f",
|
1291 |
+
"acirc": "\xe2",
|
1292 |
+
"acirc;": "\xe2",
|
1293 |
+
"acute": "\xb4",
|
1294 |
+
"acute;": "\xb4",
|
1295 |
+
"acy;": "\u0430",
|
1296 |
+
"aelig": "\xe6",
|
1297 |
+
"aelig;": "\xe6",
|
1298 |
+
"af;": "\u2061",
|
1299 |
+
"afr;": "\U0001d51e",
|
1300 |
+
"agrave": "\xe0",
|
1301 |
+
"agrave;": "\xe0",
|
1302 |
+
"alefsym;": "\u2135",
|
1303 |
+
"aleph;": "\u2135",
|
1304 |
+
"alpha;": "\u03b1",
|
1305 |
+
"amacr;": "\u0101",
|
1306 |
+
"amalg;": "\u2a3f",
|
1307 |
+
"amp": "&",
|
1308 |
+
"amp;": "&",
|
1309 |
+
"and;": "\u2227",
|
1310 |
+
"andand;": "\u2a55",
|
1311 |
+
"andd;": "\u2a5c",
|
1312 |
+
"andslope;": "\u2a58",
|
1313 |
+
"andv;": "\u2a5a",
|
1314 |
+
"ang;": "\u2220",
|
1315 |
+
"ange;": "\u29a4",
|
1316 |
+
"angle;": "\u2220",
|
1317 |
+
"angmsd;": "\u2221",
|
1318 |
+
"angmsdaa;": "\u29a8",
|
1319 |
+
"angmsdab;": "\u29a9",
|
1320 |
+
"angmsdac;": "\u29aa",
|
1321 |
+
"angmsdad;": "\u29ab",
|
1322 |
+
"angmsdae;": "\u29ac",
|
1323 |
+
"angmsdaf;": "\u29ad",
|
1324 |
+
"angmsdag;": "\u29ae",
|
1325 |
+
"angmsdah;": "\u29af",
|
1326 |
+
"angrt;": "\u221f",
|
1327 |
+
"angrtvb;": "\u22be",
|
1328 |
+
"angrtvbd;": "\u299d",
|
1329 |
+
"angsph;": "\u2222",
|
1330 |
+
"angst;": "\xc5",
|
1331 |
+
"angzarr;": "\u237c",
|
1332 |
+
"aogon;": "\u0105",
|
1333 |
+
"aopf;": "\U0001d552",
|
1334 |
+
"ap;": "\u2248",
|
1335 |
+
"apE;": "\u2a70",
|
1336 |
+
"apacir;": "\u2a6f",
|
1337 |
+
"ape;": "\u224a",
|
1338 |
+
"apid;": "\u224b",
|
1339 |
+
"apos;": "'",
|
1340 |
+
"approx;": "\u2248",
|
1341 |
+
"approxeq;": "\u224a",
|
1342 |
+
"aring": "\xe5",
|
1343 |
+
"aring;": "\xe5",
|
1344 |
+
"ascr;": "\U0001d4b6",
|
1345 |
+
"ast;": "*",
|
1346 |
+
"asymp;": "\u2248",
|
1347 |
+
"asympeq;": "\u224d",
|
1348 |
+
"atilde": "\xe3",
|
1349 |
+
"atilde;": "\xe3",
|
1350 |
+
"auml": "\xe4",
|
1351 |
+
"auml;": "\xe4",
|
1352 |
+
"awconint;": "\u2233",
|
1353 |
+
"awint;": "\u2a11",
|
1354 |
+
"bNot;": "\u2aed",
|
1355 |
+
"backcong;": "\u224c",
|
1356 |
+
"backepsilon;": "\u03f6",
|
1357 |
+
"backprime;": "\u2035",
|
1358 |
+
"backsim;": "\u223d",
|
1359 |
+
"backsimeq;": "\u22cd",
|
1360 |
+
"barvee;": "\u22bd",
|
1361 |
+
"barwed;": "\u2305",
|
1362 |
+
"barwedge;": "\u2305",
|
1363 |
+
"bbrk;": "\u23b5",
|
1364 |
+
"bbrktbrk;": "\u23b6",
|
1365 |
+
"bcong;": "\u224c",
|
1366 |
+
"bcy;": "\u0431",
|
1367 |
+
"bdquo;": "\u201e",
|
1368 |
+
"becaus;": "\u2235",
|
1369 |
+
"because;": "\u2235",
|
1370 |
+
"bemptyv;": "\u29b0",
|
1371 |
+
"bepsi;": "\u03f6",
|
1372 |
+
"bernou;": "\u212c",
|
1373 |
+
"beta;": "\u03b2",
|
1374 |
+
"beth;": "\u2136",
|
1375 |
+
"between;": "\u226c",
|
1376 |
+
"bfr;": "\U0001d51f",
|
1377 |
+
"bigcap;": "\u22c2",
|
1378 |
+
"bigcirc;": "\u25ef",
|
1379 |
+
"bigcup;": "\u22c3",
|
1380 |
+
"bigodot;": "\u2a00",
|
1381 |
+
"bigoplus;": "\u2a01",
|
1382 |
+
"bigotimes;": "\u2a02",
|
1383 |
+
"bigsqcup;": "\u2a06",
|
1384 |
+
"bigstar;": "\u2605",
|
1385 |
+
"bigtriangledown;": "\u25bd",
|
1386 |
+
"bigtriangleup;": "\u25b3",
|
1387 |
+
"biguplus;": "\u2a04",
|
1388 |
+
"bigvee;": "\u22c1",
|
1389 |
+
"bigwedge;": "\u22c0",
|
1390 |
+
"bkarow;": "\u290d",
|
1391 |
+
"blacklozenge;": "\u29eb",
|
1392 |
+
"blacksquare;": "\u25aa",
|
1393 |
+
"blacktriangle;": "\u25b4",
|
1394 |
+
"blacktriangledown;": "\u25be",
|
1395 |
+
"blacktriangleleft;": "\u25c2",
|
1396 |
+
"blacktriangleright;": "\u25b8",
|
1397 |
+
"blank;": "\u2423",
|
1398 |
+
"blk12;": "\u2592",
|
1399 |
+
"blk14;": "\u2591",
|
1400 |
+
"blk34;": "\u2593",
|
1401 |
+
"block;": "\u2588",
|
1402 |
+
"bne;": "=\u20e5",
|
1403 |
+
"bnequiv;": "\u2261\u20e5",
|
1404 |
+
"bnot;": "\u2310",
|
1405 |
+
"bopf;": "\U0001d553",
|
1406 |
+
"bot;": "\u22a5",
|
1407 |
+
"bottom;": "\u22a5",
|
1408 |
+
"bowtie;": "\u22c8",
|
1409 |
+
"boxDL;": "\u2557",
|
1410 |
+
"boxDR;": "\u2554",
|
1411 |
+
"boxDl;": "\u2556",
|
1412 |
+
"boxDr;": "\u2553",
|
1413 |
+
"boxH;": "\u2550",
|
1414 |
+
"boxHD;": "\u2566",
|
1415 |
+
"boxHU;": "\u2569",
|
1416 |
+
"boxHd;": "\u2564",
|
1417 |
+
"boxHu;": "\u2567",
|
1418 |
+
"boxUL;": "\u255d",
|
1419 |
+
"boxUR;": "\u255a",
|
1420 |
+
"boxUl;": "\u255c",
|
1421 |
+
"boxUr;": "\u2559",
|
1422 |
+
"boxV;": "\u2551",
|
1423 |
+
"boxVH;": "\u256c",
|
1424 |
+
"boxVL;": "\u2563",
|
1425 |
+
"boxVR;": "\u2560",
|
1426 |
+
"boxVh;": "\u256b",
|
1427 |
+
"boxVl;": "\u2562",
|
1428 |
+
"boxVr;": "\u255f",
|
1429 |
+
"boxbox;": "\u29c9",
|
1430 |
+
"boxdL;": "\u2555",
|
1431 |
+
"boxdR;": "\u2552",
|
1432 |
+
"boxdl;": "\u2510",
|
1433 |
+
"boxdr;": "\u250c",
|
1434 |
+
"boxh;": "\u2500",
|
1435 |
+
"boxhD;": "\u2565",
|
1436 |
+
"boxhU;": "\u2568",
|
1437 |
+
"boxhd;": "\u252c",
|
1438 |
+
"boxhu;": "\u2534",
|
1439 |
+
"boxminus;": "\u229f",
|
1440 |
+
"boxplus;": "\u229e",
|
1441 |
+
"boxtimes;": "\u22a0",
|
1442 |
+
"boxuL;": "\u255b",
|
1443 |
+
"boxuR;": "\u2558",
|
1444 |
+
"boxul;": "\u2518",
|
1445 |
+
"boxur;": "\u2514",
|
1446 |
+
"boxv;": "\u2502",
|
1447 |
+
"boxvH;": "\u256a",
|
1448 |
+
"boxvL;": "\u2561",
|
1449 |
+
"boxvR;": "\u255e",
|
1450 |
+
"boxvh;": "\u253c",
|
1451 |
+
"boxvl;": "\u2524",
|
1452 |
+
"boxvr;": "\u251c",
|
1453 |
+
"bprime;": "\u2035",
|
1454 |
+
"breve;": "\u02d8",
|
1455 |
+
"brvbar": "\xa6",
|
1456 |
+
"brvbar;": "\xa6",
|
1457 |
+
"bscr;": "\U0001d4b7",
|
1458 |
+
"bsemi;": "\u204f",
|
1459 |
+
"bsim;": "\u223d",
|
1460 |
+
"bsime;": "\u22cd",
|
1461 |
+
"bsol;": "\\",
|
1462 |
+
"bsolb;": "\u29c5",
|
1463 |
+
"bsolhsub;": "\u27c8",
|
1464 |
+
"bull;": "\u2022",
|
1465 |
+
"bullet;": "\u2022",
|
1466 |
+
"bump;": "\u224e",
|
1467 |
+
"bumpE;": "\u2aae",
|
1468 |
+
"bumpe;": "\u224f",
|
1469 |
+
"bumpeq;": "\u224f",
|
1470 |
+
"cacute;": "\u0107",
|
1471 |
+
"cap;": "\u2229",
|
1472 |
+
"capand;": "\u2a44",
|
1473 |
+
"capbrcup;": "\u2a49",
|
1474 |
+
"capcap;": "\u2a4b",
|
1475 |
+
"capcup;": "\u2a47",
|
1476 |
+
"capdot;": "\u2a40",
|
1477 |
+
"caps;": "\u2229\ufe00",
|
1478 |
+
"caret;": "\u2041",
|
1479 |
+
"caron;": "\u02c7",
|
1480 |
+
"ccaps;": "\u2a4d",
|
1481 |
+
"ccaron;": "\u010d",
|
1482 |
+
"ccedil": "\xe7",
|
1483 |
+
"ccedil;": "\xe7",
|
1484 |
+
"ccirc;": "\u0109",
|
1485 |
+
"ccups;": "\u2a4c",
|
1486 |
+
"ccupssm;": "\u2a50",
|
1487 |
+
"cdot;": "\u010b",
|
1488 |
+
"cedil": "\xb8",
|
1489 |
+
"cedil;": "\xb8",
|
1490 |
+
"cemptyv;": "\u29b2",
|
1491 |
+
"cent": "\xa2",
|
1492 |
+
"cent;": "\xa2",
|
1493 |
+
"centerdot;": "\xb7",
|
1494 |
+
"cfr;": "\U0001d520",
|
1495 |
+
"chcy;": "\u0447",
|
1496 |
+
"check;": "\u2713",
|
1497 |
+
"checkmark;": "\u2713",
|
1498 |
+
"chi;": "\u03c7",
|
1499 |
+
"cir;": "\u25cb",
|
1500 |
+
"cirE;": "\u29c3",
|
1501 |
+
"circ;": "\u02c6",
|
1502 |
+
"circeq;": "\u2257",
|
1503 |
+
"circlearrowleft;": "\u21ba",
|
1504 |
+
"circlearrowright;": "\u21bb",
|
1505 |
+
"circledR;": "\xae",
|
1506 |
+
"circledS;": "\u24c8",
|
1507 |
+
"circledast;": "\u229b",
|
1508 |
+
"circledcirc;": "\u229a",
|
1509 |
+
"circleddash;": "\u229d",
|
1510 |
+
"cire;": "\u2257",
|
1511 |
+
"cirfnint;": "\u2a10",
|
1512 |
+
"cirmid;": "\u2aef",
|
1513 |
+
"cirscir;": "\u29c2",
|
1514 |
+
"clubs;": "\u2663",
|
1515 |
+
"clubsuit;": "\u2663",
|
1516 |
+
"colon;": ":",
|
1517 |
+
"colone;": "\u2254",
|
1518 |
+
"coloneq;": "\u2254",
|
1519 |
+
"comma;": ",",
|
1520 |
+
"commat;": "@",
|
1521 |
+
"comp;": "\u2201",
|
1522 |
+
"compfn;": "\u2218",
|
1523 |
+
"complement;": "\u2201",
|
1524 |
+
"complexes;": "\u2102",
|
1525 |
+
"cong;": "\u2245",
|
1526 |
+
"congdot;": "\u2a6d",
|
1527 |
+
"conint;": "\u222e",
|
1528 |
+
"copf;": "\U0001d554",
|
1529 |
+
"coprod;": "\u2210",
|
1530 |
+
"copy": "\xa9",
|
1531 |
+
"copy;": "\xa9",
|
1532 |
+
"copysr;": "\u2117",
|
1533 |
+
"crarr;": "\u21b5",
|
1534 |
+
"cross;": "\u2717",
|
1535 |
+
"cscr;": "\U0001d4b8",
|
1536 |
+
"csub;": "\u2acf",
|
1537 |
+
"csube;": "\u2ad1",
|
1538 |
+
"csup;": "\u2ad0",
|
1539 |
+
"csupe;": "\u2ad2",
|
1540 |
+
"ctdot;": "\u22ef",
|
1541 |
+
"cudarrl;": "\u2938",
|
1542 |
+
"cudarrr;": "\u2935",
|
1543 |
+
"cuepr;": "\u22de",
|
1544 |
+
"cuesc;": "\u22df",
|
1545 |
+
"cularr;": "\u21b6",
|
1546 |
+
"cularrp;": "\u293d",
|
1547 |
+
"cup;": "\u222a",
|
1548 |
+
"cupbrcap;": "\u2a48",
|
1549 |
+
"cupcap;": "\u2a46",
|
1550 |
+
"cupcup;": "\u2a4a",
|
1551 |
+
"cupdot;": "\u228d",
|
1552 |
+
"cupor;": "\u2a45",
|
1553 |
+
"cups;": "\u222a\ufe00",
|
1554 |
+
"curarr;": "\u21b7",
|
1555 |
+
"curarrm;": "\u293c",
|
1556 |
+
"curlyeqprec;": "\u22de",
|
1557 |
+
"curlyeqsucc;": "\u22df",
|
1558 |
+
"curlyvee;": "\u22ce",
|
1559 |
+
"curlywedge;": "\u22cf",
|
1560 |
+
"curren": "\xa4",
|
1561 |
+
"curren;": "\xa4",
|
1562 |
+
"curvearrowleft;": "\u21b6",
|
1563 |
+
"curvearrowright;": "\u21b7",
|
1564 |
+
"cuvee;": "\u22ce",
|
1565 |
+
"cuwed;": "\u22cf",
|
1566 |
+
"cwconint;": "\u2232",
|
1567 |
+
"cwint;": "\u2231",
|
1568 |
+
"cylcty;": "\u232d",
|
1569 |
+
"dArr;": "\u21d3",
|
1570 |
+
"dHar;": "\u2965",
|
1571 |
+
"dagger;": "\u2020",
|
1572 |
+
"daleth;": "\u2138",
|
1573 |
+
"darr;": "\u2193",
|
1574 |
+
"dash;": "\u2010",
|
1575 |
+
"dashv;": "\u22a3",
|
1576 |
+
"dbkarow;": "\u290f",
|
1577 |
+
"dblac;": "\u02dd",
|
1578 |
+
"dcaron;": "\u010f",
|
1579 |
+
"dcy;": "\u0434",
|
1580 |
+
"dd;": "\u2146",
|
1581 |
+
"ddagger;": "\u2021",
|
1582 |
+
"ddarr;": "\u21ca",
|
1583 |
+
"ddotseq;": "\u2a77",
|
1584 |
+
"deg": "\xb0",
|
1585 |
+
"deg;": "\xb0",
|
1586 |
+
"delta;": "\u03b4",
|
1587 |
+
"demptyv;": "\u29b1",
|
1588 |
+
"dfisht;": "\u297f",
|
1589 |
+
"dfr;": "\U0001d521",
|
1590 |
+
"dharl;": "\u21c3",
|
1591 |
+
"dharr;": "\u21c2",
|
1592 |
+
"diam;": "\u22c4",
|
1593 |
+
"diamond;": "\u22c4",
|
1594 |
+
"diamondsuit;": "\u2666",
|
1595 |
+
"diams;": "\u2666",
|
1596 |
+
"die;": "\xa8",
|
1597 |
+
"digamma;": "\u03dd",
|
1598 |
+
"disin;": "\u22f2",
|
1599 |
+
"div;": "\xf7",
|
1600 |
+
"divide": "\xf7",
|
1601 |
+
"divide;": "\xf7",
|
1602 |
+
"divideontimes;": "\u22c7",
|
1603 |
+
"divonx;": "\u22c7",
|
1604 |
+
"djcy;": "\u0452",
|
1605 |
+
"dlcorn;": "\u231e",
|
1606 |
+
"dlcrop;": "\u230d",
|
1607 |
+
"dollar;": "$",
|
1608 |
+
"dopf;": "\U0001d555",
|
1609 |
+
"dot;": "\u02d9",
|
1610 |
+
"doteq;": "\u2250",
|
1611 |
+
"doteqdot;": "\u2251",
|
1612 |
+
"dotminus;": "\u2238",
|
1613 |
+
"dotplus;": "\u2214",
|
1614 |
+
"dotsquare;": "\u22a1",
|
1615 |
+
"doublebarwedge;": "\u2306",
|
1616 |
+
"downarrow;": "\u2193",
|
1617 |
+
"downdownarrows;": "\u21ca",
|
1618 |
+
"downharpoonleft;": "\u21c3",
|
1619 |
+
"downharpoonright;": "\u21c2",
|
1620 |
+
"drbkarow;": "\u2910",
|
1621 |
+
"drcorn;": "\u231f",
|
1622 |
+
"drcrop;": "\u230c",
|
1623 |
+
"dscr;": "\U0001d4b9",
|
1624 |
+
"dscy;": "\u0455",
|
1625 |
+
"dsol;": "\u29f6",
|
1626 |
+
"dstrok;": "\u0111",
|
1627 |
+
"dtdot;": "\u22f1",
|
1628 |
+
"dtri;": "\u25bf",
|
1629 |
+
"dtrif;": "\u25be",
|
1630 |
+
"duarr;": "\u21f5",
|
1631 |
+
"duhar;": "\u296f",
|
1632 |
+
"dwangle;": "\u29a6",
|
1633 |
+
"dzcy;": "\u045f",
|
1634 |
+
"dzigrarr;": "\u27ff",
|
1635 |
+
"eDDot;": "\u2a77",
|
1636 |
+
"eDot;": "\u2251",
|
1637 |
+
"eacute": "\xe9",
|
1638 |
+
"eacute;": "\xe9",
|
1639 |
+
"easter;": "\u2a6e",
|
1640 |
+
"ecaron;": "\u011b",
|
1641 |
+
"ecir;": "\u2256",
|
1642 |
+
"ecirc": "\xea",
|
1643 |
+
"ecirc;": "\xea",
|
1644 |
+
"ecolon;": "\u2255",
|
1645 |
+
"ecy;": "\u044d",
|
1646 |
+
"edot;": "\u0117",
|
1647 |
+
"ee;": "\u2147",
|
1648 |
+
"efDot;": "\u2252",
|
1649 |
+
"efr;": "\U0001d522",
|
1650 |
+
"eg;": "\u2a9a",
|
1651 |
+
"egrave": "\xe8",
|
1652 |
+
"egrave;": "\xe8",
|
1653 |
+
"egs;": "\u2a96",
|
1654 |
+
"egsdot;": "\u2a98",
|
1655 |
+
"el;": "\u2a99",
|
1656 |
+
"elinters;": "\u23e7",
|
1657 |
+
"ell;": "\u2113",
|
1658 |
+
"els;": "\u2a95",
|
1659 |
+
"elsdot;": "\u2a97",
|
1660 |
+
"emacr;": "\u0113",
|
1661 |
+
"empty;": "\u2205",
|
1662 |
+
"emptyset;": "\u2205",
|
1663 |
+
"emptyv;": "\u2205",
|
1664 |
+
"emsp13;": "\u2004",
|
1665 |
+
"emsp14;": "\u2005",
|
1666 |
+
"emsp;": "\u2003",
|
1667 |
+
"eng;": "\u014b",
|
1668 |
+
"ensp;": "\u2002",
|
1669 |
+
"eogon;": "\u0119",
|
1670 |
+
"eopf;": "\U0001d556",
|
1671 |
+
"epar;": "\u22d5",
|
1672 |
+
"eparsl;": "\u29e3",
|
1673 |
+
"eplus;": "\u2a71",
|
1674 |
+
"epsi;": "\u03b5",
|
1675 |
+
"epsilon;": "\u03b5",
|
1676 |
+
"epsiv;": "\u03f5",
|
1677 |
+
"eqcirc;": "\u2256",
|
1678 |
+
"eqcolon;": "\u2255",
|
1679 |
+
"eqsim;": "\u2242",
|
1680 |
+
"eqslantgtr;": "\u2a96",
|
1681 |
+
"eqslantless;": "\u2a95",
|
1682 |
+
"equals;": "=",
|
1683 |
+
"equest;": "\u225f",
|
1684 |
+
"equiv;": "\u2261",
|
1685 |
+
"equivDD;": "\u2a78",
|
1686 |
+
"eqvparsl;": "\u29e5",
|
1687 |
+
"erDot;": "\u2253",
|
1688 |
+
"erarr;": "\u2971",
|
1689 |
+
"escr;": "\u212f",
|
1690 |
+
"esdot;": "\u2250",
|
1691 |
+
"esim;": "\u2242",
|
1692 |
+
"eta;": "\u03b7",
|
1693 |
+
"eth": "\xf0",
|
1694 |
+
"eth;": "\xf0",
|
1695 |
+
"euml": "\xeb",
|
1696 |
+
"euml;": "\xeb",
|
1697 |
+
"euro;": "\u20ac",
|
1698 |
+
"excl;": "!",
|
1699 |
+
"exist;": "\u2203",
|
1700 |
+
"expectation;": "\u2130",
|
1701 |
+
"exponentiale;": "\u2147",
|
1702 |
+
"fallingdotseq;": "\u2252",
|
1703 |
+
"fcy;": "\u0444",
|
1704 |
+
"female;": "\u2640",
|
1705 |
+
"ffilig;": "\ufb03",
|
1706 |
+
"fflig;": "\ufb00",
|
1707 |
+
"ffllig;": "\ufb04",
|
1708 |
+
"ffr;": "\U0001d523",
|
1709 |
+
"filig;": "\ufb01",
|
1710 |
+
"fjlig;": "fj",
|
1711 |
+
"flat;": "\u266d",
|
1712 |
+
"fllig;": "\ufb02",
|
1713 |
+
"fltns;": "\u25b1",
|
1714 |
+
"fnof;": "\u0192",
|
1715 |
+
"fopf;": "\U0001d557",
|
1716 |
+
"forall;": "\u2200",
|
1717 |
+
"fork;": "\u22d4",
|
1718 |
+
"forkv;": "\u2ad9",
|
1719 |
+
"fpartint;": "\u2a0d",
|
1720 |
+
"frac12": "\xbd",
|
1721 |
+
"frac12;": "\xbd",
|
1722 |
+
"frac13;": "\u2153",
|
1723 |
+
"frac14": "\xbc",
|
1724 |
+
"frac14;": "\xbc",
|
1725 |
+
"frac15;": "\u2155",
|
1726 |
+
"frac16;": "\u2159",
|
1727 |
+
"frac18;": "\u215b",
|
1728 |
+
"frac23;": "\u2154",
|
1729 |
+
"frac25;": "\u2156",
|
1730 |
+
"frac34": "\xbe",
|
1731 |
+
"frac34;": "\xbe",
|
1732 |
+
"frac35;": "\u2157",
|
1733 |
+
"frac38;": "\u215c",
|
1734 |
+
"frac45;": "\u2158",
|
1735 |
+
"frac56;": "\u215a",
|
1736 |
+
"frac58;": "\u215d",
|
1737 |
+
"frac78;": "\u215e",
|
1738 |
+
"frasl;": "\u2044",
|
1739 |
+
"frown;": "\u2322",
|
1740 |
+
"fscr;": "\U0001d4bb",
|
1741 |
+
"gE;": "\u2267",
|
1742 |
+
"gEl;": "\u2a8c",
|
1743 |
+
"gacute;": "\u01f5",
|
1744 |
+
"gamma;": "\u03b3",
|
1745 |
+
"gammad;": "\u03dd",
|
1746 |
+
"gap;": "\u2a86",
|
1747 |
+
"gbreve;": "\u011f",
|
1748 |
+
"gcirc;": "\u011d",
|
1749 |
+
"gcy;": "\u0433",
|
1750 |
+
"gdot;": "\u0121",
|
1751 |
+
"ge;": "\u2265",
|
1752 |
+
"gel;": "\u22db",
|
1753 |
+
"geq;": "\u2265",
|
1754 |
+
"geqq;": "\u2267",
|
1755 |
+
"geqslant;": "\u2a7e",
|
1756 |
+
"ges;": "\u2a7e",
|
1757 |
+
"gescc;": "\u2aa9",
|
1758 |
+
"gesdot;": "\u2a80",
|
1759 |
+
"gesdoto;": "\u2a82",
|
1760 |
+
"gesdotol;": "\u2a84",
|
1761 |
+
"gesl;": "\u22db\ufe00",
|
1762 |
+
"gesles;": "\u2a94",
|
1763 |
+
"gfr;": "\U0001d524",
|
1764 |
+
"gg;": "\u226b",
|
1765 |
+
"ggg;": "\u22d9",
|
1766 |
+
"gimel;": "\u2137",
|
1767 |
+
"gjcy;": "\u0453",
|
1768 |
+
"gl;": "\u2277",
|
1769 |
+
"glE;": "\u2a92",
|
1770 |
+
"gla;": "\u2aa5",
|
1771 |
+
"glj;": "\u2aa4",
|
1772 |
+
"gnE;": "\u2269",
|
1773 |
+
"gnap;": "\u2a8a",
|
1774 |
+
"gnapprox;": "\u2a8a",
|
1775 |
+
"gne;": "\u2a88",
|
1776 |
+
"gneq;": "\u2a88",
|
1777 |
+
"gneqq;": "\u2269",
|
1778 |
+
"gnsim;": "\u22e7",
|
1779 |
+
"gopf;": "\U0001d558",
|
1780 |
+
"grave;": "`",
|
1781 |
+
"gscr;": "\u210a",
|
1782 |
+
"gsim;": "\u2273",
|
1783 |
+
"gsime;": "\u2a8e",
|
1784 |
+
"gsiml;": "\u2a90",
|
1785 |
+
"gt": ">",
|
1786 |
+
"gt;": ">",
|
1787 |
+
"gtcc;": "\u2aa7",
|
1788 |
+
"gtcir;": "\u2a7a",
|
1789 |
+
"gtdot;": "\u22d7",
|
1790 |
+
"gtlPar;": "\u2995",
|
1791 |
+
"gtquest;": "\u2a7c",
|
1792 |
+
"gtrapprox;": "\u2a86",
|
1793 |
+
"gtrarr;": "\u2978",
|
1794 |
+
"gtrdot;": "\u22d7",
|
1795 |
+
"gtreqless;": "\u22db",
|
1796 |
+
"gtreqqless;": "\u2a8c",
|
1797 |
+
"gtrless;": "\u2277",
|
1798 |
+
"gtrsim;": "\u2273",
|
1799 |
+
"gvertneqq;": "\u2269\ufe00",
|
1800 |
+
"gvnE;": "\u2269\ufe00",
|
1801 |
+
"hArr;": "\u21d4",
|
1802 |
+
"hairsp;": "\u200a",
|
1803 |
+
"half;": "\xbd",
|
1804 |
+
"hamilt;": "\u210b",
|
1805 |
+
"hardcy;": "\u044a",
|
1806 |
+
"harr;": "\u2194",
|
1807 |
+
"harrcir;": "\u2948",
|
1808 |
+
"harrw;": "\u21ad",
|
1809 |
+
"hbar;": "\u210f",
|
1810 |
+
"hcirc;": "\u0125",
|
1811 |
+
"hearts;": "\u2665",
|
1812 |
+
"heartsuit;": "\u2665",
|
1813 |
+
"hellip;": "\u2026",
|
1814 |
+
"hercon;": "\u22b9",
|
1815 |
+
"hfr;": "\U0001d525",
|
1816 |
+
"hksearow;": "\u2925",
|
1817 |
+
"hkswarow;": "\u2926",
|
1818 |
+
"hoarr;": "\u21ff",
|
1819 |
+
"homtht;": "\u223b",
|
1820 |
+
"hookleftarrow;": "\u21a9",
|
1821 |
+
"hookrightarrow;": "\u21aa",
|
1822 |
+
"hopf;": "\U0001d559",
|
1823 |
+
"horbar;": "\u2015",
|
1824 |
+
"hscr;": "\U0001d4bd",
|
1825 |
+
"hslash;": "\u210f",
|
1826 |
+
"hstrok;": "\u0127",
|
1827 |
+
"hybull;": "\u2043",
|
1828 |
+
"hyphen;": "\u2010",
|
1829 |
+
"iacute": "\xed",
|
1830 |
+
"iacute;": "\xed",
|
1831 |
+
"ic;": "\u2063",
|
1832 |
+
"icirc": "\xee",
|
1833 |
+
"icirc;": "\xee",
|
1834 |
+
"icy;": "\u0438",
|
1835 |
+
"iecy;": "\u0435",
|
1836 |
+
"iexcl": "\xa1",
|
1837 |
+
"iexcl;": "\xa1",
|
1838 |
+
"iff;": "\u21d4",
|
1839 |
+
"ifr;": "\U0001d526",
|
1840 |
+
"igrave": "\xec",
|
1841 |
+
"igrave;": "\xec",
|
1842 |
+
"ii;": "\u2148",
|
1843 |
+
"iiiint;": "\u2a0c",
|
1844 |
+
"iiint;": "\u222d",
|
1845 |
+
"iinfin;": "\u29dc",
|
1846 |
+
"iiota;": "\u2129",
|
1847 |
+
"ijlig;": "\u0133",
|
1848 |
+
"imacr;": "\u012b",
|
1849 |
+
"image;": "\u2111",
|
1850 |
+
"imagline;": "\u2110",
|
1851 |
+
"imagpart;": "\u2111",
|
1852 |
+
"imath;": "\u0131",
|
1853 |
+
"imof;": "\u22b7",
|
1854 |
+
"imped;": "\u01b5",
|
1855 |
+
"in;": "\u2208",
|
1856 |
+
"incare;": "\u2105",
|
1857 |
+
"infin;": "\u221e",
|
1858 |
+
"infintie;": "\u29dd",
|
1859 |
+
"inodot;": "\u0131",
|
1860 |
+
"int;": "\u222b",
|
1861 |
+
"intcal;": "\u22ba",
|
1862 |
+
"integers;": "\u2124",
|
1863 |
+
"intercal;": "\u22ba",
|
1864 |
+
"intlarhk;": "\u2a17",
|
1865 |
+
"intprod;": "\u2a3c",
|
1866 |
+
"iocy;": "\u0451",
|
1867 |
+
"iogon;": "\u012f",
|
1868 |
+
"iopf;": "\U0001d55a",
|
1869 |
+
"iota;": "\u03b9",
|
1870 |
+
"iprod;": "\u2a3c",
|
1871 |
+
"iquest": "\xbf",
|
1872 |
+
"iquest;": "\xbf",
|
1873 |
+
"iscr;": "\U0001d4be",
|
1874 |
+
"isin;": "\u2208",
|
1875 |
+
"isinE;": "\u22f9",
|
1876 |
+
"isindot;": "\u22f5",
|
1877 |
+
"isins;": "\u22f4",
|
1878 |
+
"isinsv;": "\u22f3",
|
1879 |
+
"isinv;": "\u2208",
|
1880 |
+
"it;": "\u2062",
|
1881 |
+
"itilde;": "\u0129",
|
1882 |
+
"iukcy;": "\u0456",
|
1883 |
+
"iuml": "\xef",
|
1884 |
+
"iuml;": "\xef",
|
1885 |
+
"jcirc;": "\u0135",
|
1886 |
+
"jcy;": "\u0439",
|
1887 |
+
"jfr;": "\U0001d527",
|
1888 |
+
"jmath;": "\u0237",
|
1889 |
+
"jopf;": "\U0001d55b",
|
1890 |
+
"jscr;": "\U0001d4bf",
|
1891 |
+
"jsercy;": "\u0458",
|
1892 |
+
"jukcy;": "\u0454",
|
1893 |
+
"kappa;": "\u03ba",
|
1894 |
+
"kappav;": "\u03f0",
|
1895 |
+
"kcedil;": "\u0137",
|
1896 |
+
"kcy;": "\u043a",
|
1897 |
+
"kfr;": "\U0001d528",
|
1898 |
+
"kgreen;": "\u0138",
|
1899 |
+
"khcy;": "\u0445",
|
1900 |
+
"kjcy;": "\u045c",
|
1901 |
+
"kopf;": "\U0001d55c",
|
1902 |
+
"kscr;": "\U0001d4c0",
|
1903 |
+
"lAarr;": "\u21da",
|
1904 |
+
"lArr;": "\u21d0",
|
1905 |
+
"lAtail;": "\u291b",
|
1906 |
+
"lBarr;": "\u290e",
|
1907 |
+
"lE;": "\u2266",
|
1908 |
+
"lEg;": "\u2a8b",
|
1909 |
+
"lHar;": "\u2962",
|
1910 |
+
"lacute;": "\u013a",
|
1911 |
+
"laemptyv;": "\u29b4",
|
1912 |
+
"lagran;": "\u2112",
|
1913 |
+
"lambda;": "\u03bb",
|
1914 |
+
"lang;": "\u27e8",
|
1915 |
+
"langd;": "\u2991",
|
1916 |
+
"langle;": "\u27e8",
|
1917 |
+
"lap;": "\u2a85",
|
1918 |
+
"laquo": "\xab",
|
1919 |
+
"laquo;": "\xab",
|
1920 |
+
"larr;": "\u2190",
|
1921 |
+
"larrb;": "\u21e4",
|
1922 |
+
"larrbfs;": "\u291f",
|
1923 |
+
"larrfs;": "\u291d",
|
1924 |
+
"larrhk;": "\u21a9",
|
1925 |
+
"larrlp;": "\u21ab",
|
1926 |
+
"larrpl;": "\u2939",
|
1927 |
+
"larrsim;": "\u2973",
|
1928 |
+
"larrtl;": "\u21a2",
|
1929 |
+
"lat;": "\u2aab",
|
1930 |
+
"latail;": "\u2919",
|
1931 |
+
"late;": "\u2aad",
|
1932 |
+
"lates;": "\u2aad\ufe00",
|
1933 |
+
"lbarr;": "\u290c",
|
1934 |
+
"lbbrk;": "\u2772",
|
1935 |
+
"lbrace;": "{",
|
1936 |
+
"lbrack;": "[",
|
1937 |
+
"lbrke;": "\u298b",
|
1938 |
+
"lbrksld;": "\u298f",
|
1939 |
+
"lbrkslu;": "\u298d",
|
1940 |
+
"lcaron;": "\u013e",
|
1941 |
+
"lcedil;": "\u013c",
|
1942 |
+
"lceil;": "\u2308",
|
1943 |
+
"lcub;": "{",
|
1944 |
+
"lcy;": "\u043b",
|
1945 |
+
"ldca;": "\u2936",
|
1946 |
+
"ldquo;": "\u201c",
|
1947 |
+
"ldquor;": "\u201e",
|
1948 |
+
"ldrdhar;": "\u2967",
|
1949 |
+
"ldrushar;": "\u294b",
|
1950 |
+
"ldsh;": "\u21b2",
|
1951 |
+
"le;": "\u2264",
|
1952 |
+
"leftarrow;": "\u2190",
|
1953 |
+
"leftarrowtail;": "\u21a2",
|
1954 |
+
"leftharpoondown;": "\u21bd",
|
1955 |
+
"leftharpoonup;": "\u21bc",
|
1956 |
+
"leftleftarrows;": "\u21c7",
|
1957 |
+
"leftrightarrow;": "\u2194",
|
1958 |
+
"leftrightarrows;": "\u21c6",
|
1959 |
+
"leftrightharpoons;": "\u21cb",
|
1960 |
+
"leftrightsquigarrow;": "\u21ad",
|
1961 |
+
"leftthreetimes;": "\u22cb",
|
1962 |
+
"leg;": "\u22da",
|
1963 |
+
"leq;": "\u2264",
|
1964 |
+
"leqq;": "\u2266",
|
1965 |
+
"leqslant;": "\u2a7d",
|
1966 |
+
"les;": "\u2a7d",
|
1967 |
+
"lescc;": "\u2aa8",
|
1968 |
+
"lesdot;": "\u2a7f",
|
1969 |
+
"lesdoto;": "\u2a81",
|
1970 |
+
"lesdotor;": "\u2a83",
|
1971 |
+
"lesg;": "\u22da\ufe00",
|
1972 |
+
"lesges;": "\u2a93",
|
1973 |
+
"lessapprox;": "\u2a85",
|
1974 |
+
"lessdot;": "\u22d6",
|
1975 |
+
"lesseqgtr;": "\u22da",
|
1976 |
+
"lesseqqgtr;": "\u2a8b",
|
1977 |
+
"lessgtr;": "\u2276",
|
1978 |
+
"lesssim;": "\u2272",
|
1979 |
+
"lfisht;": "\u297c",
|
1980 |
+
"lfloor;": "\u230a",
|
1981 |
+
"lfr;": "\U0001d529",
|
1982 |
+
"lg;": "\u2276",
|
1983 |
+
"lgE;": "\u2a91",
|
1984 |
+
"lhard;": "\u21bd",
|
1985 |
+
"lharu;": "\u21bc",
|
1986 |
+
"lharul;": "\u296a",
|
1987 |
+
"lhblk;": "\u2584",
|
1988 |
+
"ljcy;": "\u0459",
|
1989 |
+
"ll;": "\u226a",
|
1990 |
+
"llarr;": "\u21c7",
|
1991 |
+
"llcorner;": "\u231e",
|
1992 |
+
"llhard;": "\u296b",
|
1993 |
+
"lltri;": "\u25fa",
|
1994 |
+
"lmidot;": "\u0140",
|
1995 |
+
"lmoust;": "\u23b0",
|
1996 |
+
"lmoustache;": "\u23b0",
|
1997 |
+
"lnE;": "\u2268",
|
1998 |
+
"lnap;": "\u2a89",
|
1999 |
+
"lnapprox;": "\u2a89",
|
2000 |
+
"lne;": "\u2a87",
|
2001 |
+
"lneq;": "\u2a87",
|
2002 |
+
"lneqq;": "\u2268",
|
2003 |
+
"lnsim;": "\u22e6",
|
2004 |
+
"loang;": "\u27ec",
|
2005 |
+
"loarr;": "\u21fd",
|
2006 |
+
"lobrk;": "\u27e6",
|
2007 |
+
"longleftarrow;": "\u27f5",
|
2008 |
+
"longleftrightarrow;": "\u27f7",
|
2009 |
+
"longmapsto;": "\u27fc",
|
2010 |
+
"longrightarrow;": "\u27f6",
|
2011 |
+
"looparrowleft;": "\u21ab",
|
2012 |
+
"looparrowright;": "\u21ac",
|
2013 |
+
"lopar;": "\u2985",
|
2014 |
+
"lopf;": "\U0001d55d",
|
2015 |
+
"loplus;": "\u2a2d",
|
2016 |
+
"lotimes;": "\u2a34",
|
2017 |
+
"lowast;": "\u2217",
|
2018 |
+
"lowbar;": "_",
|
2019 |
+
"loz;": "\u25ca",
|
2020 |
+
"lozenge;": "\u25ca",
|
2021 |
+
"lozf;": "\u29eb",
|
2022 |
+
"lpar;": "(",
|
2023 |
+
"lparlt;": "\u2993",
|
2024 |
+
"lrarr;": "\u21c6",
|
2025 |
+
"lrcorner;": "\u231f",
|
2026 |
+
"lrhar;": "\u21cb",
|
2027 |
+
"lrhard;": "\u296d",
|
2028 |
+
"lrm;": "\u200e",
|
2029 |
+
"lrtri;": "\u22bf",
|
2030 |
+
"lsaquo;": "\u2039",
|
2031 |
+
"lscr;": "\U0001d4c1",
|
2032 |
+
"lsh;": "\u21b0",
|
2033 |
+
"lsim;": "\u2272",
|
2034 |
+
"lsime;": "\u2a8d",
|
2035 |
+
"lsimg;": "\u2a8f",
|
2036 |
+
"lsqb;": "[",
|
2037 |
+
"lsquo;": "\u2018",
|
2038 |
+
"lsquor;": "\u201a",
|
2039 |
+
"lstrok;": "\u0142",
|
2040 |
+
"lt": "<",
|
2041 |
+
"lt;": "<",
|
2042 |
+
"ltcc;": "\u2aa6",
|
2043 |
+
"ltcir;": "\u2a79",
|
2044 |
+
"ltdot;": "\u22d6",
|
2045 |
+
"lthree;": "\u22cb",
|
2046 |
+
"ltimes;": "\u22c9",
|
2047 |
+
"ltlarr;": "\u2976",
|
2048 |
+
"ltquest;": "\u2a7b",
|
2049 |
+
"ltrPar;": "\u2996",
|
2050 |
+
"ltri;": "\u25c3",
|
2051 |
+
"ltrie;": "\u22b4",
|
2052 |
+
"ltrif;": "\u25c2",
|
2053 |
+
"lurdshar;": "\u294a",
|
2054 |
+
"luruhar;": "\u2966",
|
2055 |
+
"lvertneqq;": "\u2268\ufe00",
|
2056 |
+
"lvnE;": "\u2268\ufe00",
|
2057 |
+
"mDDot;": "\u223a",
|
2058 |
+
"macr": "\xaf",
|
2059 |
+
"macr;": "\xaf",
|
2060 |
+
"male;": "\u2642",
|
2061 |
+
"malt;": "\u2720",
|
2062 |
+
"maltese;": "\u2720",
|
2063 |
+
"map;": "\u21a6",
|
2064 |
+
"mapsto;": "\u21a6",
|
2065 |
+
"mapstodown;": "\u21a7",
|
2066 |
+
"mapstoleft;": "\u21a4",
|
2067 |
+
"mapstoup;": "\u21a5",
|
2068 |
+
"marker;": "\u25ae",
|
2069 |
+
"mcomma;": "\u2a29",
|
2070 |
+
"mcy;": "\u043c",
|
2071 |
+
"mdash;": "\u2014",
|
2072 |
+
"measuredangle;": "\u2221",
|
2073 |
+
"mfr;": "\U0001d52a",
|
2074 |
+
"mho;": "\u2127",
|
2075 |
+
"micro": "\xb5",
|
2076 |
+
"micro;": "\xb5",
|
2077 |
+
"mid;": "\u2223",
|
2078 |
+
"midast;": "*",
|
2079 |
+
"midcir;": "\u2af0",
|
2080 |
+
"middot": "\xb7",
|
2081 |
+
"middot;": "\xb7",
|
2082 |
+
"minus;": "\u2212",
|
2083 |
+
"minusb;": "\u229f",
|
2084 |
+
"minusd;": "\u2238",
|
2085 |
+
"minusdu;": "\u2a2a",
|
2086 |
+
"mlcp;": "\u2adb",
|
2087 |
+
"mldr;": "\u2026",
|
2088 |
+
"mnplus;": "\u2213",
|
2089 |
+
"models;": "\u22a7",
|
2090 |
+
"mopf;": "\U0001d55e",
|
2091 |
+
"mp;": "\u2213",
|
2092 |
+
"mscr;": "\U0001d4c2",
|
2093 |
+
"mstpos;": "\u223e",
|
2094 |
+
"mu;": "\u03bc",
|
2095 |
+
"multimap;": "\u22b8",
|
2096 |
+
"mumap;": "\u22b8",
|
2097 |
+
"nGg;": "\u22d9\u0338",
|
2098 |
+
"nGt;": "\u226b\u20d2",
|
2099 |
+
"nGtv;": "\u226b\u0338",
|
2100 |
+
"nLeftarrow;": "\u21cd",
|
2101 |
+
"nLeftrightarrow;": "\u21ce",
|
2102 |
+
"nLl;": "\u22d8\u0338",
|
2103 |
+
"nLt;": "\u226a\u20d2",
|
2104 |
+
"nLtv;": "\u226a\u0338",
|
2105 |
+
"nRightarrow;": "\u21cf",
|
2106 |
+
"nVDash;": "\u22af",
|
2107 |
+
"nVdash;": "\u22ae",
|
2108 |
+
"nabla;": "\u2207",
|
2109 |
+
"nacute;": "\u0144",
|
2110 |
+
"nang;": "\u2220\u20d2",
|
2111 |
+
"nap;": "\u2249",
|
2112 |
+
"napE;": "\u2a70\u0338",
|
2113 |
+
"napid;": "\u224b\u0338",
|
2114 |
+
"napos;": "\u0149",
|
2115 |
+
"napprox;": "\u2249",
|
2116 |
+
"natur;": "\u266e",
|
2117 |
+
"natural;": "\u266e",
|
2118 |
+
"naturals;": "\u2115",
|
2119 |
+
"nbsp": "\xa0",
|
2120 |
+
"nbsp;": "\xa0",
|
2121 |
+
"nbump;": "\u224e\u0338",
|
2122 |
+
"nbumpe;": "\u224f\u0338",
|
2123 |
+
"ncap;": "\u2a43",
|
2124 |
+
"ncaron;": "\u0148",
|
2125 |
+
"ncedil;": "\u0146",
|
2126 |
+
"ncong;": "\u2247",
|
2127 |
+
"ncongdot;": "\u2a6d\u0338",
|
2128 |
+
"ncup;": "\u2a42",
|
2129 |
+
"ncy;": "\u043d",
|
2130 |
+
"ndash;": "\u2013",
|
2131 |
+
"ne;": "\u2260",
|
2132 |
+
"neArr;": "\u21d7",
|
2133 |
+
"nearhk;": "\u2924",
|
2134 |
+
"nearr;": "\u2197",
|
2135 |
+
"nearrow;": "\u2197",
|
2136 |
+
"nedot;": "\u2250\u0338",
|
2137 |
+
"nequiv;": "\u2262",
|
2138 |
+
"nesear;": "\u2928",
|
2139 |
+
"nesim;": "\u2242\u0338",
|
2140 |
+
"nexist;": "\u2204",
|
2141 |
+
"nexists;": "\u2204",
|
2142 |
+
"nfr;": "\U0001d52b",
|
2143 |
+
"ngE;": "\u2267\u0338",
|
2144 |
+
"nge;": "\u2271",
|
2145 |
+
"ngeq;": "\u2271",
|
2146 |
+
"ngeqq;": "\u2267\u0338",
|
2147 |
+
"ngeqslant;": "\u2a7e\u0338",
|
2148 |
+
"nges;": "\u2a7e\u0338",
|
2149 |
+
"ngsim;": "\u2275",
|
2150 |
+
"ngt;": "\u226f",
|
2151 |
+
"ngtr;": "\u226f",
|
2152 |
+
"nhArr;": "\u21ce",
|
2153 |
+
"nharr;": "\u21ae",
|
2154 |
+
"nhpar;": "\u2af2",
|
2155 |
+
"ni;": "\u220b",
|
2156 |
+
"nis;": "\u22fc",
|
2157 |
+
"nisd;": "\u22fa",
|
2158 |
+
"niv;": "\u220b",
|
2159 |
+
"njcy;": "\u045a",
|
2160 |
+
"nlArr;": "\u21cd",
|
2161 |
+
"nlE;": "\u2266\u0338",
|
2162 |
+
"nlarr;": "\u219a",
|
2163 |
+
"nldr;": "\u2025",
|
2164 |
+
"nle;": "\u2270",
|
2165 |
+
"nleftarrow;": "\u219a",
|
2166 |
+
"nleftrightarrow;": "\u21ae",
|
2167 |
+
"nleq;": "\u2270",
|
2168 |
+
"nleqq;": "\u2266\u0338",
|
2169 |
+
"nleqslant;": "\u2a7d\u0338",
|
2170 |
+
"nles;": "\u2a7d\u0338",
|
2171 |
+
"nless;": "\u226e",
|
2172 |
+
"nlsim;": "\u2274",
|
2173 |
+
"nlt;": "\u226e",
|
2174 |
+
"nltri;": "\u22ea",
|
2175 |
+
"nltrie;": "\u22ec",
|
2176 |
+
"nmid;": "\u2224",
|
2177 |
+
"nopf;": "\U0001d55f",
|
2178 |
+
"not": "\xac",
|
2179 |
+
"not;": "\xac",
|
2180 |
+
"notin;": "\u2209",
|
2181 |
+
"notinE;": "\u22f9\u0338",
|
2182 |
+
"notindot;": "\u22f5\u0338",
|
2183 |
+
"notinva;": "\u2209",
|
2184 |
+
"notinvb;": "\u22f7",
|
2185 |
+
"notinvc;": "\u22f6",
|
2186 |
+
"notni;": "\u220c",
|
2187 |
+
"notniva;": "\u220c",
|
2188 |
+
"notnivb;": "\u22fe",
|
2189 |
+
"notnivc;": "\u22fd",
|
2190 |
+
"npar;": "\u2226",
|
2191 |
+
"nparallel;": "\u2226",
|
2192 |
+
"nparsl;": "\u2afd\u20e5",
|
2193 |
+
"npart;": "\u2202\u0338",
|
2194 |
+
"npolint;": "\u2a14",
|
2195 |
+
"npr;": "\u2280",
|
2196 |
+
"nprcue;": "\u22e0",
|
2197 |
+
"npre;": "\u2aaf\u0338",
|
2198 |
+
"nprec;": "\u2280",
|
2199 |
+
"npreceq;": "\u2aaf\u0338",
|
2200 |
+
"nrArr;": "\u21cf",
|
2201 |
+
"nrarr;": "\u219b",
|
2202 |
+
"nrarrc;": "\u2933\u0338",
|
2203 |
+
"nrarrw;": "\u219d\u0338",
|
2204 |
+
"nrightarrow;": "\u219b",
|
2205 |
+
"nrtri;": "\u22eb",
|
2206 |
+
"nrtrie;": "\u22ed",
|
2207 |
+
"nsc;": "\u2281",
|
2208 |
+
"nsccue;": "\u22e1",
|
2209 |
+
"nsce;": "\u2ab0\u0338",
|
2210 |
+
"nscr;": "\U0001d4c3",
|
2211 |
+
"nshortmid;": "\u2224",
|
2212 |
+
"nshortparallel;": "\u2226",
|
2213 |
+
"nsim;": "\u2241",
|
2214 |
+
"nsime;": "\u2244",
|
2215 |
+
"nsimeq;": "\u2244",
|
2216 |
+
"nsmid;": "\u2224",
|
2217 |
+
"nspar;": "\u2226",
|
2218 |
+
"nsqsube;": "\u22e2",
|
2219 |
+
"nsqsupe;": "\u22e3",
|
2220 |
+
"nsub;": "\u2284",
|
2221 |
+
"nsubE;": "\u2ac5\u0338",
|
2222 |
+
"nsube;": "\u2288",
|
2223 |
+
"nsubset;": "\u2282\u20d2",
|
2224 |
+
"nsubseteq;": "\u2288",
|
2225 |
+
"nsubseteqq;": "\u2ac5\u0338",
|
2226 |
+
"nsucc;": "\u2281",
|
2227 |
+
"nsucceq;": "\u2ab0\u0338",
|
2228 |
+
"nsup;": "\u2285",
|
2229 |
+
"nsupE;": "\u2ac6\u0338",
|
2230 |
+
"nsupe;": "\u2289",
|
2231 |
+
"nsupset;": "\u2283\u20d2",
|
2232 |
+
"nsupseteq;": "\u2289",
|
2233 |
+
"nsupseteqq;": "\u2ac6\u0338",
|
2234 |
+
"ntgl;": "\u2279",
|
2235 |
+
"ntilde": "\xf1",
|
2236 |
+
"ntilde;": "\xf1",
|
2237 |
+
"ntlg;": "\u2278",
|
2238 |
+
"ntriangleleft;": "\u22ea",
|
2239 |
+
"ntrianglelefteq;": "\u22ec",
|
2240 |
+
"ntriangleright;": "\u22eb",
|
2241 |
+
"ntrianglerighteq;": "\u22ed",
|
2242 |
+
"nu;": "\u03bd",
|
2243 |
+
"num;": "#",
|
2244 |
+
"numero;": "\u2116",
|
2245 |
+
"numsp;": "\u2007",
|
2246 |
+
"nvDash;": "\u22ad",
|
2247 |
+
"nvHarr;": "\u2904",
|
2248 |
+
"nvap;": "\u224d\u20d2",
|
2249 |
+
"nvdash;": "\u22ac",
|
2250 |
+
"nvge;": "\u2265\u20d2",
|
2251 |
+
"nvgt;": ">\u20d2",
|
2252 |
+
"nvinfin;": "\u29de",
|
2253 |
+
"nvlArr;": "\u2902",
|
2254 |
+
"nvle;": "\u2264\u20d2",
|
2255 |
+
"nvlt;": "<\u20d2",
|
2256 |
+
"nvltrie;": "\u22b4\u20d2",
|
2257 |
+
"nvrArr;": "\u2903",
|
2258 |
+
"nvrtrie;": "\u22b5\u20d2",
|
2259 |
+
"nvsim;": "\u223c\u20d2",
|
2260 |
+
"nwArr;": "\u21d6",
|
2261 |
+
"nwarhk;": "\u2923",
|
2262 |
+
"nwarr;": "\u2196",
|
2263 |
+
"nwarrow;": "\u2196",
|
2264 |
+
"nwnear;": "\u2927",
|
2265 |
+
"oS;": "\u24c8",
|
2266 |
+
"oacute": "\xf3",
|
2267 |
+
"oacute;": "\xf3",
|
2268 |
+
"oast;": "\u229b",
|
2269 |
+
"ocir;": "\u229a",
|
2270 |
+
"ocirc": "\xf4",
|
2271 |
+
"ocirc;": "\xf4",
|
2272 |
+
"ocy;": "\u043e",
|
2273 |
+
"odash;": "\u229d",
|
2274 |
+
"odblac;": "\u0151",
|
2275 |
+
"odiv;": "\u2a38",
|
2276 |
+
"odot;": "\u2299",
|
2277 |
+
"odsold;": "\u29bc",
|
2278 |
+
"oelig;": "\u0153",
|
2279 |
+
"ofcir;": "\u29bf",
|
2280 |
+
"ofr;": "\U0001d52c",
|
2281 |
+
"ogon;": "\u02db",
|
2282 |
+
"ograve": "\xf2",
|
2283 |
+
"ograve;": "\xf2",
|
2284 |
+
"ogt;": "\u29c1",
|
2285 |
+
"ohbar;": "\u29b5",
|
2286 |
+
"ohm;": "\u03a9",
|
2287 |
+
"oint;": "\u222e",
|
2288 |
+
"olarr;": "\u21ba",
|
2289 |
+
"olcir;": "\u29be",
|
2290 |
+
"olcross;": "\u29bb",
|
2291 |
+
"oline;": "\u203e",
|
2292 |
+
"olt;": "\u29c0",
|
2293 |
+
"omacr;": "\u014d",
|
2294 |
+
"omega;": "\u03c9",
|
2295 |
+
"omicron;": "\u03bf",
|
2296 |
+
"omid;": "\u29b6",
|
2297 |
+
"ominus;": "\u2296",
|
2298 |
+
"oopf;": "\U0001d560",
|
2299 |
+
"opar;": "\u29b7",
|
2300 |
+
"operp;": "\u29b9",
|
2301 |
+
"oplus;": "\u2295",
|
2302 |
+
"or;": "\u2228",
|
2303 |
+
"orarr;": "\u21bb",
|
2304 |
+
"ord;": "\u2a5d",
|
2305 |
+
"order;": "\u2134",
|
2306 |
+
"orderof;": "\u2134",
|
2307 |
+
"ordf": "\xaa",
|
2308 |
+
"ordf;": "\xaa",
|
2309 |
+
"ordm": "\xba",
|
2310 |
+
"ordm;": "\xba",
|
2311 |
+
"origof;": "\u22b6",
|
2312 |
+
"oror;": "\u2a56",
|
2313 |
+
"orslope;": "\u2a57",
|
2314 |
+
"orv;": "\u2a5b",
|
2315 |
+
"oscr;": "\u2134",
|
2316 |
+
"oslash": "\xf8",
|
2317 |
+
"oslash;": "\xf8",
|
2318 |
+
"osol;": "\u2298",
|
2319 |
+
"otilde": "\xf5",
|
2320 |
+
"otilde;": "\xf5",
|
2321 |
+
"otimes;": "\u2297",
|
2322 |
+
"otimesas;": "\u2a36",
|
2323 |
+
"ouml": "\xf6",
|
2324 |
+
"ouml;": "\xf6",
|
2325 |
+
"ovbar;": "\u233d",
|
2326 |
+
"par;": "\u2225",
|
2327 |
+
"para": "\xb6",
|
2328 |
+
"para;": "\xb6",
|
2329 |
+
"parallel;": "\u2225",
|
2330 |
+
"parsim;": "\u2af3",
|
2331 |
+
"parsl;": "\u2afd",
|
2332 |
+
"part;": "\u2202",
|
2333 |
+
"pcy;": "\u043f",
|
2334 |
+
"percnt;": "%",
|
2335 |
+
"period;": ".",
|
2336 |
+
"permil;": "\u2030",
|
2337 |
+
"perp;": "\u22a5",
|
2338 |
+
"pertenk;": "\u2031",
|
2339 |
+
"pfr;": "\U0001d52d",
|
2340 |
+
"phi;": "\u03c6",
|
2341 |
+
"phiv;": "\u03d5",
|
2342 |
+
"phmmat;": "\u2133",
|
2343 |
+
"phone;": "\u260e",
|
2344 |
+
"pi;": "\u03c0",
|
2345 |
+
"pitchfork;": "\u22d4",
|
2346 |
+
"piv;": "\u03d6",
|
2347 |
+
"planck;": "\u210f",
|
2348 |
+
"planckh;": "\u210e",
|
2349 |
+
"plankv;": "\u210f",
|
2350 |
+
"plus;": "+",
|
2351 |
+
"plusacir;": "\u2a23",
|
2352 |
+
"plusb;": "\u229e",
|
2353 |
+
"pluscir;": "\u2a22",
|
2354 |
+
"plusdo;": "\u2214",
|
2355 |
+
"plusdu;": "\u2a25",
|
2356 |
+
"pluse;": "\u2a72",
|
2357 |
+
"plusmn": "\xb1",
|
2358 |
+
"plusmn;": "\xb1",
|
2359 |
+
"plussim;": "\u2a26",
|
2360 |
+
"plustwo;": "\u2a27",
|
2361 |
+
"pm;": "\xb1",
|
2362 |
+
"pointint;": "\u2a15",
|
2363 |
+
"popf;": "\U0001d561",
|
2364 |
+
"pound": "\xa3",
|
2365 |
+
"pound;": "\xa3",
|
2366 |
+
"pr;": "\u227a",
|
2367 |
+
"prE;": "\u2ab3",
|
2368 |
+
"prap;": "\u2ab7",
|
2369 |
+
"prcue;": "\u227c",
|
2370 |
+
"pre;": "\u2aaf",
|
2371 |
+
"prec;": "\u227a",
|
2372 |
+
"precapprox;": "\u2ab7",
|
2373 |
+
"preccurlyeq;": "\u227c",
|
2374 |
+
"preceq;": "\u2aaf",
|
2375 |
+
"precnapprox;": "\u2ab9",
|
2376 |
+
"precneqq;": "\u2ab5",
|
2377 |
+
"precnsim;": "\u22e8",
|
2378 |
+
"precsim;": "\u227e",
|
2379 |
+
"prime;": "\u2032",
|
2380 |
+
"primes;": "\u2119",
|
2381 |
+
"prnE;": "\u2ab5",
|
2382 |
+
"prnap;": "\u2ab9",
|
2383 |
+
"prnsim;": "\u22e8",
|
2384 |
+
"prod;": "\u220f",
|
2385 |
+
"profalar;": "\u232e",
|
2386 |
+
"profline;": "\u2312",
|
2387 |
+
"profsurf;": "\u2313",
|
2388 |
+
"prop;": "\u221d",
|
2389 |
+
"propto;": "\u221d",
|
2390 |
+
"prsim;": "\u227e",
|
2391 |
+
"prurel;": "\u22b0",
|
2392 |
+
"pscr;": "\U0001d4c5",
|
2393 |
+
"psi;": "\u03c8",
|
2394 |
+
"puncsp;": "\u2008",
|
2395 |
+
"qfr;": "\U0001d52e",
|
2396 |
+
"qint;": "\u2a0c",
|
2397 |
+
"qopf;": "\U0001d562",
|
2398 |
+
"qprime;": "\u2057",
|
2399 |
+
"qscr;": "\U0001d4c6",
|
2400 |
+
"quaternions;": "\u210d",
|
2401 |
+
"quatint;": "\u2a16",
|
2402 |
+
"quest;": "?",
|
2403 |
+
"questeq;": "\u225f",
|
2404 |
+
"quot": "\"",
|
2405 |
+
"quot;": "\"",
|
2406 |
+
"rAarr;": "\u21db",
|
2407 |
+
"rArr;": "\u21d2",
|
2408 |
+
"rAtail;": "\u291c",
|
2409 |
+
"rBarr;": "\u290f",
|
2410 |
+
"rHar;": "\u2964",
|
2411 |
+
"race;": "\u223d\u0331",
|
2412 |
+
"racute;": "\u0155",
|
2413 |
+
"radic;": "\u221a",
|
2414 |
+
"raemptyv;": "\u29b3",
|
2415 |
+
"rang;": "\u27e9",
|
2416 |
+
"rangd;": "\u2992",
|
2417 |
+
"range;": "\u29a5",
|
2418 |
+
"rangle;": "\u27e9",
|
2419 |
+
"raquo": "\xbb",
|
2420 |
+
"raquo;": "\xbb",
|
2421 |
+
"rarr;": "\u2192",
|
2422 |
+
"rarrap;": "\u2975",
|
2423 |
+
"rarrb;": "\u21e5",
|
2424 |
+
"rarrbfs;": "\u2920",
|
2425 |
+
"rarrc;": "\u2933",
|
2426 |
+
"rarrfs;": "\u291e",
|
2427 |
+
"rarrhk;": "\u21aa",
|
2428 |
+
"rarrlp;": "\u21ac",
|
2429 |
+
"rarrpl;": "\u2945",
|
2430 |
+
"rarrsim;": "\u2974",
|
2431 |
+
"rarrtl;": "\u21a3",
|
2432 |
+
"rarrw;": "\u219d",
|
2433 |
+
"ratail;": "\u291a",
|
2434 |
+
"ratio;": "\u2236",
|
2435 |
+
"rationals;": "\u211a",
|
2436 |
+
"rbarr;": "\u290d",
|
2437 |
+
"rbbrk;": "\u2773",
|
2438 |
+
"rbrace;": "}",
|
2439 |
+
"rbrack;": "]",
|
2440 |
+
"rbrke;": "\u298c",
|
2441 |
+
"rbrksld;": "\u298e",
|
2442 |
+
"rbrkslu;": "\u2990",
|
2443 |
+
"rcaron;": "\u0159",
|
2444 |
+
"rcedil;": "\u0157",
|
2445 |
+
"rceil;": "\u2309",
|
2446 |
+
"rcub;": "}",
|
2447 |
+
"rcy;": "\u0440",
|
2448 |
+
"rdca;": "\u2937",
|
2449 |
+
"rdldhar;": "\u2969",
|
2450 |
+
"rdquo;": "\u201d",
|
2451 |
+
"rdquor;": "\u201d",
|
2452 |
+
"rdsh;": "\u21b3",
|
2453 |
+
"real;": "\u211c",
|
2454 |
+
"realine;": "\u211b",
|
2455 |
+
"realpart;": "\u211c",
|
2456 |
+
"reals;": "\u211d",
|
2457 |
+
"rect;": "\u25ad",
|
2458 |
+
"reg": "\xae",
|
2459 |
+
"reg;": "\xae",
|
2460 |
+
"rfisht;": "\u297d",
|
2461 |
+
"rfloor;": "\u230b",
|
2462 |
+
"rfr;": "\U0001d52f",
|
2463 |
+
"rhard;": "\u21c1",
|
2464 |
+
"rharu;": "\u21c0",
|
2465 |
+
"rharul;": "\u296c",
|
2466 |
+
"rho;": "\u03c1",
|
2467 |
+
"rhov;": "\u03f1",
|
2468 |
+
"rightarrow;": "\u2192",
|
2469 |
+
"rightarrowtail;": "\u21a3",
|
2470 |
+
"rightharpoondown;": "\u21c1",
|
2471 |
+
"rightharpoonup;": "\u21c0",
|
2472 |
+
"rightleftarrows;": "\u21c4",
|
2473 |
+
"rightleftharpoons;": "\u21cc",
|
2474 |
+
"rightrightarrows;": "\u21c9",
|
2475 |
+
"rightsquigarrow;": "\u219d",
|
2476 |
+
"rightthreetimes;": "\u22cc",
|
2477 |
+
"ring;": "\u02da",
|
2478 |
+
"risingdotseq;": "\u2253",
|
2479 |
+
"rlarr;": "\u21c4",
|
2480 |
+
"rlhar;": "\u21cc",
|
2481 |
+
"rlm;": "\u200f",
|
2482 |
+
"rmoust;": "\u23b1",
|
2483 |
+
"rmoustache;": "\u23b1",
|
2484 |
+
"rnmid;": "\u2aee",
|
2485 |
+
"roang;": "\u27ed",
|
2486 |
+
"roarr;": "\u21fe",
|
2487 |
+
"robrk;": "\u27e7",
|
2488 |
+
"ropar;": "\u2986",
|
2489 |
+
"ropf;": "\U0001d563",
|
2490 |
+
"roplus;": "\u2a2e",
|
2491 |
+
"rotimes;": "\u2a35",
|
2492 |
+
"rpar;": ")",
|
2493 |
+
"rpargt;": "\u2994",
|
2494 |
+
"rppolint;": "\u2a12",
|
2495 |
+
"rrarr;": "\u21c9",
|
2496 |
+
"rsaquo;": "\u203a",
|
2497 |
+
"rscr;": "\U0001d4c7",
|
2498 |
+
"rsh;": "\u21b1",
|
2499 |
+
"rsqb;": "]",
|
2500 |
+
"rsquo;": "\u2019",
|
2501 |
+
"rsquor;": "\u2019",
|
2502 |
+
"rthree;": "\u22cc",
|
2503 |
+
"rtimes;": "\u22ca",
|
2504 |
+
"rtri;": "\u25b9",
|
2505 |
+
"rtrie;": "\u22b5",
|
2506 |
+
"rtrif;": "\u25b8",
|
2507 |
+
"rtriltri;": "\u29ce",
|
2508 |
+
"ruluhar;": "\u2968",
|
2509 |
+
"rx;": "\u211e",
|
2510 |
+
"sacute;": "\u015b",
|
2511 |
+
"sbquo;": "\u201a",
|
2512 |
+
"sc;": "\u227b",
|
2513 |
+
"scE;": "\u2ab4",
|
2514 |
+
"scap;": "\u2ab8",
|
2515 |
+
"scaron;": "\u0161",
|
2516 |
+
"sccue;": "\u227d",
|
2517 |
+
"sce;": "\u2ab0",
|
2518 |
+
"scedil;": "\u015f",
|
2519 |
+
"scirc;": "\u015d",
|
2520 |
+
"scnE;": "\u2ab6",
|
2521 |
+
"scnap;": "\u2aba",
|
2522 |
+
"scnsim;": "\u22e9",
|
2523 |
+
"scpolint;": "\u2a13",
|
2524 |
+
"scsim;": "\u227f",
|
2525 |
+
"scy;": "\u0441",
|
2526 |
+
"sdot;": "\u22c5",
|
2527 |
+
"sdotb;": "\u22a1",
|
2528 |
+
"sdote;": "\u2a66",
|
2529 |
+
"seArr;": "\u21d8",
|
2530 |
+
"searhk;": "\u2925",
|
2531 |
+
"searr;": "\u2198",
|
2532 |
+
"searrow;": "\u2198",
|
2533 |
+
"sect": "\xa7",
|
2534 |
+
"sect;": "\xa7",
|
2535 |
+
"semi;": ";",
|
2536 |
+
"seswar;": "\u2929",
|
2537 |
+
"setminus;": "\u2216",
|
2538 |
+
"setmn;": "\u2216",
|
2539 |
+
"sext;": "\u2736",
|
2540 |
+
"sfr;": "\U0001d530",
|
2541 |
+
"sfrown;": "\u2322",
|
2542 |
+
"sharp;": "\u266f",
|
2543 |
+
"shchcy;": "\u0449",
|
2544 |
+
"shcy;": "\u0448",
|
2545 |
+
"shortmid;": "\u2223",
|
2546 |
+
"shortparallel;": "\u2225",
|
2547 |
+
"shy": "\xad",
|
2548 |
+
"shy;": "\xad",
|
2549 |
+
"sigma;": "\u03c3",
|
2550 |
+
"sigmaf;": "\u03c2",
|
2551 |
+
"sigmav;": "\u03c2",
|
2552 |
+
"sim;": "\u223c",
|
2553 |
+
"simdot;": "\u2a6a",
|
2554 |
+
"sime;": "\u2243",
|
2555 |
+
"simeq;": "\u2243",
|
2556 |
+
"simg;": "\u2a9e",
|
2557 |
+
"simgE;": "\u2aa0",
|
2558 |
+
"siml;": "\u2a9d",
|
2559 |
+
"simlE;": "\u2a9f",
|
2560 |
+
"simne;": "\u2246",
|
2561 |
+
"simplus;": "\u2a24",
|
2562 |
+
"simrarr;": "\u2972",
|
2563 |
+
"slarr;": "\u2190",
|
2564 |
+
"smallsetminus;": "\u2216",
|
2565 |
+
"smashp;": "\u2a33",
|
2566 |
+
"smeparsl;": "\u29e4",
|
2567 |
+
"smid;": "\u2223",
|
2568 |
+
"smile;": "\u2323",
|
2569 |
+
"smt;": "\u2aaa",
|
2570 |
+
"smte;": "\u2aac",
|
2571 |
+
"smtes;": "\u2aac\ufe00",
|
2572 |
+
"softcy;": "\u044c",
|
2573 |
+
"sol;": "/",
|
2574 |
+
"solb;": "\u29c4",
|
2575 |
+
"solbar;": "\u233f",
|
2576 |
+
"sopf;": "\U0001d564",
|
2577 |
+
"spades;": "\u2660",
|
2578 |
+
"spadesuit;": "\u2660",
|
2579 |
+
"spar;": "\u2225",
|
2580 |
+
"sqcap;": "\u2293",
|
2581 |
+
"sqcaps;": "\u2293\ufe00",
|
2582 |
+
"sqcup;": "\u2294",
|
2583 |
+
"sqcups;": "\u2294\ufe00",
|
2584 |
+
"sqsub;": "\u228f",
|
2585 |
+
"sqsube;": "\u2291",
|
2586 |
+
"sqsubset;": "\u228f",
|
2587 |
+
"sqsubseteq;": "\u2291",
|
2588 |
+
"sqsup;": "\u2290",
|
2589 |
+
"sqsupe;": "\u2292",
|
2590 |
+
"sqsupset;": "\u2290",
|
2591 |
+
"sqsupseteq;": "\u2292",
|
2592 |
+
"squ;": "\u25a1",
|
2593 |
+
"square;": "\u25a1",
|
2594 |
+
"squarf;": "\u25aa",
|
2595 |
+
"squf;": "\u25aa",
|
2596 |
+
"srarr;": "\u2192",
|
2597 |
+
"sscr;": "\U0001d4c8",
|
2598 |
+
"ssetmn;": "\u2216",
|
2599 |
+
"ssmile;": "\u2323",
|
2600 |
+
"sstarf;": "\u22c6",
|
2601 |
+
"star;": "\u2606",
|
2602 |
+
"starf;": "\u2605",
|
2603 |
+
"straightepsilon;": "\u03f5",
|
2604 |
+
"straightphi;": "\u03d5",
|
2605 |
+
"strns;": "\xaf",
|
2606 |
+
"sub;": "\u2282",
|
2607 |
+
"subE;": "\u2ac5",
|
2608 |
+
"subdot;": "\u2abd",
|
2609 |
+
"sube;": "\u2286",
|
2610 |
+
"subedot;": "\u2ac3",
|
2611 |
+
"submult;": "\u2ac1",
|
2612 |
+
"subnE;": "\u2acb",
|
2613 |
+
"subne;": "\u228a",
|
2614 |
+
"subplus;": "\u2abf",
|
2615 |
+
"subrarr;": "\u2979",
|
2616 |
+
"subset;": "\u2282",
|
2617 |
+
"subseteq;": "\u2286",
|
2618 |
+
"subseteqq;": "\u2ac5",
|
2619 |
+
"subsetneq;": "\u228a",
|
2620 |
+
"subsetneqq;": "\u2acb",
|
2621 |
+
"subsim;": "\u2ac7",
|
2622 |
+
"subsub;": "\u2ad5",
|
2623 |
+
"subsup;": "\u2ad3",
|
2624 |
+
"succ;": "\u227b",
|
2625 |
+
"succapprox;": "\u2ab8",
|
2626 |
+
"succcurlyeq;": "\u227d",
|
2627 |
+
"succeq;": "\u2ab0",
|
2628 |
+
"succnapprox;": "\u2aba",
|
2629 |
+
"succneqq;": "\u2ab6",
|
2630 |
+
"succnsim;": "\u22e9",
|
2631 |
+
"succsim;": "\u227f",
|
2632 |
+
"sum;": "\u2211",
|
2633 |
+
"sung;": "\u266a",
|
2634 |
+
"sup1": "\xb9",
|
2635 |
+
"sup1;": "\xb9",
|
2636 |
+
"sup2": "\xb2",
|
2637 |
+
"sup2;": "\xb2",
|
2638 |
+
"sup3": "\xb3",
|
2639 |
+
"sup3;": "\xb3",
|
2640 |
+
"sup;": "\u2283",
|
2641 |
+
"supE;": "\u2ac6",
|
2642 |
+
"supdot;": "\u2abe",
|
2643 |
+
"supdsub;": "\u2ad8",
|
2644 |
+
"supe;": "\u2287",
|
2645 |
+
"supedot;": "\u2ac4",
|
2646 |
+
"suphsol;": "\u27c9",
|
2647 |
+
"suphsub;": "\u2ad7",
|
2648 |
+
"suplarr;": "\u297b",
|
2649 |
+
"supmult;": "\u2ac2",
|
2650 |
+
"supnE;": "\u2acc",
|
2651 |
+
"supne;": "\u228b",
|
2652 |
+
"supplus;": "\u2ac0",
|
2653 |
+
"supset;": "\u2283",
|
2654 |
+
"supseteq;": "\u2287",
|
2655 |
+
"supseteqq;": "\u2ac6",
|
2656 |
+
"supsetneq;": "\u228b",
|
2657 |
+
"supsetneqq;": "\u2acc",
|
2658 |
+
"supsim;": "\u2ac8",
|
2659 |
+
"supsub;": "\u2ad4",
|
2660 |
+
"supsup;": "\u2ad6",
|
2661 |
+
"swArr;": "\u21d9",
|
2662 |
+
"swarhk;": "\u2926",
|
2663 |
+
"swarr;": "\u2199",
|
2664 |
+
"swarrow;": "\u2199",
|
2665 |
+
"swnwar;": "\u292a",
|
2666 |
+
"szlig": "\xdf",
|
2667 |
+
"szlig;": "\xdf",
|
2668 |
+
"target;": "\u2316",
|
2669 |
+
"tau;": "\u03c4",
|
2670 |
+
"tbrk;": "\u23b4",
|
2671 |
+
"tcaron;": "\u0165",
|
2672 |
+
"tcedil;": "\u0163",
|
2673 |
+
"tcy;": "\u0442",
|
2674 |
+
"tdot;": "\u20db",
|
2675 |
+
"telrec;": "\u2315",
|
2676 |
+
"tfr;": "\U0001d531",
|
2677 |
+
"there4;": "\u2234",
|
2678 |
+
"therefore;": "\u2234",
|
2679 |
+
"theta;": "\u03b8",
|
2680 |
+
"thetasym;": "\u03d1",
|
2681 |
+
"thetav;": "\u03d1",
|
2682 |
+
"thickapprox;": "\u2248",
|
2683 |
+
"thicksim;": "\u223c",
|
2684 |
+
"thinsp;": "\u2009",
|
2685 |
+
"thkap;": "\u2248",
|
2686 |
+
"thksim;": "\u223c",
|
2687 |
+
"thorn": "\xfe",
|
2688 |
+
"thorn;": "\xfe",
|
2689 |
+
"tilde;": "\u02dc",
|
2690 |
+
"times": "\xd7",
|
2691 |
+
"times;": "\xd7",
|
2692 |
+
"timesb;": "\u22a0",
|
2693 |
+
"timesbar;": "\u2a31",
|
2694 |
+
"timesd;": "\u2a30",
|
2695 |
+
"tint;": "\u222d",
|
2696 |
+
"toea;": "\u2928",
|
2697 |
+
"top;": "\u22a4",
|
2698 |
+
"topbot;": "\u2336",
|
2699 |
+
"topcir;": "\u2af1",
|
2700 |
+
"topf;": "\U0001d565",
|
2701 |
+
"topfork;": "\u2ada",
|
2702 |
+
"tosa;": "\u2929",
|
2703 |
+
"tprime;": "\u2034",
|
2704 |
+
"trade;": "\u2122",
|
2705 |
+
"triangle;": "\u25b5",
|
2706 |
+
"triangledown;": "\u25bf",
|
2707 |
+
"triangleleft;": "\u25c3",
|
2708 |
+
"trianglelefteq;": "\u22b4",
|
2709 |
+
"triangleq;": "\u225c",
|
2710 |
+
"triangleright;": "\u25b9",
|
2711 |
+
"trianglerighteq;": "\u22b5",
|
2712 |
+
"tridot;": "\u25ec",
|
2713 |
+
"trie;": "\u225c",
|
2714 |
+
"triminus;": "\u2a3a",
|
2715 |
+
"triplus;": "\u2a39",
|
2716 |
+
"trisb;": "\u29cd",
|
2717 |
+
"tritime;": "\u2a3b",
|
2718 |
+
"trpezium;": "\u23e2",
|
2719 |
+
"tscr;": "\U0001d4c9",
|
2720 |
+
"tscy;": "\u0446",
|
2721 |
+
"tshcy;": "\u045b",
|
2722 |
+
"tstrok;": "\u0167",
|
2723 |
+
"twixt;": "\u226c",
|
2724 |
+
"twoheadleftarrow;": "\u219e",
|
2725 |
+
"twoheadrightarrow;": "\u21a0",
|
2726 |
+
"uArr;": "\u21d1",
|
2727 |
+
"uHar;": "\u2963",
|
2728 |
+
"uacute": "\xfa",
|
2729 |
+
"uacute;": "\xfa",
|
2730 |
+
"uarr;": "\u2191",
|
2731 |
+
"ubrcy;": "\u045e",
|
2732 |
+
"ubreve;": "\u016d",
|
2733 |
+
"ucirc": "\xfb",
|
2734 |
+
"ucirc;": "\xfb",
|
2735 |
+
"ucy;": "\u0443",
|
2736 |
+
"udarr;": "\u21c5",
|
2737 |
+
"udblac;": "\u0171",
|
2738 |
+
"udhar;": "\u296e",
|
2739 |
+
"ufisht;": "\u297e",
|
2740 |
+
"ufr;": "\U0001d532",
|
2741 |
+
"ugrave": "\xf9",
|
2742 |
+
"ugrave;": "\xf9",
|
2743 |
+
"uharl;": "\u21bf",
|
2744 |
+
"uharr;": "\u21be",
|
2745 |
+
"uhblk;": "\u2580",
|
2746 |
+
"ulcorn;": "\u231c",
|
2747 |
+
"ulcorner;": "\u231c",
|
2748 |
+
"ulcrop;": "\u230f",
|
2749 |
+
"ultri;": "\u25f8",
|
2750 |
+
"umacr;": "\u016b",
|
2751 |
+
"uml": "\xa8",
|
2752 |
+
"uml;": "\xa8",
|
2753 |
+
"uogon;": "\u0173",
|
2754 |
+
"uopf;": "\U0001d566",
|
2755 |
+
"uparrow;": "\u2191",
|
2756 |
+
"updownarrow;": "\u2195",
|
2757 |
+
"upharpoonleft;": "\u21bf",
|
2758 |
+
"upharpoonright;": "\u21be",
|
2759 |
+
"uplus;": "\u228e",
|
2760 |
+
"upsi;": "\u03c5",
|
2761 |
+
"upsih;": "\u03d2",
|
2762 |
+
"upsilon;": "\u03c5",
|
2763 |
+
"upuparrows;": "\u21c8",
|
2764 |
+
"urcorn;": "\u231d",
|
2765 |
+
"urcorner;": "\u231d",
|
2766 |
+
"urcrop;": "\u230e",
|
2767 |
+
"uring;": "\u016f",
|
2768 |
+
"urtri;": "\u25f9",
|
2769 |
+
"uscr;": "\U0001d4ca",
|
2770 |
+
"utdot;": "\u22f0",
|
2771 |
+
"utilde;": "\u0169",
|
2772 |
+
"utri;": "\u25b5",
|
2773 |
+
"utrif;": "\u25b4",
|
2774 |
+
"uuarr;": "\u21c8",
|
2775 |
+
"uuml": "\xfc",
|
2776 |
+
"uuml;": "\xfc",
|
2777 |
+
"uwangle;": "\u29a7",
|
2778 |
+
"vArr;": "\u21d5",
|
2779 |
+
"vBar;": "\u2ae8",
|
2780 |
+
"vBarv;": "\u2ae9",
|
2781 |
+
"vDash;": "\u22a8",
|
2782 |
+
"vangrt;": "\u299c",
|
2783 |
+
"varepsilon;": "\u03f5",
|
2784 |
+
"varkappa;": "\u03f0",
|
2785 |
+
"varnothing;": "\u2205",
|
2786 |
+
"varphi;": "\u03d5",
|
2787 |
+
"varpi;": "\u03d6",
|
2788 |
+
"varpropto;": "\u221d",
|
2789 |
+
"varr;": "\u2195",
|
2790 |
+
"varrho;": "\u03f1",
|
2791 |
+
"varsigma;": "\u03c2",
|
2792 |
+
"varsubsetneq;": "\u228a\ufe00",
|
2793 |
+
"varsubsetneqq;": "\u2acb\ufe00",
|
2794 |
+
"varsupsetneq;": "\u228b\ufe00",
|
2795 |
+
"varsupsetneqq;": "\u2acc\ufe00",
|
2796 |
+
"vartheta;": "\u03d1",
|
2797 |
+
"vartriangleleft;": "\u22b2",
|
2798 |
+
"vartriangleright;": "\u22b3",
|
2799 |
+
"vcy;": "\u0432",
|
2800 |
+
"vdash;": "\u22a2",
|
2801 |
+
"vee;": "\u2228",
|
2802 |
+
"veebar;": "\u22bb",
|
2803 |
+
"veeeq;": "\u225a",
|
2804 |
+
"vellip;": "\u22ee",
|
2805 |
+
"verbar;": "|",
|
2806 |
+
"vert;": "|",
|
2807 |
+
"vfr;": "\U0001d533",
|
2808 |
+
"vltri;": "\u22b2",
|
2809 |
+
"vnsub;": "\u2282\u20d2",
|
2810 |
+
"vnsup;": "\u2283\u20d2",
|
2811 |
+
"vopf;": "\U0001d567",
|
2812 |
+
"vprop;": "\u221d",
|
2813 |
+
"vrtri;": "\u22b3",
|
2814 |
+
"vscr;": "\U0001d4cb",
|
2815 |
+
"vsubnE;": "\u2acb\ufe00",
|
2816 |
+
"vsubne;": "\u228a\ufe00",
|
2817 |
+
"vsupnE;": "\u2acc\ufe00",
|
2818 |
+
"vsupne;": "\u228b\ufe00",
|
2819 |
+
"vzigzag;": "\u299a",
|
2820 |
+
"wcirc;": "\u0175",
|
2821 |
+
"wedbar;": "\u2a5f",
|
2822 |
+
"wedge;": "\u2227",
|
2823 |
+
"wedgeq;": "\u2259",
|
2824 |
+
"weierp;": "\u2118",
|
2825 |
+
"wfr;": "\U0001d534",
|
2826 |
+
"wopf;": "\U0001d568",
|
2827 |
+
"wp;": "\u2118",
|
2828 |
+
"wr;": "\u2240",
|
2829 |
+
"wreath;": "\u2240",
|
2830 |
+
"wscr;": "\U0001d4cc",
|
2831 |
+
"xcap;": "\u22c2",
|
2832 |
+
"xcirc;": "\u25ef",
|
2833 |
+
"xcup;": "\u22c3",
|
2834 |
+
"xdtri;": "\u25bd",
|
2835 |
+
"xfr;": "\U0001d535",
|
2836 |
+
"xhArr;": "\u27fa",
|
2837 |
+
"xharr;": "\u27f7",
|
2838 |
+
"xi;": "\u03be",
|
2839 |
+
"xlArr;": "\u27f8",
|
2840 |
+
"xlarr;": "\u27f5",
|
2841 |
+
"xmap;": "\u27fc",
|
2842 |
+
"xnis;": "\u22fb",
|
2843 |
+
"xodot;": "\u2a00",
|
2844 |
+
"xopf;": "\U0001d569",
|
2845 |
+
"xoplus;": "\u2a01",
|
2846 |
+
"xotime;": "\u2a02",
|
2847 |
+
"xrArr;": "\u27f9",
|
2848 |
+
"xrarr;": "\u27f6",
|
2849 |
+
"xscr;": "\U0001d4cd",
|
2850 |
+
"xsqcup;": "\u2a06",
|
2851 |
+
"xuplus;": "\u2a04",
|
2852 |
+
"xutri;": "\u25b3",
|
2853 |
+
"xvee;": "\u22c1",
|
2854 |
+
"xwedge;": "\u22c0",
|
2855 |
+
"yacute": "\xfd",
|
2856 |
+
"yacute;": "\xfd",
|
2857 |
+
"yacy;": "\u044f",
|
2858 |
+
"ycirc;": "\u0177",
|
2859 |
+
"ycy;": "\u044b",
|
2860 |
+
"yen": "\xa5",
|
2861 |
+
"yen;": "\xa5",
|
2862 |
+
"yfr;": "\U0001d536",
|
2863 |
+
"yicy;": "\u0457",
|
2864 |
+
"yopf;": "\U0001d56a",
|
2865 |
+
"yscr;": "\U0001d4ce",
|
2866 |
+
"yucy;": "\u044e",
|
2867 |
+
"yuml": "\xff",
|
2868 |
+
"yuml;": "\xff",
|
2869 |
+
"zacute;": "\u017a",
|
2870 |
+
"zcaron;": "\u017e",
|
2871 |
+
"zcy;": "\u0437",
|
2872 |
+
"zdot;": "\u017c",
|
2873 |
+
"zeetrf;": "\u2128",
|
2874 |
+
"zeta;": "\u03b6",
|
2875 |
+
"zfr;": "\U0001d537",
|
2876 |
+
"zhcy;": "\u0436",
|
2877 |
+
"zigrarr;": "\u21dd",
|
2878 |
+
"zopf;": "\U0001d56b",
|
2879 |
+
"zscr;": "\U0001d4cf",
|
2880 |
+
"zwj;": "\u200d",
|
2881 |
+
"zwnj;": "\u200c",
|
2882 |
+
}
|
2883 |
+
|
2884 |
+
replacementCharacters = {
|
2885 |
+
0x0: "\uFFFD",
|
2886 |
+
0x0d: "\u000D",
|
2887 |
+
0x80: "\u20AC",
|
2888 |
+
0x81: "\u0081",
|
2889 |
+
0x82: "\u201A",
|
2890 |
+
0x83: "\u0192",
|
2891 |
+
0x84: "\u201E",
|
2892 |
+
0x85: "\u2026",
|
2893 |
+
0x86: "\u2020",
|
2894 |
+
0x87: "\u2021",
|
2895 |
+
0x88: "\u02C6",
|
2896 |
+
0x89: "\u2030",
|
2897 |
+
0x8A: "\u0160",
|
2898 |
+
0x8B: "\u2039",
|
2899 |
+
0x8C: "\u0152",
|
2900 |
+
0x8D: "\u008D",
|
2901 |
+
0x8E: "\u017D",
|
2902 |
+
0x8F: "\u008F",
|
2903 |
+
0x90: "\u0090",
|
2904 |
+
0x91: "\u2018",
|
2905 |
+
0x92: "\u2019",
|
2906 |
+
0x93: "\u201C",
|
2907 |
+
0x94: "\u201D",
|
2908 |
+
0x95: "\u2022",
|
2909 |
+
0x96: "\u2013",
|
2910 |
+
0x97: "\u2014",
|
2911 |
+
0x98: "\u02DC",
|
2912 |
+
0x99: "\u2122",
|
2913 |
+
0x9A: "\u0161",
|
2914 |
+
0x9B: "\u203A",
|
2915 |
+
0x9C: "\u0153",
|
2916 |
+
0x9D: "\u009D",
|
2917 |
+
0x9E: "\u017E",
|
2918 |
+
0x9F: "\u0178",
|
2919 |
+
}
|
2920 |
+
|
2921 |
+
tokenTypes = {
|
2922 |
+
"Doctype": 0,
|
2923 |
+
"Characters": 1,
|
2924 |
+
"SpaceCharacters": 2,
|
2925 |
+
"StartTag": 3,
|
2926 |
+
"EndTag": 4,
|
2927 |
+
"EmptyTag": 5,
|
2928 |
+
"Comment": 6,
|
2929 |
+
"ParseError": 7
|
2930 |
+
}
|
2931 |
+
|
2932 |
+
tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"],
|
2933 |
+
tokenTypes["EmptyTag"]])
|
2934 |
+
|
2935 |
+
|
2936 |
+
prefixes = {v: k for k, v in namespaces.items()}
|
2937 |
+
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
|
2938 |
+
|
2939 |
+
|
2940 |
+
class DataLossWarning(UserWarning):
|
2941 |
+
"""Raised when the current tree is unable to represent the input data"""
|
2942 |
+
pass
|
2943 |
+
|
2944 |
+
|
2945 |
+
class _ReparseException(Exception):
|
2946 |
+
pass
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-310.pyc
ADDED
Binary file (863 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-310.pyc
ADDED
Binary file (2.57 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/html5parser.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/serializer.py
ADDED
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
from pip._vendor.six import text_type
|
3 |
+
|
4 |
+
import re
|
5 |
+
|
6 |
+
from codecs import register_error, xmlcharrefreplace_errors
|
7 |
+
|
8 |
+
from .constants import voidElements, booleanAttributes, spaceCharacters
|
9 |
+
from .constants import rcdataElements, entities, xmlEntities
|
10 |
+
from . import treewalkers, _utils
|
11 |
+
from xml.sax.saxutils import escape
|
12 |
+
|
13 |
+
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
|
14 |
+
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
|
15 |
+
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
|
16 |
+
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
|
17 |
+
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
|
18 |
+
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
|
19 |
+
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
|
20 |
+
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
|
21 |
+
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
|
22 |
+
"\u3000]")
|
23 |
+
|
24 |
+
|
25 |
+
_encode_entity_map = {}
|
26 |
+
_is_ucs4 = len("\U0010FFFF") == 1
|
27 |
+
for k, v in list(entities.items()):
|
28 |
+
# skip multi-character entities
|
29 |
+
if ((_is_ucs4 and len(v) > 1) or
|
30 |
+
(not _is_ucs4 and len(v) > 2)):
|
31 |
+
continue
|
32 |
+
if v != "&":
|
33 |
+
if len(v) == 2:
|
34 |
+
v = _utils.surrogatePairToCodepoint(v)
|
35 |
+
else:
|
36 |
+
v = ord(v)
|
37 |
+
if v not in _encode_entity_map or k.islower():
|
38 |
+
# prefer < over < and similarly for &, >, etc.
|
39 |
+
_encode_entity_map[v] = k
|
40 |
+
|
41 |
+
|
42 |
+
def htmlentityreplace_errors(exc):
|
43 |
+
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
|
44 |
+
res = []
|
45 |
+
codepoints = []
|
46 |
+
skip = False
|
47 |
+
for i, c in enumerate(exc.object[exc.start:exc.end]):
|
48 |
+
if skip:
|
49 |
+
skip = False
|
50 |
+
continue
|
51 |
+
index = i + exc.start
|
52 |
+
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
|
53 |
+
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
|
54 |
+
skip = True
|
55 |
+
else:
|
56 |
+
codepoint = ord(c)
|
57 |
+
codepoints.append(codepoint)
|
58 |
+
for cp in codepoints:
|
59 |
+
e = _encode_entity_map.get(cp)
|
60 |
+
if e:
|
61 |
+
res.append("&")
|
62 |
+
res.append(e)
|
63 |
+
if not e.endswith(";"):
|
64 |
+
res.append(";")
|
65 |
+
else:
|
66 |
+
res.append("&#x%s;" % (hex(cp)[2:]))
|
67 |
+
return ("".join(res), exc.end)
|
68 |
+
else:
|
69 |
+
return xmlcharrefreplace_errors(exc)
|
70 |
+
|
71 |
+
|
72 |
+
register_error("htmlentityreplace", htmlentityreplace_errors)
|
73 |
+
|
74 |
+
|
75 |
+
def serialize(input, tree="etree", encoding=None, **serializer_opts):
|
76 |
+
"""Serializes the input token stream using the specified treewalker
|
77 |
+
|
78 |
+
:arg input: the token stream to serialize
|
79 |
+
|
80 |
+
:arg tree: the treewalker to use
|
81 |
+
|
82 |
+
:arg encoding: the encoding to use
|
83 |
+
|
84 |
+
:arg serializer_opts: any options to pass to the
|
85 |
+
:py:class:`html5lib.serializer.HTMLSerializer` that gets created
|
86 |
+
|
87 |
+
:returns: the tree serialized as a string
|
88 |
+
|
89 |
+
Example:
|
90 |
+
|
91 |
+
>>> from html5lib.html5parser import parse
|
92 |
+
>>> from html5lib.serializer import serialize
|
93 |
+
>>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
|
94 |
+
>>> serialize(token_stream, omit_optional_tags=False)
|
95 |
+
'<html><head></head><body><p>Hi!</p></body></html>'
|
96 |
+
|
97 |
+
"""
|
98 |
+
# XXX: Should we cache this?
|
99 |
+
walker = treewalkers.getTreeWalker(tree)
|
100 |
+
s = HTMLSerializer(**serializer_opts)
|
101 |
+
return s.render(walker(input), encoding)
|
102 |
+
|
103 |
+
|
104 |
+
class HTMLSerializer(object):
|
105 |
+
|
106 |
+
# attribute quoting options
|
107 |
+
quote_attr_values = "legacy" # be secure by default
|
108 |
+
quote_char = '"'
|
109 |
+
use_best_quote_char = True
|
110 |
+
|
111 |
+
# tag syntax options
|
112 |
+
omit_optional_tags = True
|
113 |
+
minimize_boolean_attributes = True
|
114 |
+
use_trailing_solidus = False
|
115 |
+
space_before_trailing_solidus = True
|
116 |
+
|
117 |
+
# escaping options
|
118 |
+
escape_lt_in_attrs = False
|
119 |
+
escape_rcdata = False
|
120 |
+
resolve_entities = True
|
121 |
+
|
122 |
+
# miscellaneous options
|
123 |
+
alphabetical_attributes = False
|
124 |
+
inject_meta_charset = True
|
125 |
+
strip_whitespace = False
|
126 |
+
sanitize = False
|
127 |
+
|
128 |
+
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
|
129 |
+
"omit_optional_tags", "minimize_boolean_attributes",
|
130 |
+
"use_trailing_solidus", "space_before_trailing_solidus",
|
131 |
+
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
|
132 |
+
"alphabetical_attributes", "inject_meta_charset",
|
133 |
+
"strip_whitespace", "sanitize")
|
134 |
+
|
135 |
+
def __init__(self, **kwargs):
|
136 |
+
"""Initialize HTMLSerializer
|
137 |
+
|
138 |
+
:arg inject_meta_charset: Whether or not to inject the meta charset.
|
139 |
+
|
140 |
+
Defaults to ``True``.
|
141 |
+
|
142 |
+
:arg quote_attr_values: Whether to quote attribute values that don't
|
143 |
+
require quoting per legacy browser behavior (``"legacy"``), when
|
144 |
+
required by the standard (``"spec"``), or always (``"always"``).
|
145 |
+
|
146 |
+
Defaults to ``"legacy"``.
|
147 |
+
|
148 |
+
:arg quote_char: Use given quote character for attribute quoting.
|
149 |
+
|
150 |
+
Defaults to ``"`` which will use double quotes unless attribute
|
151 |
+
value contains a double quote, in which case single quotes are
|
152 |
+
used.
|
153 |
+
|
154 |
+
:arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
|
155 |
+
values.
|
156 |
+
|
157 |
+
Defaults to ``False``.
|
158 |
+
|
159 |
+
:arg escape_rcdata: Whether to escape characters that need to be
|
160 |
+
escaped within normal elements within rcdata elements such as
|
161 |
+
style.
|
162 |
+
|
163 |
+
Defaults to ``False``.
|
164 |
+
|
165 |
+
:arg resolve_entities: Whether to resolve named character entities that
|
166 |
+
appear in the source tree. The XML predefined entities < >
|
167 |
+
& " ' are unaffected by this setting.
|
168 |
+
|
169 |
+
Defaults to ``True``.
|
170 |
+
|
171 |
+
:arg strip_whitespace: Whether to remove semantically meaningless
|
172 |
+
whitespace. (This compresses all whitespace to a single space
|
173 |
+
except within ``pre``.)
|
174 |
+
|
175 |
+
Defaults to ``False``.
|
176 |
+
|
177 |
+
:arg minimize_boolean_attributes: Shortens boolean attributes to give
|
178 |
+
just the attribute value, for example::
|
179 |
+
|
180 |
+
<input disabled="disabled">
|
181 |
+
|
182 |
+
becomes::
|
183 |
+
|
184 |
+
<input disabled>
|
185 |
+
|
186 |
+
Defaults to ``True``.
|
187 |
+
|
188 |
+
:arg use_trailing_solidus: Includes a close-tag slash at the end of the
|
189 |
+
start tag of void elements (empty elements whose end tag is
|
190 |
+
forbidden). E.g. ``<hr/>``.
|
191 |
+
|
192 |
+
Defaults to ``False``.
|
193 |
+
|
194 |
+
:arg space_before_trailing_solidus: Places a space immediately before
|
195 |
+
the closing slash in a tag using a trailing solidus. E.g.
|
196 |
+
``<hr />``. Requires ``use_trailing_solidus=True``.
|
197 |
+
|
198 |
+
Defaults to ``True``.
|
199 |
+
|
200 |
+
:arg sanitize: Strip all unsafe or unknown constructs from output.
|
201 |
+
See :py:class:`html5lib.filters.sanitizer.Filter`.
|
202 |
+
|
203 |
+
Defaults to ``False``.
|
204 |
+
|
205 |
+
:arg omit_optional_tags: Omit start/end tags that are optional.
|
206 |
+
|
207 |
+
Defaults to ``True``.
|
208 |
+
|
209 |
+
:arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
|
210 |
+
|
211 |
+
Defaults to ``False``.
|
212 |
+
|
213 |
+
"""
|
214 |
+
unexpected_args = frozenset(kwargs) - frozenset(self.options)
|
215 |
+
if len(unexpected_args) > 0:
|
216 |
+
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
|
217 |
+
if 'quote_char' in kwargs:
|
218 |
+
self.use_best_quote_char = False
|
219 |
+
for attr in self.options:
|
220 |
+
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
|
221 |
+
self.errors = []
|
222 |
+
self.strict = False
|
223 |
+
|
224 |
+
def encode(self, string):
|
225 |
+
assert(isinstance(string, text_type))
|
226 |
+
if self.encoding:
|
227 |
+
return string.encode(self.encoding, "htmlentityreplace")
|
228 |
+
else:
|
229 |
+
return string
|
230 |
+
|
231 |
+
def encodeStrict(self, string):
|
232 |
+
assert(isinstance(string, text_type))
|
233 |
+
if self.encoding:
|
234 |
+
return string.encode(self.encoding, "strict")
|
235 |
+
else:
|
236 |
+
return string
|
237 |
+
|
238 |
+
def serialize(self, treewalker, encoding=None):
|
239 |
+
# pylint:disable=too-many-nested-blocks
|
240 |
+
self.encoding = encoding
|
241 |
+
in_cdata = False
|
242 |
+
self.errors = []
|
243 |
+
|
244 |
+
if encoding and self.inject_meta_charset:
|
245 |
+
from .filters.inject_meta_charset import Filter
|
246 |
+
treewalker = Filter(treewalker, encoding)
|
247 |
+
# Alphabetical attributes is here under the assumption that none of
|
248 |
+
# the later filters add or change order of attributes; it needs to be
|
249 |
+
# before the sanitizer so escaped elements come out correctly
|
250 |
+
if self.alphabetical_attributes:
|
251 |
+
from .filters.alphabeticalattributes import Filter
|
252 |
+
treewalker = Filter(treewalker)
|
253 |
+
# WhitespaceFilter should be used before OptionalTagFilter
|
254 |
+
# for maximum efficiently of this latter filter
|
255 |
+
if self.strip_whitespace:
|
256 |
+
from .filters.whitespace import Filter
|
257 |
+
treewalker = Filter(treewalker)
|
258 |
+
if self.sanitize:
|
259 |
+
from .filters.sanitizer import Filter
|
260 |
+
treewalker = Filter(treewalker)
|
261 |
+
if self.omit_optional_tags:
|
262 |
+
from .filters.optionaltags import Filter
|
263 |
+
treewalker = Filter(treewalker)
|
264 |
+
|
265 |
+
for token in treewalker:
|
266 |
+
type = token["type"]
|
267 |
+
if type == "Doctype":
|
268 |
+
doctype = "<!DOCTYPE %s" % token["name"]
|
269 |
+
|
270 |
+
if token["publicId"]:
|
271 |
+
doctype += ' PUBLIC "%s"' % token["publicId"]
|
272 |
+
elif token["systemId"]:
|
273 |
+
doctype += " SYSTEM"
|
274 |
+
if token["systemId"]:
|
275 |
+
if token["systemId"].find('"') >= 0:
|
276 |
+
if token["systemId"].find("'") >= 0:
|
277 |
+
self.serializeError("System identifier contains both single and double quote characters")
|
278 |
+
quote_char = "'"
|
279 |
+
else:
|
280 |
+
quote_char = '"'
|
281 |
+
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
|
282 |
+
|
283 |
+
doctype += ">"
|
284 |
+
yield self.encodeStrict(doctype)
|
285 |
+
|
286 |
+
elif type in ("Characters", "SpaceCharacters"):
|
287 |
+
if type == "SpaceCharacters" or in_cdata:
|
288 |
+
if in_cdata and token["data"].find("</") >= 0:
|
289 |
+
self.serializeError("Unexpected </ in CDATA")
|
290 |
+
yield self.encode(token["data"])
|
291 |
+
else:
|
292 |
+
yield self.encode(escape(token["data"]))
|
293 |
+
|
294 |
+
elif type in ("StartTag", "EmptyTag"):
|
295 |
+
name = token["name"]
|
296 |
+
yield self.encodeStrict("<%s" % name)
|
297 |
+
if name in rcdataElements and not self.escape_rcdata:
|
298 |
+
in_cdata = True
|
299 |
+
elif in_cdata:
|
300 |
+
self.serializeError("Unexpected child element of a CDATA element")
|
301 |
+
for (_, attr_name), attr_value in token["data"].items():
|
302 |
+
# TODO: Add namespace support here
|
303 |
+
k = attr_name
|
304 |
+
v = attr_value
|
305 |
+
yield self.encodeStrict(' ')
|
306 |
+
|
307 |
+
yield self.encodeStrict(k)
|
308 |
+
if not self.minimize_boolean_attributes or \
|
309 |
+
(k not in booleanAttributes.get(name, tuple()) and
|
310 |
+
k not in booleanAttributes.get("", tuple())):
|
311 |
+
yield self.encodeStrict("=")
|
312 |
+
if self.quote_attr_values == "always" or len(v) == 0:
|
313 |
+
quote_attr = True
|
314 |
+
elif self.quote_attr_values == "spec":
|
315 |
+
quote_attr = _quoteAttributeSpec.search(v) is not None
|
316 |
+
elif self.quote_attr_values == "legacy":
|
317 |
+
quote_attr = _quoteAttributeLegacy.search(v) is not None
|
318 |
+
else:
|
319 |
+
raise ValueError("quote_attr_values must be one of: "
|
320 |
+
"'always', 'spec', or 'legacy'")
|
321 |
+
v = v.replace("&", "&")
|
322 |
+
if self.escape_lt_in_attrs:
|
323 |
+
v = v.replace("<", "<")
|
324 |
+
if quote_attr:
|
325 |
+
quote_char = self.quote_char
|
326 |
+
if self.use_best_quote_char:
|
327 |
+
if "'" in v and '"' not in v:
|
328 |
+
quote_char = '"'
|
329 |
+
elif '"' in v and "'" not in v:
|
330 |
+
quote_char = "'"
|
331 |
+
if quote_char == "'":
|
332 |
+
v = v.replace("'", "'")
|
333 |
+
else:
|
334 |
+
v = v.replace('"', """)
|
335 |
+
yield self.encodeStrict(quote_char)
|
336 |
+
yield self.encode(v)
|
337 |
+
yield self.encodeStrict(quote_char)
|
338 |
+
else:
|
339 |
+
yield self.encode(v)
|
340 |
+
if name in voidElements and self.use_trailing_solidus:
|
341 |
+
if self.space_before_trailing_solidus:
|
342 |
+
yield self.encodeStrict(" /")
|
343 |
+
else:
|
344 |
+
yield self.encodeStrict("/")
|
345 |
+
yield self.encode(">")
|
346 |
+
|
347 |
+
elif type == "EndTag":
|
348 |
+
name = token["name"]
|
349 |
+
if name in rcdataElements:
|
350 |
+
in_cdata = False
|
351 |
+
elif in_cdata:
|
352 |
+
self.serializeError("Unexpected child element of a CDATA element")
|
353 |
+
yield self.encodeStrict("</%s>" % name)
|
354 |
+
|
355 |
+
elif type == "Comment":
|
356 |
+
data = token["data"]
|
357 |
+
if data.find("--") >= 0:
|
358 |
+
self.serializeError("Comment contains --")
|
359 |
+
yield self.encodeStrict("<!--%s-->" % token["data"])
|
360 |
+
|
361 |
+
elif type == "Entity":
|
362 |
+
name = token["name"]
|
363 |
+
key = name + ";"
|
364 |
+
if key not in entities:
|
365 |
+
self.serializeError("Entity %s not recognized" % name)
|
366 |
+
if self.resolve_entities and key not in xmlEntities:
|
367 |
+
data = entities[key]
|
368 |
+
else:
|
369 |
+
data = "&%s;" % name
|
370 |
+
yield self.encodeStrict(data)
|
371 |
+
|
372 |
+
else:
|
373 |
+
self.serializeError(token["data"])
|
374 |
+
|
375 |
+
def render(self, treewalker, encoding=None):
|
376 |
+
"""Serializes the stream from the treewalker into a string
|
377 |
+
|
378 |
+
:arg treewalker: the treewalker to serialize
|
379 |
+
|
380 |
+
:arg encoding: the string encoding to use
|
381 |
+
|
382 |
+
:returns: the serialized tree
|
383 |
+
|
384 |
+
Example:
|
385 |
+
|
386 |
+
>>> from html5lib import parse, getTreeWalker
|
387 |
+
>>> from html5lib.serializer import HTMLSerializer
|
388 |
+
>>> token_stream = parse('<html><body>Hi!</body></html>')
|
389 |
+
>>> walker = getTreeWalker('etree')
|
390 |
+
>>> serializer = HTMLSerializer(omit_optional_tags=False)
|
391 |
+
>>> serializer.render(walker(token_stream))
|
392 |
+
'<html><head></head><body>Hi!</body></html>'
|
393 |
+
|
394 |
+
"""
|
395 |
+
if encoding:
|
396 |
+
return b"".join(list(self.serialize(treewalker, encoding)))
|
397 |
+
else:
|
398 |
+
return "".join(list(self.serialize(treewalker)))
|
399 |
+
|
400 |
+
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
|
401 |
+
# XXX The idea is to make data mandatory.
|
402 |
+
self.errors.append(data)
|
403 |
+
if self.strict:
|
404 |
+
raise SerializeError
|
405 |
+
|
406 |
+
|
407 |
+
class SerializeError(Exception):
    """Error in serialized tree (raised when the serializer runs in strict mode)."""
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tree adapters let you convert from one tree structure to another
|
2 |
+
|
3 |
+
Example:
|
4 |
+
|
5 |
+
.. code-block:: python
|
6 |
+
|
7 |
+
from pip._vendor import html5lib
|
8 |
+
from pip._vendor.html5lib.treeadapters import genshi
|
9 |
+
|
10 |
+
doc = '<html><body>Hi!</body></html>'
|
11 |
+
treebuilder = html5lib.getTreeBuilder('etree')
|
12 |
+
parser = html5lib.HTMLParser(tree=treebuilder)
|
13 |
+
tree = parser.parse(doc)
|
14 |
+
TreeWalker = html5lib.getTreeWalker('etree')
|
15 |
+
|
16 |
+
genshi_tree = genshi.to_genshi(TreeWalker(tree))
|
17 |
+
|
18 |
+
"""
|
19 |
+
from __future__ import absolute_import, division, unicode_literals
|
20 |
+
|
21 |
+
from . import sax
|
22 |
+
|
23 |
+
__all__ = ["sax"]
|
24 |
+
|
25 |
+
try:
|
26 |
+
from . import genshi # noqa
|
27 |
+
except ImportError:
|
28 |
+
pass
|
29 |
+
else:
|
30 |
+
__all__.append("genshi")
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (934 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-310.pyc
ADDED
Binary file (1.55 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-310.pyc
ADDED
Binary file (1.45 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from genshi.core import QName, Attrs
|
4 |
+
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
|
5 |
+
|
6 |
+
|
7 |
+
def to_genshi(walker):
    """Convert a tree to a genshi tree

    :arg walker: the treewalker to use to walk the tree to convert it

    :returns: generator of genshi nodes

    """
    # Buffer of pending character data; consecutive Characters /
    # SpaceCharacters tokens are coalesced into a single genshi TEXT event.
    text = []
    for token in walker:
        type = token["type"]
        if type in ("Characters", "SpaceCharacters"):
            text.append(token["data"])
        elif text:
            # A non-text token ends the current text run: flush it before
            # emitting the event for this token.
            yield TEXT, "".join(text), (None, -1, -1)
            text = []

        if type in ("StartTag", "EmptyTag"):
            # Build the qualified name in Clark notation ({ns}local) when a
            # namespace is present.
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]
            attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                           for attr, value in token["data"].items()])
            yield (START, (QName(name), attrs), (None, -1, -1))
            if type == "EmptyTag":
                # An empty tag becomes START immediately followed by END, so
                # fall through to the EndTag branch below.
                type = "EndTag"

        if type == "EndTag":
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]

            yield END, QName(name), (None, -1, -1)

        elif type == "Comment":
            yield COMMENT, token["data"], (None, -1, -1)

        elif type == "Doctype":
            yield DOCTYPE, (token["name"], token["publicId"],
                            token["systemId"]), (None, -1, -1)

        else:
            pass  # FIXME: What to do?

    if text:
        # Flush any trailing character data after the stream ends.
        yield TEXT, "".join(text), (None, -1, -1)
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from xml.sax.xmlreader import AttributesNSImpl
|
4 |
+
|
5 |
+
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
|
6 |
+
|
7 |
+
prefix_mapping = {}
|
8 |
+
for prefix, localName, namespace in adjustForeignAttributes.values():
|
9 |
+
if prefix is not None:
|
10 |
+
prefix_mapping[prefix] = namespace
|
11 |
+
|
12 |
+
|
13 |
+
def to_sax(walker, handler):
    """Call SAX-like content handler based on treewalker walker

    :arg walker: the treewalker to use to walk the tree to convert it

    :arg handler: SAX handler to use

    """
    handler.startDocument()
    for pfx, ns in prefix_mapping.items():
        handler.startPrefixMapping(pfx, ns)

    for token in walker:
        token_type = token["type"]
        if token_type == "Doctype":
            # ContentHandler has no doctype event; skip.
            continue
        if token_type in ("StartTag", "EmptyTag"):
            qualified = (token["namespace"], token["name"])
            attrs = AttributesNSImpl(token["data"],
                                     unadjustForeignAttributes)
            handler.startElementNS(qualified, token["name"], attrs)
            if token_type == "EmptyTag":
                # Empty tags are emitted as an immediate start/end pair.
                handler.endElementNS(qualified, token["name"])
        elif token_type == "EndTag":
            handler.endElementNS((token["namespace"], token["name"]),
                                 token["name"])
        elif token_type in ("Characters", "SpaceCharacters"):
            handler.characters(token["data"])
        elif token_type == "Comment":
            pass
        else:
            assert False, "Unknown token type"

    for pfx, ns in prefix_mapping.items():
        handler.endPrefixMapping(pfx)
    handler.endDocument()
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-310.pyc
ADDED
Binary file (9.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
ADDED
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""A collection of modules for iterating through different kinds of
|
2 |
+
tree, generating tokens identical to those produced by the tokenizer
|
3 |
+
module.
|
4 |
+
|
5 |
+
To create a tree walker for a new type of tree, you need to
|
6 |
+
implement a tree walker object (called TreeWalker by convention) that
|
7 |
+
implements a 'serialize' method which takes a tree as sole argument and
|
8 |
+
returns an iterator which generates tokens.
|
9 |
+
"""
|
10 |
+
|
11 |
+
from __future__ import absolute_import, division, unicode_literals
|
12 |
+
|
13 |
+
from .. import constants
|
14 |
+
from .._utils import default_etree
|
15 |
+
|
16 |
+
__all__ = ["getTreeWalker", "pprint"]
|
17 |
+
|
18 |
+
treeWalkerCache = {}
|
19 |
+
|
20 |
+
|
21 |
+
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    :arg str treeType: the name of the tree type required (case-insensitive).
        Supported values are:

        * "dom": The xml.dom.minidom DOM implementation
        * "etree": A generic walker for tree implementations exposing an
          elementtree-like interface (known to work with ElementTree,
          cElementTree and lxml.etree).
        * "lxml": Optimized walker for lxml.etree
        * "genshi": a Genshi stream

    :arg implementation: A module implementing the tree type e.g.
        xml.etree.ElementTree or cElementTree (Currently applies to the "etree"
        tree type only).

    :arg kwargs: keyword arguments passed to the etree walker--for other
        walkers, this has no effect

    :returns: a TreeWalker class

    """
    treeType = treeType.lower()
    if treeType in treeWalkerCache:
        return treeWalkerCache[treeType]

    if treeType == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        # XXX: NEVER cache here, caching is done in the etree submodule
        return etree.getETreeModule(implementation, **kwargs).TreeWalker

    if treeType == "dom":
        from . import dom
        treeWalkerCache[treeType] = dom.TreeWalker
    elif treeType == "genshi":
        from . import genshi
        treeWalkerCache[treeType] = genshi.TreeWalker
    elif treeType == "lxml":
        from . import etree_lxml
        treeWalkerCache[treeType] = etree_lxml.TreeWalker
    # Unknown tree types fall through and return None, as before.
    return treeWalkerCache.get(treeType)
|
63 |
+
|
64 |
+
|
65 |
+
def concatenateCharacterTokens(tokens):
    """Merge runs of character tokens into single ``Characters`` tokens.

    Consecutive ``Characters``/``SpaceCharacters`` tokens are buffered and
    emitted as one combined ``Characters`` token; every other token is
    passed through unchanged.

    :arg tokens: iterable of treewalker tokens

    :returns: generator of tokens
    """
    buffered = []
    for token in tokens:
        if token["type"] in ("Characters", "SpaceCharacters"):
            buffered.append(token["data"])
            continue
        if buffered:
            # Flush the accumulated text before the non-text token.
            yield {"type": "Characters", "data": "".join(buffered)}
            buffered = []
        yield token
    if buffered:
        # Flush any trailing text after the stream ends.
        yield {"type": "Characters", "data": "".join(buffered)}
|
78 |
+
|
79 |
+
|
80 |
+
def pprint(walker):
    """Pretty printer for tree walkers

    Takes a TreeWalker instance and pretty prints the output of walking the tree.

    :arg walker: a TreeWalker instance

    :returns: the pretty-printed tree as a string, one node per line

    """
    output = []
    # Current indentation (two spaces per tree depth level).
    indent = 0
    for token in concatenateCharacterTokens(walker):
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # tag name
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                # Non-HTML namespaces are shown as a "prefix name" pair,
                # using the well-known prefix when one exists.
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing
            if type == "EmptyTag":
                indent -= 2

        elif type == "EndTag":
            indent -= 2

        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))

        elif type == "Doctype":
            # Four doctype shapes: name+publicId, name+systemId only,
            # bare name, and nameless.
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))

        elif type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))

        elif type == "SpaceCharacters":
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"

        else:
            raise ValueError("Unknown token type, %s" % type)

    return "\n".join(output)
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-310.pyc
ADDED
Binary file (1.71 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-310.pyc
ADDED
Binary file (3.47 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-310.pyc
ADDED
Binary file (6.55 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/base.py
ADDED
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from xml.dom import Node
|
4 |
+
from ..constants import namespaces, voidElements, spaceCharacters
|
5 |
+
|
6 |
+
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
|
7 |
+
"TreeWalker", "NonRecursiveTreeWalker"]
|
8 |
+
|
9 |
+
DOCUMENT = Node.DOCUMENT_NODE
|
10 |
+
DOCTYPE = Node.DOCUMENT_TYPE_NODE
|
11 |
+
TEXT = Node.TEXT_NODE
|
12 |
+
ELEMENT = Node.ELEMENT_NODE
|
13 |
+
COMMENT = Node.COMMENT_NODE
|
14 |
+
ENTITY = Node.ENTITY_NODE
|
15 |
+
UNKNOWN = "<#UNKNOWN#>"
|
16 |
+
|
17 |
+
spaceCharacters = "".join(spaceCharacters)
|
18 |
+
|
19 |
+
|
20 |
+
class TreeWalker(object):
    """Walks a tree yielding tokens

    Tokens are dicts that all have a ``type`` field specifying the type of the
    token.

    """
    def __init__(self, tree):
        """Creates a TreeWalker

        :arg tree: the tree to walk

        """
        self.tree = tree

    def __iter__(self):
        # Subclasses must yield token dicts for the whole tree.
        raise NotImplementedError

    def error(self, msg):
        """Generates an error token with the given message

        :arg msg: the error message

        :returns: SerializeError token

        """
        return {"type": "SerializeError", "data": msg}

    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Generates an EmptyTag token

        :arg namespace: the namespace of the token--can be ``None``

        :arg name: the name of the element

        :arg attrs: the attributes of the element as a dict

        :arg hasChildren: whether or not to yield a SerializationError because
            this tag shouldn't have children

        :returns: EmptyTag token

        """
        yield {"type": "EmptyTag", "name": name,
               "namespace": namespace,
               "data": attrs}
        if hasChildren:
            # Void elements must not have children; flag it as an error token.
            yield self.error("Void element has children")

    def startTag(self, namespace, name, attrs):
        """Generates a StartTag token

        :arg namespace: the namespace of the token--can be ``None``

        :arg name: the name of the element

        :arg attrs: the attributes of the element as a dict

        :returns: StartTag token

        """
        return {"type": "StartTag",
                "name": name,
                "namespace": namespace,
                "data": attrs}

    def endTag(self, namespace, name):
        """Generates an EndTag token

        :arg namespace: the namespace of the token--can be ``None``

        :arg name: the name of the element

        :returns: EndTag token

        """
        return {"type": "EndTag",
                "name": name,
                "namespace": namespace}

    def text(self, data):
        """Generates SpaceCharacters and Characters tokens

        Depending on what's in the data, this generates one or more
        ``SpaceCharacters`` and ``Characters`` tokens.

        For example:

        >>> from html5lib.treewalkers.base import TreeWalker
        >>> # Give it an empty tree just so it instantiates
        >>> walker = TreeWalker([])
        >>> list(walker.text(''))
        []
        >>> list(walker.text(' '))
        [{u'data': ' ', u'type': u'SpaceCharacters'}]
        >>> list(walker.text(' abc '))  # doctest: +NORMALIZE_WHITESPACE
        [{u'data': ' ', u'type': u'SpaceCharacters'},
        {u'data': u'abc', u'type': u'Characters'},
        {u'data': u' ', u'type': u'SpaceCharacters'}]

        :arg data: the text data

        :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens

        """
        data = data
        # Split off leading whitespace: `left` is the stripped prefix.
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        data = middle
        # Split off trailing whitespace: `right` is the stripped suffix.
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}

    def comment(self, data):
        """Generates a Comment token

        :arg data: the comment

        :returns: Comment token

        """
        return {"type": "Comment", "data": data}

    def doctype(self, name, publicId=None, systemId=None):
        """Generates a Doctype token

        :arg name: the doctype name

        :arg publicId: the doctype public identifier, if any

        :arg systemId: the doctype system identifier, if any

        :returns: the Doctype token

        """
        return {"type": "Doctype",
                "name": name,
                "publicId": publicId,
                "systemId": systemId}

    def entity(self, name):
        """Generates an Entity token

        :arg name: the entity name

        :returns: an Entity token

        """
        return {"type": "Entity", "name": name}

    def unknown(self, nodeType):
        """Handles unknown node types"""
        return self.error("Unknown node type: " + nodeType)
|
178 |
+
|
179 |
+
|
180 |
+
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker that traverses the tree iteratively.

    Subclasses supply the four navigation primitives below; ``__iter__``
    drives the depth-first walk without recursion.
    """
    def getNodeDetails(self, node):
        # Return a tuple whose first item is one of the node-type constants
        # (DOCUMENT, DOCTYPE, TEXT, ELEMENT, COMMENT, ENTITY, UNKNOWN)
        # followed by type-specific details.
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False

            if type == DOCTYPE:
                yield self.doctype(*details)

            elif type == TEXT:
                for token in self.text(*details):
                    yield token

            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                # HTML void elements are emitted as EmptyTag (and their
                # children, which shouldn't exist, are not descended into).
                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)

            elif type == COMMENT:
                yield self.comment(details[0])

            elif type == ENTITY:
                yield self.entity(details[0])

            elif type == DOCUMENT:
                hasChildren = True

            else:
                yield self.unknown(details[0])

            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None

            if firstChild is not None:
                # Descend into the first child.
                currentNode = firstChild
            else:
                # No children: emit end tags while climbing back up until a
                # next sibling is found (or the root is reached).
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        # Void HTML elements got no StartTag, so no EndTag.
                        if (namespace and namespace != namespaces["html"]) or name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/dom.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from xml.dom import Node
|
4 |
+
|
5 |
+
from . import base
|
6 |
+
|
7 |
+
|
8 |
+
class TreeWalker(base.NonRecursiveTreeWalker):
    """Non-recursive tree walker over ``xml.dom`` node trees."""

    def getNodeDetails(self, node):
        """Map a DOM node to the walker's (type, details...) tuple."""
        node_type = node.nodeType
        if node_type == Node.DOCUMENT_TYPE_NODE:
            return base.DOCTYPE, node.name, node.publicId, node.systemId

        if node_type in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
            return base.TEXT, node.nodeValue

        if node_type == Node.ELEMENT_NODE:
            # Collect attributes keyed by (namespace, local name);
            # un-namespaced attributes use a None namespace.
            attrs = {}
            for attr_name in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr_name)
                if attr.namespaceURI:
                    attrs[(attr.namespaceURI, attr.localName)] = attr.value
                else:
                    attrs[(None, attr.name)] = attr.value
            return (base.ELEMENT, node.namespaceURI, node.nodeName,
                    attrs, node.hasChildNodes())

        if node_type == Node.COMMENT_NODE:
            return base.COMMENT, node.nodeValue

        if node_type in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
            return (base.DOCUMENT,)

        return base.UNKNOWN, node.nodeType

    def getFirstChild(self, node):
        return node.firstChild

    def getNextSibling(self, node):
        return node.nextSibling

    def getParentNode(self, node):
        return node.parentNode
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/etree.py
ADDED
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from collections import OrderedDict
|
4 |
+
import re
|
5 |
+
|
6 |
+
from pip._vendor.six import string_types
|
7 |
+
|
8 |
+
from . import base
|
9 |
+
from .._utils import moduleFactoryFactory
|
10 |
+
|
11 |
+
tag_regexp = re.compile("{([^}]*)}(.*)")
|
12 |
+
|
13 |
+
|
14 |
+
def getETreeBuilder(ElementTreeImplementation):
    """Build a TreeWalker bound to a particular ElementTree implementation.

    :arg ElementTreeImplementation: the ElementTree-compatible module to use

    :returns: ``locals()`` containing the generated ``TreeWalker`` class
        (consumed by ``moduleFactoryFactory`` below)
    """
    ElementTree = ElementTreeImplementation
    # Comment nodes are detected by comparing tags against the tag of a
    # freshly created comment element.
    ElementTreeCommentType = ElementTree.Comment("asd").tag

    class TreeWalker(base.NonRecursiveTreeWalker):  # pylint:disable=unused-variable
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:

        1. The current element

        2. The index of the element relative to its parent

        3. A stack of ancestor elements

        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            if isinstance(node, tuple):  # It might be the root Element
                elt, _, _, flag = node
                if flag in ("text", "tail"):
                    # A text "node" is the .text or .tail string of elt.
                    return base.TEXT, getattr(elt, flag)
                else:
                    node = elt

            if not(hasattr(node, "tag")):
                # An ElementTree wrapper rather than an Element.
                node = node.getroot()

            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (base.DOCUMENT,)

            elif node.tag == "<!DOCTYPE>":
                return (base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))

            elif node.tag == ElementTreeCommentType:
                return base.COMMENT, node.text

            else:
                assert isinstance(node.tag, string_types), type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    # Attribute names may also be in {ns}local form.
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                return (base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)

        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None

            if flag in ("text", "tail"):
                # Text pseudo-nodes have no children.
                return None
            else:
                if element.text:
                    # .text comes before any child elements.
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None

        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                # After .text the next sibling is the first child element.
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # After an element comes its .tail (if any), then the next
                # sibling element in the parent.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None

        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                if not parents:
                    return element
                else:
                    # The owner of a .text node is the element itself.
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    assert list(parents[-1]).count(parent) == 1
                    return parent, list(parents[-1]).index(parent), parents, None

    return locals()
|
129 |
+
|
130 |
+
|
131 |
+
getETreeModule = moduleFactoryFactory(getETreeBuilder)
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py
ADDED
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
from pip._vendor.six import text_type
|
3 |
+
|
4 |
+
from collections import OrderedDict
|
5 |
+
|
6 |
+
from lxml import etree
|
7 |
+
from ..treebuilders.etree import tag_regexp
|
8 |
+
|
9 |
+
from . import base
|
10 |
+
|
11 |
+
from .. import _ihatexml
|
12 |
+
|
13 |
+
|
14 |
+
def ensure_str(s):
    """Return *s* as a text string, strictly ASCII-decoding bytes.

    ``None`` passes through unchanged; text is returned as-is.
    """
    if s is None:
        return None
    if isinstance(s, text_type):
        return s
    return s.decode("ascii", "strict")
|
21 |
+
|
22 |
+
|
23 |
+
class Root(object):
    # Wraps an lxml document (or root element) so the walker can treat the
    # doctype, the root element and any top-level siblings (e.g. comments
    # before/after the root) uniformly as "children".
    def __init__(self, et):
        self.elementtree = et
        self.children = []

        try:
            # Plain Elements have no docinfo, hence the AttributeError guard.
            if et.docinfo.internalDTD:
                self.children.append(Doctype(self,
                                             ensure_str(et.docinfo.root_name),
                                             ensure_str(et.docinfo.public_id),
                                             ensure_str(et.docinfo.system_url)))
        except AttributeError:
            pass

        try:
            node = et.getroot()
        except AttributeError:
            # `et` is already an element, not an ElementTree wrapper.
            node = et

        # Walk to the first top-level sibling, then collect all of them
        # in document order.
        while node.getprevious() is not None:
            node = node.getprevious()
        while node is not None:
            self.children.append(node)
            node = node.getnext()

        # The root pseudo-node carries no character data.
        self.text = None
        self.tail = None

    def __getitem__(self, key):
        return self.children[key]

    def getnext(self):
        # The root has no siblings.
        return None

    def __len__(self):
        return 1
|
59 |
+
|
60 |
+
|
61 |
+
class Doctype(object):
    """Pseudo-node representing a document's doctype for the lxml walker."""

    def __init__(self, root_node, name, public_id, system_id):
        self.root_node = root_node
        self.name = name
        self.public_id = public_id
        self.system_id = system_id
        # Doctype nodes carry no character data.
        self.text = None
        self.tail = None

    def getnext(self):
        # The doctype is the root's first child, so its next sibling is
        # the root's second child.
        return self.root_node.children[1]
|
73 |
+
|
74 |
+
|
75 |
+
class FragmentRoot(Root):
    """Root pseudo-node over a list of fragment children."""

    def __init__(self, children):
        # Wrap every child so each one can find its siblings via this root.
        self.children = [FragmentWrapper(self, child) for child in children]
        self.text = self.tail = None

    def getnext(self):
        # A fragment root has no siblings.
        return None
|
82 |
+
|
83 |
+
|
84 |
+
class FragmentWrapper(object):
    """Proxy around one fragment child.

    Caches decoded ``text``/``tail`` up front (plain strings in a
    fragment have neither attribute) and delegates any other attribute
    access to the wrapped object.
    """

    def __init__(self, fragment_root, obj):
        self.root_node = fragment_root
        self.obj = obj
        self.text = ensure_str(self.obj.text) if hasattr(self.obj, 'text') else None
        self.tail = ensure_str(self.obj.tail) if hasattr(self.obj, 'tail') else None

    def __getattr__(self, name):
        # Only reached for attributes not set on the wrapper itself.
        return getattr(self.obj, name)

    def getnext(self):
        # Next sibling within the fragment root, or None at the end.
        peers = self.root_node.children
        pos = peers.index(self)
        return peers[pos + 1] if pos < len(peers) - 1 else None

    def __getitem__(self, key):
        return self.obj[key]

    def __bool__(self):
        return bool(self.obj)

    def getparent(self):
        # Fragment children are treated as top-level nodes.
        return None

    def __str__(self):
        return str(self.obj)

    def __unicode__(self):
        return str(self.obj)

    def __len__(self):
        return len(self.obj)
|
125 |
+
|
126 |
+
|
127 |
+
class TreeWalker(base.NonRecursiveTreeWalker):
    """Non-recursive tree walker over lxml etree documents or fragments.

    Text is not a node type in lxml (it lives in ``.text``/``.tail`` of
    elements), so text positions are represented throughout as
    ``(element, "text")`` / ``(element, "tail")`` tuples.
    """

    def __init__(self, tree):
        # pylint:disable=redefined-variable-type
        # A list means we were handed a fragment; remember its direct
        # children so getParentNode() can report them as top-level.
        if isinstance(tree, list):
            self.fragmentChildren = set(tree)
            tree = FragmentRoot(tree)
        else:
            self.fragmentChildren = set()
            tree = Root(tree)
        base.NonRecursiveTreeWalker.__init__(self, tree)
        self.filter = _ihatexml.InfosetFilter()

    def getNodeDetails(self, node):
        """Classify *node* and return the token tuple base.* expects."""
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            return base.TEXT, ensure_str(getattr(node, key))

        elif isinstance(node, Root):
            return (base.DOCUMENT,)

        elif isinstance(node, Doctype):
            return base.DOCTYPE, node.name, node.public_id, node.system_id

        elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
            # A wrapped plain string in a fragment: emit it as text.
            return base.TEXT, ensure_str(node.obj)

        elif node.tag == etree.Comment:
            return base.COMMENT, ensure_str(node.text)

        elif node.tag == etree.Entity:
            return base.ENTITY, ensure_str(node.text)[1:-1]  # strip &;

        else:
            # This is assumed to be an ordinary element
            # Split a Clark-notation tag ("{ns}local") into its parts.
            match = tag_regexp.match(ensure_str(node.tag))
            if match:
                namespace, tag = match.groups()
            else:
                namespace = None
                tag = ensure_str(node.tag)
            attrs = OrderedDict()
            for name, value in list(node.attrib.items()):
                name = ensure_str(name)
                value = ensure_str(value)
                # Attribute names may also carry a {namespace} prefix.
                match = tag_regexp.match(name)
                if match:
                    attrs[(match.group(1), match.group(2))] = value
                else:
                    attrs[(None, name)] = value
            # Last element: truthy when the element has any children
            # (sub-elements or leading text).
            return (base.ELEMENT, namespace, self.filter.fromXmlName(tag),
                    attrs, len(node) > 0 or node.text)

    def getFirstChild(self, node):
        """Return the first child: leading text wins over the first element."""
        assert not isinstance(node, tuple), "Text nodes have no children"

        assert len(node) or node.text, "Node has no children"
        if node.text:
            return (node, "text")
        else:
            return node[0]

    def getNextSibling(self, node):
        """Return the next sibling, interleaving text pseudo-nodes."""
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # After an element's leading text comes its first child.
                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
                # because node[0] might evaluate to False if it has no child element
                if len(node):
                    return node[0]
                else:
                    return None
            else:  # tail
                return node.getnext()

        # After an element comes its tail text (if any), else the next
        # sibling element.
        return (node, "tail") if node.tail else node.getnext()

    def getParentNode(self, node):
        """Return the parent, unwrapping text pseudo-nodes first."""
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # Leading text's parent is the element holding it.
                return node
            # else: fallback to "normal" processing
        elif node in self.fragmentChildren:
            # Direct fragment children are top-level: no parent.
            return None

        return node.getparent()
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from genshi.core import QName
|
4 |
+
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
|
5 |
+
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
|
6 |
+
|
7 |
+
from . import base
|
8 |
+
|
9 |
+
from ..constants import voidElements, namespaces
|
10 |
+
|
11 |
+
|
12 |
+
class TreeWalker(base.TreeWalker):
    """Tree walker over a genshi event stream.

    Genshi yields ``(kind, data, pos)`` events; this walker converts
    them into html5lib walker tokens, looking one event ahead so void
    elements can be detected.
    """

    def __iter__(self):
        # Buffer the events so we can pass in the following one
        previous = None
        for event in self.tree:
            if previous is not None:
                for token in self.tokens(previous, event):
                    yield token
            previous = event

        # Don't forget the final event!
        if previous is not None:
            for token in self.tokens(previous, None):
                yield token

    def tokens(self, event, next):
        """Convert one genshi *event* into walker tokens.

        *next* is the following event (or None at end of stream); its
        name shadows the builtin, but is kept for API compatibility.
        """
        kind, data, _ = event
        if kind == START:
            tag, attribs = data
            name = tag.localname
            namespace = tag.namespace
            # Re-key attributes as (namespace, localname) tuples.
            converted_attribs = {}
            for k, v in attribs:
                if isinstance(k, QName):
                    converted_attribs[(k.namespace, k.localname)] = v
                else:
                    converted_attribs[(None, k)] = v

            if namespace == namespaces["html"] and name in voidElements:
                # A void element whose next event is not its own END has
                # (illegal) children; emptyTag's last arg flags that.
                for token in self.emptyTag(namespace, name, converted_attribs,
                                           not next or next[0] != END or
                                           next[1] != tag):
                    yield token
            else:
                yield self.startTag(namespace, name, converted_attribs)

        elif kind == END:
            name = data.localname
            namespace = data.namespace
            # Void elements were already emitted in full at START.
            if namespace != namespaces["html"] or name not in voidElements:
                yield self.endTag(namespace, name)

        elif kind == COMMENT:
            yield self.comment(data)

        elif kind == TEXT:
            for token in self.text(data):
                yield token

        elif kind == DOCTYPE:
            yield self.doctype(*data)

        # Remaining structural events carry nothing for HTML output.
        # (NOTE(review): DOCTYPE here is unreachable -- handled above.)
        elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
                      START_CDATA, END_CDATA, PI):
            pass

        else:
            yield self.unknown(kind)
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-310.pyc
ADDED
Binary file (2.81 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-310.pyc
ADDED
Binary file (740 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-310.pyc
ADDED
Binary file (204 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-310.pyc
ADDED
Binary file (151 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.72 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-310.pyc
ADDED
Binary file (2.69 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/colors.cpython-310.pyc
ADDED
Binary file (1.48 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-310.pyc
ADDED
Binary file (1.55 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-310.pyc
ADDED
Binary file (1.38 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/progress/counter.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
|
3 |
+
# Copyright (c) 2012 Georgios Verigakis <[email protected]>
|
4 |
+
#
|
5 |
+
# Permission to use, copy, modify, and distribute this software for any
|
6 |
+
# purpose with or without fee is hereby granted, provided that the above
|
7 |
+
# copyright notice and this permission notice appear in all copies.
|
8 |
+
#
|
9 |
+
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
10 |
+
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
11 |
+
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
12 |
+
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
13 |
+
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
14 |
+
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
15 |
+
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
16 |
+
|
17 |
+
from __future__ import unicode_literals
|
18 |
+
from . import Infinite, Progress
|
19 |
+
|
20 |
+
|
21 |
+
class Counter(Infinite):
    """Infinite progress display that prints the current index."""

    def update(self):
        # Interpolate the message against this instance, then append
        # the iteration count and rewrite the line.
        prefix = self.message % self
        self.writeln(''.join([prefix, str(self.index)]))
|
26 |
+
|
27 |
+
|
28 |
+
class Countdown(Progress):
    """Bounded progress display that prints the remaining count."""

    def update(self):
        # Same rendering as Counter, but counting down via .remaining.
        prefix = self.message % self
        self.writeln(''.join([prefix, str(self.remaining)]))
|
33 |
+
|
34 |
+
|
35 |
+
class Stack(Progress):
    """Progress display drawn as a single cell filling bottom-up with
    Unicode block-element glyphs (U+2581..U+2588)."""

    # Restored from mojibake: nine phases from empty to full block.
    phases = (' ', '\u2581', '\u2582', '\u2583', '\u2584',
              '\u2585', '\u2586', '\u2587', '\u2588')

    def update(self):
        # Map fractional progress onto a phase index, clamping so that
        # progress == 1.0 still selects the last glyph.
        nphases = len(self.phases)
        i = min(nphases - 1, int(self.progress * nphases))
        message = self.message % self
        line = ''.join([message, self.phases[i]])
        self.writeln(line)
|
44 |
+
|
45 |
+
|
46 |
+
class Pie(Stack):
    """Stack variant rendered as a filling circle (U+25CB..U+25CF)."""

    # Restored from mojibake: empty, quarter, half, three-quarter, full.
    phases = ('\u25cb', '\u25d4', '\u25d1', '\u25d5', '\u25cf')
|
env-llmeval/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Pygments
|
3 |
+
~~~~~~~~
|
4 |
+
|
5 |
+
Pygments is a syntax highlighting package written in Python.
|
6 |
+
|
7 |
+
It is a generic syntax highlighter for general use in all kinds of software
|
8 |
+
such as forum systems, wikis or other applications that need to prettify
|
9 |
+
source code. Highlights are:
|
10 |
+
|
11 |
+
* a wide range of common languages and markup formats is supported
|
12 |
+
* special attention is paid to details, increasing quality by a fair amount
|
13 |
+
* support for new languages and formats are added easily
|
14 |
+
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
|
15 |
+
formats that PIL supports, and ANSI sequences
|
16 |
+
* it is usable as a command-line tool and as a library
|
17 |
+
* ... and it highlights even Brainfuck!
|
18 |
+
|
19 |
+
The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
|
20 |
+
|
21 |
+
.. _Pygments master branch:
|
22 |
+
https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
|
23 |
+
|
24 |
+
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
|
25 |
+
:license: BSD, see LICENSE for details.
|
26 |
+
"""
|
27 |
+
from io import StringIO, BytesIO
|
28 |
+
|
29 |
+
__version__ = '2.11.2'
|
30 |
+
__docformat__ = 'restructuredtext'
|
31 |
+
|
32 |
+
__all__ = ['lex', 'format', 'highlight']
|
33 |
+
|
34 |
+
|
35 |
+
def lex(code, lexer):
    """
    Lex ``code`` with ``lexer`` and return an iterable of tokens.

    ``lexer`` must be a lexer *instance*; passing the class itself
    raises a TypeError with a clearer message.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError as err:
        # Calling get_tokens on the class (not an instance) produces one
        # of these messages depending on the Python version.
        hints = ('unbound method get_tokens',
                 'missing 1 required positional argument')
        first = err.args[0]
        if isinstance(first, str) and any(h in first for h in hints):
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
|
48 |
+
|
49 |
+
|
50 |
+
def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    try:
        if outfile:
            formatter.format(tokens, outfile)
        else:
            # Formatters that declare an encoding emit bytes and need a
            # byte buffer; the rest emit text.
            buf = BytesIO() if getattr(formatter, 'encoding', None) else StringIO()
            formatter.format(tokens, buf)
            return buf.getvalue()
    except TypeError as err:
        # Calling format on the class (not an instance) produces one of
        # these messages depending on the Python version.
        first = err.args[0]
        if isinstance(first, str) and ('unbound method format' in first or
                                       'missing 1 required positional argument' in first):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
|
72 |
+
|
73 |
+
|
74 |
+
def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    # Convenience wrapper: tokenize first, then hand off to format().
    tokens = lex(code, lexer)
    return format(tokens, formatter, outfile)
|
83 |
+
|