Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__init__.py +35 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_ihatexml.py +289 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_inputstream.py +918 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_tokenizer.py +1735 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/_base.py +40 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_utils.py +159 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/constants.py +2946 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py +29 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/base.py +12 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py +73 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/lint.py +93 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/optionaltags.py +207 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/sanitizer.py +916 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/whitespace.py +38 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/html5parser.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/serializer.py +409 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py +30 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py +54 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/sax.py +50 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py +88 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/base.py +417 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/dom.py +239 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/etree.py +343 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py +392 -0
- llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py +154 -0
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__init__.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
HTML parsing library based on the `WHATWG HTML specification
|
3 |
+
<https://whatwg.org/html>`_. The parser is designed to be compatible with
|
4 |
+
existing HTML found in the wild and implements well-defined error recovery that
|
5 |
+
is largely compatible with modern desktop web browsers.
|
6 |
+
|
7 |
+
Example usage::
|
8 |
+
|
9 |
+
from pip._vendor import html5lib
|
10 |
+
with open("my_document.html", "rb") as f:
|
11 |
+
tree = html5lib.parse(f)
|
12 |
+
|
13 |
+
For convenience, this module re-exports the following names:
|
14 |
+
|
15 |
+
* :func:`~.html5parser.parse`
|
16 |
+
* :func:`~.html5parser.parseFragment`
|
17 |
+
* :class:`~.html5parser.HTMLParser`
|
18 |
+
* :func:`~.treebuilders.getTreeBuilder`
|
19 |
+
* :func:`~.treewalkers.getTreeWalker`
|
20 |
+
* :func:`~.serializer.serialize`
|
21 |
+
"""
|
22 |
+
|
23 |
+
from __future__ import absolute_import, division, unicode_literals
|
24 |
+
|
25 |
+
from .html5parser import HTMLParser, parse, parseFragment
|
26 |
+
from .treebuilders import getTreeBuilder
|
27 |
+
from .treewalkers import getTreeWalker
|
28 |
+
from .serializer import serialize
|
29 |
+
|
30 |
+
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
|
31 |
+
"getTreeWalker", "serialize"]
|
32 |
+
|
33 |
+
# this has to be at the top level, see how setup.py parses this
|
34 |
+
#: Distribution version number.
|
35 |
+
__version__ = "1.1"
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-310.pyc
ADDED
Binary file (13.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-310.pyc
ADDED
Binary file (21.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-310.pyc
ADDED
Binary file (37.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-310.pyc
ADDED
Binary file (4.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-310.pyc
ADDED
Binary file (161 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-310.pyc
ADDED
Binary file (88.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-310.pyc
ADDED
Binary file (10.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_ihatexml.py
ADDED
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
import re
|
4 |
+
import warnings
|
5 |
+
|
6 |
+
from .constants import DataLossWarning
|
7 |
+
|
8 |
+
baseChar = """
|
9 |
+
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
|
10 |
+
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
|
11 |
+
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
|
12 |
+
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
|
13 |
+
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
|
14 |
+
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
|
15 |
+
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
|
16 |
+
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
|
17 |
+
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
|
18 |
+
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
|
19 |
+
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
|
20 |
+
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
|
21 |
+
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
|
22 |
+
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
|
23 |
+
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
|
24 |
+
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
|
25 |
+
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
|
26 |
+
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
|
27 |
+
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
|
28 |
+
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
|
29 |
+
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
|
30 |
+
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
|
31 |
+
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
|
32 |
+
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
|
33 |
+
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
|
34 |
+
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
|
35 |
+
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
|
36 |
+
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
|
37 |
+
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
|
38 |
+
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
|
39 |
+
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
|
40 |
+
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
|
41 |
+
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
|
42 |
+
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
|
43 |
+
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
|
44 |
+
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
|
45 |
+
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
|
46 |
+
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
|
47 |
+
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
|
48 |
+
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
|
49 |
+
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
|
50 |
+
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
|
51 |
+
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
|
52 |
+
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
|
53 |
+
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
|
54 |
+
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
|
55 |
+
|
56 |
+
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
|
57 |
+
|
58 |
+
combiningCharacter = """
|
59 |
+
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
|
60 |
+
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
|
61 |
+
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
|
62 |
+
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
|
63 |
+
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
|
64 |
+
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
|
65 |
+
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
|
66 |
+
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
|
67 |
+
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
|
68 |
+
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
|
69 |
+
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
|
70 |
+
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
|
71 |
+
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
|
72 |
+
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
|
73 |
+
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
|
74 |
+
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
|
75 |
+
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
|
76 |
+
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
|
77 |
+
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
|
78 |
+
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
|
79 |
+
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
|
80 |
+
#x3099 | #x309A"""
|
81 |
+
|
82 |
+
digit = """
|
83 |
+
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
|
84 |
+
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
|
85 |
+
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
|
86 |
+
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
|
87 |
+
|
88 |
+
extender = """
|
89 |
+
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
|
90 |
+
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
|
91 |
+
|
92 |
+
letter = " | ".join([baseChar, ideographic])
|
93 |
+
|
94 |
+
# Without the
|
95 |
+
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
|
96 |
+
extender])
|
97 |
+
nameFirst = " | ".join([letter, "_"])
|
98 |
+
|
99 |
+
reChar = re.compile(r"#x([\d|A-F]{4,4})")
|
100 |
+
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
|
101 |
+
|
102 |
+
|
103 |
+
def charStringToList(chars):
    """Parse an XML-spec character-class production string into ranges.

    *chars* is a string of " | "-separated tokens, each either ``#xHHHH``
    (single codepoint), ``[#xHHHH-#xHHHH]`` (inclusive range), or a single
    literal character.  Returns a normalised, sorted list of
    ``[start, end]`` codepoint pairs.
    """
    ranges = []
    for token in (part.strip() for part in chars.split(" | ")):
        for pattern in (reChar, reCharRange):
            matched = pattern.match(token)
            if matched is not None:
                bounds = [hexToInt(group) for group in matched.groups()]
                if len(bounds) == 1:
                    # Single codepoint: represent as a degenerate range.
                    bounds = bounds * 2
                ranges.append(bounds)
                break
        else:
            # Neither regexp matched, so this must be a literal
            # one-character token such as "." or "_".
            assert len(token) == 1
            ranges.append([ord(token)] * 2)
    return normaliseCharList(ranges)
|
122 |
+
|
123 |
+
|
124 |
+
def normaliseCharList(charList):
    """Sort ``[start, end]`` ranges and coalesce adjacent or overlapping ones.

    Two ranges are merged when the later one starts at or before the
    earlier one's end + 1.  The returned list shares (and mutates) the
    input's sublist objects, matching the historical behaviour.
    """
    ordered = sorted(charList)
    for bounds in ordered:
        assert bounds[1] >= bounds[0]
    merged = []
    index = 0
    total = len(ordered)
    while index < total:
        current = ordered[index]
        merged.append(current)
        index += 1
        while index < total and ordered[index][0] <= current[1] + 1:
            # NOTE(review): the end is overwritten unconditionally (not
            # max()-ed), exactly as the original did; fine for the
            # sorted, spec-derived inputs this module feeds it.
            current[1] = ordered[index][1]
            index += 1
    return merged
|
138 |
+
|
139 |
+
|
140 |
+
# We don't really support characters above the BMP :(
|
141 |
+
max_unicode = int("FFFF", 16)
|
142 |
+
|
143 |
+
|
144 |
+
def missingRanges(charList):
    """Return the complement of *charList* within ``[0, max_unicode]``.

    *charList* must be a normalised (sorted, disjoint) list of
    ``[start, end]`` codepoint ranges; the result is the list of gap
    ranges between and around them.
    """
    rv = []
    # Bug fix: the original tested ``charList[0] != 0`` — comparing a
    # *list* to an int, which is always true — so an invalid [0, -1]
    # range was emitted whenever the input already started at codepoint
    # 0.  Compare the first range's start codepoint instead.
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        # Gap between the end of this range and the start of the next.
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
|
153 |
+
|
154 |
+
|
155 |
+
def listToRegexpStr(charList):
    """Render ``[start, end]`` codepoint ranges as a regexp character class.

    Single-codepoint ranges become one escaped character; wider ranges
    become ``a-b`` spans.  Returns the full ``[...]`` class string.
    """
    pieces = []
    for low, high in charList:
        if low == high:
            pieces.append(escapeRegexp(chr(low)))
        else:
            span = escapeRegexp(chr(low)) + "-" + escapeRegexp(chr(high))
            pieces.append(span)
    return "[%s]" % "".join(pieces)
|
164 |
+
|
165 |
+
|
166 |
+
def hexToInt(hex_str):
    """Interpret *hex_str* as a base-16 integer."""
    return int(hex_str, base=16)
|
168 |
+
|
169 |
+
|
170 |
+
def escapeRegexp(string):
    """Backslash-escape every regexp metacharacter in *string*.

    Kept as an explicit replace loop (not ``re.escape``) so that exactly
    this fixed set of characters — and nothing else — is escaped.
    """
    for metachar in ".^$*+?{}[]|()-":
        string = string.replace(metachar, "\\" + metachar)
    return string
|
177 |
+
|
178 |
+
# output from the above
|
179 |
+
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa
|
180 |
+
|
181 |
+
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa
|
182 |
+
|
183 |
+
# Simpler things
|
184 |
+
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]")
|
185 |
+
|
186 |
+
|
187 |
+
class InfosetFilter(object):
    """Coerce an HTML infoset into a legal XML 1.0 infoset.

    HTML permits names and data that XML does not (attribute names
    starting with ``xmlns:``, ``--`` inside comments, form feeds in
    text, characters illegal in XML Names, ...).  Each ``coerce*``
    method rewrites offending input and emits a ``DataLossWarning``,
    since the rewrite is generally not reversible.  Characters illegal
    in XML names are replaced with ``UHHHHH`` escapes (five uppercase
    hex digits); ``fromXmlName`` undoes that escaping.
    """

    # Matches the "UHHHHH" escape sequences produced by escapeChar(),
    # so fromXmlName() can find and reverse them.
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # Each flag enables one coercion; only form-feed replacement is
        # on by default.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs

        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd

        self.replaceFormFeedCharacters = replaceFormFeedCharacters

        self.preventSingleQuotePubid = preventSingleQuotePubid

        # char -> "UHHHHH" replacement memo, shared by all coercions
        # and populated by escapeChar().
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        """Return the coerced attribute name, or None to drop it entirely."""
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            # NOTE(review): the message says "xml namespace" but the
            # check is against the *xmlns* namespace URI.
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name):
        """Element names get the same XML-Name escaping as attributes."""
        return self.toXmlName(name)

    def coerceComment(self, data):
        """Rewrite comment text so it is legal inside an XML comment."""
        if self.preventDoubleDashComments:
            # XML comments may not contain "--"; break each pair apart.
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
            # NOTE(review): this trailing-dash fix is gated on
            # preventDoubleDashComments, not preventDashAtCommentEnd.
            if data.endswith("-"):
                warnings.warn("Comments cannot end in a dash", DataLossWarning)
                data += " "
        return data

    def coerceCharacters(self, data):
        """Replace characters illegal in XML character data (only U+000C here)."""
        if self.replaceFormFeedCharacters:
            # One warning per occurrence, then a single bulk replace.
            for _ in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        """Escape characters outside the XML PubidChar production."""
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        """Escape characters illegal in an XML Name.

        The first character and the rest are checked against different
        productions (NameStartChar vs NameChar).  Assumes *name* is
        non-empty — TODO confirm callers never pass "".
        """
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst

        nameRestOutput = nameRest
        # Deduplicate so each bad character is replaced (and warned
        # about) once, via a bulk str.replace.
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        """Memoised escape lookup; escapeChar() populates the cache."""
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        """Reverse toXmlName(): turn "UHHHHH" escapes back into characters."""
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        """Escape *char* as "U%05X" (e.g. "." -> "U0002E") and cache it."""
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        """Inverse of escapeChar(): "U0002E" -> ".".

        Assumes *charcode* matches replacementRegexp.
        """
        return chr(int(charcode[1:], 16))
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_inputstream.py
ADDED
@@ -0,0 +1,918 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from pip._vendor.six import text_type
|
4 |
+
from pip._vendor.six.moves import http_client, urllib
|
5 |
+
|
6 |
+
import codecs
|
7 |
+
import re
|
8 |
+
from io import BytesIO, StringIO
|
9 |
+
|
10 |
+
from pip._vendor import webencodings
|
11 |
+
|
12 |
+
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
|
13 |
+
from .constants import _ReparseException
|
14 |
+
from . import _utils
|
15 |
+
|
16 |
+
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
# Bytes that terminate a tag-name scan in the encoding pre-parser.
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])


# Character class matching every Unicode codepoint that is a parse error in
# HTML (C0/C1 controls except whitespace, non-characters U+FDD0..U+FDEF and
# the *FFFE/*FFFF pairs of every plane).  Lone surrogates are appended below
# only on platforms that can represent them.
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]"  # noqa

if _utils.supports_lone_surrogates:
    # Use one extra step of indirection and create surrogates with
    # eval. Not using this indirection would introduce an illegal
    # unicode literal on platforms not supporting such lone
    # surrogates.
    assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
                                    eval('"\\uD800-\\uDFFF"') +  # pylint:disable=eval-used
                                    "]")
else:
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)

# Non-BMP non-character codepoints; used by characterErrorsUCS2 to classify
# the value decoded from a surrogate pair on narrow builds.
non_bmp_invalid_codepoints = {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                              0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                              0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                              0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                              0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                              0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                              0x10FFFE, 0x10FFFF}

# ASCII whitespace and punctuation (used by the tokenizer).
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")

# Cache for charsUntil()
charsUntilRegEx = {}
49 |
+
|
50 |
+
|
51 |
+
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)

    Only ``tell``/``seek``/``read`` are provided; ``seek`` works only within
    data that has already been read (and therefore buffered).
    """

    def __init__(self, stream):
        self.stream = stream
        # List of byte chunks as returned by the underlying stream.
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset

    def tell(self):
        """Return the absolute byte position within the buffered stream."""
        # Sum of complete chunks before the current one, plus the offset
        # into the current chunk.
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos

    def seek(self, pos):
        """Move to absolute position *pos*, which must already be buffered."""
        assert pos <= self._bufferedBytes()
        # Walk the chunk list converting the absolute position into a
        # (chunk index, offset) pair.
        offset = pos
        i = 0
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]

    def read(self, bytes):
        """Read up to *bytes* bytes, serving from the buffer when possible."""
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            # Position is at the very end of the buffered data: go straight
            # to the underlying stream.
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes currently held in the buffer.
        return sum([len(item) for item in self.buffer])

    def _readStream(self, bytes):
        # Pull fresh data from the wrapped stream, append it as a new chunk
        # and leave the position at the end of that chunk.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        # Satisfy the read from buffered chunks first; fall through to the
        # underlying stream for any remainder.
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]

            if remainingBytes <= len(bufferedData) - bufferOffset:
                # The current chunk fully satisfies the request.
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                # Consume the rest of this chunk and move to the next one.
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead

            bufferOffset = 0

        if remainingBytes:
            # Buffer exhausted; read the remainder from the stream.
            rv.append(self._readStream(remainingBytes))

        return b"".join(rv)
123 |
+
|
124 |
+
|
125 |
+
def HTMLInputStream(source, **kwargs):
    """Factory returning the appropriate input stream for *source*.

    Text (unicode) sources get an HTMLUnicodeInputStream; byte sources
    (including file-like objects) get an HTMLBinaryInputStream, which
    performs encoding detection.  Passing any ``*_encoding`` keyword with
    a unicode source raises TypeError, since there is nothing to decode.
    """
    # Work around Python bug #20007: read(0) closes the connection.
    # http://bugs.python.org/issue20007
    # Also check for addinfourl wrapping HTTPResponse.
    wraps_http = (isinstance(source, http_client.HTTPResponse) or
                  (isinstance(source, urllib.response.addbase) and
                   isinstance(source.fp, http_client.HTTPResponse)))

    if wraps_http:
        source_is_unicode = False
    elif hasattr(source, "read"):
        # Probe the stream: a zero-length read tells us its item type
        # without consuming anything.
        source_is_unicode = isinstance(source.read(0), text_type)
    else:
        source_is_unicode = isinstance(source, text_type)

    if not source_is_unicode:
        return HTMLBinaryInputStream(source, **kwargs)

    bad_kwargs = [name for name in kwargs if name.endswith("_encoding")]
    if bad_kwargs:
        raise TypeError("Cannot set an encoding with a unicode input, set %r" % bad_kwargs)

    return HTMLUnicodeInputStream(source, **kwargs)
146 |
+
|
147 |
+
|
148 |
+
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.

    Input is consumed in chunks of ``_defaultChunkSize`` characters; the
    attributes ``chunk``/``chunkSize``/``chunkOffset`` describe the current
    chunk and read position.
    """

    _defaultChunkSize = 10240

    def __init__(self, source):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        """

        if not _utils.supports_lone_surrogates:
            # Such platforms will have already checked for such
            # surrogate errors, so no need to do this checking.
            self.reportCharacterErrors = None
        elif len("\U0010FFFF") == 1:
            # Wide (UCS-4) build: astral characters are single code units.
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            # Narrow (UCS-2) build: astral characters appear as surrogate pairs.
            self.reportCharacterErrors = self.characterErrorsUCS2

        # List of where new lines occur
        self.newLines = [0]

        self.charEncoding = (lookupEncoding("utf-8"), "certain")
        self.dataStream = self.openStream(source)

        self.reset()

    def reset(self):
        """Reset all chunk and position state to the start of the stream."""
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []

        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0

        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.

        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)

        return stream

    def _position(self, offset):
        # Translate *offset* (an index into the current chunk) into a
        # (line, column) pair, accounting for lines consumed in earlier chunks.
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            # No newline in this chunk before offset: continue the column
            # count from the previous chunk.
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)

    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        return (line + 1, col)

    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF

        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1

        return char

    def readChunk(self, chunkSize=None):
        """Fetch the next chunk of data; return False at end of stream."""
        if chunkSize is None:
            chunkSize = self._defaultChunkSize

        # Record line/column totals for the chunk we are about to discard.
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)

        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0

        data = self.dataStream.read(chunkSize)

        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False

        if len(data) > 1:
            # Hold back a trailing CR or lead surrogate so CRLF / surrogate
            # pairs split over a chunk boundary are processed together.
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]

        if self.reportCharacterErrors:
            self.reportCharacterErrors(data)

        # Replace invalid characters
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")

        self.chunk = data
        self.chunkSize = len(data)

        return True

    def characterErrorsUCS4(self, data):
        """Record one parse error per invalid codepoint (wide builds)."""
        for _ in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")

    def characterErrorsUCS2(self, data):
        """Record parse errors for invalid codepoints on narrow builds.

        Surrogate pairs must be decoded manually; ``skip`` suppresses the
        duplicate match on the trail surrogate of a pair just handled.
        """
        # Someone picked the wrong compile option
        # You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if _utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone surrogate at the very end of the data.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")

    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """

        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)

        rv = []

        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = "".join(rv)
        return r

    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not EOF:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
376 |
+
|
377 |
+
|
378 |
+
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.

    Encoding is chosen by ``determineEncoding`` which implements the HTML
    encoding-sniffing precedence: BOM > override > transport > <meta> >
    same-origin parent > likely > chardet > default > windows-1252.
    """

    def __init__(self, source, override_encoding=None, transport_encoding=None,
                 same_origin_parent_encoding=None, likely_encoding=None,
                 default_encoding="windows-1252", useChardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)

        HTMLUnicodeInputStream.__init__(self, self.rawStream)

        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 1024
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Things from args
        self.override_encoding = override_encoding
        self.transport_encoding = transport_encoding
        self.same_origin_parent_encoding = same_origin_parent_encoding
        self.likely_encoding = likely_encoding
        self.default_encoding = default_encoding

        # Determine encoding
        self.charEncoding = self.determineEncoding(useChardet)
        assert self.charEncoding[0] is not None

        # Call superclass
        self.reset()

    def reset(self):
        # Re-wrap the raw byte stream in a decoder for the chosen encoding;
        # undecodable bytes are replaced rather than raising.
        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.

        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)

        try:
            # Detection below needs to rewind; wrap unseekable streams.
            stream.seek(stream.tell())
        except Exception:
            stream = BufferedStream(stream)

        return stream

    def determineEncoding(self, chardet=True):
        """Return an (encoding, confidence) pair per the sniffing precedence."""
        # BOMs take precedence over everything
        # This will also read past the BOM if present
        charEncoding = self.detectBOM(), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # If we've been overridden, we've been overridden
        charEncoding = lookupEncoding(self.override_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # Now check the transport layer
        charEncoding = lookupEncoding(self.transport_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # Look for meta elements with encoding information
        charEncoding = self.detectEncodingMeta(), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Parent document encoding
        charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
        if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
            return charEncoding

        # "likely" encoding
        charEncoding = lookupEncoding(self.likely_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Guess with chardet, if available
        if chardet:
            try:
                from pip._vendor.chardet.universaldetector import UniversalDetector
            except ImportError:
                pass
            else:
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = lookupEncoding(detector.result['encoding'])
                # Rewind so the stream is re-read with the detected encoding.
                self.rawStream.seek(0)
                if encoding is not None:
                    return encoding, "tentative"

        # Try the default encoding
        charEncoding = lookupEncoding(self.default_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Fallback to html5lib's default if even that hasn't worked
        return lookupEncoding("windows-1252"), "tentative"

    def changeEncoding(self, newEncoding):
        """Switch to *newEncoding* mid-parse, raising _ReparseException if the
        document must be reparsed from the start with the new encoding."""
        assert self.charEncoding[1] != "certain"
        newEncoding = lookupEncoding(newEncoding)
        if newEncoding is None:
            return
        if newEncoding.name in ("utf-16be", "utf-16le"):
            # A meta-declared utf-16 is treated as utf-8 per the HTML spec.
            newEncoding = lookupEncoding("utf-8")
            assert newEncoding is not None
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
            codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        if encoding:
            self.rawStream.seek(seek)
            return lookupEncoding(encoding)
        else:
            self.rawStream.seek(0)
            return None

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
            # Meta-declared utf-16 is mapped to utf-8 per the HTML spec.
            encoding = lookupEncoding("utf-8")

        return encoding
576 |
+
|
577 |
+
|
578 |
+
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised

    Iteration yields one-byte slices (so items compare equal to e.g. b"<"
    on both Python 2 and 3).  Methods raise StopIteration when the position
    runs off the end; EncodingParser uses that for control flow.
    """
    def __new__(self, value):
        assert isinstance(value, bytes)
        # Lower-case on construction so all matching is case-insensitive.
        return bytes.__new__(self, value.lower())

    def __init__(self, value):
        # pylint:disable=unused-argument
        # -1 means "before the first byte"; the first next() lands on 0.
        self._position = -1

    def __iter__(self):
        return self

    def __next__(self):
        # Advance and return the next one-byte slice; StopIteration at end.
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]

    def next(self):
        # Py2 compat
        return self.__next__()

    def previous(self):
        """Step the position back one byte and return the byte at the old
        position."""
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]

    def setPosition(self, position):
        # Refuse to move once the position has already run past the end.
        if self._position >= len(self):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        # One-byte slice at the current position (b"" at/after the end).
        return self[self.position:self.position + 1]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def skipUntil(self, chars):
        """Advance until a byte in *chars* is found; return it or None at EOF."""
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        rv = self.startswith(bytes, self.position)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        try:
            self._position = self.index(bytes, self.position) + len(bytes) - 1
        except ValueError:
            # Not found: signal end-of-input to the parser loop.
            raise StopIteration
        return True
674 |
+
|
675 |
+
|
676 |
+
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None  # set by handleMeta when a declaration is found

    def getEncoding(self):
        """Scan the buffer for a <meta> encoding declaration; return the
        codec found (via lookupEncoding) or None."""
        # Fast-path: nothing to do if no meta tag can possibly be present.
        if b"<meta" not in self.data:
            return None

        # Ordered dispatch: first matching prefix after each "<" wins.
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for _ in self.data:
            keepParsing = True
            try:
                self.data.jumpTo(b"<")
            except StopIteration:
                # No more "<" in the buffer: scanning is complete.
                break
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        # Handlers return False once an encoding is found.
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Handler ran off the end of the buffer.
                        keepParsing = False
                        break
            if not keepParsing:
                break

        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")

    def handleMeta(self):
        """Parse the attributes of a <meta> tag looking for charset= or an
        http-equiv/content pragma; returns False once an encoding is set."""
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # A content= attribute was seen earlier and is now
                        # validated by the pragma.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = lookupEncoding(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = lookupEncoding(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Remember it in case the pragma comes later.
                                pendingEncoding = codec

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        # Consume the byte after "</" before examining the tag name.
        next(self.data)
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        """Skip a start/end tag and its attributes (their values do not
        matter here; only <meta> carries encoding information)."""
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True

        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        # Skip to the end of a bogus comment / doctype / processing
        # instruction.
        return self.data.jumpTo(b">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                # Valueless attribute.
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                # Attribute names are matched case-insensitively.
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
862 |
+
|
863 |
+
|
864 |
+
class ContentAttrParser(object):
    """Extract the charset value from a meta content attribute, e.g.
    ``text/html; charset=utf-8`` -> ``utf-8``.  Returns None when no
    well-formed charset parameter is present."""

    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data

    def parse(self):
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # Ran off the end of the attribute value before finding a
            # complete charset declaration.
            return None
901 |
+
|
902 |
+
|
903 |
+
def lookupEncoding(encoding):
    """Return the python codec name corresponding to an encoding or None if
    the string doesn't correspond to a valid encoding."""
    # Byte labels must be pure ASCII to be meaningful encoding names.
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None

    if encoding is None:
        return None

    try:
        # Delegate label resolution to the WHATWG encoding registry.
        return webencodings.lookup(encoding)
    except AttributeError:
        # Non-string-like input (no .strip/.lower) is not a valid label.
        return None
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_tokenizer.py
ADDED
@@ -0,0 +1,1735 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from pip._vendor.six import unichr as chr
|
4 |
+
|
5 |
+
from collections import deque, OrderedDict
|
6 |
+
from sys import version_info
|
7 |
+
|
8 |
+
from .constants import spaceCharacters
|
9 |
+
from .constants import entities
|
10 |
+
from .constants import asciiLetters, asciiUpper2Lower
|
11 |
+
from .constants import digits, hexDigits, EOF
|
12 |
+
from .constants import tokenTypes, tagTokenTypes
|
13 |
+
from .constants import replacementCharacters
|
14 |
+
|
15 |
+
from ._inputstream import HTMLInputStream
|
16 |
+
|
17 |
+
from ._trie import Trie
|
18 |
+
|
19 |
+
# Trie over all named character references; used for longest-prefix
# matching while consuming "&name;" entities.
entitiesTrie = Trie(entities)

# Plain dicts preserve insertion order from CPython 3.7 on, so OrderedDict
# is only needed for attribute maps on older interpreters.
if version_info >= (3, 7):
    attributeMap = dict
else:
    attributeMap = OrderedDict
|
25 |
+
|
26 |
+
|
27 |
+
class HTMLTokenizer(object):
|
28 |
+
""" This class takes care of tokenizing HTML.
|
29 |
+
|
30 |
+
* self.currentToken
|
31 |
+
Holds the token that is currently being processed.
|
32 |
+
|
33 |
+
* self.state
|
34 |
+
Holds a reference to the method to be invoked... XXX
|
35 |
+
|
36 |
+
* self.stream
|
37 |
+
Points to HTMLInputStream object.
|
38 |
+
"""
|
39 |
+
|
40 |
+
def __init__(self, stream, parser=None, **kwargs):
    """Create a tokenizer over *stream*.

    :arg stream: anything HTMLInputStream accepts (file-like object,
        string, ...); remaining keyword arguments are forwarded to it.
    :arg parser: optional owning parser, kept for states that consult it.
    """
    self.stream = HTMLInputStream(stream, **kwargs)
    self.parser = parser

    # Setup the initial tokenizer state
    self.escapeFlag = False
    self.lastFourChars = []
    # self.state is a bound method; calling it advances the machine one step.
    self.state = self.dataState
    self.escape = False

    # The current token being created
    self.currentToken = None
    super(HTMLTokenizer, self).__init__()
|
54 |
+
|
55 |
+
def __iter__(self):
    """This is where the magic happens.

    We do our usual processing through the states and when we have a token
    to return we yield the token which pauses processing until the next
    token is requested.
    """
    self.tokenQueue = deque([])
    # Start processing. When EOF is reached self.state will return False
    # instead of True and the loop will terminate.
    while self.state():
        # Stream-level errors (encoding, invalid codepoints) are surfaced
        # before any tokens produced by the same step.
        while self.stream.errors:
            yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
        while self.tokenQueue:
            yield self.tokenQueue.popleft()
|
70 |
+
|
71 |
+
def consumeNumberEntity(self, isHex):
    """This function returns either U+FFFD or the character based on the
    decimal or hexadecimal representation. It also discards ";" if present.
    If not present a "numeric-entity-without-semicolon" ParseError token is
    queued.

    :arg isHex: True when the reference was written as ``&#x...``.
    """
    allowed = digits
    radix = 10
    if isHex:
        allowed = hexDigits
        radix = 16

    charStack = []

    # Consume all the characters that are in range while making sure we
    # don't hit an EOF.
    c = self.stream.char()
    while c in allowed and c is not EOF:
        charStack.append(c)
        c = self.stream.char()

    # Convert the set of characters consumed to an int.
    charAsInt = int("".join(charStack), radix)

    # Certain characters get replaced with others
    if charAsInt in replacementCharacters:
        char = replacementCharacters[charAsInt]
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "illegal-codepoint-for-numeric-entity",
                                "datavars": {"charAsInt": charAsInt}})
    elif ((0xD800 <= charAsInt <= 0xDFFF) or
          (charAsInt > 0x10FFFF)):
        # Surrogates and out-of-range codepoints become U+FFFD.
        char = "\uFFFD"
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "illegal-codepoint-for-numeric-entity",
                                "datavars": {"charAsInt": charAsInt}})
    else:
        # Should speed up this check somehow (e.g. move the set to a constant)
        # Control characters and non-characters are reported but still
        # emitted verbatim.
        if ((0x0001 <= charAsInt <= 0x0008) or
            (0x000E <= charAsInt <= 0x001F) or
            (0x007F <= charAsInt <= 0x009F) or
            (0xFDD0 <= charAsInt <= 0xFDEF) or
            charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
                                    0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                    0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
                                    0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
                                    0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
                                    0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
                                    0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                    0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
                                    0xFFFFF, 0x10FFFE, 0x10FFFF])):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        try:
            # Try/except needed as UCS-2 Python builds' unichar only works
            # within the BMP.
            char = chr(charAsInt)
        except ValueError:
            # Encode the astral codepoint as a surrogate pair.
            v = charAsInt - 0x10000
            char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))

    # Discard the ; if present. Otherwise, put it back on the queue and
    # invoke parseError on parser.
    if c != ";":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "numeric-entity-without-semicolon"})
        self.stream.unget(c)

    return char
|
142 |
+
|
143 |
+
def consumeEntity(self, allowedChar=None, fromAttribute=False):
    """Consume a character reference after "&".

    :arg allowedChar: an additional character (the attribute's quote
        delimiter) after which the "&" is treated as literal text.
    :arg fromAttribute: when True the result is appended to the current
        attribute's value instead of being queued as a token.
    """
    # Initialise to the default output for when no entity is matched
    output = "&"

    charStack = [self.stream.char()]
    if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
            (allowedChar is not None and allowedChar == charStack[0])):
        # Not an entity: push the character back and emit a bare "&".
        self.stream.unget(charStack[0])

    elif charStack[0] == "#":
        # Read the next character to see if it's hex or decimal
        hex = False
        charStack.append(self.stream.char())
        if charStack[-1] in ("x", "X"):
            hex = True
            charStack.append(self.stream.char())

        # charStack[-1] should be the first digit
        if (hex and charStack[-1] in hexDigits) \
                or (not hex and charStack[-1] in digits):
            # At least one digit found, so consume the whole number
            self.stream.unget(charStack[-1])
            output = self.consumeNumberEntity(hex)
        else:
            # No digits found
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-numeric-entity"})
            self.stream.unget(charStack.pop())
            output = "&" + "".join(charStack)

    else:
        # At this point in the process might have named entity. Entities
        # are stored in the global variable "entities".
        #
        # Consume characters and compare to these to a substring of the
        # entity names in the list until the substring no longer matches.
        while (charStack[-1] is not EOF):
            if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
                break
            charStack.append(self.stream.char())

        # At this point we have a string that starts with some characters
        # that may match an entity
        # Try to find the longest entity the string will match to take care
        # of &noti for instance.
        try:
            entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
            entityLength = len(entityName)
        except KeyError:
            entityName = None

        if entityName is not None:
            if entityName[-1] != ";":
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "named-entity-without-semicolon"})
            # In attributes, a semicolonless entity followed by an
            # alphanumeric or "=" is treated as literal text (legacy rule).
            if (entityName[-1] != ";" and fromAttribute and
                (charStack[entityLength] in asciiLetters or
                 charStack[entityLength] in digits or
                 charStack[entityLength] == "=")):
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)
            else:
                output = entities[entityName]
                self.stream.unget(charStack.pop())
                # Characters consumed past the entity name are re-emitted.
                output += "".join(charStack[entityLength:])
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-named-entity"})
            self.stream.unget(charStack.pop())
            output = "&" + "".join(charStack)

    if fromAttribute:
        # Append to the value of the most recently started attribute.
        self.currentToken["data"][-1][1] += output
    else:
        if output in spaceCharacters:
            tokenType = "SpaceCharacters"
        else:
            tokenType = "Characters"
        self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
|
222 |
+
|
223 |
+
def processEntityInAttribute(self, allowedChar):
    """Consume a character reference directly into the current attribute
    value; replaces the need for an "entityInAttributeValueState"."""
    self.consumeEntity(fromAttribute=True, allowedChar=allowedChar)
|
227 |
+
|
228 |
+
def emitCurrentToken(self):
    """This method is a generic handler for emitting the tags. It also sets
    the state to "data" because that's what's needed after a token has been
    emitted.
    """
    token = self.currentToken
    # Add token to the queue to be yielded
    if (token["type"] in tagTokenTypes):
        # Tag names are ASCII case-insensitive; normalise to lowercase.
        token["name"] = token["name"].translate(asciiUpper2Lower)
        if token["type"] == tokenTypes["StartTag"]:
            raw = token["data"]
            data = attributeMap(raw)
            if len(raw) > len(data):
                # we had some duplicated attribute, fix so first wins:
                # rebuilding from the reversed list makes the *first*
                # occurrence of each name overwrite later ones.
                data.update(raw[::-1])
            token["data"] = data

        if token["type"] == tokenTypes["EndTag"]:
            if token["data"]:
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "attributes-in-end-tag"})
            if token["selfClosing"]:
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "self-closing-flag-on-end-tag"})
    self.tokenQueue.append(token)
    self.state = self.dataState
|
254 |
+
|
255 |
+
# Below are the various tokenizer states worked out.
|
256 |
+
def dataState(self):
    """Default state: emit character data until "&", "<", U+0000 or EOF.
    Returns False at EOF to stop the tokenizer loop."""
    data = self.stream.char()
    if data == "&":
        self.state = self.entityDataState
    elif data == "<":
        self.state = self.tagOpenState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        # In the data state U+0000 is emitted as-is (not replaced by U+FFFD).
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\u0000"})
    elif data is EOF:
        # Tokenization ends.
        return False
    elif data in spaceCharacters:
        # Directly after emitting a token you switch back to the "data
        # state". At that point spaceCharacters are important so they are
        # emitted separately.
        self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                data + self.stream.charsUntil(spaceCharacters, True)})
        # No need to update lastFourChars here, since the first space will
        # have already been appended to lastFourChars and will have broken
        # any <!-- or --> sequences
    else:
        chars = self.stream.charsUntil(("&", "<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
|
284 |
+
|
285 |
+
def entityDataState(self):
    """Consume a character reference seen in the data state, then return
    to the data state."""
    self.consumeEntity()
    self.state = self.dataState
    return True
|
289 |
+
|
290 |
+
def rcdataState(self):
    """RCDATA state (e.g. <title>, <textarea> content): entities are
    recognised but tags other than the matching end tag are not."""
    data = self.stream.char()
    if data == "&":
        self.state = self.characterReferenceInRcdata
    elif data == "<":
        self.state = self.rcdataLessThanSignState
    elif data == EOF:
        # Tokenization ends.
        return False
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        # Unlike the data state, U+0000 is replaced by U+FFFD here.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data in spaceCharacters:
        # Directly after emitting a token you switch back to the "data
        # state". At that point spaceCharacters are important so they are
        # emitted separately.
        self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                data + self.stream.charsUntil(spaceCharacters, True)})
        # No need to update lastFourChars here, since the first space will
        # have already been appended to lastFourChars and will have broken
        # any <!-- or --> sequences
    else:
        chars = self.stream.charsUntil(("&", "<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
|
318 |
+
|
319 |
+
def characterReferenceInRcdata(self):
    """Consume a character reference seen inside RCDATA, then return to
    the RCDATA state."""
    self.consumeEntity()
    self.state = self.rcdataState
    return True
|
323 |
+
|
324 |
+
def rawtextState(self):
    """RAWTEXT state (e.g. <style> content): no entities, only the
    matching end tag terminates the text."""
    data = self.stream.char()
    if data == "<":
        self.state = self.rawtextLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        # Tokenization ends.
        return False
    else:
        chars = self.stream.charsUntil(("<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
|
341 |
+
|
342 |
+
def scriptDataState(self):
    """Script data state (<script> content): like RAWTEXT but with an
    extra escape machinery entered via "<!"."""
    data = self.stream.char()
    if data == "<":
        self.state = self.scriptDataLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        # Tokenization ends.
        return False
    else:
        chars = self.stream.charsUntil(("<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
|
359 |
+
|
360 |
+
def plaintextState(self):
    """PLAINTEXT state: everything until EOF is character data; there is
    no way back out of this state."""
    data = self.stream.char()
    if data == EOF:
        # Tokenization ends.
        return False
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + self.stream.charsUntil("\u0000")})
    return True
|
374 |
+
|
375 |
+
def tagOpenState(self):
    """After "<" in the data state: dispatch to markup declaration, end
    tag, start tag, or fall back to literal "<"."""
    data = self.stream.char()
    if data == "!":
        self.state = self.markupDeclarationOpenState
    elif data == "/":
        self.state = self.closeTagOpenState
    elif data in asciiLetters:
        self.currentToken = {"type": tokenTypes["StartTag"],
                             "name": data, "data": [],
                             "selfClosing": False,
                             "selfClosingAcknowledged": False}
        self.state = self.tagNameState
    elif data == ">":
        # XXX In theory it could be something besides a tag name. But
        # do we really care?
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-tag-name-but-got-right-bracket"})
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
        self.state = self.dataState
    elif data == "?":
        # XXX In theory it could be something besides a tag name. But
        # do we really care?
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-tag-name-but-got-question-mark"})
        self.stream.unget(data)
        self.state = self.bogusCommentState
    else:
        # XXX
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-tag-name"})
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(data)
        self.state = self.dataState
    return True
|
409 |
+
|
410 |
+
def closeTagOpenState(self):
    """After "</": a letter starts an end tag; ">" and EOF are errors;
    anything else becomes a bogus comment."""
    data = self.stream.char()
    if data in asciiLetters:
        self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                             "data": [], "selfClosing": False}
        self.state = self.tagNameState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-closing-tag-but-got-right-bracket"})
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-closing-tag-but-got-eof"})
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.state = self.dataState
    else:
        # XXX data can be _'_...
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-closing-tag-but-got-char",
                                "datavars": {"data": data}})
        self.stream.unget(data)
        self.state = self.bogusCommentState
    return True
|
433 |
+
|
434 |
+
def tagNameState(self):
    """Accumulate a tag name one character at a time until whitespace,
    ">", "/", or EOF."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeAttributeNameState
    elif data == ">":
        self.emitCurrentToken()
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-tag-name"})
        self.state = self.dataState
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["name"] += "\uFFFD"
    else:
        self.currentToken["name"] += data
        # (Don't use charsUntil here, because tag names are
        # very short and it's faster to not do anything fancy)
    return True
|
455 |
+
|
456 |
+
def rcdataLessThanSignState(self):
    """Handle "<" inside RCDATA: "/" may begin the matching end tag,
    anything else is literal text."""
    ch = self.stream.char()
    if ch != "/":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(ch)
        self.state = self.rcdataState
    else:
        self.temporaryBuffer = ""
        self.state = self.rcdataEndTagOpenState
    return True
|
466 |
+
|
467 |
+
def rcdataEndTagOpenState(self):
    """After "</" inside RCDATA: a letter starts buffering a candidate
    end-tag name; otherwise emit "</" as literal text."""
    ch = self.stream.char()
    if ch not in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(ch)
        self.state = self.rcdataState
        return True
    self.temporaryBuffer += ch
    self.state = self.rcdataEndTagNameState
    return True
|
477 |
+
|
478 |
+
def rcdataEndTagNameState(self):
    """Accumulate a candidate end-tag name in RCDATA; it only terminates
    the text if it matches the current open tag ("appropriate end tag")."""
    matches = (self.currentToken and
               self.currentToken["name"].lower() == self.temporaryBuffer.lower())
    ch = self.stream.char()
    if matches and (ch in spaceCharacters or ch in ("/", ">")):
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        if ch == "/":
            self.state = self.selfClosingStartTagState
        elif ch == ">":
            self.emitCurrentToken()
            self.state = self.dataState
        else:
            self.state = self.beforeAttributeNameState
    elif ch in asciiLetters:
        self.temporaryBuffer += ch
    else:
        # Not an appropriate end tag after all: flush buffered text.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(ch)
        self.state = self.rcdataState
    return True
|
505 |
+
|
506 |
+
def rawtextLessThanSignState(self):
    """Handle "<" inside RAWTEXT: "/" may begin the matching end tag,
    anything else is literal text."""
    ch = self.stream.char()
    if ch != "/":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(ch)
        self.state = self.rawtextState
    else:
        self.temporaryBuffer = ""
        self.state = self.rawtextEndTagOpenState
    return True
|
516 |
+
|
517 |
+
def rawtextEndTagOpenState(self):
    """After "</" inside RAWTEXT: a letter starts buffering a candidate
    end-tag name; otherwise emit "</" as literal text."""
    ch = self.stream.char()
    if ch not in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(ch)
        self.state = self.rawtextState
        return True
    self.temporaryBuffer += ch
    self.state = self.rawtextEndTagNameState
    return True
|
527 |
+
|
528 |
+
def rawtextEndTagNameState(self):
    """Accumulate a candidate end-tag name in RAWTEXT; it only terminates
    the text if it matches the current open tag ("appropriate end tag")."""
    matches = (self.currentToken and
               self.currentToken["name"].lower() == self.temporaryBuffer.lower())
    ch = self.stream.char()
    if matches and (ch in spaceCharacters or ch in ("/", ">")):
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        if ch == "/":
            self.state = self.selfClosingStartTagState
        elif ch == ">":
            self.emitCurrentToken()
            self.state = self.dataState
        else:
            self.state = self.beforeAttributeNameState
    elif ch in asciiLetters:
        self.temporaryBuffer += ch
    else:
        # Not an appropriate end tag after all: flush buffered text.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(ch)
        self.state = self.rawtextState
    return True
|
555 |
+
|
556 |
+
def scriptDataLessThanSignState(self):
    """Handle "<" in script data: "!" starts the "<!--" escape sequence,
    "/" may open an end tag, anything else is literal text."""
    ch = self.stream.char()
    if ch == "!":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
        self.state = self.scriptDataEscapeStartState
    elif ch == "/":
        self.temporaryBuffer = ""
        self.state = self.scriptDataEndTagOpenState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(ch)
        self.state = self.scriptDataState
    return True
|
569 |
+
|
570 |
+
def scriptDataEndTagOpenState(self):
    """After "</" in script data: a letter starts buffering a candidate
    end-tag name; otherwise emit "</" as literal text."""
    ch = self.stream.char()
    if ch not in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(ch)
        self.state = self.scriptDataState
        return True
    self.temporaryBuffer += ch
    self.state = self.scriptDataEndTagNameState
    return True
|
580 |
+
|
581 |
+
    def scriptDataEndTagNameState(self):
        """Accumulate a candidate end-tag name inside script data.

        The buffered name only closes the element when it matches the
        name of the last start tag seen (an "appropriate" end tag);
        otherwise the buffered text is emitted as literal characters.
        """
        # Case-insensitive comparison against the current start tag's name.
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            # Not an appropriate end tag: emit the text seen so far and
            # re-process this character as plain script data.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

def scriptDataEscapeStartState(self):
|
610 |
+
data = self.stream.char()
|
611 |
+
if data == "-":
|
612 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
|
613 |
+
self.state = self.scriptDataEscapeStartDashState
|
614 |
+
else:
|
615 |
+
self.stream.unget(data)
|
616 |
+
self.state = self.scriptDataState
|
617 |
+
return True
|
618 |
+
|
619 |
+
def scriptDataEscapeStartDashState(self):
|
620 |
+
data = self.stream.char()
|
621 |
+
if data == "-":
|
622 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
|
623 |
+
self.state = self.scriptDataEscapedDashDashState
|
624 |
+
else:
|
625 |
+
self.stream.unget(data)
|
626 |
+
self.state = self.scriptDataState
|
627 |
+
return True
|
628 |
+
|
629 |
+
    def scriptDataEscapedState(self):
        """Tokenize script data inside a "<!--" ... "-->" escaped section."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            self.state = self.dataState
        else:
            # Consume a whole run of uninteresting characters at once.
            chars = self.stream.charsUntil(("<", "-", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True

    def scriptDataEscapedDashState(self):
        """Handle the character after a single "-" in escaped script data."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataEscapedState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedDashDashState(self):
        """Handle the character after "--" in escaped script data.

        A ">" here closes the escaped ("<!-- ... -->") section.
        """
        data = self.stream.char()
        if data == "-":
            # Additional dashes stay in this state.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataEscapedState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedLessThanSignState(self):
        """Handle a "<" inside escaped script data.

        "/" may start an end tag; a letter may start a nested "<script"
        (double-escaped) section; anything else is literal text.
        """
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.scriptDataEscapedEndTagOpenState
        elif data in asciiLetters:
            # Possible start of a nested <script>; begin buffering its name.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
            self.temporaryBuffer = data
            self.state = self.scriptDataDoubleEscapeStartState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedEndTagOpenState(self):
        """Handle the character after "</" in escaped script data."""
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer = data
            self.state = self.scriptDataEscapedEndTagNameState
        else:
            # Not a tag name: emit "</" literally and re-process.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedEndTagNameState(self):
        """Accumulate a candidate end-tag name inside escaped script data.

        Mirrors scriptDataEndTagNameState, but falls back to the escaped
        state instead of plain script data when the name does not match.
        """
        # Case-insensitive comparison against the current start tag's name.
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            # Not an appropriate end tag: emit the buffered text literally.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataDoubleEscapeStartState(self):
        """Decide whether a nested "<script" enters double-escaped mode.

        Characters are emitted as they are consumed; once the buffered
        name is complete, it enters the double-escaped state only when it
        spells "script" (case-insensitively).
        """
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataDoubleEscapedState
            else:
                self.state = self.scriptDataEscapedState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataDoubleEscapedState(self):
        """Tokenize script data inside a double-escaped section.

        All input (including "<") is emitted as characters; only the
        closing "-->" or "</script" sequences change state.
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        return True

    def scriptDataDoubleEscapedDashState(self):
        """Handle the character after a single "-" in double-escaped data."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True

    def scriptDataDoubleEscapedDashDashState(self):
        """Handle the character after "--" in double-escaped script data.

        A ">" here ends the escaped section and returns to plain
        script-data tokenization.
        """
        data = self.stream.char()
        if data == "-":
            # Additional dashes stay in this state.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True

def scriptDataDoubleEscapedLessThanSignState(self):
|
831 |
+
data = self.stream.char()
|
832 |
+
if data == "/":
|
833 |
+
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
|
834 |
+
self.temporaryBuffer = ""
|
835 |
+
self.state = self.scriptDataDoubleEscapeEndState
|
836 |
+
else:
|
837 |
+
self.stream.unget(data)
|
838 |
+
self.state = self.scriptDataDoubleEscapedState
|
839 |
+
return True
|
840 |
+
|
841 |
+
    def scriptDataDoubleEscapeEndState(self):
        """Decide whether "</script" exits double-escaped mode.

        Mirrors scriptDataDoubleEscapeStartState with the two target
        states swapped: a completed "script" name leaves double-escaped
        mode for the (singly) escaped state.
        """
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataEscapedState
            else:
                self.state = self.scriptDataDoubleEscapedState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataDoubleEscapedState
        return True

    def beforeAttributeNameState(self):
        """Handle the position between a tag name (or attribute) and the
        next attribute name.

        Starts a new ``[name, value]`` pair in ``currentToken["data"]``
        when an attribute begins.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            # Skip the whole run of whitespace in one call.
            self.stream.charsUntil(spaceCharacters, True)
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data in ("'", '"', "=", "<"):
            # Parse error, but the character still starts an attribute name.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-name-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True

    def attributeNameState(self):
        """Accumulate the current attribute's name.

        On leaving this state the name is lowercased and checked against
        earlier attributes so duplicates can be reported.
        """
        data = self.stream.char()
        leavingThisState = True
        emitToken = False
        if data == "=":
            self.state = self.beforeAttributeValueState
        elif data in asciiLetters:
            # Bulk-consume the rest of the alphabetic run for speed.
            self.currentToken["data"][-1][0] += data +\
                self.stream.charsUntil(asciiLetters, True)
            leavingThisState = False
        elif data == ">":
            # XXX If we emit here the attributes are converted to a dict
            # without being checked and when the code below runs we error
            # because data is a dict not a list
            emitToken = True
        elif data in spaceCharacters:
            self.state = self.afterAttributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][0] += "\uFFFD"
            leavingThisState = False
        elif data in ("'", '"', "<"):
            # Parse error, but the character is still part of the name.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-attribute-name"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][0] += data
            leavingThisState = False

        if leavingThisState:
            # Attributes are not dropped at this stage. That happens when the
            # start tag token is emitted so values can still be safely appended
            # to attributes, but we do want to report the parse error in time.
            self.currentToken["data"][-1][0] = (
                self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
            for name, _ in self.currentToken["data"][:-1]:
                if self.currentToken["data"][-1][0] == name:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "duplicate-attribute"})
                    break
            # XXX Fix for above XXX
            if emitToken:
                self.emitCurrentToken()
        return True

    def afterAttributeNameState(self):
        """Handle the position after an attribute name (before a possible
        "=", the next attribute, or the end of the tag)."""
        data = self.stream.char()
        if data in spaceCharacters:
            # Skip the whole run of whitespace in one call.
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "=":
            self.state = self.beforeAttributeValueState
        elif data == ">":
            self.emitCurrentToken()
        elif data in asciiLetters:
            # A new attribute starts without a value for the previous one.
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data in ("'", '"', "<"):
            # Parse error, but the character still starts a new attribute.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-after-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-end-of-tag-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True

    def beforeAttributeValueState(self):
        """Handle the position after "=": choose between double-quoted,
        single-quoted, and unquoted attribute-value states."""
        data = self.stream.char()
        if data in spaceCharacters:
            # Skip the whole run of whitespace in one call.
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "\"":
            self.state = self.attributeValueDoubleQuotedState
        elif data == "&":
            # "&" belongs to the unquoted value; push it back so the
            # unquoted state can process the character reference.
            self.state = self.attributeValueUnQuotedState
            self.stream.unget(data)
        elif data == "'":
            self.state = self.attributeValueSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-right-bracket"})
            self.emitCurrentToken()
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
            self.state = self.attributeValueUnQuotedState
        elif data in ("=", "<", "`"):
            # Parse error, but the character still joins the unquoted value.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "equals-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        return True

    def attributeValueDoubleQuotedState(self):
        """Accumulate a double-quoted attribute value."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterAttributeValueState
        elif data == "&":
            # Character reference; '"' is the additional allowed character.
            self.processEntityInAttribute('"')
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-double-quote"})
            self.state = self.dataState
        else:
            # Bulk-consume everything up to the next special character.
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("\"", "&", "\u0000"))
        return True

    def attributeValueSingleQuotedState(self):
        """Accumulate a single-quoted attribute value."""
        data = self.stream.char()
        if data == "'":
            self.state = self.afterAttributeValueState
        elif data == "&":
            # Character reference; "'" is the additional allowed character.
            self.processEntityInAttribute("'")
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-single-quote"})
            self.state = self.dataState
        else:
            # Bulk-consume everything up to the next special character.
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("'", "&", "\u0000"))
        return True

    def attributeValueUnQuotedState(self):
        """Accumulate an unquoted attribute value (terminated by
        whitespace or ">")."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == "&":
            # Character reference; ">" is the additional allowed character.
            self.processEntityInAttribute(">")
        elif data == ">":
            self.emitCurrentToken()
        elif data in ('"', "'", "=", "<", "`"):
            # Parse error, but the character still joins the value.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-no-quotes"})
            self.state = self.dataState
        else:
            # Bulk-consume everything up to the next special character.
            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
                frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
        return True

    def afterAttributeValueState(self):
        """Handle the character immediately after a quoted attribute value."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-EOF-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            # Missing whitespace between attributes: re-process the
            # character as the start of the next attribute.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True

    def selfClosingStartTagState(self):
        """Handle the character after a "/" inside a tag; only ">" makes
        the tag self-closing."""
        data = self.stream.char()
        if data == ">":
            self.currentToken["selfClosing"] = True
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "unexpected-EOF-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            # Stray "/": report it and re-process the character as the
            # start of another attribute.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True

def bogusCommentState(self):
|
1109 |
+
# Make a new comment token and give it as value all the characters
|
1110 |
+
# until the first > or EOF (charsUntil checks for EOF automatically)
|
1111 |
+
# and emit it.
|
1112 |
+
data = self.stream.charsUntil(">")
|
1113 |
+
data = data.replace("\u0000", "\uFFFD")
|
1114 |
+
self.tokenQueue.append(
|
1115 |
+
{"type": tokenTypes["Comment"], "data": data})
|
1116 |
+
|
1117 |
+
# Eat the character directly after the bogus comment which is either a
|
1118 |
+
# ">" or an EOF.
|
1119 |
+
self.stream.char()
|
1120 |
+
self.state = self.dataState
|
1121 |
+
return True
|
1122 |
+
|
1123 |
+
    def markupDeclarationOpenState(self):
        """Dispatch after "<!": comment ("--"), DOCTYPE, or CDATA section.

        Consumed characters are kept on ``charStack`` so they can all be
        pushed back if no known construct matches, in which case the
        input is treated as a bogus comment.
        """
        charStack = [self.stream.char()]
        if charStack[-1] == "-":
            charStack.append(self.stream.char())
            if charStack[-1] == "-":
                self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
                self.state = self.commentStartState
                return True
        elif charStack[-1] in ('d', 'D'):
            # Case-insensitive match of the remaining "OCTYPE" letters.
            matched = True
            for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                             ('y', 'Y'), ('p', 'P'), ('e', 'E')):
                charStack.append(self.stream.char())
                if charStack[-1] not in expected:
                    matched = False
                    break
            if matched:
                self.currentToken = {"type": tokenTypes["Doctype"],
                                     "name": "",
                                     "publicId": None, "systemId": None,
                                     "correct": True}
                self.state = self.doctypeState
                return True
        elif (charStack[-1] == "[" and
              self.parser is not None and
              self.parser.tree.openElements and
              self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
            # "[CDATA[" is only recognised in foreign (non-HTML) content.
            matched = True
            for expected in ["C", "D", "A", "T", "A", "["]:
                charStack.append(self.stream.char())
                if charStack[-1] != expected:
                    matched = False
                    break
            if matched:
                self.state = self.cdataSectionState
                return True

        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-dashes-or-doctype"})

        # No construct matched: push everything back and fall through to
        # the bogus-comment state.
        while charStack:
            self.stream.unget(charStack.pop())
        self.state = self.bogusCommentState
        return True

    def commentStartState(self):
        """Handle the first character after the opening "<!--" of a comment."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentStartDashState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data == ">":
            # "<!-->" is an (empty) abruptly-closed comment.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data
            self.state = self.commentState
        return True

    def commentStartDashState(self):
        """Handle the character after "<!---" (one dash into the comment body)."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            # NUL is a parse error; the pending "-" is kept before U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
        elif data == ">":
            # "<!--->" is an abruptly-closed comment.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # The pending "-" turned out to be comment text.
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True

    def commentState(self):
        """Accumulate the body of a comment token."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndDashState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # Bulk-consume everything up to the next special character.
            self.currentToken["data"] += data + \
                self.stream.charsUntil(("-", "\u0000"))
        return True

    def commentEndDashState(self):
        """Handle the character after one "-" inside a comment body."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            # NUL is a parse error; the pending "-" is kept before U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # The pending "-" turned out to be comment text.
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True

    def commentEndState(self):
        """Handle the character after "--" inside a comment; ">" closes it."""
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            # NUL is a parse error; the pending "--" is kept before U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--\uFFFD"
            self.state = self.commentState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-bang-after-double-dash-in-comment"})
            self.state = self.commentEndBangState
        elif data == "-":
            # Extra dashes stay pending; one is appended to the data.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-dash-after-double-dash-in-comment"})
            self.currentToken["data"] += data
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-double-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-comment"})
            # The pending "--" turned out to be comment text.
            self.currentToken["data"] += "--" + data
            self.state = self.commentState
        return True

    def commentEndBangState(self):
        """Handle the character after "--!" inside a comment."""
        data = self.stream.char()
        if data == ">":
            # "--!>" still closes the comment.
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "-":
            # "--!-": flush "--!" as text, with one "-" pending again.
            self.currentToken["data"] += "--!"
            self.state = self.commentEndDashState
        elif data == "\u0000":
            # NUL is a parse error; the pending "--!" is kept before U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--!\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-bang-state"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # The pending "--!" turned out to be comment text.
            self.currentToken["data"] += "--!" + data
            self.state = self.commentState
        return True

    def doctypeState(self):
        """Handle the character after "<!DOCTYPE" (whitespace expected)."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # Missing required whitespace: report and re-process the
            # character as part of the doctype name.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "need-space-after-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeNameState
        return True

def beforeDoctypeNameState(self):
    """Tokenizer state: before the DOCTYPE name.

    Skips whitespace; '>' emits an incorrect DOCTYPE; NUL starts the name
    as U+FFFD; EOF errors and emits; any other character starts the name.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        # Leading whitespace before the name is ignored.
        pass
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-doctype-name-but-got-right-bracket"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["name"] = "\uFFFD"
        self.state = self.doctypeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-doctype-name-but-got-eof"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["name"] = data
        self.state = self.doctypeNameState
    return True
|
1347 |
+
|
1348 |
+
def doctypeNameState(self):
    """Tokenizer state: inside the DOCTYPE name.

    Whitespace or '>' ends the name (lower-casing it on the way out); NUL
    appends U+FFFD; EOF errors and emits; anything else extends the name.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        # Names are compared case-insensitively; normalise to lower case.
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.state = self.afterDoctypeNameState
    elif data == ">":
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["name"] += "\uFFFD"
        self.state = self.doctypeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype-name"})
        self.currentToken["correct"] = False
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["name"] += data
    return True
|
1372 |
+
|
1373 |
+
def afterDoctypeNameState(self):
    """Tokenizer state: after the DOCTYPE name.

    Recognises the (case-insensitive) keywords "PUBLIC" and "SYSTEM" by
    reading ahead one character at a time; otherwise falls through to the
    bogus-DOCTYPE state.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.currentToken["correct"] = False
        self.stream.unget(data)
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        if data in ("p", "P"):
            # Try to match the remainder of "PUBLIC", case-insensitively.
            matched = True
            for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                             ("i", "I"), ("c", "C")):
                data = self.stream.char()
                if data not in expected:
                    matched = False
                    break
            if matched:
                self.state = self.afterDoctypePublicKeywordState
                return True
        elif data in ("s", "S"):
            # Try to match the remainder of "SYSTEM", case-insensitively.
            matched = True
            for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                             ("e", "E"), ("m", "M")):
                data = self.stream.char()
                if data not in expected:
                    matched = False
                    break
            if matched:
                self.state = self.afterDoctypeSystemKeywordState
                return True

        # All the characters read before the current 'data' will be
        # [a-zA-Z], so they're garbage in the bogus doctype and can be
        # discarded; only the latest character might be '>' or EOF
        # and needs to be ungetted
        self.stream.unget(data)
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-space-or-right-bracket-in-doctype", "datavars":
                                {"data": data}})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState

    return True
|
1423 |
+
|
1424 |
+
def afterDoctypePublicKeywordState(self):
    """Tokenizer state: just after the "PUBLIC" keyword.

    Whitespace moves to before-public-identifier; a quote is a parse error
    but is re-processed there; EOF errors and emits; anything else is also
    re-processed in the before-public-identifier state.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeDoctypePublicIdentifierState
    elif data in ("'", '"'):
        # Missing space before the quoted identifier: error, then reparse.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.stream.unget(data)
        self.state = self.beforeDoctypePublicIdentifierState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.stream.unget(data)
        self.state = self.beforeDoctypePublicIdentifierState
    return True
|
1443 |
+
|
1444 |
+
def beforeDoctypePublicIdentifierState(self):
    """Tokenizer state: before the DOCTYPE public identifier.

    Skips whitespace; a quote starts the (initially empty) public id; '>'
    or EOF emit an incorrect DOCTYPE; anything else goes bogus.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == "\"":
        self.currentToken["publicId"] = ""
        self.state = self.doctypePublicIdentifierDoubleQuotedState
    elif data == "'":
        self.currentToken["publicId"] = ""
        self.state = self.doctypePublicIdentifierSingleQuotedState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
|
1472 |
+
|
1473 |
+
def doctypePublicIdentifierDoubleQuotedState(self):
    """Tokenizer state: inside a double-quoted DOCTYPE public identifier.

    The closing '"' ends the identifier; NUL appends U+FFFD; '>' or EOF
    abort with an incorrect DOCTYPE; anything else extends the identifier.
    """
    data = self.stream.char()
    if data == "\"":
        self.state = self.afterDoctypePublicIdentifierState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["publicId"] += "\uFFFD"
    elif data == ">":
        # Premature end of the DOCTYPE inside the identifier.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["publicId"] += data
    return True
|
1496 |
+
|
1497 |
+
def doctypePublicIdentifierSingleQuotedState(self):
    """Tokenizer state: inside a single-quoted DOCTYPE public identifier.

    Mirror of the double-quoted state with "'" as the terminator.
    """
    data = self.stream.char()
    if data == "'":
        self.state = self.afterDoctypePublicIdentifierState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["publicId"] += "\uFFFD"
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["publicId"] += data
    return True
|
1520 |
+
|
1521 |
+
def afterDoctypePublicIdentifierState(self):
    """Tokenizer state: after the DOCTYPE public identifier.

    Whitespace moves on to the between-public-and-system state; '>' emits
    the DOCTYPE; a quote (a parse error, since the separating space is
    missing) starts the system identifier; EOF or anything else errors.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.betweenDoctypePublicAndSystemIdentifiersState
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == '"':
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierDoubleQuotedState
    elif data == "'":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierSingleQuotedState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
|
1550 |
+
|
1551 |
+
def betweenDoctypePublicAndSystemIdentifiersState(self):
    """Tokenizer state: between the DOCTYPE public and system identifiers.

    Skips whitespace; '>' emits the DOCTYPE; a quote starts the (initially
    empty) system identifier; EOF errors and emits an incorrect DOCTYPE;
    anything else is a parse error leading to the bogus-DOCTYPE state.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == '"':
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierDoubleQuotedState
    elif data == "'":
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierSingleQuotedState
    elif data is EOF:
        # Use an identity test against the EOF sentinel, matching every
        # sibling state (this method previously used `==`, the lone
        # inconsistency among the DOCTYPE states).
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
|
1576 |
+
|
1577 |
+
def afterDoctypeSystemKeywordState(self):
    """Tokenizer state: just after the "SYSTEM" keyword.

    Whitespace moves to before-system-identifier; a quote is a parse error
    but is re-processed there; EOF errors and emits; anything else is also
    re-processed in the before-system-identifier state.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeDoctypeSystemIdentifierState
    elif data in ("'", '"'):
        # Missing space before the quoted identifier: error, then reparse.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.stream.unget(data)
        self.state = self.beforeDoctypeSystemIdentifierState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.stream.unget(data)
        self.state = self.beforeDoctypeSystemIdentifierState
    return True
|
1596 |
+
|
1597 |
+
def beforeDoctypeSystemIdentifierState(self):
    """Tokenizer state: before the DOCTYPE system identifier.

    Skips whitespace; a quote starts the (initially empty) system id; '>'
    or EOF emit an incorrect DOCTYPE; anything else goes bogus.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == "\"":
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierDoubleQuotedState
    elif data == "'":
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierSingleQuotedState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
|
1625 |
+
|
1626 |
+
def doctypeSystemIdentifierDoubleQuotedState(self):
    """Tokenizer state: inside a double-quoted DOCTYPE system identifier.

    The closing '"' ends the identifier; NUL appends U+FFFD; '>' or EOF
    abort with an incorrect DOCTYPE; anything else extends the identifier.
    """
    data = self.stream.char()
    if data == "\"":
        self.state = self.afterDoctypeSystemIdentifierState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["systemId"] += "\uFFFD"
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["systemId"] += data
    return True
|
1649 |
+
|
1650 |
+
def doctypeSystemIdentifierSingleQuotedState(self):
    """Tokenizer state: inside a single-quoted DOCTYPE system identifier.

    Mirror of the double-quoted state with "'" as the terminator.
    """
    data = self.stream.char()
    if data == "'":
        self.state = self.afterDoctypeSystemIdentifierState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["systemId"] += "\uFFFD"
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["systemId"] += data
    return True
|
1673 |
+
|
1674 |
+
def afterDoctypeSystemIdentifierState(self):
    """Tokenizer state: after the DOCTYPE system identifier.

    Skips whitespace; '>' emits the DOCTYPE; EOF errors and emits; any
    other character is a parse error and goes to the bogus-DOCTYPE state
    (note: without marking the token incorrect, unlike earlier states).
    """
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.state = self.bogusDoctypeState
    return True
|
1692 |
+
|
1693 |
+
def bogusDoctypeState(self):
    """Tokenizer state: bogus DOCTYPE.

    Discards characters until '>' (emit) or EOF (unget the sentinel so the
    data state sees it, then emit).
    """
    data = self.stream.char()
    if data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        # XXX EMIT
        self.stream.unget(data)
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Everything else inside a bogus DOCTYPE is silently dropped.
        pass
    return True
|
1706 |
+
|
1707 |
+
def cdataSectionState(self):
    """Tokenizer state: inside a CDATA section.

    Accumulates text until the "]]>" terminator or EOF, strips the
    trailing "]]", replaces NUL with U+FFFD (emitting one parse error per
    occurrence), and emits a single Characters token.
    """
    data = []
    while True:
        # Gather up to the next "]", then up to the next ">", so the loop
        # only has to inspect one candidate terminator character at a time.
        data.append(self.stream.charsUntil("]"))
        data.append(self.stream.charsUntil(">"))
        char = self.stream.char()
        if char == EOF:
            break
        else:
            assert char == ">"
            if data[-1][-2:] == "]]":
                # Found the "]]>" terminator: drop the trailing "]]".
                data[-1] = data[-1][:-2]
                break
            else:
                # A ">" not preceded by "]]" is ordinary CDATA content.
                data.append(char)

    data = "".join(data)  # pylint:disable=redefined-variable-type
    # Deal with null here rather than in the parser
    nullCount = data.count("\u0000")
    if nullCount > 0:
        for _ in range(nullCount):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
        data = data.replace("\u0000", "\uFFFD")
    if data:
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": data})
    self.state = self.dataState
    return True
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_trie/_base.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
try:
|
4 |
+
from collections.abc import Mapping
|
5 |
+
except ImportError: # Python 2.7
|
6 |
+
from collections import Mapping
|
7 |
+
|
8 |
+
|
9 |
+
class Trie(Mapping):
    """Abstract base class for tries.

    Subclasses supply the Mapping protocol (``__getitem__``, ``__iter__``,
    ``__len__``); this base adds prefix-oriented helpers on top of it.
    """

    def keys(self, prefix=None):
        # Return all keys, or only those starting with *prefix*, as a set.
        # pylint:disable=arguments-differ
        keys = super(Trie, self).keys()

        if prefix is None:
            return set(keys)

        return {x for x in keys if x.startswith(prefix)}

    def has_keys_with_prefix(self, prefix):
        # True if any stored key starts with *prefix*.
        # NOTE(review): linear scan over all keys — fine for the small
        # entity tables this is used with, but O(n) per call.
        for key in self.keys():
            if key.startswith(prefix):
                return True

        return False

    def longest_prefix(self, prefix):
        # Longest stored key that is a prefix of *prefix*; raises KeyError
        # if none (including the empty case).
        if prefix in self:
            return prefix

        # Try successively shorter prefixes of *prefix*.
        for i in range(1, len(prefix) + 1):
            if prefix[:-i] in self:
                return prefix[:-i]

        raise KeyError(prefix)

    def longest_prefix_item(self, prefix):
        # (key, value) pair for the longest matching stored prefix.
        lprefix = self.longest_prefix(prefix)
        return (lprefix, self[lprefix])
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/_utils.py
ADDED
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from types import ModuleType
|
4 |
+
|
5 |
+
try:
|
6 |
+
from collections.abc import Mapping
|
7 |
+
except ImportError:
|
8 |
+
from collections import Mapping
|
9 |
+
|
10 |
+
from pip._vendor.six import text_type, PY3
|
11 |
+
|
12 |
+
if PY3:
|
13 |
+
import xml.etree.ElementTree as default_etree
|
14 |
+
else:
|
15 |
+
try:
|
16 |
+
import xml.etree.cElementTree as default_etree
|
17 |
+
except ImportError:
|
18 |
+
import xml.etree.ElementTree as default_etree
|
19 |
+
|
20 |
+
|
21 |
+
__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
|
22 |
+
"surrogatePairToCodepoint", "moduleFactoryFactory",
|
23 |
+
"supports_lone_surrogates"]
|
24 |
+
|
25 |
+
|
26 |
+
# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be
|
27 |
+
# caught by the below test. In general this would be any platform
|
28 |
+
# using UTF-16 as its encoding of unicode strings, such as
|
29 |
+
# Jython. This is because UTF-16 itself is based on the use of such
|
30 |
+
# surrogates, and there is no mechanism to further escape such
|
31 |
+
# escapes.
|
32 |
+
try:
    # eval keeps the \uD800 escape out of the source text itself, so this
    # file still parses on interpreters that reject lone surrogates.
    _x = eval('"\\uD800"')  # pylint:disable=eval-used
    if not isinstance(_x, text_type):
        # We need this with u"" because of http://bugs.jython.org/issue2039
        _x = eval('u"\\uD800"')  # pylint:disable=eval-used
        assert isinstance(_x, text_type)
except Exception:
    # Constructing the lone surrogate failed: this platform (e.g. a
    # UTF-16-native interpreter) cannot represent them.
    supports_lone_surrogates = False
else:
    supports_lone_surrogates = True
|
42 |
+
|
43 |
+
|
44 |
+
class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

        md = MethodDispatcher({("foo", "bar"):"baz"})
        md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """

    def __init__(self, items=()):
        # Flatten collection-valued keys into one entry per member.
        entries = []
        for key, value in items:
            if isinstance(key, (list, tuple, frozenset, set)):
                entries.extend((member, value) for member in key)
            else:
                entries.append((key, value))
        dict.__init__(self, entries)
        # Duplicate keys would silently shadow one another; refuse them.
        assert len(self) == len(entries)
        self.default = None

    def __getitem__(self, key):
        # Missing keys fall back to self.default instead of raising.
        return dict.get(self, key, self.default)

    def __get__(self, instance, owner=None):
        # Descriptor protocol: accessed through a class, hand back a view
        # whose values are bound to *instance*.
        return BoundMethodDispatcher(instance, self)
|
74 |
+
|
75 |
+
|
76 |
+
class BoundMethodDispatcher(Mapping):
    """Wraps a MethodDispatcher, binding its return values to `instance`"""

    def __init__(self, instance, dispatcher):
        self.instance = instance
        self.dispatcher = dispatcher

    def __getitem__(self, key):
        # see https://docs.python.org/3/reference/datamodel.html#object.__get__
        # on a function, __get__ is used to bind a function to an instance as a bound method
        unbound = self.dispatcher[key]
        return unbound.__get__(self.instance)

    def get(self, key, default):
        # Only bind when the key is actually present; otherwise hand back
        # the caller's default unbound.
        if key not in self.dispatcher:
            return default
        return self[key]

    def __iter__(self):
        return iter(self.dispatcher)

    def __len__(self):
        return len(self.dispatcher)

    def __contains__(self, key):
        return key in self.dispatcher
|
101 |
+
|
102 |
+
|
103 |
+
# Some utility functions to deal with weirdness around UCS2 vs UCS4
|
104 |
+
# python builds
|
105 |
+
|
106 |
+
def isSurrogatePair(data):
    """Return True if *data* is exactly a UTF-16 high+low surrogate pair."""
    if len(data) != 2:
        return False
    high = ord(data[0])
    low = ord(data[1])
    return 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF
|
110 |
+
|
111 |
+
|
112 |
+
def surrogatePairToCodepoint(data):
    """Combine a high/low surrogate pair into its astral code point."""
    # high surrogate carries the top 10 bits, low surrogate the bottom 10
    high_bits = ord(data[0]) - 0xD800
    low_bits = ord(data[1]) - 0xDC00
    return 0x10000 + (high_bits << 10) + low_bits
|
116 |
+
|
117 |
+
# Module Factory Factory (no, this isn't Java, I know)
|
118 |
+
# Here to stop this being duplicated all over the place.
|
119 |
+
|
120 |
+
|
121 |
+
def moduleFactoryFactory(factory):
    """Return a memoizing wrapper around *factory*.

    *factory(baseModule, *args, **kwargs)* must return a dict of names; the
    wrapper packs that dict into a fresh module object and caches it keyed
    on (module name, args, kwargs), so repeated calls with the same
    arguments return the same module instance.
    """
    moduleCache = {}

    def moduleFactory(baseModule, *args, **kwargs):
        # Module names must be str; on Python 2 __name__ may be bytes.
        if isinstance(ModuleType.__name__, type("")):
            name = "_%s_factory" % baseModule.__name__
        else:
            name = b"_%s_factory" % baseModule.__name__

        kwargs_tuple = tuple(kwargs.items())

        try:
            return moduleCache[name][args][kwargs_tuple]
        except KeyError:
            mod = ModuleType(name)
            objs = factory(baseModule, *args, **kwargs)
            mod.__dict__.update(objs)
            # Bug fix: the original tested the string literals "name" and
            # "args" for membership, so the per-name cache level was rebuilt
            # on every miss, discarding previously cached sibling entries.
            if name not in moduleCache:
                moduleCache[name] = {}
            if args not in moduleCache[name]:
                moduleCache[name][args] = {}
            moduleCache[name][args][kwargs_tuple] = mod
            return mod

    return moduleFactory
|
148 |
+
|
149 |
+
|
150 |
+
def memoize(func):
    """Cache *func*'s results, keyed on its positional and keyword args.

    Arguments must be hashable. NOTE(review): the key includes keyword
    order (``tuple(kwargs.items())``), so ``f(a=1, b=2)`` and
    ``f(b=2, a=1)`` cache separately — same as the original.
    """
    results = {}

    def wrapped(*args, **kwargs):
        key = (tuple(args), tuple(kwargs.items()))
        try:
            return results[key]
        except KeyError:
            value = func(*args, **kwargs)
            results[key] = value
            return value

    return wrapped
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/constants.py
ADDED
@@ -0,0 +1,2946 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
import string
|
4 |
+
|
# Sentinel returned by the input stream when the end of input is reached.
EOF = None

# Parse-error code -> human-readable message. Messages containing
# %(name)s-style placeholders are filled in with token data when the
# error is reported.
E = {
    "null-character":
        "Null character in input stream, replaced with U+FFFD.",
    "invalid-codepoint":
        "Invalid codepoint in stream.",
    "incorrectly-placed-solidus":
        "Solidus (/) incorrectly placed in tag.",
    "incorrect-cr-newline-entity":
        "Incorrect CR newline entity, replaced with LF.",
    "illegal-windows-1252-entity":
        "Entity used with illegal number (windows-1252 reference).",
    "cant-convert-numeric-entity":
        "Numeric entity couldn't be converted to character (codepoint U+%(charAsInt)08x).",
    "illegal-codepoint-for-numeric-entity":
        "Numeric entity represents an illegal codepoint: U+%(charAsInt)08x.",
    "numeric-entity-without-semicolon":
        "Numeric entity didn't end with ';'.",
    "expected-numeric-entity-but-got-eof":
        "Numeric entity expected. Got end of file instead.",
    "expected-numeric-entity":
        "Numeric entity expected but none found.",
    "named-entity-without-semicolon":
        "Named entity didn't end with ';'.",
    "expected-named-entity":
        "Named entity expected. Got none.",
    "attributes-in-end-tag":
        "End tag contains unexpected attributes.",
    "self-closing-flag-on-end-tag":
        "End tag contains unexpected self-closing flag.",
    "expected-tag-name-but-got-right-bracket":
        "Expected tag name. Got '>' instead.",
    "expected-tag-name-but-got-question-mark":
        "Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)",
    "expected-tag-name":
        "Expected tag name. Got something else instead",
    "expected-closing-tag-but-got-right-bracket":
        "Expected closing tag. Got '>' instead. Ignoring '</>'.",
    "expected-closing-tag-but-got-eof":
        "Expected closing tag. Unexpected end of file.",
    "expected-closing-tag-but-got-char":
        "Expected closing tag. Unexpected character '%(data)s' found.",
    "eof-in-tag-name":
        "Unexpected end of file in the tag name.",
    "expected-attribute-name-but-got-eof":
        "Unexpected end of file. Expected attribute name instead.",
    "eof-in-attribute-name":
        "Unexpected end of file in attribute name.",
    "invalid-character-in-attribute-name":
        "Invalid character in attribute name",
    "duplicate-attribute":
        "Dropped duplicate attribute on tag.",
    "expected-end-of-tag-name-but-got-eof":
        "Unexpected end of file. Expected = or end of tag.",
    "expected-attribute-value-but-got-eof":
        "Unexpected end of file. Expected attribute value.",
    "expected-attribute-value-but-got-right-bracket":
        "Expected attribute value. Got '>' instead.",
    "equals-in-unquoted-attribute-value":
        "Unexpected = in unquoted attribute",
    "unexpected-character-in-unquoted-attribute-value":
        "Unexpected character in unquoted attribute",
    "invalid-character-after-attribute-name":
        "Unexpected character after attribute name.",
    "unexpected-character-after-attribute-value":
        "Unexpected character after attribute value.",
    "eof-in-attribute-value-double-quote":
        "Unexpected end of file in attribute value (\").",
    "eof-in-attribute-value-single-quote":
        "Unexpected end of file in attribute value (').",
    "eof-in-attribute-value-no-quotes":
        "Unexpected end of file in attribute value.",
    "unexpected-EOF-after-solidus-in-tag":
        "Unexpected end of file in tag. Expected >",
    "unexpected-character-after-solidus-in-tag":
        "Unexpected character after / in tag. Expected >",
    "expected-dashes-or-doctype":
        "Expected '--' or 'DOCTYPE'. Not found.",
    "unexpected-bang-after-double-dash-in-comment":
        "Unexpected ! after -- in comment",
    "unexpected-space-after-double-dash-in-comment":
        "Unexpected space after -- in comment",
    "incorrect-comment":
        "Incorrect comment.",
    "eof-in-comment":
        "Unexpected end of file in comment.",
    "eof-in-comment-end-dash":
        "Unexpected end of file in comment (-)",
    "unexpected-dash-after-double-dash-in-comment":
        "Unexpected '-' after '--' found in comment.",
    "eof-in-comment-double-dash":
        "Unexpected end of file in comment (--).",
    "eof-in-comment-end-space-state":
        "Unexpected end of file in comment.",
    "eof-in-comment-end-bang-state":
        "Unexpected end of file in comment.",
    "unexpected-char-in-comment":
        "Unexpected character in comment found.",
    "need-space-after-doctype":
        "No space after literal string 'DOCTYPE'.",
    "expected-doctype-name-but-got-right-bracket":
        "Unexpected > character. Expected DOCTYPE name.",
    "expected-doctype-name-but-got-eof":
        "Unexpected end of file. Expected DOCTYPE name.",
    "eof-in-doctype-name":
        "Unexpected end of file in DOCTYPE name.",
    "eof-in-doctype":
        "Unexpected end of file in DOCTYPE.",
    "expected-space-or-right-bracket-in-doctype":
        "Expected space or '>'. Got '%(data)s'",
    "unexpected-end-of-doctype":
        "Unexpected end of DOCTYPE.",
    "unexpected-char-in-doctype":
        "Unexpected character in DOCTYPE.",
    "eof-in-innerhtml":
        "XXX innerHTML EOF",
    "unexpected-doctype":
        "Unexpected DOCTYPE. Ignored.",
    "non-html-root":
        "html needs to be the first start tag.",
    "expected-doctype-but-got-eof":
        "Unexpected End of file. Expected DOCTYPE.",
    "unknown-doctype":
        "Erroneous DOCTYPE.",
    "expected-doctype-but-got-chars":
        "Unexpected non-space characters. Expected DOCTYPE.",
    "expected-doctype-but-got-start-tag":
        "Unexpected start tag (%(name)s). Expected DOCTYPE.",
    "expected-doctype-but-got-end-tag":
        "Unexpected end tag (%(name)s). Expected DOCTYPE.",
    "end-tag-after-implied-root":
        "Unexpected end tag (%(name)s) after the (implied) root element.",
    "expected-named-closing-tag-but-got-eof":
        "Unexpected end of file. Expected end tag (%(name)s).",
    "two-heads-are-not-better-than-one":
        "Unexpected start tag head in existing head. Ignored.",
    "unexpected-end-tag":
        "Unexpected end tag (%(name)s). Ignored.",
    "unexpected-start-tag-out-of-my-head":
        "Unexpected start tag (%(name)s) that can be in head. Moved.",
    "unexpected-start-tag":
        "Unexpected start tag (%(name)s).",
    "missing-end-tag":
        "Missing end tag (%(name)s).",
    "missing-end-tags":
        "Missing end tags (%(name)s).",
    "unexpected-start-tag-implies-end-tag":
        "Unexpected start tag (%(startName)s) implies end tag (%(endName)s).",
    "unexpected-start-tag-treated-as":
        "Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
    "deprecated-tag":
        "Unexpected start tag %(name)s. Don't use it!",
    "unexpected-start-tag-ignored":
        "Unexpected start tag %(name)s. Ignored.",
    "expected-one-end-tag-but-got-another":
        "Unexpected end tag (%(gotName)s). Missing end tag (%(expectedName)s).",
    "end-tag-too-early":
        "End tag (%(name)s) seen too early. Expected other end tag.",
    "end-tag-too-early-named":
        "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
    "end-tag-too-early-ignored":
        "End tag (%(name)s) seen too early. Ignored.",
    "adoption-agency-1.1":
        "End tag (%(name)s) violates step 1, paragraph 1 of the adoption agency algorithm.",
    "adoption-agency-1.2":
        "End tag (%(name)s) violates step 1, paragraph 2 of the adoption agency algorithm.",
    "adoption-agency-1.3":
        "End tag (%(name)s) violates step 1, paragraph 3 of the adoption agency algorithm.",
    "adoption-agency-4.4":
        "End tag (%(name)s) violates step 4, paragraph 4 of the adoption agency algorithm.",
    "unexpected-end-tag-treated-as":
        "Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
    "no-end-tag":
        "This element (%(name)s) has no end tag.",
    "unexpected-implied-end-tag-in-table":
        "Unexpected implied end tag (%(name)s) in the table phase.",
    "unexpected-implied-end-tag-in-table-body":
        "Unexpected implied end tag (%(name)s) in the table body phase.",
    "unexpected-char-implies-table-voodoo":
        "Unexpected non-space characters in table context caused voodoo mode.",
    "unexpected-hidden-input-in-table":
        "Unexpected input with type hidden in table context.",
    "unexpected-form-in-table":
        "Unexpected form in table context.",
    "unexpected-start-tag-implies-table-voodoo":
        "Unexpected start tag (%(name)s) in table context caused voodoo mode.",
    "unexpected-end-tag-implies-table-voodoo":
        "Unexpected end tag (%(name)s) in table context caused voodoo mode.",
    "unexpected-cell-in-table-body":
        "Unexpected table cell start tag (%(name)s) in the table body phase.",
    "unexpected-cell-end-tag":
        "Got table cell end tag (%(name)s) while required end tags are missing.",
    "unexpected-end-tag-in-table-body":
        "Unexpected end tag (%(name)s) in the table body phase. Ignored.",
    "unexpected-implied-end-tag-in-table-row":
        "Unexpected implied end tag (%(name)s) in the table row phase.",
    "unexpected-end-tag-in-table-row":
        "Unexpected end tag (%(name)s) in the table row phase. Ignored.",
    "unexpected-select-in-select":
        "Unexpected select start tag in the select phase treated as select end tag.",
    "unexpected-input-in-select":
        "Unexpected input start tag in the select phase.",
    # NOTE(review): the unbalanced "(" below is reproduced from upstream
    # html5lib verbatim; it is a runtime message, not a typo to fix here.
    "unexpected-start-tag-in-select":
        "Unexpected start tag token (%(name)s in the select phase. Ignored.",
    "unexpected-end-tag-in-select":
        "Unexpected end tag (%(name)s) in the select phase. Ignored.",
    "unexpected-table-element-start-tag-in-select-in-table":
        "Unexpected table element start tag (%(name)s) in the select in table phase.",
    "unexpected-table-element-end-tag-in-select-in-table":
        "Unexpected table element end tag (%(name)s) in the select in table phase.",
    "unexpected-char-after-body":
        "Unexpected non-space characters in the after body phase.",
    "unexpected-start-tag-after-body":
        "Unexpected start tag token (%(name)s) in the after body phase.",
    "unexpected-end-tag-after-body":
        "Unexpected end tag token (%(name)s) in the after body phase.",
    "unexpected-char-in-frameset":
        "Unexpected characters in the frameset phase. Characters ignored.",
    "unexpected-start-tag-in-frameset":
        "Unexpected start tag token (%(name)s) in the frameset phase. Ignored.",
    "unexpected-frameset-in-frameset-innerhtml":
        "Unexpected end tag token (frameset) in the frameset phase (innerHTML).",
    "unexpected-end-tag-in-frameset":
        "Unexpected end tag token (%(name)s) in the frameset phase. Ignored.",
    "unexpected-char-after-frameset":
        "Unexpected non-space characters in the after frameset phase. Ignored.",
    "unexpected-start-tag-after-frameset":
        "Unexpected start tag (%(name)s) in the after frameset phase. Ignored.",
    "unexpected-end-tag-after-frameset":
        "Unexpected end tag (%(name)s) in the after frameset phase. Ignored.",
    "unexpected-end-tag-after-body-innerhtml":
        "Unexpected end tag after body(innerHtml)",
    "expected-eof-but-got-char":
        "Unexpected non-space characters. Expected end of file.",
    "expected-eof-but-got-start-tag":
        "Unexpected start tag (%(name)s). Expected end of file.",
    "expected-eof-but-got-end-tag":
        "Unexpected end tag (%(name)s). Expected end of file.",
    "eof-in-table":
        "Unexpected end of file. Expected table content.",
    "eof-in-select":
        "Unexpected end of file. Expected select content.",
    "eof-in-frameset":
        "Unexpected end of file. Expected frameset content.",
    "eof-in-script-in-script":
        "Unexpected end of file. Expected script content.",
    "eof-in-foreign-lands":
        "Unexpected end of file. Expected foreign content",
    "non-void-element-with-trailing-solidus":
        "Trailing solidus not allowed on element %(name)s",
    "unexpected-html-element-in-foreign-content":
        "Element %(name)s not allowed in a non-html context",
    "unexpected-end-tag-before-html":
        "Unexpected end tag (%(name)s) before html.",
    "unexpected-inhead-noscript-tag":
        "Element %(name)s not allowed in a inhead-noscript context",
    "eof-in-head-noscript":
        "Unexpected end of file. Expected inhead-noscript content",
    "char-in-head-noscript":
        "Unexpected non-space character. Expected inhead-noscript content",
    "XXX-undefined-error":
        "Undefined error (this sucks and should be fixed)",
}
|
295 |
+
|
296 |
+
namespaces = {
|
297 |
+
"html": "http://www.w3.org/1999/xhtml",
|
298 |
+
"mathml": "http://www.w3.org/1998/Math/MathML",
|
299 |
+
"svg": "http://www.w3.org/2000/svg",
|
300 |
+
"xlink": "http://www.w3.org/1999/xlink",
|
301 |
+
"xml": "http://www.w3.org/XML/1998/namespace",
|
302 |
+
"xmlns": "http://www.w3.org/2000/xmlns/"
|
303 |
+
}
|
304 |
+
|
305 |
+
scopingElements = frozenset([
|
306 |
+
(namespaces["html"], "applet"),
|
307 |
+
(namespaces["html"], "caption"),
|
308 |
+
(namespaces["html"], "html"),
|
309 |
+
(namespaces["html"], "marquee"),
|
310 |
+
(namespaces["html"], "object"),
|
311 |
+
(namespaces["html"], "table"),
|
312 |
+
(namespaces["html"], "td"),
|
313 |
+
(namespaces["html"], "th"),
|
314 |
+
(namespaces["mathml"], "mi"),
|
315 |
+
(namespaces["mathml"], "mo"),
|
316 |
+
(namespaces["mathml"], "mn"),
|
317 |
+
(namespaces["mathml"], "ms"),
|
318 |
+
(namespaces["mathml"], "mtext"),
|
319 |
+
(namespaces["mathml"], "annotation-xml"),
|
320 |
+
(namespaces["svg"], "foreignObject"),
|
321 |
+
(namespaces["svg"], "desc"),
|
322 |
+
(namespaces["svg"], "title"),
|
323 |
+
])
|
324 |
+
|
325 |
+
formattingElements = frozenset([
|
326 |
+
(namespaces["html"], "a"),
|
327 |
+
(namespaces["html"], "b"),
|
328 |
+
(namespaces["html"], "big"),
|
329 |
+
(namespaces["html"], "code"),
|
330 |
+
(namespaces["html"], "em"),
|
331 |
+
(namespaces["html"], "font"),
|
332 |
+
(namespaces["html"], "i"),
|
333 |
+
(namespaces["html"], "nobr"),
|
334 |
+
(namespaces["html"], "s"),
|
335 |
+
(namespaces["html"], "small"),
|
336 |
+
(namespaces["html"], "strike"),
|
337 |
+
(namespaces["html"], "strong"),
|
338 |
+
(namespaces["html"], "tt"),
|
339 |
+
(namespaces["html"], "u")
|
340 |
+
])
|
341 |
+
|
342 |
+
specialElements = frozenset([
|
343 |
+
(namespaces["html"], "address"),
|
344 |
+
(namespaces["html"], "applet"),
|
345 |
+
(namespaces["html"], "area"),
|
346 |
+
(namespaces["html"], "article"),
|
347 |
+
(namespaces["html"], "aside"),
|
348 |
+
(namespaces["html"], "base"),
|
349 |
+
(namespaces["html"], "basefont"),
|
350 |
+
(namespaces["html"], "bgsound"),
|
351 |
+
(namespaces["html"], "blockquote"),
|
352 |
+
(namespaces["html"], "body"),
|
353 |
+
(namespaces["html"], "br"),
|
354 |
+
(namespaces["html"], "button"),
|
355 |
+
(namespaces["html"], "caption"),
|
356 |
+
(namespaces["html"], "center"),
|
357 |
+
(namespaces["html"], "col"),
|
358 |
+
(namespaces["html"], "colgroup"),
|
359 |
+
(namespaces["html"], "command"),
|
360 |
+
(namespaces["html"], "dd"),
|
361 |
+
(namespaces["html"], "details"),
|
362 |
+
(namespaces["html"], "dir"),
|
363 |
+
(namespaces["html"], "div"),
|
364 |
+
(namespaces["html"], "dl"),
|
365 |
+
(namespaces["html"], "dt"),
|
366 |
+
(namespaces["html"], "embed"),
|
367 |
+
(namespaces["html"], "fieldset"),
|
368 |
+
(namespaces["html"], "figure"),
|
369 |
+
(namespaces["html"], "footer"),
|
370 |
+
(namespaces["html"], "form"),
|
371 |
+
(namespaces["html"], "frame"),
|
372 |
+
(namespaces["html"], "frameset"),
|
373 |
+
(namespaces["html"], "h1"),
|
374 |
+
(namespaces["html"], "h2"),
|
375 |
+
(namespaces["html"], "h3"),
|
376 |
+
(namespaces["html"], "h4"),
|
377 |
+
(namespaces["html"], "h5"),
|
378 |
+
(namespaces["html"], "h6"),
|
379 |
+
(namespaces["html"], "head"),
|
380 |
+
(namespaces["html"], "header"),
|
381 |
+
(namespaces["html"], "hr"),
|
382 |
+
(namespaces["html"], "html"),
|
383 |
+
(namespaces["html"], "iframe"),
|
384 |
+
# Note that image is commented out in the spec as "this isn't an
|
385 |
+
# element that can end up on the stack, so it doesn't matter,"
|
386 |
+
(namespaces["html"], "image"),
|
387 |
+
(namespaces["html"], "img"),
|
388 |
+
(namespaces["html"], "input"),
|
389 |
+
(namespaces["html"], "isindex"),
|
390 |
+
(namespaces["html"], "li"),
|
391 |
+
(namespaces["html"], "link"),
|
392 |
+
(namespaces["html"], "listing"),
|
393 |
+
(namespaces["html"], "marquee"),
|
394 |
+
(namespaces["html"], "menu"),
|
395 |
+
(namespaces["html"], "meta"),
|
396 |
+
(namespaces["html"], "nav"),
|
397 |
+
(namespaces["html"], "noembed"),
|
398 |
+
(namespaces["html"], "noframes"),
|
399 |
+
(namespaces["html"], "noscript"),
|
400 |
+
(namespaces["html"], "object"),
|
401 |
+
(namespaces["html"], "ol"),
|
402 |
+
(namespaces["html"], "p"),
|
403 |
+
(namespaces["html"], "param"),
|
404 |
+
(namespaces["html"], "plaintext"),
|
405 |
+
(namespaces["html"], "pre"),
|
406 |
+
(namespaces["html"], "script"),
|
407 |
+
(namespaces["html"], "section"),
|
408 |
+
(namespaces["html"], "select"),
|
409 |
+
(namespaces["html"], "style"),
|
410 |
+
(namespaces["html"], "table"),
|
411 |
+
(namespaces["html"], "tbody"),
|
412 |
+
(namespaces["html"], "td"),
|
413 |
+
(namespaces["html"], "textarea"),
|
414 |
+
(namespaces["html"], "tfoot"),
|
415 |
+
(namespaces["html"], "th"),
|
416 |
+
(namespaces["html"], "thead"),
|
417 |
+
(namespaces["html"], "title"),
|
418 |
+
(namespaces["html"], "tr"),
|
419 |
+
(namespaces["html"], "ul"),
|
420 |
+
(namespaces["html"], "wbr"),
|
421 |
+
(namespaces["html"], "xmp"),
|
422 |
+
(namespaces["svg"], "foreignObject")
|
423 |
+
])
|
424 |
+
|
425 |
+
htmlIntegrationPointElements = frozenset([
|
426 |
+
(namespaces["mathml"], "annotation-xml"),
|
427 |
+
(namespaces["svg"], "foreignObject"),
|
428 |
+
(namespaces["svg"], "desc"),
|
429 |
+
(namespaces["svg"], "title")
|
430 |
+
])
|
431 |
+
|
432 |
+
mathmlTextIntegrationPointElements = frozenset([
|
433 |
+
(namespaces["mathml"], "mi"),
|
434 |
+
(namespaces["mathml"], "mo"),
|
435 |
+
(namespaces["mathml"], "mn"),
|
436 |
+
(namespaces["mathml"], "ms"),
|
437 |
+
(namespaces["mathml"], "mtext")
|
438 |
+
])
|
439 |
+
|
440 |
+
adjustSVGAttributes = {
|
441 |
+
"attributename": "attributeName",
|
442 |
+
"attributetype": "attributeType",
|
443 |
+
"basefrequency": "baseFrequency",
|
444 |
+
"baseprofile": "baseProfile",
|
445 |
+
"calcmode": "calcMode",
|
446 |
+
"clippathunits": "clipPathUnits",
|
447 |
+
"contentscripttype": "contentScriptType",
|
448 |
+
"contentstyletype": "contentStyleType",
|
449 |
+
"diffuseconstant": "diffuseConstant",
|
450 |
+
"edgemode": "edgeMode",
|
451 |
+
"externalresourcesrequired": "externalResourcesRequired",
|
452 |
+
"filterres": "filterRes",
|
453 |
+
"filterunits": "filterUnits",
|
454 |
+
"glyphref": "glyphRef",
|
455 |
+
"gradienttransform": "gradientTransform",
|
456 |
+
"gradientunits": "gradientUnits",
|
457 |
+
"kernelmatrix": "kernelMatrix",
|
458 |
+
"kernelunitlength": "kernelUnitLength",
|
459 |
+
"keypoints": "keyPoints",
|
460 |
+
"keysplines": "keySplines",
|
461 |
+
"keytimes": "keyTimes",
|
462 |
+
"lengthadjust": "lengthAdjust",
|
463 |
+
"limitingconeangle": "limitingConeAngle",
|
464 |
+
"markerheight": "markerHeight",
|
465 |
+
"markerunits": "markerUnits",
|
466 |
+
"markerwidth": "markerWidth",
|
467 |
+
"maskcontentunits": "maskContentUnits",
|
468 |
+
"maskunits": "maskUnits",
|
469 |
+
"numoctaves": "numOctaves",
|
470 |
+
"pathlength": "pathLength",
|
471 |
+
"patterncontentunits": "patternContentUnits",
|
472 |
+
"patterntransform": "patternTransform",
|
473 |
+
"patternunits": "patternUnits",
|
474 |
+
"pointsatx": "pointsAtX",
|
475 |
+
"pointsaty": "pointsAtY",
|
476 |
+
"pointsatz": "pointsAtZ",
|
477 |
+
"preservealpha": "preserveAlpha",
|
478 |
+
"preserveaspectratio": "preserveAspectRatio",
|
479 |
+
"primitiveunits": "primitiveUnits",
|
480 |
+
"refx": "refX",
|
481 |
+
"refy": "refY",
|
482 |
+
"repeatcount": "repeatCount",
|
483 |
+
"repeatdur": "repeatDur",
|
484 |
+
"requiredextensions": "requiredExtensions",
|
485 |
+
"requiredfeatures": "requiredFeatures",
|
486 |
+
"specularconstant": "specularConstant",
|
487 |
+
"specularexponent": "specularExponent",
|
488 |
+
"spreadmethod": "spreadMethod",
|
489 |
+
"startoffset": "startOffset",
|
490 |
+
"stddeviation": "stdDeviation",
|
491 |
+
"stitchtiles": "stitchTiles",
|
492 |
+
"surfacescale": "surfaceScale",
|
493 |
+
"systemlanguage": "systemLanguage",
|
494 |
+
"tablevalues": "tableValues",
|
495 |
+
"targetx": "targetX",
|
496 |
+
"targety": "targetY",
|
497 |
+
"textlength": "textLength",
|
498 |
+
"viewbox": "viewBox",
|
499 |
+
"viewtarget": "viewTarget",
|
500 |
+
"xchannelselector": "xChannelSelector",
|
501 |
+
"ychannelselector": "yChannelSelector",
|
502 |
+
"zoomandpan": "zoomAndPan"
|
503 |
+
}
|
504 |
+
|
505 |
+
adjustMathMLAttributes = {"definitionurl": "definitionURL"}
|
506 |
+
|
507 |
+
adjustForeignAttributes = {
|
508 |
+
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
|
509 |
+
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
|
510 |
+
"xlink:href": ("xlink", "href", namespaces["xlink"]),
|
511 |
+
"xlink:role": ("xlink", "role", namespaces["xlink"]),
|
512 |
+
"xlink:show": ("xlink", "show", namespaces["xlink"]),
|
513 |
+
"xlink:title": ("xlink", "title", namespaces["xlink"]),
|
514 |
+
"xlink:type": ("xlink", "type", namespaces["xlink"]),
|
515 |
+
"xml:base": ("xml", "base", namespaces["xml"]),
|
516 |
+
"xml:lang": ("xml", "lang", namespaces["xml"]),
|
517 |
+
"xml:space": ("xml", "space", namespaces["xml"]),
|
518 |
+
"xmlns": (None, "xmlns", namespaces["xmlns"]),
|
519 |
+
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
|
520 |
+
}
|
521 |
+
|
522 |
+
unadjustForeignAttributes = {(ns, local): qname for qname, (prefix, local, ns) in
|
523 |
+
adjustForeignAttributes.items()}
|
# The five HTML "space characters" (tab, LF, FF, space, CR).
spaceCharacters = frozenset("\t\n\u000C \r")

# Elements that switch the parser into table insertion mode.
tableInsertModeElements = frozenset(("table", "tbody", "tfoot", "thead", "tr"))

# Character-class sets used by the tokenizer.
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)

# Translation table mapping ASCII uppercase code points to lowercase,
# suitable for str.translate().
asciiUpper2Lower = str.maketrans(string.ascii_uppercase, string.ascii_lowercase)

# Heading elements need to be ordered
headingElements = ("h1", "h2", "h3", "h4", "h5", "h6")
|
# Elements with no end tag and no content model.
voidElements = frozenset([
    "base", "command", "event-source", "link", "meta", "hr", "br", "img",
    "embed", "param", "area", "col", "input", "source", "track",
])

# Elements whose content is escapable raw text.
cdataElements = frozenset(["title", "textarea"])

# Elements whose content is raw text (no entity processing).
rcdataElements = frozenset([
    "style", "script", "xmp", "iframe", "noembed", "noframes", "noscript",
])

# Tag name -> attributes that are boolean on that tag ("" applies to all).
booleanAttributes = {
    "": frozenset(["irrelevant", "itemscope"]),
    "style": frozenset(["scoped"]),
    "img": frozenset(["ismap"]),
    "audio": frozenset(["autoplay", "controls"]),
    "video": frozenset(["autoplay", "controls"]),
    "script": frozenset(["defer", "async"]),
    "details": frozenset(["open"]),
    "datagrid": frozenset(["multiple", "disabled"]),
    "command": frozenset(["hidden", "disabled", "checked", "default"]),
    "hr": frozenset(["noshade"]),
    "menu": frozenset(["autosubmit"]),
    "fieldset": frozenset(["disabled", "readonly"]),
    "option": frozenset(["disabled", "readonly", "selected"]),
    "optgroup": frozenset(["disabled", "readonly"]),
    "button": frozenset(["disabled", "autofocus"]),
    "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
    "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
    "output": frozenset(["disabled", "readonly"]),
    "iframe": frozenset(["seamless"]),
}
|

# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
# Index i holds the code point that byte 0x80 + i decodes to under the
# windows-1252 encoding; undefined bytes map to U+FFFD (65533).
entitiesWindows1252 = (
    8364,   # 0x80 -> U+20AC EURO SIGN
    65533,  # 0x81 -> undefined
    8218,   # 0x82 -> U+201A SINGLE LOW-9 QUOTATION MARK
    402,    # 0x83 -> U+0192 LATIN SMALL LETTER F WITH HOOK
    8222,   # 0x84 -> U+201E DOUBLE LOW-9 QUOTATION MARK
    8230,   # 0x85 -> U+2026 HORIZONTAL ELLIPSIS
    8224,   # 0x86 -> U+2020 DAGGER
    8225,   # 0x87 -> U+2021 DOUBLE DAGGER
    710,    # 0x88 -> U+02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
    8240,   # 0x89 -> U+2030 PER MILLE SIGN
    352,    # 0x8A -> U+0160 LATIN CAPITAL LETTER S WITH CARON
    8249,   # 0x8B -> U+2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    338,    # 0x8C -> U+0152 LATIN CAPITAL LIGATURE OE
    65533,  # 0x8D -> undefined
    381,    # 0x8E -> U+017D LATIN CAPITAL LETTER Z WITH CARON
    65533,  # 0x8F -> undefined
    65533,  # 0x90 -> undefined
    8216,   # 0x91 -> U+2018 LEFT SINGLE QUOTATION MARK
    8217,   # 0x92 -> U+2019 RIGHT SINGLE QUOTATION MARK
    8220,   # 0x93 -> U+201C LEFT DOUBLE QUOTATION MARK
    8221,   # 0x94 -> U+201D RIGHT DOUBLE QUOTATION MARK
    8226,   # 0x95 -> U+2022 BULLET
    8211,   # 0x96 -> U+2013 EN DASH
    8212,   # 0x97 -> U+2014 EM DASH
    732,    # 0x98 -> U+02DC SMALL TILDE
    8482,   # 0x99 -> U+2122 TRADE MARK SIGN
    353,    # 0x9A -> U+0161 LATIN SMALL LETTER S WITH CARON
    8250,   # 0x9B -> U+203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    339,    # 0x9C -> U+0153 LATIN SMALL LIGATURE OE
    65533,  # 0x9D -> undefined
    382,    # 0x9E -> U+017E LATIN SMALL LETTER Z WITH CARON
    376,    # 0x9F -> U+0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)

# The five entity names predefined by XML itself (stored with their
# trailing semicolons).
xmlEntities = frozenset((
    'lt;',
    'gt;',
    'amp;',
    'apos;',
    'quot;',
))
649 |
+
|
650 |
+
entities = {
|
651 |
+
"AElig": "\xc6",
|
652 |
+
"AElig;": "\xc6",
|
653 |
+
"AMP": "&",
|
654 |
+
"AMP;": "&",
|
655 |
+
"Aacute": "\xc1",
|
656 |
+
"Aacute;": "\xc1",
|
657 |
+
"Abreve;": "\u0102",
|
658 |
+
"Acirc": "\xc2",
|
659 |
+
"Acirc;": "\xc2",
|
660 |
+
"Acy;": "\u0410",
|
661 |
+
"Afr;": "\U0001d504",
|
662 |
+
"Agrave": "\xc0",
|
663 |
+
"Agrave;": "\xc0",
|
664 |
+
"Alpha;": "\u0391",
|
665 |
+
"Amacr;": "\u0100",
|
666 |
+
"And;": "\u2a53",
|
667 |
+
"Aogon;": "\u0104",
|
668 |
+
"Aopf;": "\U0001d538",
|
669 |
+
"ApplyFunction;": "\u2061",
|
670 |
+
"Aring": "\xc5",
|
671 |
+
"Aring;": "\xc5",
|
672 |
+
"Ascr;": "\U0001d49c",
|
673 |
+
"Assign;": "\u2254",
|
674 |
+
"Atilde": "\xc3",
|
675 |
+
"Atilde;": "\xc3",
|
676 |
+
"Auml": "\xc4",
|
677 |
+
"Auml;": "\xc4",
|
678 |
+
"Backslash;": "\u2216",
|
679 |
+
"Barv;": "\u2ae7",
|
680 |
+
"Barwed;": "\u2306",
|
681 |
+
"Bcy;": "\u0411",
|
682 |
+
"Because;": "\u2235",
|
683 |
+
"Bernoullis;": "\u212c",
|
684 |
+
"Beta;": "\u0392",
|
685 |
+
"Bfr;": "\U0001d505",
|
686 |
+
"Bopf;": "\U0001d539",
|
687 |
+
"Breve;": "\u02d8",
|
688 |
+
"Bscr;": "\u212c",
|
689 |
+
"Bumpeq;": "\u224e",
|
690 |
+
"CHcy;": "\u0427",
|
691 |
+
"COPY": "\xa9",
|
692 |
+
"COPY;": "\xa9",
|
693 |
+
"Cacute;": "\u0106",
|
694 |
+
"Cap;": "\u22d2",
|
695 |
+
"CapitalDifferentialD;": "\u2145",
|
696 |
+
"Cayleys;": "\u212d",
|
697 |
+
"Ccaron;": "\u010c",
|
698 |
+
"Ccedil": "\xc7",
|
699 |
+
"Ccedil;": "\xc7",
|
700 |
+
"Ccirc;": "\u0108",
|
701 |
+
"Cconint;": "\u2230",
|
702 |
+
"Cdot;": "\u010a",
|
703 |
+
"Cedilla;": "\xb8",
|
704 |
+
"CenterDot;": "\xb7",
|
705 |
+
"Cfr;": "\u212d",
|
706 |
+
"Chi;": "\u03a7",
|
707 |
+
"CircleDot;": "\u2299",
|
708 |
+
"CircleMinus;": "\u2296",
|
709 |
+
"CirclePlus;": "\u2295",
|
710 |
+
"CircleTimes;": "\u2297",
|
711 |
+
"ClockwiseContourIntegral;": "\u2232",
|
712 |
+
"CloseCurlyDoubleQuote;": "\u201d",
|
713 |
+
"CloseCurlyQuote;": "\u2019",
|
714 |
+
"Colon;": "\u2237",
|
715 |
+
"Colone;": "\u2a74",
|
716 |
+
"Congruent;": "\u2261",
|
717 |
+
"Conint;": "\u222f",
|
718 |
+
"ContourIntegral;": "\u222e",
|
719 |
+
"Copf;": "\u2102",
|
720 |
+
"Coproduct;": "\u2210",
|
721 |
+
"CounterClockwiseContourIntegral;": "\u2233",
|
722 |
+
"Cross;": "\u2a2f",
|
723 |
+
"Cscr;": "\U0001d49e",
|
724 |
+
"Cup;": "\u22d3",
|
725 |
+
"CupCap;": "\u224d",
|
726 |
+
"DD;": "\u2145",
|
727 |
+
"DDotrahd;": "\u2911",
|
728 |
+
"DJcy;": "\u0402",
|
729 |
+
"DScy;": "\u0405",
|
730 |
+
"DZcy;": "\u040f",
|
731 |
+
"Dagger;": "\u2021",
|
732 |
+
"Darr;": "\u21a1",
|
733 |
+
"Dashv;": "\u2ae4",
|
734 |
+
"Dcaron;": "\u010e",
|
735 |
+
"Dcy;": "\u0414",
|
736 |
+
"Del;": "\u2207",
|
737 |
+
"Delta;": "\u0394",
|
738 |
+
"Dfr;": "\U0001d507",
|
739 |
+
"DiacriticalAcute;": "\xb4",
|
740 |
+
"DiacriticalDot;": "\u02d9",
|
741 |
+
"DiacriticalDoubleAcute;": "\u02dd",
|
742 |
+
"DiacriticalGrave;": "`",
|
743 |
+
"DiacriticalTilde;": "\u02dc",
|
744 |
+
"Diamond;": "\u22c4",
|
745 |
+
"DifferentialD;": "\u2146",
|
746 |
+
"Dopf;": "\U0001d53b",
|
747 |
+
"Dot;": "\xa8",
|
748 |
+
"DotDot;": "\u20dc",
|
749 |
+
"DotEqual;": "\u2250",
|
750 |
+
"DoubleContourIntegral;": "\u222f",
|
751 |
+
"DoubleDot;": "\xa8",
|
752 |
+
"DoubleDownArrow;": "\u21d3",
|
753 |
+
"DoubleLeftArrow;": "\u21d0",
|
754 |
+
"DoubleLeftRightArrow;": "\u21d4",
|
755 |
+
"DoubleLeftTee;": "\u2ae4",
|
756 |
+
"DoubleLongLeftArrow;": "\u27f8",
|
757 |
+
"DoubleLongLeftRightArrow;": "\u27fa",
|
758 |
+
"DoubleLongRightArrow;": "\u27f9",
|
759 |
+
"DoubleRightArrow;": "\u21d2",
|
760 |
+
"DoubleRightTee;": "\u22a8",
|
761 |
+
"DoubleUpArrow;": "\u21d1",
|
762 |
+
"DoubleUpDownArrow;": "\u21d5",
|
763 |
+
"DoubleVerticalBar;": "\u2225",
|
764 |
+
"DownArrow;": "\u2193",
|
765 |
+
"DownArrowBar;": "\u2913",
|
766 |
+
"DownArrowUpArrow;": "\u21f5",
|
767 |
+
"DownBreve;": "\u0311",
|
768 |
+
"DownLeftRightVector;": "\u2950",
|
769 |
+
"DownLeftTeeVector;": "\u295e",
|
770 |
+
"DownLeftVector;": "\u21bd",
|
771 |
+
"DownLeftVectorBar;": "\u2956",
|
772 |
+
"DownRightTeeVector;": "\u295f",
|
773 |
+
"DownRightVector;": "\u21c1",
|
774 |
+
"DownRightVectorBar;": "\u2957",
|
775 |
+
"DownTee;": "\u22a4",
|
776 |
+
"DownTeeArrow;": "\u21a7",
|
777 |
+
"Downarrow;": "\u21d3",
|
778 |
+
"Dscr;": "\U0001d49f",
|
779 |
+
"Dstrok;": "\u0110",
|
780 |
+
"ENG;": "\u014a",
|
781 |
+
"ETH": "\xd0",
|
782 |
+
"ETH;": "\xd0",
|
783 |
+
"Eacute": "\xc9",
|
784 |
+
"Eacute;": "\xc9",
|
785 |
+
"Ecaron;": "\u011a",
|
786 |
+
"Ecirc": "\xca",
|
787 |
+
"Ecirc;": "\xca",
|
788 |
+
"Ecy;": "\u042d",
|
789 |
+
"Edot;": "\u0116",
|
790 |
+
"Efr;": "\U0001d508",
|
791 |
+
"Egrave": "\xc8",
|
792 |
+
"Egrave;": "\xc8",
|
793 |
+
"Element;": "\u2208",
|
794 |
+
"Emacr;": "\u0112",
|
795 |
+
"EmptySmallSquare;": "\u25fb",
|
796 |
+
"EmptyVerySmallSquare;": "\u25ab",
|
797 |
+
"Eogon;": "\u0118",
|
798 |
+
"Eopf;": "\U0001d53c",
|
799 |
+
"Epsilon;": "\u0395",
|
800 |
+
"Equal;": "\u2a75",
|
801 |
+
"EqualTilde;": "\u2242",
|
802 |
+
"Equilibrium;": "\u21cc",
|
803 |
+
"Escr;": "\u2130",
|
804 |
+
"Esim;": "\u2a73",
|
805 |
+
"Eta;": "\u0397",
|
806 |
+
"Euml": "\xcb",
|
807 |
+
"Euml;": "\xcb",
|
808 |
+
"Exists;": "\u2203",
|
809 |
+
"ExponentialE;": "\u2147",
|
810 |
+
"Fcy;": "\u0424",
|
811 |
+
"Ffr;": "\U0001d509",
|
812 |
+
"FilledSmallSquare;": "\u25fc",
|
813 |
+
"FilledVerySmallSquare;": "\u25aa",
|
814 |
+
"Fopf;": "\U0001d53d",
|
815 |
+
"ForAll;": "\u2200",
|
816 |
+
"Fouriertrf;": "\u2131",
|
817 |
+
"Fscr;": "\u2131",
|
818 |
+
"GJcy;": "\u0403",
|
819 |
+
"GT": ">",
|
820 |
+
"GT;": ">",
|
821 |
+
"Gamma;": "\u0393",
|
822 |
+
"Gammad;": "\u03dc",
|
823 |
+
"Gbreve;": "\u011e",
|
824 |
+
"Gcedil;": "\u0122",
|
825 |
+
"Gcirc;": "\u011c",
|
826 |
+
"Gcy;": "\u0413",
|
827 |
+
"Gdot;": "\u0120",
|
828 |
+
"Gfr;": "\U0001d50a",
|
829 |
+
"Gg;": "\u22d9",
|
830 |
+
"Gopf;": "\U0001d53e",
|
831 |
+
"GreaterEqual;": "\u2265",
|
832 |
+
"GreaterEqualLess;": "\u22db",
|
833 |
+
"GreaterFullEqual;": "\u2267",
|
834 |
+
"GreaterGreater;": "\u2aa2",
|
835 |
+
"GreaterLess;": "\u2277",
|
836 |
+
"GreaterSlantEqual;": "\u2a7e",
|
837 |
+
"GreaterTilde;": "\u2273",
|
838 |
+
"Gscr;": "\U0001d4a2",
|
839 |
+
"Gt;": "\u226b",
|
840 |
+
"HARDcy;": "\u042a",
|
841 |
+
"Hacek;": "\u02c7",
|
842 |
+
"Hat;": "^",
|
843 |
+
"Hcirc;": "\u0124",
|
844 |
+
"Hfr;": "\u210c",
|
845 |
+
"HilbertSpace;": "\u210b",
|
846 |
+
"Hopf;": "\u210d",
|
847 |
+
"HorizontalLine;": "\u2500",
|
848 |
+
"Hscr;": "\u210b",
|
849 |
+
"Hstrok;": "\u0126",
|
850 |
+
"HumpDownHump;": "\u224e",
|
851 |
+
"HumpEqual;": "\u224f",
|
852 |
+
"IEcy;": "\u0415",
|
853 |
+
"IJlig;": "\u0132",
|
854 |
+
"IOcy;": "\u0401",
|
855 |
+
"Iacute": "\xcd",
|
856 |
+
"Iacute;": "\xcd",
|
857 |
+
"Icirc": "\xce",
|
858 |
+
"Icirc;": "\xce",
|
859 |
+
"Icy;": "\u0418",
|
860 |
+
"Idot;": "\u0130",
|
861 |
+
"Ifr;": "\u2111",
|
862 |
+
"Igrave": "\xcc",
|
863 |
+
"Igrave;": "\xcc",
|
864 |
+
"Im;": "\u2111",
|
865 |
+
"Imacr;": "\u012a",
|
866 |
+
"ImaginaryI;": "\u2148",
|
867 |
+
"Implies;": "\u21d2",
|
868 |
+
"Int;": "\u222c",
|
869 |
+
"Integral;": "\u222b",
|
870 |
+
"Intersection;": "\u22c2",
|
871 |
+
"InvisibleComma;": "\u2063",
|
872 |
+
"InvisibleTimes;": "\u2062",
|
873 |
+
"Iogon;": "\u012e",
|
874 |
+
"Iopf;": "\U0001d540",
|
875 |
+
"Iota;": "\u0399",
|
876 |
+
"Iscr;": "\u2110",
|
877 |
+
"Itilde;": "\u0128",
|
878 |
+
"Iukcy;": "\u0406",
|
879 |
+
"Iuml": "\xcf",
|
880 |
+
"Iuml;": "\xcf",
|
881 |
+
"Jcirc;": "\u0134",
|
882 |
+
"Jcy;": "\u0419",
|
883 |
+
"Jfr;": "\U0001d50d",
|
884 |
+
"Jopf;": "\U0001d541",
|
885 |
+
"Jscr;": "\U0001d4a5",
|
886 |
+
"Jsercy;": "\u0408",
|
887 |
+
"Jukcy;": "\u0404",
|
888 |
+
"KHcy;": "\u0425",
|
889 |
+
"KJcy;": "\u040c",
|
890 |
+
"Kappa;": "\u039a",
|
891 |
+
"Kcedil;": "\u0136",
|
892 |
+
"Kcy;": "\u041a",
|
893 |
+
"Kfr;": "\U0001d50e",
|
894 |
+
"Kopf;": "\U0001d542",
|
895 |
+
"Kscr;": "\U0001d4a6",
|
896 |
+
"LJcy;": "\u0409",
|
897 |
+
"LT": "<",
|
898 |
+
"LT;": "<",
|
899 |
+
"Lacute;": "\u0139",
|
900 |
+
"Lambda;": "\u039b",
|
901 |
+
"Lang;": "\u27ea",
|
902 |
+
"Laplacetrf;": "\u2112",
|
903 |
+
"Larr;": "\u219e",
|
904 |
+
"Lcaron;": "\u013d",
|
905 |
+
"Lcedil;": "\u013b",
|
906 |
+
"Lcy;": "\u041b",
|
907 |
+
"LeftAngleBracket;": "\u27e8",
|
908 |
+
"LeftArrow;": "\u2190",
|
909 |
+
"LeftArrowBar;": "\u21e4",
|
910 |
+
"LeftArrowRightArrow;": "\u21c6",
|
911 |
+
"LeftCeiling;": "\u2308",
|
912 |
+
"LeftDoubleBracket;": "\u27e6",
|
913 |
+
"LeftDownTeeVector;": "\u2961",
|
914 |
+
"LeftDownVector;": "\u21c3",
|
915 |
+
"LeftDownVectorBar;": "\u2959",
|
916 |
+
"LeftFloor;": "\u230a",
|
917 |
+
"LeftRightArrow;": "\u2194",
|
918 |
+
"LeftRightVector;": "\u294e",
|
919 |
+
"LeftTee;": "\u22a3",
|
920 |
+
"LeftTeeArrow;": "\u21a4",
|
921 |
+
"LeftTeeVector;": "\u295a",
|
922 |
+
"LeftTriangle;": "\u22b2",
|
923 |
+
"LeftTriangleBar;": "\u29cf",
|
924 |
+
"LeftTriangleEqual;": "\u22b4",
|
925 |
+
"LeftUpDownVector;": "\u2951",
|
926 |
+
"LeftUpTeeVector;": "\u2960",
|
927 |
+
"LeftUpVector;": "\u21bf",
|
928 |
+
"LeftUpVectorBar;": "\u2958",
|
929 |
+
"LeftVector;": "\u21bc",
|
930 |
+
"LeftVectorBar;": "\u2952",
|
931 |
+
"Leftarrow;": "\u21d0",
|
932 |
+
"Leftrightarrow;": "\u21d4",
|
933 |
+
"LessEqualGreater;": "\u22da",
|
934 |
+
"LessFullEqual;": "\u2266",
|
935 |
+
"LessGreater;": "\u2276",
|
936 |
+
"LessLess;": "\u2aa1",
|
937 |
+
"LessSlantEqual;": "\u2a7d",
|
938 |
+
"LessTilde;": "\u2272",
|
939 |
+
"Lfr;": "\U0001d50f",
|
940 |
+
"Ll;": "\u22d8",
|
941 |
+
"Lleftarrow;": "\u21da",
|
942 |
+
"Lmidot;": "\u013f",
|
943 |
+
"LongLeftArrow;": "\u27f5",
|
944 |
+
"LongLeftRightArrow;": "\u27f7",
|
945 |
+
"LongRightArrow;": "\u27f6",
|
946 |
+
"Longleftarrow;": "\u27f8",
|
947 |
+
"Longleftrightarrow;": "\u27fa",
|
948 |
+
"Longrightarrow;": "\u27f9",
|
949 |
+
"Lopf;": "\U0001d543",
|
950 |
+
"LowerLeftArrow;": "\u2199",
|
951 |
+
"LowerRightArrow;": "\u2198",
|
952 |
+
"Lscr;": "\u2112",
|
953 |
+
"Lsh;": "\u21b0",
|
954 |
+
"Lstrok;": "\u0141",
|
955 |
+
"Lt;": "\u226a",
|
956 |
+
"Map;": "\u2905",
|
957 |
+
"Mcy;": "\u041c",
|
958 |
+
"MediumSpace;": "\u205f",
|
959 |
+
"Mellintrf;": "\u2133",
|
960 |
+
"Mfr;": "\U0001d510",
|
961 |
+
"MinusPlus;": "\u2213",
|
962 |
+
"Mopf;": "\U0001d544",
|
963 |
+
"Mscr;": "\u2133",
|
964 |
+
"Mu;": "\u039c",
|
965 |
+
"NJcy;": "\u040a",
|
966 |
+
"Nacute;": "\u0143",
|
967 |
+
"Ncaron;": "\u0147",
|
968 |
+
"Ncedil;": "\u0145",
|
969 |
+
"Ncy;": "\u041d",
|
970 |
+
"NegativeMediumSpace;": "\u200b",
|
971 |
+
"NegativeThickSpace;": "\u200b",
|
972 |
+
"NegativeThinSpace;": "\u200b",
|
973 |
+
"NegativeVeryThinSpace;": "\u200b",
|
974 |
+
"NestedGreaterGreater;": "\u226b",
|
975 |
+
"NestedLessLess;": "\u226a",
|
976 |
+
"NewLine;": "\n",
|
977 |
+
"Nfr;": "\U0001d511",
|
978 |
+
"NoBreak;": "\u2060",
|
979 |
+
"NonBreakingSpace;": "\xa0",
|
980 |
+
"Nopf;": "\u2115",
|
981 |
+
"Not;": "\u2aec",
|
982 |
+
"NotCongruent;": "\u2262",
|
983 |
+
"NotCupCap;": "\u226d",
|
984 |
+
"NotDoubleVerticalBar;": "\u2226",
|
985 |
+
"NotElement;": "\u2209",
|
986 |
+
"NotEqual;": "\u2260",
|
987 |
+
"NotEqualTilde;": "\u2242\u0338",
|
988 |
+
"NotExists;": "\u2204",
|
989 |
+
"NotGreater;": "\u226f",
|
990 |
+
"NotGreaterEqual;": "\u2271",
|
991 |
+
"NotGreaterFullEqual;": "\u2267\u0338",
|
992 |
+
"NotGreaterGreater;": "\u226b\u0338",
|
993 |
+
"NotGreaterLess;": "\u2279",
|
994 |
+
"NotGreaterSlantEqual;": "\u2a7e\u0338",
|
995 |
+
"NotGreaterTilde;": "\u2275",
|
996 |
+
"NotHumpDownHump;": "\u224e\u0338",
|
997 |
+
"NotHumpEqual;": "\u224f\u0338",
|
998 |
+
"NotLeftTriangle;": "\u22ea",
|
999 |
+
"NotLeftTriangleBar;": "\u29cf\u0338",
|
1000 |
+
"NotLeftTriangleEqual;": "\u22ec",
|
1001 |
+
"NotLess;": "\u226e",
|
1002 |
+
"NotLessEqual;": "\u2270",
|
1003 |
+
"NotLessGreater;": "\u2278",
|
1004 |
+
"NotLessLess;": "\u226a\u0338",
|
1005 |
+
"NotLessSlantEqual;": "\u2a7d\u0338",
|
1006 |
+
"NotLessTilde;": "\u2274",
|
1007 |
+
"NotNestedGreaterGreater;": "\u2aa2\u0338",
|
1008 |
+
"NotNestedLessLess;": "\u2aa1\u0338",
|
1009 |
+
"NotPrecedes;": "\u2280",
|
1010 |
+
"NotPrecedesEqual;": "\u2aaf\u0338",
|
1011 |
+
"NotPrecedesSlantEqual;": "\u22e0",
|
1012 |
+
"NotReverseElement;": "\u220c",
|
1013 |
+
"NotRightTriangle;": "\u22eb",
|
1014 |
+
"NotRightTriangleBar;": "\u29d0\u0338",
|
1015 |
+
"NotRightTriangleEqual;": "\u22ed",
|
1016 |
+
"NotSquareSubset;": "\u228f\u0338",
|
1017 |
+
"NotSquareSubsetEqual;": "\u22e2",
|
1018 |
+
"NotSquareSuperset;": "\u2290\u0338",
|
1019 |
+
"NotSquareSupersetEqual;": "\u22e3",
|
1020 |
+
"NotSubset;": "\u2282\u20d2",
|
1021 |
+
"NotSubsetEqual;": "\u2288",
|
1022 |
+
"NotSucceeds;": "\u2281",
|
1023 |
+
"NotSucceedsEqual;": "\u2ab0\u0338",
|
1024 |
+
"NotSucceedsSlantEqual;": "\u22e1",
|
1025 |
+
"NotSucceedsTilde;": "\u227f\u0338",
|
1026 |
+
"NotSuperset;": "\u2283\u20d2",
|
1027 |
+
"NotSupersetEqual;": "\u2289",
|
1028 |
+
"NotTilde;": "\u2241",
|
1029 |
+
"NotTildeEqual;": "\u2244",
|
1030 |
+
"NotTildeFullEqual;": "\u2247",
|
1031 |
+
"NotTildeTilde;": "\u2249",
|
1032 |
+
"NotVerticalBar;": "\u2224",
|
1033 |
+
"Nscr;": "\U0001d4a9",
|
1034 |
+
"Ntilde": "\xd1",
|
1035 |
+
"Ntilde;": "\xd1",
|
1036 |
+
"Nu;": "\u039d",
|
1037 |
+
"OElig;": "\u0152",
|
1038 |
+
"Oacute": "\xd3",
|
1039 |
+
"Oacute;": "\xd3",
|
1040 |
+
"Ocirc": "\xd4",
|
1041 |
+
"Ocirc;": "\xd4",
|
1042 |
+
"Ocy;": "\u041e",
|
1043 |
+
"Odblac;": "\u0150",
|
1044 |
+
"Ofr;": "\U0001d512",
|
1045 |
+
"Ograve": "\xd2",
|
1046 |
+
"Ograve;": "\xd2",
|
1047 |
+
"Omacr;": "\u014c",
|
1048 |
+
"Omega;": "\u03a9",
|
1049 |
+
"Omicron;": "\u039f",
|
1050 |
+
"Oopf;": "\U0001d546",
|
1051 |
+
"OpenCurlyDoubleQuote;": "\u201c",
|
1052 |
+
"OpenCurlyQuote;": "\u2018",
|
1053 |
+
"Or;": "\u2a54",
|
1054 |
+
"Oscr;": "\U0001d4aa",
|
1055 |
+
"Oslash": "\xd8",
|
1056 |
+
"Oslash;": "\xd8",
|
1057 |
+
"Otilde": "\xd5",
|
1058 |
+
"Otilde;": "\xd5",
|
1059 |
+
"Otimes;": "\u2a37",
|
1060 |
+
"Ouml": "\xd6",
|
1061 |
+
"Ouml;": "\xd6",
|
1062 |
+
"OverBar;": "\u203e",
|
1063 |
+
"OverBrace;": "\u23de",
|
1064 |
+
"OverBracket;": "\u23b4",
|
1065 |
+
"OverParenthesis;": "\u23dc",
|
1066 |
+
"PartialD;": "\u2202",
|
1067 |
+
"Pcy;": "\u041f",
|
1068 |
+
"Pfr;": "\U0001d513",
|
1069 |
+
"Phi;": "\u03a6",
|
1070 |
+
"Pi;": "\u03a0",
|
1071 |
+
"PlusMinus;": "\xb1",
|
1072 |
+
"Poincareplane;": "\u210c",
|
1073 |
+
"Popf;": "\u2119",
|
1074 |
+
"Pr;": "\u2abb",
|
1075 |
+
"Precedes;": "\u227a",
|
1076 |
+
"PrecedesEqual;": "\u2aaf",
|
1077 |
+
"PrecedesSlantEqual;": "\u227c",
|
1078 |
+
"PrecedesTilde;": "\u227e",
|
1079 |
+
"Prime;": "\u2033",
|
1080 |
+
"Product;": "\u220f",
|
1081 |
+
"Proportion;": "\u2237",
|
1082 |
+
"Proportional;": "\u221d",
|
1083 |
+
"Pscr;": "\U0001d4ab",
|
1084 |
+
"Psi;": "\u03a8",
|
1085 |
+
"QUOT": "\"",
|
1086 |
+
"QUOT;": "\"",
|
1087 |
+
"Qfr;": "\U0001d514",
|
1088 |
+
"Qopf;": "\u211a",
|
1089 |
+
"Qscr;": "\U0001d4ac",
|
1090 |
+
"RBarr;": "\u2910",
|
1091 |
+
"REG": "\xae",
|
1092 |
+
"REG;": "\xae",
|
1093 |
+
"Racute;": "\u0154",
|
1094 |
+
"Rang;": "\u27eb",
|
1095 |
+
"Rarr;": "\u21a0",
|
1096 |
+
"Rarrtl;": "\u2916",
|
1097 |
+
"Rcaron;": "\u0158",
|
1098 |
+
"Rcedil;": "\u0156",
|
1099 |
+
"Rcy;": "\u0420",
|
1100 |
+
"Re;": "\u211c",
|
1101 |
+
"ReverseElement;": "\u220b",
|
1102 |
+
"ReverseEquilibrium;": "\u21cb",
|
1103 |
+
"ReverseUpEquilibrium;": "\u296f",
|
1104 |
+
"Rfr;": "\u211c",
|
1105 |
+
"Rho;": "\u03a1",
|
1106 |
+
"RightAngleBracket;": "\u27e9",
|
1107 |
+
"RightArrow;": "\u2192",
|
1108 |
+
"RightArrowBar;": "\u21e5",
|
1109 |
+
"RightArrowLeftArrow;": "\u21c4",
|
1110 |
+
"RightCeiling;": "\u2309",
|
1111 |
+
"RightDoubleBracket;": "\u27e7",
|
1112 |
+
"RightDownTeeVector;": "\u295d",
|
1113 |
+
"RightDownVector;": "\u21c2",
|
1114 |
+
"RightDownVectorBar;": "\u2955",
|
1115 |
+
"RightFloor;": "\u230b",
|
1116 |
+
"RightTee;": "\u22a2",
|
1117 |
+
"RightTeeArrow;": "\u21a6",
|
1118 |
+
"RightTeeVector;": "\u295b",
|
1119 |
+
"RightTriangle;": "\u22b3",
|
1120 |
+
"RightTriangleBar;": "\u29d0",
|
1121 |
+
"RightTriangleEqual;": "\u22b5",
|
1122 |
+
"RightUpDownVector;": "\u294f",
|
1123 |
+
"RightUpTeeVector;": "\u295c",
|
1124 |
+
"RightUpVector;": "\u21be",
|
1125 |
+
"RightUpVectorBar;": "\u2954",
|
1126 |
+
"RightVector;": "\u21c0",
|
1127 |
+
"RightVectorBar;": "\u2953",
|
1128 |
+
"Rightarrow;": "\u21d2",
|
1129 |
+
"Ropf;": "\u211d",
|
1130 |
+
"RoundImplies;": "\u2970",
|
1131 |
+
"Rrightarrow;": "\u21db",
|
1132 |
+
"Rscr;": "\u211b",
|
1133 |
+
"Rsh;": "\u21b1",
|
1134 |
+
"RuleDelayed;": "\u29f4",
|
1135 |
+
"SHCHcy;": "\u0429",
|
1136 |
+
"SHcy;": "\u0428",
|
1137 |
+
"SOFTcy;": "\u042c",
|
1138 |
+
"Sacute;": "\u015a",
|
1139 |
+
"Sc;": "\u2abc",
|
1140 |
+
"Scaron;": "\u0160",
|
1141 |
+
"Scedil;": "\u015e",
|
1142 |
+
"Scirc;": "\u015c",
|
1143 |
+
"Scy;": "\u0421",
|
1144 |
+
"Sfr;": "\U0001d516",
|
1145 |
+
"ShortDownArrow;": "\u2193",
|
1146 |
+
"ShortLeftArrow;": "\u2190",
|
1147 |
+
"ShortRightArrow;": "\u2192",
|
1148 |
+
"ShortUpArrow;": "\u2191",
|
1149 |
+
"Sigma;": "\u03a3",
|
1150 |
+
"SmallCircle;": "\u2218",
|
1151 |
+
"Sopf;": "\U0001d54a",
|
1152 |
+
"Sqrt;": "\u221a",
|
1153 |
+
"Square;": "\u25a1",
|
1154 |
+
"SquareIntersection;": "\u2293",
|
1155 |
+
"SquareSubset;": "\u228f",
|
1156 |
+
"SquareSubsetEqual;": "\u2291",
|
1157 |
+
"SquareSuperset;": "\u2290",
|
1158 |
+
"SquareSupersetEqual;": "\u2292",
|
1159 |
+
"SquareUnion;": "\u2294",
|
1160 |
+
"Sscr;": "\U0001d4ae",
|
1161 |
+
"Star;": "\u22c6",
|
1162 |
+
"Sub;": "\u22d0",
|
1163 |
+
"Subset;": "\u22d0",
|
1164 |
+
"SubsetEqual;": "\u2286",
|
1165 |
+
"Succeeds;": "\u227b",
|
1166 |
+
"SucceedsEqual;": "\u2ab0",
|
1167 |
+
"SucceedsSlantEqual;": "\u227d",
|
1168 |
+
"SucceedsTilde;": "\u227f",
|
1169 |
+
"SuchThat;": "\u220b",
|
1170 |
+
"Sum;": "\u2211",
|
1171 |
+
"Sup;": "\u22d1",
|
1172 |
+
"Superset;": "\u2283",
|
1173 |
+
"SupersetEqual;": "\u2287",
|
1174 |
+
"Supset;": "\u22d1",
|
1175 |
+
"THORN": "\xde",
|
1176 |
+
"THORN;": "\xde",
|
1177 |
+
"TRADE;": "\u2122",
|
1178 |
+
"TSHcy;": "\u040b",
|
1179 |
+
"TScy;": "\u0426",
|
1180 |
+
"Tab;": "\t",
|
1181 |
+
"Tau;": "\u03a4",
|
1182 |
+
"Tcaron;": "\u0164",
|
1183 |
+
"Tcedil;": "\u0162",
|
1184 |
+
"Tcy;": "\u0422",
|
1185 |
+
"Tfr;": "\U0001d517",
|
1186 |
+
"Therefore;": "\u2234",
|
1187 |
+
"Theta;": "\u0398",
|
1188 |
+
"ThickSpace;": "\u205f\u200a",
|
1189 |
+
"ThinSpace;": "\u2009",
|
1190 |
+
"Tilde;": "\u223c",
|
1191 |
+
"TildeEqual;": "\u2243",
|
1192 |
+
"TildeFullEqual;": "\u2245",
|
1193 |
+
"TildeTilde;": "\u2248",
|
1194 |
+
"Topf;": "\U0001d54b",
|
1195 |
+
"TripleDot;": "\u20db",
|
1196 |
+
"Tscr;": "\U0001d4af",
|
1197 |
+
"Tstrok;": "\u0166",
|
1198 |
+
"Uacute": "\xda",
|
1199 |
+
"Uacute;": "\xda",
|
1200 |
+
"Uarr;": "\u219f",
|
1201 |
+
"Uarrocir;": "\u2949",
|
1202 |
+
"Ubrcy;": "\u040e",
|
1203 |
+
"Ubreve;": "\u016c",
|
1204 |
+
"Ucirc": "\xdb",
|
1205 |
+
"Ucirc;": "\xdb",
|
1206 |
+
"Ucy;": "\u0423",
|
1207 |
+
"Udblac;": "\u0170",
|
1208 |
+
"Ufr;": "\U0001d518",
|
1209 |
+
"Ugrave": "\xd9",
|
1210 |
+
"Ugrave;": "\xd9",
|
1211 |
+
"Umacr;": "\u016a",
|
1212 |
+
"UnderBar;": "_",
|
1213 |
+
"UnderBrace;": "\u23df",
|
1214 |
+
"UnderBracket;": "\u23b5",
|
1215 |
+
"UnderParenthesis;": "\u23dd",
|
1216 |
+
"Union;": "\u22c3",
|
1217 |
+
"UnionPlus;": "\u228e",
|
1218 |
+
"Uogon;": "\u0172",
|
1219 |
+
"Uopf;": "\U0001d54c",
|
1220 |
+
"UpArrow;": "\u2191",
|
1221 |
+
"UpArrowBar;": "\u2912",
|
1222 |
+
"UpArrowDownArrow;": "\u21c5",
|
1223 |
+
"UpDownArrow;": "\u2195",
|
1224 |
+
"UpEquilibrium;": "\u296e",
|
1225 |
+
"UpTee;": "\u22a5",
|
1226 |
+
"UpTeeArrow;": "\u21a5",
|
1227 |
+
"Uparrow;": "\u21d1",
|
1228 |
+
"Updownarrow;": "\u21d5",
|
1229 |
+
"UpperLeftArrow;": "\u2196",
|
1230 |
+
"UpperRightArrow;": "\u2197",
|
1231 |
+
"Upsi;": "\u03d2",
|
1232 |
+
"Upsilon;": "\u03a5",
|
1233 |
+
"Uring;": "\u016e",
|
1234 |
+
"Uscr;": "\U0001d4b0",
|
1235 |
+
"Utilde;": "\u0168",
|
1236 |
+
"Uuml": "\xdc",
|
1237 |
+
"Uuml;": "\xdc",
|
1238 |
+
"VDash;": "\u22ab",
|
1239 |
+
"Vbar;": "\u2aeb",
|
1240 |
+
"Vcy;": "\u0412",
|
1241 |
+
"Vdash;": "\u22a9",
|
1242 |
+
"Vdashl;": "\u2ae6",
|
1243 |
+
"Vee;": "\u22c1",
|
1244 |
+
"Verbar;": "\u2016",
|
1245 |
+
"Vert;": "\u2016",
|
1246 |
+
"VerticalBar;": "\u2223",
|
1247 |
+
"VerticalLine;": "|",
|
1248 |
+
"VerticalSeparator;": "\u2758",
|
1249 |
+
"VerticalTilde;": "\u2240",
|
1250 |
+
"VeryThinSpace;": "\u200a",
|
1251 |
+
"Vfr;": "\U0001d519",
|
1252 |
+
"Vopf;": "\U0001d54d",
|
1253 |
+
"Vscr;": "\U0001d4b1",
|
1254 |
+
"Vvdash;": "\u22aa",
|
1255 |
+
"Wcirc;": "\u0174",
|
1256 |
+
"Wedge;": "\u22c0",
|
1257 |
+
"Wfr;": "\U0001d51a",
|
1258 |
+
"Wopf;": "\U0001d54e",
|
1259 |
+
"Wscr;": "\U0001d4b2",
|
1260 |
+
"Xfr;": "\U0001d51b",
|
1261 |
+
"Xi;": "\u039e",
|
1262 |
+
"Xopf;": "\U0001d54f",
|
1263 |
+
"Xscr;": "\U0001d4b3",
|
1264 |
+
"YAcy;": "\u042f",
|
1265 |
+
"YIcy;": "\u0407",
|
1266 |
+
"YUcy;": "\u042e",
|
1267 |
+
"Yacute": "\xdd",
|
1268 |
+
"Yacute;": "\xdd",
|
1269 |
+
"Ycirc;": "\u0176",
|
1270 |
+
"Ycy;": "\u042b",
|
1271 |
+
"Yfr;": "\U0001d51c",
|
1272 |
+
"Yopf;": "\U0001d550",
|
1273 |
+
"Yscr;": "\U0001d4b4",
|
1274 |
+
"Yuml;": "\u0178",
|
1275 |
+
"ZHcy;": "\u0416",
|
1276 |
+
"Zacute;": "\u0179",
|
1277 |
+
"Zcaron;": "\u017d",
|
1278 |
+
"Zcy;": "\u0417",
|
1279 |
+
"Zdot;": "\u017b",
|
1280 |
+
"ZeroWidthSpace;": "\u200b",
|
1281 |
+
"Zeta;": "\u0396",
|
1282 |
+
"Zfr;": "\u2128",
|
1283 |
+
"Zopf;": "\u2124",
|
1284 |
+
"Zscr;": "\U0001d4b5",
|
1285 |
+
"aacute": "\xe1",
|
1286 |
+
"aacute;": "\xe1",
|
1287 |
+
"abreve;": "\u0103",
|
1288 |
+
"ac;": "\u223e",
|
1289 |
+
"acE;": "\u223e\u0333",
|
1290 |
+
"acd;": "\u223f",
|
1291 |
+
"acirc": "\xe2",
|
1292 |
+
"acirc;": "\xe2",
|
1293 |
+
"acute": "\xb4",
|
1294 |
+
"acute;": "\xb4",
|
1295 |
+
"acy;": "\u0430",
|
1296 |
+
"aelig": "\xe6",
|
1297 |
+
"aelig;": "\xe6",
|
1298 |
+
"af;": "\u2061",
|
1299 |
+
"afr;": "\U0001d51e",
|
1300 |
+
"agrave": "\xe0",
|
1301 |
+
"agrave;": "\xe0",
|
1302 |
+
"alefsym;": "\u2135",
|
1303 |
+
"aleph;": "\u2135",
|
1304 |
+
"alpha;": "\u03b1",
|
1305 |
+
"amacr;": "\u0101",
|
1306 |
+
"amalg;": "\u2a3f",
|
1307 |
+
"amp": "&",
|
1308 |
+
"amp;": "&",
|
1309 |
+
"and;": "\u2227",
|
1310 |
+
"andand;": "\u2a55",
|
1311 |
+
"andd;": "\u2a5c",
|
1312 |
+
"andslope;": "\u2a58",
|
1313 |
+
"andv;": "\u2a5a",
|
1314 |
+
"ang;": "\u2220",
|
1315 |
+
"ange;": "\u29a4",
|
1316 |
+
"angle;": "\u2220",
|
1317 |
+
"angmsd;": "\u2221",
|
1318 |
+
"angmsdaa;": "\u29a8",
|
1319 |
+
"angmsdab;": "\u29a9",
|
1320 |
+
"angmsdac;": "\u29aa",
|
1321 |
+
"angmsdad;": "\u29ab",
|
1322 |
+
"angmsdae;": "\u29ac",
|
1323 |
+
"angmsdaf;": "\u29ad",
|
1324 |
+
"angmsdag;": "\u29ae",
|
1325 |
+
"angmsdah;": "\u29af",
|
1326 |
+
"angrt;": "\u221f",
|
1327 |
+
"angrtvb;": "\u22be",
|
1328 |
+
"angrtvbd;": "\u299d",
|
1329 |
+
"angsph;": "\u2222",
|
1330 |
+
"angst;": "\xc5",
|
1331 |
+
"angzarr;": "\u237c",
|
1332 |
+
"aogon;": "\u0105",
|
1333 |
+
"aopf;": "\U0001d552",
|
1334 |
+
"ap;": "\u2248",
|
1335 |
+
"apE;": "\u2a70",
|
1336 |
+
"apacir;": "\u2a6f",
|
1337 |
+
"ape;": "\u224a",
|
1338 |
+
"apid;": "\u224b",
|
1339 |
+
"apos;": "'",
|
1340 |
+
"approx;": "\u2248",
|
1341 |
+
"approxeq;": "\u224a",
|
1342 |
+
"aring": "\xe5",
|
1343 |
+
"aring;": "\xe5",
|
1344 |
+
"ascr;": "\U0001d4b6",
|
1345 |
+
"ast;": "*",
|
1346 |
+
"asymp;": "\u2248",
|
1347 |
+
"asympeq;": "\u224d",
|
1348 |
+
"atilde": "\xe3",
|
1349 |
+
"atilde;": "\xe3",
|
1350 |
+
"auml": "\xe4",
|
1351 |
+
"auml;": "\xe4",
|
1352 |
+
"awconint;": "\u2233",
|
1353 |
+
"awint;": "\u2a11",
|
1354 |
+
"bNot;": "\u2aed",
|
1355 |
+
"backcong;": "\u224c",
|
1356 |
+
"backepsilon;": "\u03f6",
|
1357 |
+
"backprime;": "\u2035",
|
1358 |
+
"backsim;": "\u223d",
|
1359 |
+
"backsimeq;": "\u22cd",
|
1360 |
+
"barvee;": "\u22bd",
|
1361 |
+
"barwed;": "\u2305",
|
1362 |
+
"barwedge;": "\u2305",
|
1363 |
+
"bbrk;": "\u23b5",
|
1364 |
+
"bbrktbrk;": "\u23b6",
|
1365 |
+
"bcong;": "\u224c",
|
1366 |
+
"bcy;": "\u0431",
|
1367 |
+
"bdquo;": "\u201e",
|
1368 |
+
"becaus;": "\u2235",
|
1369 |
+
"because;": "\u2235",
|
1370 |
+
"bemptyv;": "\u29b0",
|
1371 |
+
"bepsi;": "\u03f6",
|
1372 |
+
"bernou;": "\u212c",
|
1373 |
+
"beta;": "\u03b2",
|
1374 |
+
"beth;": "\u2136",
|
1375 |
+
"between;": "\u226c",
|
1376 |
+
"bfr;": "\U0001d51f",
|
1377 |
+
"bigcap;": "\u22c2",
|
1378 |
+
"bigcirc;": "\u25ef",
|
1379 |
+
"bigcup;": "\u22c3",
|
1380 |
+
"bigodot;": "\u2a00",
|
1381 |
+
"bigoplus;": "\u2a01",
|
1382 |
+
"bigotimes;": "\u2a02",
|
1383 |
+
"bigsqcup;": "\u2a06",
|
1384 |
+
"bigstar;": "\u2605",
|
1385 |
+
"bigtriangledown;": "\u25bd",
|
1386 |
+
"bigtriangleup;": "\u25b3",
|
1387 |
+
"biguplus;": "\u2a04",
|
1388 |
+
"bigvee;": "\u22c1",
|
1389 |
+
"bigwedge;": "\u22c0",
|
1390 |
+
"bkarow;": "\u290d",
|
1391 |
+
"blacklozenge;": "\u29eb",
|
1392 |
+
"blacksquare;": "\u25aa",
|
1393 |
+
"blacktriangle;": "\u25b4",
|
1394 |
+
"blacktriangledown;": "\u25be",
|
1395 |
+
"blacktriangleleft;": "\u25c2",
|
1396 |
+
"blacktriangleright;": "\u25b8",
|
1397 |
+
"blank;": "\u2423",
|
1398 |
+
"blk12;": "\u2592",
|
1399 |
+
"blk14;": "\u2591",
|
1400 |
+
"blk34;": "\u2593",
|
1401 |
+
"block;": "\u2588",
|
1402 |
+
"bne;": "=\u20e5",
|
1403 |
+
"bnequiv;": "\u2261\u20e5",
|
1404 |
+
"bnot;": "\u2310",
|
1405 |
+
"bopf;": "\U0001d553",
|
1406 |
+
"bot;": "\u22a5",
|
1407 |
+
"bottom;": "\u22a5",
|
1408 |
+
"bowtie;": "\u22c8",
|
1409 |
+
"boxDL;": "\u2557",
|
1410 |
+
"boxDR;": "\u2554",
|
1411 |
+
"boxDl;": "\u2556",
|
1412 |
+
"boxDr;": "\u2553",
|
1413 |
+
"boxH;": "\u2550",
|
1414 |
+
"boxHD;": "\u2566",
|
1415 |
+
"boxHU;": "\u2569",
|
1416 |
+
"boxHd;": "\u2564",
|
1417 |
+
"boxHu;": "\u2567",
|
1418 |
+
"boxUL;": "\u255d",
|
1419 |
+
"boxUR;": "\u255a",
|
1420 |
+
"boxUl;": "\u255c",
|
1421 |
+
"boxUr;": "\u2559",
|
1422 |
+
"boxV;": "\u2551",
|
1423 |
+
"boxVH;": "\u256c",
|
1424 |
+
"boxVL;": "\u2563",
|
1425 |
+
"boxVR;": "\u2560",
|
1426 |
+
"boxVh;": "\u256b",
|
1427 |
+
"boxVl;": "\u2562",
|
1428 |
+
"boxVr;": "\u255f",
|
1429 |
+
"boxbox;": "\u29c9",
|
1430 |
+
"boxdL;": "\u2555",
|
1431 |
+
"boxdR;": "\u2552",
|
1432 |
+
"boxdl;": "\u2510",
|
1433 |
+
"boxdr;": "\u250c",
|
1434 |
+
"boxh;": "\u2500",
|
1435 |
+
"boxhD;": "\u2565",
|
1436 |
+
"boxhU;": "\u2568",
|
1437 |
+
"boxhd;": "\u252c",
|
1438 |
+
"boxhu;": "\u2534",
|
1439 |
+
"boxminus;": "\u229f",
|
1440 |
+
"boxplus;": "\u229e",
|
1441 |
+
"boxtimes;": "\u22a0",
|
1442 |
+
"boxuL;": "\u255b",
|
1443 |
+
"boxuR;": "\u2558",
|
1444 |
+
"boxul;": "\u2518",
|
1445 |
+
"boxur;": "\u2514",
|
1446 |
+
"boxv;": "\u2502",
|
1447 |
+
"boxvH;": "\u256a",
|
1448 |
+
"boxvL;": "\u2561",
|
1449 |
+
"boxvR;": "\u255e",
|
1450 |
+
"boxvh;": "\u253c",
|
1451 |
+
"boxvl;": "\u2524",
|
1452 |
+
"boxvr;": "\u251c",
|
1453 |
+
"bprime;": "\u2035",
|
1454 |
+
"breve;": "\u02d8",
|
1455 |
+
"brvbar": "\xa6",
|
1456 |
+
"brvbar;": "\xa6",
|
1457 |
+
"bscr;": "\U0001d4b7",
|
1458 |
+
"bsemi;": "\u204f",
|
1459 |
+
"bsim;": "\u223d",
|
1460 |
+
"bsime;": "\u22cd",
|
1461 |
+
"bsol;": "\\",
|
1462 |
+
"bsolb;": "\u29c5",
|
1463 |
+
"bsolhsub;": "\u27c8",
|
1464 |
+
"bull;": "\u2022",
|
1465 |
+
"bullet;": "\u2022",
|
1466 |
+
"bump;": "\u224e",
|
1467 |
+
"bumpE;": "\u2aae",
|
1468 |
+
"bumpe;": "\u224f",
|
1469 |
+
"bumpeq;": "\u224f",
|
1470 |
+
"cacute;": "\u0107",
|
1471 |
+
"cap;": "\u2229",
|
1472 |
+
"capand;": "\u2a44",
|
1473 |
+
"capbrcup;": "\u2a49",
|
1474 |
+
"capcap;": "\u2a4b",
|
1475 |
+
"capcup;": "\u2a47",
|
1476 |
+
"capdot;": "\u2a40",
|
1477 |
+
"caps;": "\u2229\ufe00",
|
1478 |
+
"caret;": "\u2041",
|
1479 |
+
"caron;": "\u02c7",
|
1480 |
+
"ccaps;": "\u2a4d",
|
1481 |
+
"ccaron;": "\u010d",
|
1482 |
+
"ccedil": "\xe7",
|
1483 |
+
"ccedil;": "\xe7",
|
1484 |
+
"ccirc;": "\u0109",
|
1485 |
+
"ccups;": "\u2a4c",
|
1486 |
+
"ccupssm;": "\u2a50",
|
1487 |
+
"cdot;": "\u010b",
|
1488 |
+
"cedil": "\xb8",
|
1489 |
+
"cedil;": "\xb8",
|
1490 |
+
"cemptyv;": "\u29b2",
|
1491 |
+
"cent": "\xa2",
|
1492 |
+
"cent;": "\xa2",
|
1493 |
+
"centerdot;": "\xb7",
|
1494 |
+
"cfr;": "\U0001d520",
|
1495 |
+
"chcy;": "\u0447",
|
1496 |
+
"check;": "\u2713",
|
1497 |
+
"checkmark;": "\u2713",
|
1498 |
+
"chi;": "\u03c7",
|
1499 |
+
"cir;": "\u25cb",
|
1500 |
+
"cirE;": "\u29c3",
|
1501 |
+
"circ;": "\u02c6",
|
1502 |
+
"circeq;": "\u2257",
|
1503 |
+
"circlearrowleft;": "\u21ba",
|
1504 |
+
"circlearrowright;": "\u21bb",
|
1505 |
+
"circledR;": "\xae",
|
1506 |
+
"circledS;": "\u24c8",
|
1507 |
+
"circledast;": "\u229b",
|
1508 |
+
"circledcirc;": "\u229a",
|
1509 |
+
"circleddash;": "\u229d",
|
1510 |
+
"cire;": "\u2257",
|
1511 |
+
"cirfnint;": "\u2a10",
|
1512 |
+
"cirmid;": "\u2aef",
|
1513 |
+
"cirscir;": "\u29c2",
|
1514 |
+
"clubs;": "\u2663",
|
1515 |
+
"clubsuit;": "\u2663",
|
1516 |
+
"colon;": ":",
|
1517 |
+
"colone;": "\u2254",
|
1518 |
+
"coloneq;": "\u2254",
|
1519 |
+
"comma;": ",",
|
1520 |
+
"commat;": "@",
|
1521 |
+
"comp;": "\u2201",
|
1522 |
+
"compfn;": "\u2218",
|
1523 |
+
"complement;": "\u2201",
|
1524 |
+
"complexes;": "\u2102",
|
1525 |
+
"cong;": "\u2245",
|
1526 |
+
"congdot;": "\u2a6d",
|
1527 |
+
"conint;": "\u222e",
|
1528 |
+
"copf;": "\U0001d554",
|
1529 |
+
"coprod;": "\u2210",
|
1530 |
+
"copy": "\xa9",
|
1531 |
+
"copy;": "\xa9",
|
1532 |
+
"copysr;": "\u2117",
|
1533 |
+
"crarr;": "\u21b5",
|
1534 |
+
"cross;": "\u2717",
|
1535 |
+
"cscr;": "\U0001d4b8",
|
1536 |
+
"csub;": "\u2acf",
|
1537 |
+
"csube;": "\u2ad1",
|
1538 |
+
"csup;": "\u2ad0",
|
1539 |
+
"csupe;": "\u2ad2",
|
1540 |
+
"ctdot;": "\u22ef",
|
1541 |
+
"cudarrl;": "\u2938",
|
1542 |
+
"cudarrr;": "\u2935",
|
1543 |
+
"cuepr;": "\u22de",
|
1544 |
+
"cuesc;": "\u22df",
|
1545 |
+
"cularr;": "\u21b6",
|
1546 |
+
"cularrp;": "\u293d",
|
1547 |
+
"cup;": "\u222a",
|
1548 |
+
"cupbrcap;": "\u2a48",
|
1549 |
+
"cupcap;": "\u2a46",
|
1550 |
+
"cupcup;": "\u2a4a",
|
1551 |
+
"cupdot;": "\u228d",
|
1552 |
+
"cupor;": "\u2a45",
|
1553 |
+
"cups;": "\u222a\ufe00",
|
1554 |
+
"curarr;": "\u21b7",
|
1555 |
+
"curarrm;": "\u293c",
|
1556 |
+
"curlyeqprec;": "\u22de",
|
1557 |
+
"curlyeqsucc;": "\u22df",
|
1558 |
+
"curlyvee;": "\u22ce",
|
1559 |
+
"curlywedge;": "\u22cf",
|
1560 |
+
"curren": "\xa4",
|
1561 |
+
"curren;": "\xa4",
|
1562 |
+
"curvearrowleft;": "\u21b6",
|
1563 |
+
"curvearrowright;": "\u21b7",
|
1564 |
+
"cuvee;": "\u22ce",
|
1565 |
+
"cuwed;": "\u22cf",
|
1566 |
+
"cwconint;": "\u2232",
|
1567 |
+
"cwint;": "\u2231",
|
1568 |
+
"cylcty;": "\u232d",
|
1569 |
+
"dArr;": "\u21d3",
|
1570 |
+
"dHar;": "\u2965",
|
1571 |
+
"dagger;": "\u2020",
|
1572 |
+
"daleth;": "\u2138",
|
1573 |
+
"darr;": "\u2193",
|
1574 |
+
"dash;": "\u2010",
|
1575 |
+
"dashv;": "\u22a3",
|
1576 |
+
"dbkarow;": "\u290f",
|
1577 |
+
"dblac;": "\u02dd",
|
1578 |
+
"dcaron;": "\u010f",
|
1579 |
+
"dcy;": "\u0434",
|
1580 |
+
"dd;": "\u2146",
|
1581 |
+
"ddagger;": "\u2021",
|
1582 |
+
"ddarr;": "\u21ca",
|
1583 |
+
"ddotseq;": "\u2a77",
|
1584 |
+
"deg": "\xb0",
|
1585 |
+
"deg;": "\xb0",
|
1586 |
+
"delta;": "\u03b4",
|
1587 |
+
"demptyv;": "\u29b1",
|
1588 |
+
"dfisht;": "\u297f",
|
1589 |
+
"dfr;": "\U0001d521",
|
1590 |
+
"dharl;": "\u21c3",
|
1591 |
+
"dharr;": "\u21c2",
|
1592 |
+
"diam;": "\u22c4",
|
1593 |
+
"diamond;": "\u22c4",
|
1594 |
+
"diamondsuit;": "\u2666",
|
1595 |
+
"diams;": "\u2666",
|
1596 |
+
"die;": "\xa8",
|
1597 |
+
"digamma;": "\u03dd",
|
1598 |
+
"disin;": "\u22f2",
|
1599 |
+
"div;": "\xf7",
|
1600 |
+
"divide": "\xf7",
|
1601 |
+
"divide;": "\xf7",
|
1602 |
+
"divideontimes;": "\u22c7",
|
1603 |
+
"divonx;": "\u22c7",
|
1604 |
+
"djcy;": "\u0452",
|
1605 |
+
"dlcorn;": "\u231e",
|
1606 |
+
"dlcrop;": "\u230d",
|
1607 |
+
"dollar;": "$",
|
1608 |
+
"dopf;": "\U0001d555",
|
1609 |
+
"dot;": "\u02d9",
|
1610 |
+
"doteq;": "\u2250",
|
1611 |
+
"doteqdot;": "\u2251",
|
1612 |
+
"dotminus;": "\u2238",
|
1613 |
+
"dotplus;": "\u2214",
|
1614 |
+
"dotsquare;": "\u22a1",
|
1615 |
+
"doublebarwedge;": "\u2306",
|
1616 |
+
"downarrow;": "\u2193",
|
1617 |
+
"downdownarrows;": "\u21ca",
|
1618 |
+
"downharpoonleft;": "\u21c3",
|
1619 |
+
"downharpoonright;": "\u21c2",
|
1620 |
+
"drbkarow;": "\u2910",
|
1621 |
+
"drcorn;": "\u231f",
|
1622 |
+
"drcrop;": "\u230c",
|
1623 |
+
"dscr;": "\U0001d4b9",
|
1624 |
+
"dscy;": "\u0455",
|
1625 |
+
"dsol;": "\u29f6",
|
1626 |
+
"dstrok;": "\u0111",
|
1627 |
+
"dtdot;": "\u22f1",
|
1628 |
+
"dtri;": "\u25bf",
|
1629 |
+
"dtrif;": "\u25be",
|
1630 |
+
"duarr;": "\u21f5",
|
1631 |
+
"duhar;": "\u296f",
|
1632 |
+
"dwangle;": "\u29a6",
|
1633 |
+
"dzcy;": "\u045f",
|
1634 |
+
"dzigrarr;": "\u27ff",
|
1635 |
+
"eDDot;": "\u2a77",
|
1636 |
+
"eDot;": "\u2251",
|
1637 |
+
"eacute": "\xe9",
|
1638 |
+
"eacute;": "\xe9",
|
1639 |
+
"easter;": "\u2a6e",
|
1640 |
+
"ecaron;": "\u011b",
|
1641 |
+
"ecir;": "\u2256",
|
1642 |
+
"ecirc": "\xea",
|
1643 |
+
"ecirc;": "\xea",
|
1644 |
+
"ecolon;": "\u2255",
|
1645 |
+
"ecy;": "\u044d",
|
1646 |
+
"edot;": "\u0117",
|
1647 |
+
"ee;": "\u2147",
|
1648 |
+
"efDot;": "\u2252",
|
1649 |
+
"efr;": "\U0001d522",
|
1650 |
+
"eg;": "\u2a9a",
|
1651 |
+
"egrave": "\xe8",
|
1652 |
+
"egrave;": "\xe8",
|
1653 |
+
"egs;": "\u2a96",
|
1654 |
+
"egsdot;": "\u2a98",
|
1655 |
+
"el;": "\u2a99",
|
1656 |
+
"elinters;": "\u23e7",
|
1657 |
+
"ell;": "\u2113",
|
1658 |
+
"els;": "\u2a95",
|
1659 |
+
"elsdot;": "\u2a97",
|
1660 |
+
"emacr;": "\u0113",
|
1661 |
+
"empty;": "\u2205",
|
1662 |
+
"emptyset;": "\u2205",
|
1663 |
+
"emptyv;": "\u2205",
|
1664 |
+
"emsp13;": "\u2004",
|
1665 |
+
"emsp14;": "\u2005",
|
1666 |
+
"emsp;": "\u2003",
|
1667 |
+
"eng;": "\u014b",
|
1668 |
+
"ensp;": "\u2002",
|
1669 |
+
"eogon;": "\u0119",
|
1670 |
+
"eopf;": "\U0001d556",
|
1671 |
+
"epar;": "\u22d5",
|
1672 |
+
"eparsl;": "\u29e3",
|
1673 |
+
"eplus;": "\u2a71",
|
1674 |
+
"epsi;": "\u03b5",
|
1675 |
+
"epsilon;": "\u03b5",
|
1676 |
+
"epsiv;": "\u03f5",
|
1677 |
+
"eqcirc;": "\u2256",
|
1678 |
+
"eqcolon;": "\u2255",
|
1679 |
+
"eqsim;": "\u2242",
|
1680 |
+
"eqslantgtr;": "\u2a96",
|
1681 |
+
"eqslantless;": "\u2a95",
|
1682 |
+
"equals;": "=",
|
1683 |
+
"equest;": "\u225f",
|
1684 |
+
"equiv;": "\u2261",
|
1685 |
+
"equivDD;": "\u2a78",
|
1686 |
+
"eqvparsl;": "\u29e5",
|
1687 |
+
"erDot;": "\u2253",
|
1688 |
+
"erarr;": "\u2971",
|
1689 |
+
"escr;": "\u212f",
|
1690 |
+
"esdot;": "\u2250",
|
1691 |
+
"esim;": "\u2242",
|
1692 |
+
"eta;": "\u03b7",
|
1693 |
+
"eth": "\xf0",
|
1694 |
+
"eth;": "\xf0",
|
1695 |
+
"euml": "\xeb",
|
1696 |
+
"euml;": "\xeb",
|
1697 |
+
"euro;": "\u20ac",
|
1698 |
+
"excl;": "!",
|
1699 |
+
"exist;": "\u2203",
|
1700 |
+
"expectation;": "\u2130",
|
1701 |
+
"exponentiale;": "\u2147",
|
1702 |
+
"fallingdotseq;": "\u2252",
|
1703 |
+
"fcy;": "\u0444",
|
1704 |
+
"female;": "\u2640",
|
1705 |
+
"ffilig;": "\ufb03",
|
1706 |
+
"fflig;": "\ufb00",
|
1707 |
+
"ffllig;": "\ufb04",
|
1708 |
+
"ffr;": "\U0001d523",
|
1709 |
+
"filig;": "\ufb01",
|
1710 |
+
"fjlig;": "fj",
|
1711 |
+
"flat;": "\u266d",
|
1712 |
+
"fllig;": "\ufb02",
|
1713 |
+
"fltns;": "\u25b1",
|
1714 |
+
"fnof;": "\u0192",
|
1715 |
+
"fopf;": "\U0001d557",
|
1716 |
+
"forall;": "\u2200",
|
1717 |
+
"fork;": "\u22d4",
|
1718 |
+
"forkv;": "\u2ad9",
|
1719 |
+
"fpartint;": "\u2a0d",
|
1720 |
+
"frac12": "\xbd",
|
1721 |
+
"frac12;": "\xbd",
|
1722 |
+
"frac13;": "\u2153",
|
1723 |
+
"frac14": "\xbc",
|
1724 |
+
"frac14;": "\xbc",
|
1725 |
+
"frac15;": "\u2155",
|
1726 |
+
"frac16;": "\u2159",
|
1727 |
+
"frac18;": "\u215b",
|
1728 |
+
"frac23;": "\u2154",
|
1729 |
+
"frac25;": "\u2156",
|
1730 |
+
"frac34": "\xbe",
|
1731 |
+
"frac34;": "\xbe",
|
1732 |
+
"frac35;": "\u2157",
|
1733 |
+
"frac38;": "\u215c",
|
1734 |
+
"frac45;": "\u2158",
|
1735 |
+
"frac56;": "\u215a",
|
1736 |
+
"frac58;": "\u215d",
|
1737 |
+
"frac78;": "\u215e",
|
1738 |
+
"frasl;": "\u2044",
|
1739 |
+
"frown;": "\u2322",
|
1740 |
+
"fscr;": "\U0001d4bb",
|
1741 |
+
"gE;": "\u2267",
|
1742 |
+
"gEl;": "\u2a8c",
|
1743 |
+
"gacute;": "\u01f5",
|
1744 |
+
"gamma;": "\u03b3",
|
1745 |
+
"gammad;": "\u03dd",
|
1746 |
+
"gap;": "\u2a86",
|
1747 |
+
"gbreve;": "\u011f",
|
1748 |
+
"gcirc;": "\u011d",
|
1749 |
+
"gcy;": "\u0433",
|
1750 |
+
"gdot;": "\u0121",
|
1751 |
+
"ge;": "\u2265",
|
1752 |
+
"gel;": "\u22db",
|
1753 |
+
"geq;": "\u2265",
|
1754 |
+
"geqq;": "\u2267",
|
1755 |
+
"geqslant;": "\u2a7e",
|
1756 |
+
"ges;": "\u2a7e",
|
1757 |
+
"gescc;": "\u2aa9",
|
1758 |
+
"gesdot;": "\u2a80",
|
1759 |
+
"gesdoto;": "\u2a82",
|
1760 |
+
"gesdotol;": "\u2a84",
|
1761 |
+
"gesl;": "\u22db\ufe00",
|
1762 |
+
"gesles;": "\u2a94",
|
1763 |
+
"gfr;": "\U0001d524",
|
1764 |
+
"gg;": "\u226b",
|
1765 |
+
"ggg;": "\u22d9",
|
1766 |
+
"gimel;": "\u2137",
|
1767 |
+
"gjcy;": "\u0453",
|
1768 |
+
"gl;": "\u2277",
|
1769 |
+
"glE;": "\u2a92",
|
1770 |
+
"gla;": "\u2aa5",
|
1771 |
+
"glj;": "\u2aa4",
|
1772 |
+
"gnE;": "\u2269",
|
1773 |
+
"gnap;": "\u2a8a",
|
1774 |
+
"gnapprox;": "\u2a8a",
|
1775 |
+
"gne;": "\u2a88",
|
1776 |
+
"gneq;": "\u2a88",
|
1777 |
+
"gneqq;": "\u2269",
|
1778 |
+
"gnsim;": "\u22e7",
|
1779 |
+
"gopf;": "\U0001d558",
|
1780 |
+
"grave;": "`",
|
1781 |
+
"gscr;": "\u210a",
|
1782 |
+
"gsim;": "\u2273",
|
1783 |
+
"gsime;": "\u2a8e",
|
1784 |
+
"gsiml;": "\u2a90",
|
1785 |
+
"gt": ">",
|
1786 |
+
"gt;": ">",
|
1787 |
+
"gtcc;": "\u2aa7",
|
1788 |
+
"gtcir;": "\u2a7a",
|
1789 |
+
"gtdot;": "\u22d7",
|
1790 |
+
"gtlPar;": "\u2995",
|
1791 |
+
"gtquest;": "\u2a7c",
|
1792 |
+
"gtrapprox;": "\u2a86",
|
1793 |
+
"gtrarr;": "\u2978",
|
1794 |
+
"gtrdot;": "\u22d7",
|
1795 |
+
"gtreqless;": "\u22db",
|
1796 |
+
"gtreqqless;": "\u2a8c",
|
1797 |
+
"gtrless;": "\u2277",
|
1798 |
+
"gtrsim;": "\u2273",
|
1799 |
+
"gvertneqq;": "\u2269\ufe00",
|
1800 |
+
"gvnE;": "\u2269\ufe00",
|
1801 |
+
"hArr;": "\u21d4",
|
1802 |
+
"hairsp;": "\u200a",
|
1803 |
+
"half;": "\xbd",
|
1804 |
+
"hamilt;": "\u210b",
|
1805 |
+
"hardcy;": "\u044a",
|
1806 |
+
"harr;": "\u2194",
|
1807 |
+
"harrcir;": "\u2948",
|
1808 |
+
"harrw;": "\u21ad",
|
1809 |
+
"hbar;": "\u210f",
|
1810 |
+
"hcirc;": "\u0125",
|
1811 |
+
"hearts;": "\u2665",
|
1812 |
+
"heartsuit;": "\u2665",
|
1813 |
+
"hellip;": "\u2026",
|
1814 |
+
"hercon;": "\u22b9",
|
1815 |
+
"hfr;": "\U0001d525",
|
1816 |
+
"hksearow;": "\u2925",
|
1817 |
+
"hkswarow;": "\u2926",
|
1818 |
+
"hoarr;": "\u21ff",
|
1819 |
+
"homtht;": "\u223b",
|
1820 |
+
"hookleftarrow;": "\u21a9",
|
1821 |
+
"hookrightarrow;": "\u21aa",
|
1822 |
+
"hopf;": "\U0001d559",
|
1823 |
+
"horbar;": "\u2015",
|
1824 |
+
"hscr;": "\U0001d4bd",
|
1825 |
+
"hslash;": "\u210f",
|
1826 |
+
"hstrok;": "\u0127",
|
1827 |
+
"hybull;": "\u2043",
|
1828 |
+
"hyphen;": "\u2010",
|
1829 |
+
"iacute": "\xed",
|
1830 |
+
"iacute;": "\xed",
|
1831 |
+
"ic;": "\u2063",
|
1832 |
+
"icirc": "\xee",
|
1833 |
+
"icirc;": "\xee",
|
1834 |
+
"icy;": "\u0438",
|
1835 |
+
"iecy;": "\u0435",
|
1836 |
+
"iexcl": "\xa1",
|
1837 |
+
"iexcl;": "\xa1",
|
1838 |
+
"iff;": "\u21d4",
|
1839 |
+
"ifr;": "\U0001d526",
|
1840 |
+
"igrave": "\xec",
|
1841 |
+
"igrave;": "\xec",
|
1842 |
+
"ii;": "\u2148",
|
1843 |
+
"iiiint;": "\u2a0c",
|
1844 |
+
"iiint;": "\u222d",
|
1845 |
+
"iinfin;": "\u29dc",
|
1846 |
+
"iiota;": "\u2129",
|
1847 |
+
"ijlig;": "\u0133",
|
1848 |
+
"imacr;": "\u012b",
|
1849 |
+
"image;": "\u2111",
|
1850 |
+
"imagline;": "\u2110",
|
1851 |
+
"imagpart;": "\u2111",
|
1852 |
+
"imath;": "\u0131",
|
1853 |
+
"imof;": "\u22b7",
|
1854 |
+
"imped;": "\u01b5",
|
1855 |
+
"in;": "\u2208",
|
1856 |
+
"incare;": "\u2105",
|
1857 |
+
"infin;": "\u221e",
|
1858 |
+
"infintie;": "\u29dd",
|
1859 |
+
"inodot;": "\u0131",
|
1860 |
+
"int;": "\u222b",
|
1861 |
+
"intcal;": "\u22ba",
|
1862 |
+
"integers;": "\u2124",
|
1863 |
+
"intercal;": "\u22ba",
|
1864 |
+
"intlarhk;": "\u2a17",
|
1865 |
+
"intprod;": "\u2a3c",
|
1866 |
+
"iocy;": "\u0451",
|
1867 |
+
"iogon;": "\u012f",
|
1868 |
+
"iopf;": "\U0001d55a",
|
1869 |
+
"iota;": "\u03b9",
|
1870 |
+
"iprod;": "\u2a3c",
|
1871 |
+
"iquest": "\xbf",
|
1872 |
+
"iquest;": "\xbf",
|
1873 |
+
"iscr;": "\U0001d4be",
|
1874 |
+
"isin;": "\u2208",
|
1875 |
+
"isinE;": "\u22f9",
|
1876 |
+
"isindot;": "\u22f5",
|
1877 |
+
"isins;": "\u22f4",
|
1878 |
+
"isinsv;": "\u22f3",
|
1879 |
+
"isinv;": "\u2208",
|
1880 |
+
"it;": "\u2062",
|
1881 |
+
"itilde;": "\u0129",
|
1882 |
+
"iukcy;": "\u0456",
|
1883 |
+
"iuml": "\xef",
|
1884 |
+
"iuml;": "\xef",
|
1885 |
+
"jcirc;": "\u0135",
|
1886 |
+
"jcy;": "\u0439",
|
1887 |
+
"jfr;": "\U0001d527",
|
1888 |
+
"jmath;": "\u0237",
|
1889 |
+
"jopf;": "\U0001d55b",
|
1890 |
+
"jscr;": "\U0001d4bf",
|
1891 |
+
"jsercy;": "\u0458",
|
1892 |
+
"jukcy;": "\u0454",
|
1893 |
+
"kappa;": "\u03ba",
|
1894 |
+
"kappav;": "\u03f0",
|
1895 |
+
"kcedil;": "\u0137",
|
1896 |
+
"kcy;": "\u043a",
|
1897 |
+
"kfr;": "\U0001d528",
|
1898 |
+
"kgreen;": "\u0138",
|
1899 |
+
"khcy;": "\u0445",
|
1900 |
+
"kjcy;": "\u045c",
|
1901 |
+
"kopf;": "\U0001d55c",
|
1902 |
+
"kscr;": "\U0001d4c0",
|
1903 |
+
"lAarr;": "\u21da",
|
1904 |
+
"lArr;": "\u21d0",
|
1905 |
+
"lAtail;": "\u291b",
|
1906 |
+
"lBarr;": "\u290e",
|
1907 |
+
"lE;": "\u2266",
|
1908 |
+
"lEg;": "\u2a8b",
|
1909 |
+
"lHar;": "\u2962",
|
1910 |
+
"lacute;": "\u013a",
|
1911 |
+
"laemptyv;": "\u29b4",
|
1912 |
+
"lagran;": "\u2112",
|
1913 |
+
"lambda;": "\u03bb",
|
1914 |
+
"lang;": "\u27e8",
|
1915 |
+
"langd;": "\u2991",
|
1916 |
+
"langle;": "\u27e8",
|
1917 |
+
"lap;": "\u2a85",
|
1918 |
+
"laquo": "\xab",
|
1919 |
+
"laquo;": "\xab",
|
1920 |
+
"larr;": "\u2190",
|
1921 |
+
"larrb;": "\u21e4",
|
1922 |
+
"larrbfs;": "\u291f",
|
1923 |
+
"larrfs;": "\u291d",
|
1924 |
+
"larrhk;": "\u21a9",
|
1925 |
+
"larrlp;": "\u21ab",
|
1926 |
+
"larrpl;": "\u2939",
|
1927 |
+
"larrsim;": "\u2973",
|
1928 |
+
"larrtl;": "\u21a2",
|
1929 |
+
"lat;": "\u2aab",
|
1930 |
+
"latail;": "\u2919",
|
1931 |
+
"late;": "\u2aad",
|
1932 |
+
"lates;": "\u2aad\ufe00",
|
1933 |
+
"lbarr;": "\u290c",
|
1934 |
+
"lbbrk;": "\u2772",
|
1935 |
+
"lbrace;": "{",
|
1936 |
+
"lbrack;": "[",
|
1937 |
+
"lbrke;": "\u298b",
|
1938 |
+
"lbrksld;": "\u298f",
|
1939 |
+
"lbrkslu;": "\u298d",
|
1940 |
+
"lcaron;": "\u013e",
|
1941 |
+
"lcedil;": "\u013c",
|
1942 |
+
"lceil;": "\u2308",
|
1943 |
+
"lcub;": "{",
|
1944 |
+
"lcy;": "\u043b",
|
1945 |
+
"ldca;": "\u2936",
|
1946 |
+
"ldquo;": "\u201c",
|
1947 |
+
"ldquor;": "\u201e",
|
1948 |
+
"ldrdhar;": "\u2967",
|
1949 |
+
"ldrushar;": "\u294b",
|
1950 |
+
"ldsh;": "\u21b2",
|
1951 |
+
"le;": "\u2264",
|
1952 |
+
"leftarrow;": "\u2190",
|
1953 |
+
"leftarrowtail;": "\u21a2",
|
1954 |
+
"leftharpoondown;": "\u21bd",
|
1955 |
+
"leftharpoonup;": "\u21bc",
|
1956 |
+
"leftleftarrows;": "\u21c7",
|
1957 |
+
"leftrightarrow;": "\u2194",
|
1958 |
+
"leftrightarrows;": "\u21c6",
|
1959 |
+
"leftrightharpoons;": "\u21cb",
|
1960 |
+
"leftrightsquigarrow;": "\u21ad",
|
1961 |
+
"leftthreetimes;": "\u22cb",
|
1962 |
+
"leg;": "\u22da",
|
1963 |
+
"leq;": "\u2264",
|
1964 |
+
"leqq;": "\u2266",
|
1965 |
+
"leqslant;": "\u2a7d",
|
1966 |
+
"les;": "\u2a7d",
|
1967 |
+
"lescc;": "\u2aa8",
|
1968 |
+
"lesdot;": "\u2a7f",
|
1969 |
+
"lesdoto;": "\u2a81",
|
1970 |
+
"lesdotor;": "\u2a83",
|
1971 |
+
"lesg;": "\u22da\ufe00",
|
1972 |
+
"lesges;": "\u2a93",
|
1973 |
+
"lessapprox;": "\u2a85",
|
1974 |
+
"lessdot;": "\u22d6",
|
1975 |
+
"lesseqgtr;": "\u22da",
|
1976 |
+
"lesseqqgtr;": "\u2a8b",
|
1977 |
+
"lessgtr;": "\u2276",
|
1978 |
+
"lesssim;": "\u2272",
|
1979 |
+
"lfisht;": "\u297c",
|
1980 |
+
"lfloor;": "\u230a",
|
1981 |
+
"lfr;": "\U0001d529",
|
1982 |
+
"lg;": "\u2276",
|
1983 |
+
"lgE;": "\u2a91",
|
1984 |
+
"lhard;": "\u21bd",
|
1985 |
+
"lharu;": "\u21bc",
|
1986 |
+
"lharul;": "\u296a",
|
1987 |
+
"lhblk;": "\u2584",
|
1988 |
+
"ljcy;": "\u0459",
|
1989 |
+
"ll;": "\u226a",
|
1990 |
+
"llarr;": "\u21c7",
|
1991 |
+
"llcorner;": "\u231e",
|
1992 |
+
"llhard;": "\u296b",
|
1993 |
+
"lltri;": "\u25fa",
|
1994 |
+
"lmidot;": "\u0140",
|
1995 |
+
"lmoust;": "\u23b0",
|
1996 |
+
"lmoustache;": "\u23b0",
|
1997 |
+
"lnE;": "\u2268",
|
1998 |
+
"lnap;": "\u2a89",
|
1999 |
+
"lnapprox;": "\u2a89",
|
2000 |
+
"lne;": "\u2a87",
|
2001 |
+
"lneq;": "\u2a87",
|
2002 |
+
"lneqq;": "\u2268",
|
2003 |
+
"lnsim;": "\u22e6",
|
2004 |
+
"loang;": "\u27ec",
|
2005 |
+
"loarr;": "\u21fd",
|
2006 |
+
"lobrk;": "\u27e6",
|
2007 |
+
"longleftarrow;": "\u27f5",
|
2008 |
+
"longleftrightarrow;": "\u27f7",
|
2009 |
+
"longmapsto;": "\u27fc",
|
2010 |
+
"longrightarrow;": "\u27f6",
|
2011 |
+
"looparrowleft;": "\u21ab",
|
2012 |
+
"looparrowright;": "\u21ac",
|
2013 |
+
"lopar;": "\u2985",
|
2014 |
+
"lopf;": "\U0001d55d",
|
2015 |
+
"loplus;": "\u2a2d",
|
2016 |
+
"lotimes;": "\u2a34",
|
2017 |
+
"lowast;": "\u2217",
|
2018 |
+
"lowbar;": "_",
|
2019 |
+
"loz;": "\u25ca",
|
2020 |
+
"lozenge;": "\u25ca",
|
2021 |
+
"lozf;": "\u29eb",
|
2022 |
+
"lpar;": "(",
|
2023 |
+
"lparlt;": "\u2993",
|
2024 |
+
"lrarr;": "\u21c6",
|
2025 |
+
"lrcorner;": "\u231f",
|
2026 |
+
"lrhar;": "\u21cb",
|
2027 |
+
"lrhard;": "\u296d",
|
2028 |
+
"lrm;": "\u200e",
|
2029 |
+
"lrtri;": "\u22bf",
|
2030 |
+
"lsaquo;": "\u2039",
|
2031 |
+
"lscr;": "\U0001d4c1",
|
2032 |
+
"lsh;": "\u21b0",
|
2033 |
+
"lsim;": "\u2272",
|
2034 |
+
"lsime;": "\u2a8d",
|
2035 |
+
"lsimg;": "\u2a8f",
|
2036 |
+
"lsqb;": "[",
|
2037 |
+
"lsquo;": "\u2018",
|
2038 |
+
"lsquor;": "\u201a",
|
2039 |
+
"lstrok;": "\u0142",
|
2040 |
+
"lt": "<",
|
2041 |
+
"lt;": "<",
|
2042 |
+
"ltcc;": "\u2aa6",
|
2043 |
+
"ltcir;": "\u2a79",
|
2044 |
+
"ltdot;": "\u22d6",
|
2045 |
+
"lthree;": "\u22cb",
|
2046 |
+
"ltimes;": "\u22c9",
|
2047 |
+
"ltlarr;": "\u2976",
|
2048 |
+
"ltquest;": "\u2a7b",
|
2049 |
+
"ltrPar;": "\u2996",
|
2050 |
+
"ltri;": "\u25c3",
|
2051 |
+
"ltrie;": "\u22b4",
|
2052 |
+
"ltrif;": "\u25c2",
|
2053 |
+
"lurdshar;": "\u294a",
|
2054 |
+
"luruhar;": "\u2966",
|
2055 |
+
"lvertneqq;": "\u2268\ufe00",
|
2056 |
+
"lvnE;": "\u2268\ufe00",
|
2057 |
+
"mDDot;": "\u223a",
|
2058 |
+
"macr": "\xaf",
|
2059 |
+
"macr;": "\xaf",
|
2060 |
+
"male;": "\u2642",
|
2061 |
+
"malt;": "\u2720",
|
2062 |
+
"maltese;": "\u2720",
|
2063 |
+
"map;": "\u21a6",
|
2064 |
+
"mapsto;": "\u21a6",
|
2065 |
+
"mapstodown;": "\u21a7",
|
2066 |
+
"mapstoleft;": "\u21a4",
|
2067 |
+
"mapstoup;": "\u21a5",
|
2068 |
+
"marker;": "\u25ae",
|
2069 |
+
"mcomma;": "\u2a29",
|
2070 |
+
"mcy;": "\u043c",
|
2071 |
+
"mdash;": "\u2014",
|
2072 |
+
"measuredangle;": "\u2221",
|
2073 |
+
"mfr;": "\U0001d52a",
|
2074 |
+
"mho;": "\u2127",
|
2075 |
+
"micro": "\xb5",
|
2076 |
+
"micro;": "\xb5",
|
2077 |
+
"mid;": "\u2223",
|
2078 |
+
"midast;": "*",
|
2079 |
+
"midcir;": "\u2af0",
|
2080 |
+
"middot": "\xb7",
|
2081 |
+
"middot;": "\xb7",
|
2082 |
+
"minus;": "\u2212",
|
2083 |
+
"minusb;": "\u229f",
|
2084 |
+
"minusd;": "\u2238",
|
2085 |
+
"minusdu;": "\u2a2a",
|
2086 |
+
"mlcp;": "\u2adb",
|
2087 |
+
"mldr;": "\u2026",
|
2088 |
+
"mnplus;": "\u2213",
|
2089 |
+
"models;": "\u22a7",
|
2090 |
+
"mopf;": "\U0001d55e",
|
2091 |
+
"mp;": "\u2213",
|
2092 |
+
"mscr;": "\U0001d4c2",
|
2093 |
+
"mstpos;": "\u223e",
|
2094 |
+
"mu;": "\u03bc",
|
2095 |
+
"multimap;": "\u22b8",
|
2096 |
+
"mumap;": "\u22b8",
|
2097 |
+
"nGg;": "\u22d9\u0338",
|
2098 |
+
"nGt;": "\u226b\u20d2",
|
2099 |
+
"nGtv;": "\u226b\u0338",
|
2100 |
+
"nLeftarrow;": "\u21cd",
|
2101 |
+
"nLeftrightarrow;": "\u21ce",
|
2102 |
+
"nLl;": "\u22d8\u0338",
|
2103 |
+
"nLt;": "\u226a\u20d2",
|
2104 |
+
"nLtv;": "\u226a\u0338",
|
2105 |
+
"nRightarrow;": "\u21cf",
|
2106 |
+
"nVDash;": "\u22af",
|
2107 |
+
"nVdash;": "\u22ae",
|
2108 |
+
"nabla;": "\u2207",
|
2109 |
+
"nacute;": "\u0144",
|
2110 |
+
"nang;": "\u2220\u20d2",
|
2111 |
+
"nap;": "\u2249",
|
2112 |
+
"napE;": "\u2a70\u0338",
|
2113 |
+
"napid;": "\u224b\u0338",
|
2114 |
+
"napos;": "\u0149",
|
2115 |
+
"napprox;": "\u2249",
|
2116 |
+
"natur;": "\u266e",
|
2117 |
+
"natural;": "\u266e",
|
2118 |
+
"naturals;": "\u2115",
|
2119 |
+
"nbsp": "\xa0",
|
2120 |
+
"nbsp;": "\xa0",
|
2121 |
+
"nbump;": "\u224e\u0338",
|
2122 |
+
"nbumpe;": "\u224f\u0338",
|
2123 |
+
"ncap;": "\u2a43",
|
2124 |
+
"ncaron;": "\u0148",
|
2125 |
+
"ncedil;": "\u0146",
|
2126 |
+
"ncong;": "\u2247",
|
2127 |
+
"ncongdot;": "\u2a6d\u0338",
|
2128 |
+
"ncup;": "\u2a42",
|
2129 |
+
"ncy;": "\u043d",
|
2130 |
+
"ndash;": "\u2013",
|
2131 |
+
"ne;": "\u2260",
|
2132 |
+
"neArr;": "\u21d7",
|
2133 |
+
"nearhk;": "\u2924",
|
2134 |
+
"nearr;": "\u2197",
|
2135 |
+
"nearrow;": "\u2197",
|
2136 |
+
"nedot;": "\u2250\u0338",
|
2137 |
+
"nequiv;": "\u2262",
|
2138 |
+
"nesear;": "\u2928",
|
2139 |
+
"nesim;": "\u2242\u0338",
|
2140 |
+
"nexist;": "\u2204",
|
2141 |
+
"nexists;": "\u2204",
|
2142 |
+
"nfr;": "\U0001d52b",
|
2143 |
+
"ngE;": "\u2267\u0338",
|
2144 |
+
"nge;": "\u2271",
|
2145 |
+
"ngeq;": "\u2271",
|
2146 |
+
"ngeqq;": "\u2267\u0338",
|
2147 |
+
"ngeqslant;": "\u2a7e\u0338",
|
2148 |
+
"nges;": "\u2a7e\u0338",
|
2149 |
+
"ngsim;": "\u2275",
|
2150 |
+
"ngt;": "\u226f",
|
2151 |
+
"ngtr;": "\u226f",
|
2152 |
+
"nhArr;": "\u21ce",
|
2153 |
+
"nharr;": "\u21ae",
|
2154 |
+
"nhpar;": "\u2af2",
|
2155 |
+
"ni;": "\u220b",
|
2156 |
+
"nis;": "\u22fc",
|
2157 |
+
"nisd;": "\u22fa",
|
2158 |
+
"niv;": "\u220b",
|
2159 |
+
"njcy;": "\u045a",
|
2160 |
+
"nlArr;": "\u21cd",
|
2161 |
+
"nlE;": "\u2266\u0338",
|
2162 |
+
"nlarr;": "\u219a",
|
2163 |
+
"nldr;": "\u2025",
|
2164 |
+
"nle;": "\u2270",
|
2165 |
+
"nleftarrow;": "\u219a",
|
2166 |
+
"nleftrightarrow;": "\u21ae",
|
2167 |
+
"nleq;": "\u2270",
|
2168 |
+
"nleqq;": "\u2266\u0338",
|
2169 |
+
"nleqslant;": "\u2a7d\u0338",
|
2170 |
+
"nles;": "\u2a7d\u0338",
|
2171 |
+
"nless;": "\u226e",
|
2172 |
+
"nlsim;": "\u2274",
|
2173 |
+
"nlt;": "\u226e",
|
2174 |
+
"nltri;": "\u22ea",
|
2175 |
+
"nltrie;": "\u22ec",
|
2176 |
+
"nmid;": "\u2224",
|
2177 |
+
"nopf;": "\U0001d55f",
|
2178 |
+
"not": "\xac",
|
2179 |
+
"not;": "\xac",
|
2180 |
+
"notin;": "\u2209",
|
2181 |
+
"notinE;": "\u22f9\u0338",
|
2182 |
+
"notindot;": "\u22f5\u0338",
|
2183 |
+
"notinva;": "\u2209",
|
2184 |
+
"notinvb;": "\u22f7",
|
2185 |
+
"notinvc;": "\u22f6",
|
2186 |
+
"notni;": "\u220c",
|
2187 |
+
"notniva;": "\u220c",
|
2188 |
+
"notnivb;": "\u22fe",
|
2189 |
+
"notnivc;": "\u22fd",
|
2190 |
+
"npar;": "\u2226",
|
2191 |
+
"nparallel;": "\u2226",
|
2192 |
+
"nparsl;": "\u2afd\u20e5",
|
2193 |
+
"npart;": "\u2202\u0338",
|
2194 |
+
"npolint;": "\u2a14",
|
2195 |
+
"npr;": "\u2280",
|
2196 |
+
"nprcue;": "\u22e0",
|
2197 |
+
"npre;": "\u2aaf\u0338",
|
2198 |
+
"nprec;": "\u2280",
|
2199 |
+
"npreceq;": "\u2aaf\u0338",
|
2200 |
+
"nrArr;": "\u21cf",
|
2201 |
+
"nrarr;": "\u219b",
|
2202 |
+
"nrarrc;": "\u2933\u0338",
|
2203 |
+
"nrarrw;": "\u219d\u0338",
|
2204 |
+
"nrightarrow;": "\u219b",
|
2205 |
+
"nrtri;": "\u22eb",
|
2206 |
+
"nrtrie;": "\u22ed",
|
2207 |
+
"nsc;": "\u2281",
|
2208 |
+
"nsccue;": "\u22e1",
|
2209 |
+
"nsce;": "\u2ab0\u0338",
|
2210 |
+
"nscr;": "\U0001d4c3",
|
2211 |
+
"nshortmid;": "\u2224",
|
2212 |
+
"nshortparallel;": "\u2226",
|
2213 |
+
"nsim;": "\u2241",
|
2214 |
+
"nsime;": "\u2244",
|
2215 |
+
"nsimeq;": "\u2244",
|
2216 |
+
"nsmid;": "\u2224",
|
2217 |
+
"nspar;": "\u2226",
|
2218 |
+
"nsqsube;": "\u22e2",
|
2219 |
+
"nsqsupe;": "\u22e3",
|
2220 |
+
"nsub;": "\u2284",
|
2221 |
+
"nsubE;": "\u2ac5\u0338",
|
2222 |
+
"nsube;": "\u2288",
|
2223 |
+
"nsubset;": "\u2282\u20d2",
|
2224 |
+
"nsubseteq;": "\u2288",
|
2225 |
+
"nsubseteqq;": "\u2ac5\u0338",
|
2226 |
+
"nsucc;": "\u2281",
|
2227 |
+
"nsucceq;": "\u2ab0\u0338",
|
2228 |
+
"nsup;": "\u2285",
|
2229 |
+
"nsupE;": "\u2ac6\u0338",
|
2230 |
+
"nsupe;": "\u2289",
|
2231 |
+
"nsupset;": "\u2283\u20d2",
|
2232 |
+
"nsupseteq;": "\u2289",
|
2233 |
+
"nsupseteqq;": "\u2ac6\u0338",
|
2234 |
+
"ntgl;": "\u2279",
|
2235 |
+
"ntilde": "\xf1",
|
2236 |
+
"ntilde;": "\xf1",
|
2237 |
+
"ntlg;": "\u2278",
|
2238 |
+
"ntriangleleft;": "\u22ea",
|
2239 |
+
"ntrianglelefteq;": "\u22ec",
|
2240 |
+
"ntriangleright;": "\u22eb",
|
2241 |
+
"ntrianglerighteq;": "\u22ed",
|
2242 |
+
"nu;": "\u03bd",
|
2243 |
+
"num;": "#",
|
2244 |
+
"numero;": "\u2116",
|
2245 |
+
"numsp;": "\u2007",
|
2246 |
+
"nvDash;": "\u22ad",
|
2247 |
+
"nvHarr;": "\u2904",
|
2248 |
+
"nvap;": "\u224d\u20d2",
|
2249 |
+
"nvdash;": "\u22ac",
|
2250 |
+
"nvge;": "\u2265\u20d2",
|
2251 |
+
"nvgt;": ">\u20d2",
|
2252 |
+
"nvinfin;": "\u29de",
|
2253 |
+
"nvlArr;": "\u2902",
|
2254 |
+
"nvle;": "\u2264\u20d2",
|
2255 |
+
"nvlt;": "<\u20d2",
|
2256 |
+
"nvltrie;": "\u22b4\u20d2",
|
2257 |
+
"nvrArr;": "\u2903",
|
2258 |
+
"nvrtrie;": "\u22b5\u20d2",
|
2259 |
+
"nvsim;": "\u223c\u20d2",
|
2260 |
+
"nwArr;": "\u21d6",
|
2261 |
+
"nwarhk;": "\u2923",
|
2262 |
+
"nwarr;": "\u2196",
|
2263 |
+
"nwarrow;": "\u2196",
|
2264 |
+
"nwnear;": "\u2927",
|
2265 |
+
"oS;": "\u24c8",
|
2266 |
+
"oacute": "\xf3",
|
2267 |
+
"oacute;": "\xf3",
|
2268 |
+
"oast;": "\u229b",
|
2269 |
+
"ocir;": "\u229a",
|
2270 |
+
"ocirc": "\xf4",
|
2271 |
+
"ocirc;": "\xf4",
|
2272 |
+
"ocy;": "\u043e",
|
2273 |
+
"odash;": "\u229d",
|
2274 |
+
"odblac;": "\u0151",
|
2275 |
+
"odiv;": "\u2a38",
|
2276 |
+
"odot;": "\u2299",
|
2277 |
+
"odsold;": "\u29bc",
|
2278 |
+
"oelig;": "\u0153",
|
2279 |
+
"ofcir;": "\u29bf",
|
2280 |
+
"ofr;": "\U0001d52c",
|
2281 |
+
"ogon;": "\u02db",
|
2282 |
+
"ograve": "\xf2",
|
2283 |
+
"ograve;": "\xf2",
|
2284 |
+
"ogt;": "\u29c1",
|
2285 |
+
"ohbar;": "\u29b5",
|
2286 |
+
"ohm;": "\u03a9",
|
2287 |
+
"oint;": "\u222e",
|
2288 |
+
"olarr;": "\u21ba",
|
2289 |
+
"olcir;": "\u29be",
|
2290 |
+
"olcross;": "\u29bb",
|
2291 |
+
"oline;": "\u203e",
|
2292 |
+
"olt;": "\u29c0",
|
2293 |
+
"omacr;": "\u014d",
|
2294 |
+
"omega;": "\u03c9",
|
2295 |
+
"omicron;": "\u03bf",
|
2296 |
+
"omid;": "\u29b6",
|
2297 |
+
"ominus;": "\u2296",
|
2298 |
+
"oopf;": "\U0001d560",
|
2299 |
+
"opar;": "\u29b7",
|
2300 |
+
"operp;": "\u29b9",
|
2301 |
+
"oplus;": "\u2295",
|
2302 |
+
"or;": "\u2228",
|
2303 |
+
"orarr;": "\u21bb",
|
2304 |
+
"ord;": "\u2a5d",
|
2305 |
+
"order;": "\u2134",
|
2306 |
+
"orderof;": "\u2134",
|
2307 |
+
"ordf": "\xaa",
|
2308 |
+
"ordf;": "\xaa",
|
2309 |
+
"ordm": "\xba",
|
2310 |
+
"ordm;": "\xba",
|
2311 |
+
"origof;": "\u22b6",
|
2312 |
+
"oror;": "\u2a56",
|
2313 |
+
"orslope;": "\u2a57",
|
2314 |
+
"orv;": "\u2a5b",
|
2315 |
+
"oscr;": "\u2134",
|
2316 |
+
"oslash": "\xf8",
|
2317 |
+
"oslash;": "\xf8",
|
2318 |
+
"osol;": "\u2298",
|
2319 |
+
"otilde": "\xf5",
|
2320 |
+
"otilde;": "\xf5",
|
2321 |
+
"otimes;": "\u2297",
|
2322 |
+
"otimesas;": "\u2a36",
|
2323 |
+
"ouml": "\xf6",
|
2324 |
+
"ouml;": "\xf6",
|
2325 |
+
"ovbar;": "\u233d",
|
2326 |
+
"par;": "\u2225",
|
2327 |
+
"para": "\xb6",
|
2328 |
+
"para;": "\xb6",
|
2329 |
+
"parallel;": "\u2225",
|
2330 |
+
"parsim;": "\u2af3",
|
2331 |
+
"parsl;": "\u2afd",
|
2332 |
+
"part;": "\u2202",
|
2333 |
+
"pcy;": "\u043f",
|
2334 |
+
"percnt;": "%",
|
2335 |
+
"period;": ".",
|
2336 |
+
"permil;": "\u2030",
|
2337 |
+
"perp;": "\u22a5",
|
2338 |
+
"pertenk;": "\u2031",
|
2339 |
+
"pfr;": "\U0001d52d",
|
2340 |
+
"phi;": "\u03c6",
|
2341 |
+
"phiv;": "\u03d5",
|
2342 |
+
"phmmat;": "\u2133",
|
2343 |
+
"phone;": "\u260e",
|
2344 |
+
"pi;": "\u03c0",
|
2345 |
+
"pitchfork;": "\u22d4",
|
2346 |
+
"piv;": "\u03d6",
|
2347 |
+
"planck;": "\u210f",
|
2348 |
+
"planckh;": "\u210e",
|
2349 |
+
"plankv;": "\u210f",
|
2350 |
+
"plus;": "+",
|
2351 |
+
"plusacir;": "\u2a23",
|
2352 |
+
"plusb;": "\u229e",
|
2353 |
+
"pluscir;": "\u2a22",
|
2354 |
+
"plusdo;": "\u2214",
|
2355 |
+
"plusdu;": "\u2a25",
|
2356 |
+
"pluse;": "\u2a72",
|
2357 |
+
"plusmn": "\xb1",
|
2358 |
+
"plusmn;": "\xb1",
|
2359 |
+
"plussim;": "\u2a26",
|
2360 |
+
"plustwo;": "\u2a27",
|
2361 |
+
"pm;": "\xb1",
|
2362 |
+
"pointint;": "\u2a15",
|
2363 |
+
"popf;": "\U0001d561",
|
2364 |
+
"pound": "\xa3",
|
2365 |
+
"pound;": "\xa3",
|
2366 |
+
"pr;": "\u227a",
|
2367 |
+
"prE;": "\u2ab3",
|
2368 |
+
"prap;": "\u2ab7",
|
2369 |
+
"prcue;": "\u227c",
|
2370 |
+
"pre;": "\u2aaf",
|
2371 |
+
"prec;": "\u227a",
|
2372 |
+
"precapprox;": "\u2ab7",
|
2373 |
+
"preccurlyeq;": "\u227c",
|
2374 |
+
"preceq;": "\u2aaf",
|
2375 |
+
"precnapprox;": "\u2ab9",
|
2376 |
+
"precneqq;": "\u2ab5",
|
2377 |
+
"precnsim;": "\u22e8",
|
2378 |
+
"precsim;": "\u227e",
|
2379 |
+
"prime;": "\u2032",
|
2380 |
+
"primes;": "\u2119",
|
2381 |
+
"prnE;": "\u2ab5",
|
2382 |
+
"prnap;": "\u2ab9",
|
2383 |
+
"prnsim;": "\u22e8",
|
2384 |
+
"prod;": "\u220f",
|
2385 |
+
"profalar;": "\u232e",
|
2386 |
+
"profline;": "\u2312",
|
2387 |
+
"profsurf;": "\u2313",
|
2388 |
+
"prop;": "\u221d",
|
2389 |
+
"propto;": "\u221d",
|
2390 |
+
"prsim;": "\u227e",
|
2391 |
+
"prurel;": "\u22b0",
|
2392 |
+
"pscr;": "\U0001d4c5",
|
2393 |
+
"psi;": "\u03c8",
|
2394 |
+
"puncsp;": "\u2008",
|
2395 |
+
"qfr;": "\U0001d52e",
|
2396 |
+
"qint;": "\u2a0c",
|
2397 |
+
"qopf;": "\U0001d562",
|
2398 |
+
"qprime;": "\u2057",
|
2399 |
+
"qscr;": "\U0001d4c6",
|
2400 |
+
"quaternions;": "\u210d",
|
2401 |
+
"quatint;": "\u2a16",
|
2402 |
+
"quest;": "?",
|
2403 |
+
"questeq;": "\u225f",
|
2404 |
+
"quot": "\"",
|
2405 |
+
"quot;": "\"",
|
2406 |
+
"rAarr;": "\u21db",
|
2407 |
+
"rArr;": "\u21d2",
|
2408 |
+
"rAtail;": "\u291c",
|
2409 |
+
"rBarr;": "\u290f",
|
2410 |
+
"rHar;": "\u2964",
|
2411 |
+
"race;": "\u223d\u0331",
|
2412 |
+
"racute;": "\u0155",
|
2413 |
+
"radic;": "\u221a",
|
2414 |
+
"raemptyv;": "\u29b3",
|
2415 |
+
"rang;": "\u27e9",
|
2416 |
+
"rangd;": "\u2992",
|
2417 |
+
"range;": "\u29a5",
|
2418 |
+
"rangle;": "\u27e9",
|
2419 |
+
"raquo": "\xbb",
|
2420 |
+
"raquo;": "\xbb",
|
2421 |
+
"rarr;": "\u2192",
|
2422 |
+
"rarrap;": "\u2975",
|
2423 |
+
"rarrb;": "\u21e5",
|
2424 |
+
"rarrbfs;": "\u2920",
|
2425 |
+
"rarrc;": "\u2933",
|
2426 |
+
"rarrfs;": "\u291e",
|
2427 |
+
"rarrhk;": "\u21aa",
|
2428 |
+
"rarrlp;": "\u21ac",
|
2429 |
+
"rarrpl;": "\u2945",
|
2430 |
+
"rarrsim;": "\u2974",
|
2431 |
+
"rarrtl;": "\u21a3",
|
2432 |
+
"rarrw;": "\u219d",
|
2433 |
+
"ratail;": "\u291a",
|
2434 |
+
"ratio;": "\u2236",
|
2435 |
+
"rationals;": "\u211a",
|
2436 |
+
"rbarr;": "\u290d",
|
2437 |
+
"rbbrk;": "\u2773",
|
2438 |
+
"rbrace;": "}",
|
2439 |
+
"rbrack;": "]",
|
2440 |
+
"rbrke;": "\u298c",
|
2441 |
+
"rbrksld;": "\u298e",
|
2442 |
+
"rbrkslu;": "\u2990",
|
2443 |
+
"rcaron;": "\u0159",
|
2444 |
+
"rcedil;": "\u0157",
|
2445 |
+
"rceil;": "\u2309",
|
2446 |
+
"rcub;": "}",
|
2447 |
+
"rcy;": "\u0440",
|
2448 |
+
"rdca;": "\u2937",
|
2449 |
+
"rdldhar;": "\u2969",
|
2450 |
+
"rdquo;": "\u201d",
|
2451 |
+
"rdquor;": "\u201d",
|
2452 |
+
"rdsh;": "\u21b3",
|
2453 |
+
"real;": "\u211c",
|
2454 |
+
"realine;": "\u211b",
|
2455 |
+
"realpart;": "\u211c",
|
2456 |
+
"reals;": "\u211d",
|
2457 |
+
"rect;": "\u25ad",
|
2458 |
+
"reg": "\xae",
|
2459 |
+
"reg;": "\xae",
|
2460 |
+
"rfisht;": "\u297d",
|
2461 |
+
"rfloor;": "\u230b",
|
2462 |
+
"rfr;": "\U0001d52f",
|
2463 |
+
"rhard;": "\u21c1",
|
2464 |
+
"rharu;": "\u21c0",
|
2465 |
+
"rharul;": "\u296c",
|
2466 |
+
"rho;": "\u03c1",
|
2467 |
+
"rhov;": "\u03f1",
|
2468 |
+
"rightarrow;": "\u2192",
|
2469 |
+
"rightarrowtail;": "\u21a3",
|
2470 |
+
"rightharpoondown;": "\u21c1",
|
2471 |
+
"rightharpoonup;": "\u21c0",
|
2472 |
+
"rightleftarrows;": "\u21c4",
|
2473 |
+
"rightleftharpoons;": "\u21cc",
|
2474 |
+
"rightrightarrows;": "\u21c9",
|
2475 |
+
"rightsquigarrow;": "\u219d",
|
2476 |
+
"rightthreetimes;": "\u22cc",
|
2477 |
+
"ring;": "\u02da",
|
2478 |
+
"risingdotseq;": "\u2253",
|
2479 |
+
"rlarr;": "\u21c4",
|
2480 |
+
"rlhar;": "\u21cc",
|
2481 |
+
"rlm;": "\u200f",
|
2482 |
+
"rmoust;": "\u23b1",
|
2483 |
+
"rmoustache;": "\u23b1",
|
2484 |
+
"rnmid;": "\u2aee",
|
2485 |
+
"roang;": "\u27ed",
|
2486 |
+
"roarr;": "\u21fe",
|
2487 |
+
"robrk;": "\u27e7",
|
2488 |
+
"ropar;": "\u2986",
|
2489 |
+
"ropf;": "\U0001d563",
|
2490 |
+
"roplus;": "\u2a2e",
|
2491 |
+
"rotimes;": "\u2a35",
|
2492 |
+
"rpar;": ")",
|
2493 |
+
"rpargt;": "\u2994",
|
2494 |
+
"rppolint;": "\u2a12",
|
2495 |
+
"rrarr;": "\u21c9",
|
2496 |
+
"rsaquo;": "\u203a",
|
2497 |
+
"rscr;": "\U0001d4c7",
|
2498 |
+
"rsh;": "\u21b1",
|
2499 |
+
"rsqb;": "]",
|
2500 |
+
"rsquo;": "\u2019",
|
2501 |
+
"rsquor;": "\u2019",
|
2502 |
+
"rthree;": "\u22cc",
|
2503 |
+
"rtimes;": "\u22ca",
|
2504 |
+
"rtri;": "\u25b9",
|
2505 |
+
"rtrie;": "\u22b5",
|
2506 |
+
"rtrif;": "\u25b8",
|
2507 |
+
"rtriltri;": "\u29ce",
|
2508 |
+
"ruluhar;": "\u2968",
|
2509 |
+
"rx;": "\u211e",
|
2510 |
+
"sacute;": "\u015b",
|
2511 |
+
"sbquo;": "\u201a",
|
2512 |
+
"sc;": "\u227b",
|
2513 |
+
"scE;": "\u2ab4",
|
2514 |
+
"scap;": "\u2ab8",
|
2515 |
+
"scaron;": "\u0161",
|
2516 |
+
"sccue;": "\u227d",
|
2517 |
+
"sce;": "\u2ab0",
|
2518 |
+
"scedil;": "\u015f",
|
2519 |
+
"scirc;": "\u015d",
|
2520 |
+
"scnE;": "\u2ab6",
|
2521 |
+
"scnap;": "\u2aba",
|
2522 |
+
"scnsim;": "\u22e9",
|
2523 |
+
"scpolint;": "\u2a13",
|
2524 |
+
"scsim;": "\u227f",
|
2525 |
+
"scy;": "\u0441",
|
2526 |
+
"sdot;": "\u22c5",
|
2527 |
+
"sdotb;": "\u22a1",
|
2528 |
+
"sdote;": "\u2a66",
|
2529 |
+
"seArr;": "\u21d8",
|
2530 |
+
"searhk;": "\u2925",
|
2531 |
+
"searr;": "\u2198",
|
2532 |
+
"searrow;": "\u2198",
|
2533 |
+
"sect": "\xa7",
|
2534 |
+
"sect;": "\xa7",
|
2535 |
+
"semi;": ";",
|
2536 |
+
"seswar;": "\u2929",
|
2537 |
+
"setminus;": "\u2216",
|
2538 |
+
"setmn;": "\u2216",
|
2539 |
+
"sext;": "\u2736",
|
2540 |
+
"sfr;": "\U0001d530",
|
2541 |
+
"sfrown;": "\u2322",
|
2542 |
+
"sharp;": "\u266f",
|
2543 |
+
"shchcy;": "\u0449",
|
2544 |
+
"shcy;": "\u0448",
|
2545 |
+
"shortmid;": "\u2223",
|
2546 |
+
"shortparallel;": "\u2225",
|
2547 |
+
"shy": "\xad",
|
2548 |
+
"shy;": "\xad",
|
2549 |
+
"sigma;": "\u03c3",
|
2550 |
+
"sigmaf;": "\u03c2",
|
2551 |
+
"sigmav;": "\u03c2",
|
2552 |
+
"sim;": "\u223c",
|
2553 |
+
"simdot;": "\u2a6a",
|
2554 |
+
"sime;": "\u2243",
|
2555 |
+
"simeq;": "\u2243",
|
2556 |
+
"simg;": "\u2a9e",
|
2557 |
+
"simgE;": "\u2aa0",
|
2558 |
+
"siml;": "\u2a9d",
|
2559 |
+
"simlE;": "\u2a9f",
|
2560 |
+
"simne;": "\u2246",
|
2561 |
+
"simplus;": "\u2a24",
|
2562 |
+
"simrarr;": "\u2972",
|
2563 |
+
"slarr;": "\u2190",
|
2564 |
+
"smallsetminus;": "\u2216",
|
2565 |
+
"smashp;": "\u2a33",
|
2566 |
+
"smeparsl;": "\u29e4",
|
2567 |
+
"smid;": "\u2223",
|
2568 |
+
"smile;": "\u2323",
|
2569 |
+
"smt;": "\u2aaa",
|
2570 |
+
"smte;": "\u2aac",
|
2571 |
+
"smtes;": "\u2aac\ufe00",
|
2572 |
+
"softcy;": "\u044c",
|
2573 |
+
"sol;": "/",
|
2574 |
+
"solb;": "\u29c4",
|
2575 |
+
"solbar;": "\u233f",
|
2576 |
+
"sopf;": "\U0001d564",
|
2577 |
+
"spades;": "\u2660",
|
2578 |
+
"spadesuit;": "\u2660",
|
2579 |
+
"spar;": "\u2225",
|
2580 |
+
"sqcap;": "\u2293",
|
2581 |
+
"sqcaps;": "\u2293\ufe00",
|
2582 |
+
"sqcup;": "\u2294",
|
2583 |
+
"sqcups;": "\u2294\ufe00",
|
2584 |
+
"sqsub;": "\u228f",
|
2585 |
+
"sqsube;": "\u2291",
|
2586 |
+
"sqsubset;": "\u228f",
|
2587 |
+
"sqsubseteq;": "\u2291",
|
2588 |
+
"sqsup;": "\u2290",
|
2589 |
+
"sqsupe;": "\u2292",
|
2590 |
+
"sqsupset;": "\u2290",
|
2591 |
+
"sqsupseteq;": "\u2292",
|
2592 |
+
"squ;": "\u25a1",
|
2593 |
+
"square;": "\u25a1",
|
2594 |
+
"squarf;": "\u25aa",
|
2595 |
+
"squf;": "\u25aa",
|
2596 |
+
"srarr;": "\u2192",
|
2597 |
+
"sscr;": "\U0001d4c8",
|
2598 |
+
"ssetmn;": "\u2216",
|
2599 |
+
"ssmile;": "\u2323",
|
2600 |
+
"sstarf;": "\u22c6",
|
2601 |
+
"star;": "\u2606",
|
2602 |
+
"starf;": "\u2605",
|
2603 |
+
"straightepsilon;": "\u03f5",
|
2604 |
+
"straightphi;": "\u03d5",
|
2605 |
+
"strns;": "\xaf",
|
2606 |
+
"sub;": "\u2282",
|
2607 |
+
"subE;": "\u2ac5",
|
2608 |
+
"subdot;": "\u2abd",
|
2609 |
+
"sube;": "\u2286",
|
2610 |
+
"subedot;": "\u2ac3",
|
2611 |
+
"submult;": "\u2ac1",
|
2612 |
+
"subnE;": "\u2acb",
|
2613 |
+
"subne;": "\u228a",
|
2614 |
+
"subplus;": "\u2abf",
|
2615 |
+
"subrarr;": "\u2979",
|
2616 |
+
"subset;": "\u2282",
|
2617 |
+
"subseteq;": "\u2286",
|
2618 |
+
"subseteqq;": "\u2ac5",
|
2619 |
+
"subsetneq;": "\u228a",
|
2620 |
+
"subsetneqq;": "\u2acb",
|
2621 |
+
"subsim;": "\u2ac7",
|
2622 |
+
"subsub;": "\u2ad5",
|
2623 |
+
"subsup;": "\u2ad3",
|
2624 |
+
"succ;": "\u227b",
|
2625 |
+
"succapprox;": "\u2ab8",
|
2626 |
+
"succcurlyeq;": "\u227d",
|
2627 |
+
"succeq;": "\u2ab0",
|
2628 |
+
"succnapprox;": "\u2aba",
|
2629 |
+
"succneqq;": "\u2ab6",
|
2630 |
+
"succnsim;": "\u22e9",
|
2631 |
+
"succsim;": "\u227f",
|
2632 |
+
"sum;": "\u2211",
|
2633 |
+
"sung;": "\u266a",
|
2634 |
+
"sup1": "\xb9",
|
2635 |
+
"sup1;": "\xb9",
|
2636 |
+
"sup2": "\xb2",
|
2637 |
+
"sup2;": "\xb2",
|
2638 |
+
"sup3": "\xb3",
|
2639 |
+
"sup3;": "\xb3",
|
2640 |
+
"sup;": "\u2283",
|
2641 |
+
"supE;": "\u2ac6",
|
2642 |
+
"supdot;": "\u2abe",
|
2643 |
+
"supdsub;": "\u2ad8",
|
2644 |
+
"supe;": "\u2287",
|
2645 |
+
"supedot;": "\u2ac4",
|
2646 |
+
"suphsol;": "\u27c9",
|
2647 |
+
"suphsub;": "\u2ad7",
|
2648 |
+
"suplarr;": "\u297b",
|
2649 |
+
"supmult;": "\u2ac2",
|
2650 |
+
"supnE;": "\u2acc",
|
2651 |
+
"supne;": "\u228b",
|
2652 |
+
"supplus;": "\u2ac0",
|
2653 |
+
"supset;": "\u2283",
|
2654 |
+
"supseteq;": "\u2287",
|
2655 |
+
"supseteqq;": "\u2ac6",
|
2656 |
+
"supsetneq;": "\u228b",
|
2657 |
+
"supsetneqq;": "\u2acc",
|
2658 |
+
"supsim;": "\u2ac8",
|
2659 |
+
"supsub;": "\u2ad4",
|
2660 |
+
"supsup;": "\u2ad6",
|
2661 |
+
"swArr;": "\u21d9",
|
2662 |
+
"swarhk;": "\u2926",
|
2663 |
+
"swarr;": "\u2199",
|
2664 |
+
"swarrow;": "\u2199",
|
2665 |
+
"swnwar;": "\u292a",
|
2666 |
+
"szlig": "\xdf",
|
2667 |
+
"szlig;": "\xdf",
|
2668 |
+
"target;": "\u2316",
|
2669 |
+
"tau;": "\u03c4",
|
2670 |
+
"tbrk;": "\u23b4",
|
2671 |
+
"tcaron;": "\u0165",
|
2672 |
+
"tcedil;": "\u0163",
|
2673 |
+
"tcy;": "\u0442",
|
2674 |
+
"tdot;": "\u20db",
|
2675 |
+
"telrec;": "\u2315",
|
2676 |
+
"tfr;": "\U0001d531",
|
2677 |
+
"there4;": "\u2234",
|
2678 |
+
"therefore;": "\u2234",
|
2679 |
+
"theta;": "\u03b8",
|
2680 |
+
"thetasym;": "\u03d1",
|
2681 |
+
"thetav;": "\u03d1",
|
2682 |
+
"thickapprox;": "\u2248",
|
2683 |
+
"thicksim;": "\u223c",
|
2684 |
+
"thinsp;": "\u2009",
|
2685 |
+
"thkap;": "\u2248",
|
2686 |
+
"thksim;": "\u223c",
|
2687 |
+
"thorn": "\xfe",
|
2688 |
+
"thorn;": "\xfe",
|
2689 |
+
"tilde;": "\u02dc",
|
2690 |
+
"times": "\xd7",
|
2691 |
+
"times;": "\xd7",
|
2692 |
+
"timesb;": "\u22a0",
|
2693 |
+
"timesbar;": "\u2a31",
|
2694 |
+
"timesd;": "\u2a30",
|
2695 |
+
"tint;": "\u222d",
|
2696 |
+
"toea;": "\u2928",
|
2697 |
+
"top;": "\u22a4",
|
2698 |
+
"topbot;": "\u2336",
|
2699 |
+
"topcir;": "\u2af1",
|
2700 |
+
"topf;": "\U0001d565",
|
2701 |
+
"topfork;": "\u2ada",
|
2702 |
+
"tosa;": "\u2929",
|
2703 |
+
"tprime;": "\u2034",
|
2704 |
+
"trade;": "\u2122",
|
2705 |
+
"triangle;": "\u25b5",
|
2706 |
+
"triangledown;": "\u25bf",
|
2707 |
+
"triangleleft;": "\u25c3",
|
2708 |
+
"trianglelefteq;": "\u22b4",
|
2709 |
+
"triangleq;": "\u225c",
|
2710 |
+
"triangleright;": "\u25b9",
|
2711 |
+
"trianglerighteq;": "\u22b5",
|
2712 |
+
"tridot;": "\u25ec",
|
2713 |
+
"trie;": "\u225c",
|
2714 |
+
"triminus;": "\u2a3a",
|
2715 |
+
"triplus;": "\u2a39",
|
2716 |
+
"trisb;": "\u29cd",
|
2717 |
+
"tritime;": "\u2a3b",
|
2718 |
+
"trpezium;": "\u23e2",
|
2719 |
+
"tscr;": "\U0001d4c9",
|
2720 |
+
"tscy;": "\u0446",
|
2721 |
+
"tshcy;": "\u045b",
|
2722 |
+
"tstrok;": "\u0167",
|
2723 |
+
"twixt;": "\u226c",
|
2724 |
+
"twoheadleftarrow;": "\u219e",
|
2725 |
+
"twoheadrightarrow;": "\u21a0",
|
2726 |
+
"uArr;": "\u21d1",
|
2727 |
+
"uHar;": "\u2963",
|
2728 |
+
"uacute": "\xfa",
|
2729 |
+
"uacute;": "\xfa",
|
2730 |
+
"uarr;": "\u2191",
|
2731 |
+
"ubrcy;": "\u045e",
|
2732 |
+
"ubreve;": "\u016d",
|
2733 |
+
"ucirc": "\xfb",
|
2734 |
+
"ucirc;": "\xfb",
|
2735 |
+
"ucy;": "\u0443",
|
2736 |
+
"udarr;": "\u21c5",
|
2737 |
+
"udblac;": "\u0171",
|
2738 |
+
"udhar;": "\u296e",
|
2739 |
+
"ufisht;": "\u297e",
|
2740 |
+
"ufr;": "\U0001d532",
|
2741 |
+
"ugrave": "\xf9",
|
2742 |
+
"ugrave;": "\xf9",
|
2743 |
+
"uharl;": "\u21bf",
|
2744 |
+
"uharr;": "\u21be",
|
2745 |
+
"uhblk;": "\u2580",
|
2746 |
+
"ulcorn;": "\u231c",
|
2747 |
+
"ulcorner;": "\u231c",
|
2748 |
+
"ulcrop;": "\u230f",
|
2749 |
+
"ultri;": "\u25f8",
|
2750 |
+
"umacr;": "\u016b",
|
2751 |
+
"uml": "\xa8",
|
2752 |
+
"uml;": "\xa8",
|
2753 |
+
"uogon;": "\u0173",
|
2754 |
+
"uopf;": "\U0001d566",
|
2755 |
+
"uparrow;": "\u2191",
|
2756 |
+
"updownarrow;": "\u2195",
|
2757 |
+
"upharpoonleft;": "\u21bf",
|
2758 |
+
"upharpoonright;": "\u21be",
|
2759 |
+
"uplus;": "\u228e",
|
2760 |
+
"upsi;": "\u03c5",
|
2761 |
+
"upsih;": "\u03d2",
|
2762 |
+
"upsilon;": "\u03c5",
|
2763 |
+
"upuparrows;": "\u21c8",
|
2764 |
+
"urcorn;": "\u231d",
|
2765 |
+
"urcorner;": "\u231d",
|
2766 |
+
"urcrop;": "\u230e",
|
2767 |
+
"uring;": "\u016f",
|
2768 |
+
"urtri;": "\u25f9",
|
2769 |
+
"uscr;": "\U0001d4ca",
|
2770 |
+
"utdot;": "\u22f0",
|
2771 |
+
"utilde;": "\u0169",
|
2772 |
+
"utri;": "\u25b5",
|
2773 |
+
"utrif;": "\u25b4",
|
2774 |
+
"uuarr;": "\u21c8",
|
2775 |
+
"uuml": "\xfc",
|
2776 |
+
"uuml;": "\xfc",
|
2777 |
+
"uwangle;": "\u29a7",
|
2778 |
+
"vArr;": "\u21d5",
|
2779 |
+
"vBar;": "\u2ae8",
|
2780 |
+
"vBarv;": "\u2ae9",
|
2781 |
+
"vDash;": "\u22a8",
|
2782 |
+
"vangrt;": "\u299c",
|
2783 |
+
"varepsilon;": "\u03f5",
|
2784 |
+
"varkappa;": "\u03f0",
|
2785 |
+
"varnothing;": "\u2205",
|
2786 |
+
"varphi;": "\u03d5",
|
2787 |
+
"varpi;": "\u03d6",
|
2788 |
+
"varpropto;": "\u221d",
|
2789 |
+
"varr;": "\u2195",
|
2790 |
+
"varrho;": "\u03f1",
|
2791 |
+
"varsigma;": "\u03c2",
|
2792 |
+
"varsubsetneq;": "\u228a\ufe00",
|
2793 |
+
"varsubsetneqq;": "\u2acb\ufe00",
|
2794 |
+
"varsupsetneq;": "\u228b\ufe00",
|
2795 |
+
"varsupsetneqq;": "\u2acc\ufe00",
|
2796 |
+
"vartheta;": "\u03d1",
|
2797 |
+
"vartriangleleft;": "\u22b2",
|
2798 |
+
"vartriangleright;": "\u22b3",
|
2799 |
+
"vcy;": "\u0432",
|
2800 |
+
"vdash;": "\u22a2",
|
2801 |
+
"vee;": "\u2228",
|
2802 |
+
"veebar;": "\u22bb",
|
2803 |
+
"veeeq;": "\u225a",
|
2804 |
+
"vellip;": "\u22ee",
|
2805 |
+
"verbar;": "|",
|
2806 |
+
"vert;": "|",
|
2807 |
+
"vfr;": "\U0001d533",
|
2808 |
+
"vltri;": "\u22b2",
|
2809 |
+
"vnsub;": "\u2282\u20d2",
|
2810 |
+
"vnsup;": "\u2283\u20d2",
|
2811 |
+
"vopf;": "\U0001d567",
|
2812 |
+
"vprop;": "\u221d",
|
2813 |
+
"vrtri;": "\u22b3",
|
2814 |
+
"vscr;": "\U0001d4cb",
|
2815 |
+
"vsubnE;": "\u2acb\ufe00",
|
2816 |
+
"vsubne;": "\u228a\ufe00",
|
2817 |
+
"vsupnE;": "\u2acc\ufe00",
|
2818 |
+
"vsupne;": "\u228b\ufe00",
|
2819 |
+
"vzigzag;": "\u299a",
|
2820 |
+
"wcirc;": "\u0175",
|
2821 |
+
"wedbar;": "\u2a5f",
|
2822 |
+
"wedge;": "\u2227",
|
2823 |
+
"wedgeq;": "\u2259",
|
2824 |
+
"weierp;": "\u2118",
|
2825 |
+
"wfr;": "\U0001d534",
|
2826 |
+
"wopf;": "\U0001d568",
|
2827 |
+
"wp;": "\u2118",
|
2828 |
+
"wr;": "\u2240",
|
2829 |
+
"wreath;": "\u2240",
|
2830 |
+
"wscr;": "\U0001d4cc",
|
2831 |
+
"xcap;": "\u22c2",
|
2832 |
+
"xcirc;": "\u25ef",
|
2833 |
+
"xcup;": "\u22c3",
|
2834 |
+
"xdtri;": "\u25bd",
|
2835 |
+
"xfr;": "\U0001d535",
|
2836 |
+
"xhArr;": "\u27fa",
|
2837 |
+
"xharr;": "\u27f7",
|
2838 |
+
"xi;": "\u03be",
|
2839 |
+
"xlArr;": "\u27f8",
|
2840 |
+
"xlarr;": "\u27f5",
|
2841 |
+
"xmap;": "\u27fc",
|
2842 |
+
"xnis;": "\u22fb",
|
2843 |
+
"xodot;": "\u2a00",
|
2844 |
+
"xopf;": "\U0001d569",
|
2845 |
+
"xoplus;": "\u2a01",
|
2846 |
+
"xotime;": "\u2a02",
|
2847 |
+
"xrArr;": "\u27f9",
|
2848 |
+
"xrarr;": "\u27f6",
|
2849 |
+
"xscr;": "\U0001d4cd",
|
2850 |
+
"xsqcup;": "\u2a06",
|
2851 |
+
"xuplus;": "\u2a04",
|
2852 |
+
"xutri;": "\u25b3",
|
2853 |
+
"xvee;": "\u22c1",
|
2854 |
+
"xwedge;": "\u22c0",
|
2855 |
+
"yacute": "\xfd",
|
2856 |
+
"yacute;": "\xfd",
|
2857 |
+
"yacy;": "\u044f",
|
2858 |
+
"ycirc;": "\u0177",
|
2859 |
+
"ycy;": "\u044b",
|
2860 |
+
"yen": "\xa5",
|
2861 |
+
"yen;": "\xa5",
|
2862 |
+
"yfr;": "\U0001d536",
|
2863 |
+
"yicy;": "\u0457",
|
2864 |
+
"yopf;": "\U0001d56a",
|
2865 |
+
"yscr;": "\U0001d4ce",
|
2866 |
+
"yucy;": "\u044e",
|
2867 |
+
"yuml": "\xff",
|
2868 |
+
"yuml;": "\xff",
|
2869 |
+
"zacute;": "\u017a",
|
2870 |
+
"zcaron;": "\u017e",
|
2871 |
+
"zcy;": "\u0437",
|
2872 |
+
"zdot;": "\u017c",
|
2873 |
+
"zeetrf;": "\u2128",
|
2874 |
+
"zeta;": "\u03b6",
|
2875 |
+
"zfr;": "\U0001d537",
|
2876 |
+
"zhcy;": "\u0436",
|
2877 |
+
"zigrarr;": "\u21dd",
|
2878 |
+
"zopf;": "\U0001d56b",
|
2879 |
+
"zscr;": "\U0001d4cf",
|
2880 |
+
"zwj;": "\u200d",
|
2881 |
+
"zwnj;": "\u200c",
|
2882 |
+
}
|
2883 |
+
|
2884 |
+
# Replacements applied when decoding numeric character references:
# NULL becomes U+FFFD, and code points in the 0x80-0x9F range are mapped
# as if they were Windows-1252 bytes (per the values listed below).
replacementCharacters = {
    0x0: "\uFFFD",
    0x0d: "\u000D",
    0x80: "\u20AC",
    0x81: "\u0081",
    0x82: "\u201A",
    0x83: "\u0192",
    0x84: "\u201E",
    0x85: "\u2026",
    0x86: "\u2020",
    0x87: "\u2021",
    0x88: "\u02C6",
    0x89: "\u2030",
    0x8A: "\u0160",
    0x8B: "\u2039",
    0x8C: "\u0152",
    0x8D: "\u008D",
    0x8E: "\u017D",
    0x8F: "\u008F",
    0x90: "\u0090",
    0x91: "\u2018",
    0x92: "\u2019",
    0x93: "\u201C",
    0x94: "\u201D",
    0x95: "\u2022",
    0x96: "\u2013",
    0x97: "\u2014",
    0x98: "\u02DC",
    0x99: "\u2122",
    0x9A: "\u0161",
    0x9B: "\u203A",
    0x9C: "\u0153",
    0x9D: "\u009D",
    0x9E: "\u017E",
    0x9F: "\u0178",
}
|
2920 |
+
|
2921 |
+
# Numeric codes for the token types emitted by the tokenizer; the listing
# order fixes the values (Doctype=0 ... ParseError=7).
tokenTypes = {name: code for code, name in enumerate([
    "Doctype",
    "Characters",
    "SpaceCharacters",
    "StartTag",
    "EndTag",
    "EmptyTag",
    "Comment",
    "ParseError",
])}

# Token types that represent tags (and therefore carry name/attribute data).
tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"],
                           tokenTypes["EmptyTag"]])
|
2934 |
+
|
2935 |
+
|
2936 |
+
# Invert the prefix -> namespace-URL mapping so URLs look up their prefix.
prefixes = {url: prefix for prefix, url in namespaces.items()}
# The inversion above keeps whichever prefix was seen last for a duplicated
# URL; force the MathML namespace to resolve to the canonical "math" prefix.
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
|
2938 |
+
|
2939 |
+
|
2940 |
+
class DataLossWarning(UserWarning):
    """Raised when the current tree is unable to represent the input data"""
|
2943 |
+
|
2944 |
+
|
2945 |
+
class _ReparseException(Exception):
|
2946 |
+
pass
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (201 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-310.pyc
ADDED
Binary file (1.33 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-310.pyc
ADDED
Binary file (871 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-310.pyc
ADDED
Binary file (1.87 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-310.pyc
ADDED
Binary file (2.58 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-310.pyc
ADDED
Binary file (2.73 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-310.pyc
ADDED
Binary file (20 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-310.pyc
ADDED
Binary file (1.38 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from . import base
|
4 |
+
|
5 |
+
from collections import OrderedDict
|
6 |
+
|
7 |
+
|
8 |
+
def _attr_key(attr):
|
9 |
+
"""Return an appropriate key for an attribute for sorting
|
10 |
+
|
11 |
+
Attributes have a namespace that can be either ``None`` or a string. We
|
12 |
+
can't compare the two because they're different types, so we convert
|
13 |
+
``None`` to an empty string first.
|
14 |
+
|
15 |
+
"""
|
16 |
+
return (attr[0][0] or ''), attr[0][1]
|
17 |
+
|
18 |
+
|
19 |
+
class Filter(base.Filter):
    """Alphabetizes attributes for elements"""
    def __iter__(self):
        for token in base.Filter.__iter__(self):
            # Only tag tokens carry an attribute dict worth reordering.
            if token["type"] in ("StartTag", "EmptyTag"):
                # OrderedDict preserves the sorted order on insertion.
                token["data"] = OrderedDict(
                    sorted(token["data"].items(), key=_attr_key))
            yield token
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/base.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
|
4 |
+
class Filter(object):
    """Base class for html5lib token-stream filters.

    Wraps a token source and transparently proxies both iteration and
    attribute access to it; subclasses override ``__iter__`` to transform
    tokens on the way through.
    """
    def __init__(self, source):
        """Creates a Filter

        :arg source: the token stream to wrap

        """
        self.source = source

    def __iter__(self):
        # Default behaviour: pass the wrapped stream through unchanged.
        return iter(self.source)

    def __getattr__(self, name):
        # Only invoked for attributes not found on the filter itself;
        # delegate those lookups to the wrapped source.
        return getattr(self.source, name)
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from . import base
|
4 |
+
|
5 |
+
|
6 |
+
class Filter(base.Filter):
    """Injects ``<meta charset=ENCODING>`` tag into head of document"""
    def __init__(self, source, encoding):
        """Creates a Filter

        :arg source: the source token stream

        :arg encoding: the encoding to set

        """
        base.Filter.__init__(self, source)
        self.encoding = encoding

    def __iter__(self):
        state = "pre_head"
        # If no encoding was supplied there is nothing to inject.
        meta_found = (self.encoding is None)
        pending = []

        for token in base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type == "StartTag":
                if token["name"].lower() == "head":
                    state = "in_head"

            elif token_type == "EmptyTag":
                if token["name"].lower() == "meta":
                    # Rewrite an existing charset declaration in place.
                    has_http_equiv_content_type = False
                    for (namespace, name), value in token["data"].items():
                        if namespace is not None:
                            continue
                        elif name.lower() == 'charset':
                            token["data"][(namespace, name)] = self.encoding
                            meta_found = True
                            break
                        elif name == 'http-equiv' and value.lower() == 'content-type':
                            has_http_equiv_content_type = True
                    else:
                        # for/else: no ``charset`` attribute was found; fall
                        # back to the http-equiv/content attribute pair.
                        if has_http_equiv_content_type and (None, "content") in token["data"]:
                            token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
                            meta_found = True

                elif token["name"].lower() == "head" and not meta_found:
                    # An empty <head/> token: expand it and insert the meta.
                    yield {"type": "StartTag", "name": "head",
                           "data": token["data"]}
                    yield {"type": "EmptyTag", "name": "meta",
                           "data": {(None, "charset"): self.encoding}}
                    yield {"type": "EndTag", "name": "head"}
                    meta_found = True
                    continue

            elif token_type == "EndTag":
                if token["name"].lower() == "head" and pending:
                    # Flush buffered head content, injecting the meta first
                    # if no charset declaration was seen inside the head.
                    yield pending.pop(0)
                    if not meta_found:
                        yield {"type": "EmptyTag", "name": "meta",
                               "data": {(None, "charset"): self.encoding}}
                    while pending:
                        yield pending.pop(0)
                    meta_found = True
                    state = "post_head"

            # While inside the head, buffer tokens instead of emitting them
            # so the meta tag can be placed first when the head closes.
            if state == "in_head":
                pending.append(token)
            else:
                yield token
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/lint.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from pip._vendor.six import text_type
|
4 |
+
|
5 |
+
from . import base
|
6 |
+
from ..constants import namespaces, voidElements
|
7 |
+
|
8 |
+
from ..constants import spaceCharacters
|
9 |
+
spaceCharacters = "".join(spaceCharacters)
|
10 |
+
|
11 |
+
|
12 |
+
class Filter(base.Filter):
    """Lints the token stream for errors

    If it finds any errors, it'll raise an ``AssertionError``.

    """
    def __init__(self, source, require_matching_tags=True):
        """Creates a Filter

        :arg source: the source token stream

        :arg require_matching_tags: whether or not to require matching tags

        """
        super(Filter, self).__init__(source)
        self.require_matching_tags = require_matching_tags

    def __iter__(self):
        # Stack of (namespace, name) pairs for currently-open elements,
        # used to verify that each EndTag matches its StartTag.
        open_elements = []
        for token in base.Filter.__iter__(self):
            type = token["type"]
            if type in ("StartTag", "EmptyTag"):
                namespace = token["namespace"]
                name = token["name"]
                assert namespace is None or isinstance(namespace, text_type)
                assert namespace != ""
                assert isinstance(name, text_type)
                assert name != ""
                assert isinstance(token["data"], dict)
                # Void elements must be emitted as EmptyTag, all others as
                # StartTag.
                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
                    assert type == "EmptyTag"
                else:
                    assert type == "StartTag"
                if type == "StartTag" and self.require_matching_tags:
                    open_elements.append((namespace, name))
                for (namespace, name), value in token["data"].items():
                    assert namespace is None or isinstance(namespace, text_type)
                    assert namespace != ""
                    assert isinstance(name, text_type)
                    assert name != ""
                    assert isinstance(value, text_type)

            elif type == "EndTag":
                namespace = token["namespace"]
                name = token["name"]
                assert namespace is None or isinstance(namespace, text_type)
                assert namespace != ""
                assert isinstance(name, text_type)
                assert name != ""
                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
                    assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name}
                elif self.require_matching_tags:
                    start = open_elements.pop()
                    assert start == (namespace, name)

            elif type == "Comment":
                data = token["data"]
                assert isinstance(data, text_type)

            elif type in ("Characters", "SpaceCharacters"):
                data = token["data"]
                assert isinstance(data, text_type)
                assert data != ""
                if type == "SpaceCharacters":
                    assert data.strip(spaceCharacters) == ""

            elif type == "Doctype":
                name = token["name"]
                assert name is None or isinstance(name, text_type)
                # BUG FIX: the original asserted ``isinstance(name, text_type)``
                # for both identifiers, re-validating the doctype *name*
                # instead of the public/system identifier values themselves.
                assert token["publicId"] is None or isinstance(token["publicId"], text_type)
                assert token["systemId"] is None or isinstance(token["systemId"], text_type)

            elif type == "Entity":
                assert isinstance(token["name"], text_type)

            elif type == "SerializerError":
                assert isinstance(token["data"], text_type)

            else:
                assert False, "Unknown token type: %(type)s" % {"type": type}

            yield token
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/optionaltags.py
ADDED
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from . import base
|
4 |
+
|
5 |
+
|
6 |
+
class Filter(base.Filter):
    """Removes optional tags from the token stream"""
    def slider(self):
        """Yield ``(previous, current, next)`` triples over the source stream.

        ``previous`` and ``next`` are ``None`` at the stream boundaries.
        """
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        if previous1 is not None:
            yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag carrying attributes can never be omitted.
                if (token["data"] or
                        not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if this start tag may be omitted per the HTML spec.

        :arg tagname: lowercase tag name of the start tag
        :arg previous: token before the start tag (or ``None``)
        :arg next: token after the start tag (or ``None``)
        """
        type = next and next["type"] or None
        # BUG FIX: this condition was ``tagname in 'html'`` — a substring
        # test that also matched '', 'h', 'ht' and 'htm'; the rule below
        # applies to the html element only, so test equality.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if this end tag may be omitted per the HTML spec.

        :arg tagname: lowercase tag name of the end tag
        :arg next: token after the end tag (or ``None``)
        """
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li/optgroup/tr element's end tag may be omitted if it is
            # immediately followed by another element of the same name, or
            # if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by one of a fixed set of flow-level
            # elements, or if there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an optgroup element, or
            # if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt/rp element's end tag may be omitted if it is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td/th element's end tag may be omitted if it is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
ADDED
@@ -0,0 +1,916 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Deprecated from html5lib 1.1.
|
2 |
+
|
3 |
+
See `here <https://github.com/html5lib/html5lib-python/issues/443>`_ for
|
4 |
+
information about its deprecation; `Bleach <https://github.com/mozilla/bleach>`_
|
5 |
+
is recommended as a replacement. Please let us know in the aforementioned issue
|
6 |
+
if Bleach is unsuitable for your needs.
|
7 |
+
|
8 |
+
"""
|
9 |
+
from __future__ import absolute_import, division, unicode_literals
|
10 |
+
|
11 |
+
import re
|
12 |
+
import warnings
|
13 |
+
from xml.sax.saxutils import escape, unescape
|
14 |
+
|
15 |
+
from pip._vendor.six.moves import urllib_parse as urlparse
|
16 |
+
|
17 |
+
from . import base
|
18 |
+
from ..constants import namespaces, prefixes
|
19 |
+
|
20 |
+
__all__ = ["Filter"]
|
21 |
+
|
22 |
+
|
23 |
+
_deprecation_msg = (
|
24 |
+
"html5lib's sanitizer is deprecated; see " +
|
25 |
+
"https://github.com/html5lib/html5lib-python/issues/443 and please let " +
|
26 |
+
"us know if Bleach is unsuitable for your needs"
|
27 |
+
)
|
28 |
+
|
29 |
+
warnings.warn(_deprecation_msg, DeprecationWarning)
|
30 |
+
|
31 |
+
# Default element allowlist: (namespace, local-name) pairs for the HTML,
# MathML and SVG elements the sanitizer passes through; every element not
# listed here is serialized as escaped literal text by disallowed_token.
allowed_elements = frozenset((
    # HTML elements
    (namespaces['html'], 'a'), (namespaces['html'], 'abbr'),
    (namespaces['html'], 'acronym'), (namespaces['html'], 'address'),
    (namespaces['html'], 'area'), (namespaces['html'], 'article'),
    (namespaces['html'], 'aside'), (namespaces['html'], 'audio'),
    (namespaces['html'], 'b'), (namespaces['html'], 'big'),
    (namespaces['html'], 'blockquote'), (namespaces['html'], 'br'),
    (namespaces['html'], 'button'), (namespaces['html'], 'canvas'),
    (namespaces['html'], 'caption'), (namespaces['html'], 'center'),
    (namespaces['html'], 'cite'), (namespaces['html'], 'code'),
    (namespaces['html'], 'col'), (namespaces['html'], 'colgroup'),
    (namespaces['html'], 'command'), (namespaces['html'], 'datagrid'),
    (namespaces['html'], 'datalist'), (namespaces['html'], 'dd'),
    (namespaces['html'], 'del'), (namespaces['html'], 'details'),
    (namespaces['html'], 'dfn'), (namespaces['html'], 'dialog'),
    (namespaces['html'], 'dir'), (namespaces['html'], 'div'),
    (namespaces['html'], 'dl'), (namespaces['html'], 'dt'),
    (namespaces['html'], 'em'), (namespaces['html'], 'event-source'),
    (namespaces['html'], 'fieldset'), (namespaces['html'], 'figcaption'),
    (namespaces['html'], 'figure'), (namespaces['html'], 'footer'),
    (namespaces['html'], 'font'), (namespaces['html'], 'form'),
    (namespaces['html'], 'header'), (namespaces['html'], 'h1'),
    (namespaces['html'], 'h2'), (namespaces['html'], 'h3'),
    (namespaces['html'], 'h4'), (namespaces['html'], 'h5'),
    (namespaces['html'], 'h6'), (namespaces['html'], 'hr'),
    (namespaces['html'], 'i'), (namespaces['html'], 'img'),
    (namespaces['html'], 'input'), (namespaces['html'], 'ins'),
    (namespaces['html'], 'keygen'), (namespaces['html'], 'kbd'),
    (namespaces['html'], 'label'), (namespaces['html'], 'legend'),
    (namespaces['html'], 'li'), (namespaces['html'], 'm'),
    (namespaces['html'], 'map'), (namespaces['html'], 'menu'),
    (namespaces['html'], 'meter'), (namespaces['html'], 'multicol'),
    (namespaces['html'], 'nav'), (namespaces['html'], 'nextid'),
    (namespaces['html'], 'ol'), (namespaces['html'], 'output'),
    (namespaces['html'], 'optgroup'), (namespaces['html'], 'option'),
    (namespaces['html'], 'p'), (namespaces['html'], 'pre'),
    (namespaces['html'], 'progress'), (namespaces['html'], 'q'),
    (namespaces['html'], 's'), (namespaces['html'], 'samp'),
    (namespaces['html'], 'section'), (namespaces['html'], 'select'),
    (namespaces['html'], 'small'), (namespaces['html'], 'sound'),
    (namespaces['html'], 'source'), (namespaces['html'], 'spacer'),
    (namespaces['html'], 'span'), (namespaces['html'], 'strike'),
    (namespaces['html'], 'strong'), (namespaces['html'], 'sub'),
    (namespaces['html'], 'sup'), (namespaces['html'], 'table'),
    (namespaces['html'], 'tbody'), (namespaces['html'], 'td'),
    (namespaces['html'], 'textarea'), (namespaces['html'], 'time'),
    (namespaces['html'], 'tfoot'), (namespaces['html'], 'th'),
    (namespaces['html'], 'thead'), (namespaces['html'], 'tr'),
    (namespaces['html'], 'tt'), (namespaces['html'], 'u'),
    (namespaces['html'], 'ul'), (namespaces['html'], 'var'),
    (namespaces['html'], 'video'),
    # MathML elements
    (namespaces['mathml'], 'maction'), (namespaces['mathml'], 'math'),
    (namespaces['mathml'], 'merror'), (namespaces['mathml'], 'mfrac'),
    (namespaces['mathml'], 'mi'), (namespaces['mathml'], 'mmultiscripts'),
    (namespaces['mathml'], 'mn'), (namespaces['mathml'], 'mo'),
    (namespaces['mathml'], 'mover'), (namespaces['mathml'], 'mpadded'),
    (namespaces['mathml'], 'mphantom'), (namespaces['mathml'], 'mprescripts'),
    (namespaces['mathml'], 'mroot'), (namespaces['mathml'], 'mrow'),
    (namespaces['mathml'], 'mspace'), (namespaces['mathml'], 'msqrt'),
    (namespaces['mathml'], 'mstyle'), (namespaces['mathml'], 'msub'),
    (namespaces['mathml'], 'msubsup'), (namespaces['mathml'], 'msup'),
    (namespaces['mathml'], 'mtable'), (namespaces['mathml'], 'mtd'),
    (namespaces['mathml'], 'mtext'), (namespaces['mathml'], 'mtr'),
    (namespaces['mathml'], 'munder'), (namespaces['mathml'], 'munderover'),
    (namespaces['mathml'], 'none'),
    # SVG elements
    (namespaces['svg'], 'a'), (namespaces['svg'], 'animate'),
    (namespaces['svg'], 'animateColor'), (namespaces['svg'], 'animateMotion'),
    (namespaces['svg'], 'animateTransform'), (namespaces['svg'], 'clipPath'),
    (namespaces['svg'], 'circle'), (namespaces['svg'], 'defs'),
    (namespaces['svg'], 'desc'), (namespaces['svg'], 'ellipse'),
    (namespaces['svg'], 'font-face'), (namespaces['svg'], 'font-face-name'),
    (namespaces['svg'], 'font-face-src'), (namespaces['svg'], 'g'),
    (namespaces['svg'], 'glyph'), (namespaces['svg'], 'hkern'),
    (namespaces['svg'], 'linearGradient'), (namespaces['svg'], 'line'),
    (namespaces['svg'], 'marker'), (namespaces['svg'], 'metadata'),
    (namespaces['svg'], 'missing-glyph'), (namespaces['svg'], 'mpath'),
    (namespaces['svg'], 'path'), (namespaces['svg'], 'polygon'),
    (namespaces['svg'], 'polyline'), (namespaces['svg'], 'radialGradient'),
    (namespaces['svg'], 'rect'), (namespaces['svg'], 'set'),
    (namespaces['svg'], 'stop'), (namespaces['svg'], 'svg'),
    (namespaces['svg'], 'switch'), (namespaces['svg'], 'text'),
    (namespaces['svg'], 'title'), (namespaces['svg'], 'tspan'),
    (namespaces['svg'], 'use'),
))
|
194 |
+
|
195 |
+
# Default attribute allowlist: (namespace, attribute-name) pairs that are
# kept on allowed elements; every attribute not listed here is stripped by
# Filter.allowed_token.
#
# Cleanup over the original table: duplicated literal entries have been
# removed (the original repeated e.g. (None, 'columnalign') three times and
# listed (None, 'align'), (None, 'width'), the xlink triple, etc. in more
# than one section). Duplicates are no-ops inside a frozenset, so the
# resulting value is identical to the original.
allowed_attributes = frozenset((
    # HTML attributes
    (None, 'abbr'), (None, 'accept'), (None, 'accept-charset'),
    (None, 'accesskey'), (None, 'action'), (None, 'align'),
    (None, 'alt'), (None, 'autocomplete'), (None, 'autofocus'),
    (None, 'axis'), (None, 'background'), (None, 'balance'),
    (None, 'bgcolor'), (None, 'bgproperties'), (None, 'border'),
    (None, 'bordercolor'), (None, 'bordercolordark'), (None, 'bordercolorlight'),
    (None, 'bottompadding'), (None, 'cellpadding'), (None, 'cellspacing'),
    (None, 'ch'), (None, 'challenge'), (None, 'char'),
    (None, 'charoff'), (None, 'choff'), (None, 'charset'),
    (None, 'checked'), (None, 'cite'), (None, 'class'),
    (None, 'clear'), (None, 'color'), (None, 'cols'),
    (None, 'colspan'), (None, 'compact'), (None, 'contenteditable'),
    (None, 'controls'), (None, 'coords'), (None, 'data'),
    (None, 'datafld'), (None, 'datapagesize'), (None, 'datasrc'),
    (None, 'datetime'), (None, 'default'), (None, 'delay'),
    (None, 'dir'), (None, 'disabled'), (None, 'draggable'),
    (None, 'dynsrc'), (None, 'enctype'), (None, 'end'),
    (None, 'face'), (None, 'for'), (None, 'form'),
    (None, 'frame'), (None, 'galleryimg'), (None, 'gutter'),
    (None, 'headers'), (None, 'height'), (None, 'hidefocus'),
    (None, 'hidden'), (None, 'high'), (None, 'href'),
    (None, 'hreflang'), (None, 'hspace'), (None, 'icon'),
    (None, 'id'), (None, 'inputmode'), (None, 'ismap'),
    (None, 'keytype'), (None, 'label'), (None, 'leftspacing'),
    (None, 'lang'), (None, 'list'), (None, 'longdesc'),
    (None, 'loop'), (None, 'loopcount'), (None, 'loopend'),
    (None, 'loopstart'), (None, 'low'), (None, 'lowsrc'),
    (None, 'max'), (None, 'maxlength'), (None, 'media'),
    (None, 'method'), (None, 'min'), (None, 'multiple'),
    (None, 'name'), (None, 'nohref'), (None, 'noshade'),
    (None, 'nowrap'), (None, 'open'), (None, 'optimum'),
    (None, 'pattern'), (None, 'ping'), (None, 'point-size'),
    (None, 'poster'), (None, 'pqg'), (None, 'preload'),
    (None, 'prompt'), (None, 'radiogroup'), (None, 'readonly'),
    (None, 'rel'), (None, 'repeat-max'), (None, 'repeat-min'),
    (None, 'replace'), (None, 'required'), (None, 'rev'),
    (None, 'rightspacing'), (None, 'rows'), (None, 'rowspan'),
    (None, 'rules'), (None, 'scope'), (None, 'selected'),
    (None, 'shape'), (None, 'size'), (None, 'span'),
    (None, 'src'), (None, 'start'), (None, 'step'),
    (None, 'style'), (None, 'summary'), (None, 'suppress'),
    (None, 'tabindex'), (None, 'target'), (None, 'template'),
    (None, 'title'), (None, 'toppadding'), (None, 'type'),
    (None, 'unselectable'), (None, 'usemap'), (None, 'urn'),
    (None, 'valign'), (None, 'value'), (None, 'variable'),
    (None, 'volume'), (None, 'vspace'), (None, 'vrml'),
    (None, 'width'), (None, 'wrap'),
    (namespaces['xml'], 'lang'),
    # MathML attributes (names already listed above are not repeated)
    (None, 'actiontype'), (None, 'columnalign'), (None, 'columnlines'),
    (None, 'columnspacing'), (None, 'columnspan'), (None, 'depth'),
    (None, 'display'), (None, 'displaystyle'), (None, 'equalcolumns'),
    (None, 'equalrows'), (None, 'fence'), (None, 'fontstyle'),
    (None, 'fontweight'), (None, 'linethickness'), (None, 'lspace'),
    (None, 'mathbackground'), (None, 'mathcolor'), (None, 'mathvariant'),
    (None, 'maxsize'), (None, 'minsize'), (None, 'other'),
    (None, 'rowalign'), (None, 'rowlines'), (None, 'rowspacing'),
    (None, 'rspace'), (None, 'scriptlevel'), (None, 'selection'),
    (None, 'separator'), (None, 'stretchy'),
    (namespaces['xlink'], 'href'), (namespaces['xlink'], 'show'),
    (namespaces['xlink'], 'type'),
    # SVG attributes (names already listed above are not repeated)
    (None, 'accent-height'), (None, 'accumulate'), (None, 'additive'),
    (None, 'alphabetic'), (None, 'arabic-form'), (None, 'ascent'),
    (None, 'attributeName'), (None, 'attributeType'), (None, 'baseProfile'),
    (None, 'bbox'), (None, 'begin'), (None, 'by'),
    (None, 'calcMode'), (None, 'cap-height'), (None, 'clip-path'),
    (None, 'color-rendering'), (None, 'content'), (None, 'cx'),
    (None, 'cy'), (None, 'd'), (None, 'dx'),
    (None, 'dy'), (None, 'descent'), (None, 'dur'),
    (None, 'fill'), (None, 'fill-opacity'), (None, 'fill-rule'),
    (None, 'font-family'), (None, 'font-size'), (None, 'font-stretch'),
    (None, 'font-style'), (None, 'font-variant'), (None, 'font-weight'),
    (None, 'from'), (None, 'fx'), (None, 'fy'),
    (None, 'g1'), (None, 'g2'), (None, 'glyph-name'),
    (None, 'gradientUnits'), (None, 'hanging'), (None, 'horiz-adv-x'),
    (None, 'horiz-origin-x'), (None, 'ideographic'), (None, 'k'),
    (None, 'keyPoints'), (None, 'keySplines'), (None, 'keyTimes'),
    (None, 'marker-end'), (None, 'marker-mid'), (None, 'marker-start'),
    (None, 'markerHeight'), (None, 'markerUnits'), (None, 'markerWidth'),
    (None, 'mathematical'), (None, 'offset'), (None, 'opacity'),
    (None, 'orient'), (None, 'origin'), (None, 'overline-position'),
    (None, 'overline-thickness'), (None, 'panose-1'), (None, 'path'),
    (None, 'pathLength'), (None, 'points'), (None, 'preserveAspectRatio'),
    (None, 'r'), (None, 'refX'), (None, 'refY'),
    (None, 'repeatCount'), (None, 'repeatDur'), (None, 'requiredExtensions'),
    (None, 'requiredFeatures'), (None, 'restart'), (None, 'rotate'),
    (None, 'rx'), (None, 'ry'), (None, 'slope'),
    (None, 'stemh'), (None, 'stemv'), (None, 'stop-color'),
    (None, 'stop-opacity'), (None, 'strikethrough-position'),
    (None, 'strikethrough-thickness'), (None, 'stroke'),
    (None, 'stroke-dasharray'), (None, 'stroke-dashoffset'),
    (None, 'stroke-linecap'), (None, 'stroke-linejoin'),
    (None, 'stroke-miterlimit'), (None, 'stroke-opacity'),
    (None, 'stroke-width'), (None, 'systemLanguage'), (None, 'text-anchor'),
    (None, 'to'), (None, 'transform'), (None, 'u1'),
    (None, 'u2'), (None, 'underline-position'), (None, 'underline-thickness'),
    (None, 'unicode'), (None, 'unicode-range'), (None, 'units-per-em'),
    (None, 'values'), (None, 'version'), (None, 'viewBox'),
    (None, 'visibility'), (None, 'widths'), (None, 'x'),
    (None, 'x-height'), (None, 'x1'), (None, 'x2'),
    (namespaces['xlink'], 'actuate'), (namespaces['xlink'], 'arcrole'),
    (namespaces['xlink'], 'role'), (namespaces['xlink'], 'title'),
    (namespaces['xml'], 'base'), (namespaces['xml'], 'space'),
    (None, 'y'), (None, 'y1'), (None, 'y2'),
    (None, 'zoomAndPan'),
))
|
523 |
+
|
524 |
+
# Attributes whose values are URIs.  Filter.allowed_token parses these
# values and drops the attribute when the scheme is not in
# ``allowed_protocols`` (or, for data: URIs, when the MIME type is not in
# ``allowed_content_types``).
attr_val_is_uri = frozenset((
    (None, 'href'), (None, 'src'), (None, 'cite'), (None, 'action'),
    (None, 'longdesc'), (None, 'poster'), (None, 'background'),
    (None, 'datasrc'), (None, 'dynsrc'), (None, 'lowsrc'), (None, 'ping'),
    (namespaces['xlink'], 'href'),
    (namespaces['xml'], 'base'),
))
|
539 |
+
|
540 |
+
# SVG presentation attributes whose values may embed ``url(...)``
# references; Filter.allowed_token blanks any such reference that does not
# start with '#' (i.e. is not document-local).
svg_attr_val_allows_ref = frozenset(
    (None, local_name) for local_name in (
        'clip-path', 'color-profile', 'cursor', 'fill', 'filter', 'marker',
        'marker-start', 'marker-mid', 'marker-end', 'mask', 'stroke',
    )
)
|
553 |
+
|
554 |
+
# SVG elements intended to have non-local xlink:href values removed.
# NOTE(review): Filter.allowed_token tests the bare tag name (a string)
# for membership in this set of (namespace, name) tuples, so the check
# appears to never match; preserved as-is for behavioral compatibility.
svg_allow_local_href = frozenset(
    (None, local_name) for local_name in (
        'altGlyph', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'cursor', 'feImage', 'filter', 'linearGradient',
        'pattern', 'radialGradient', 'textpath', 'tref', 'set', 'use',
    )
)
|
571 |
+
|
572 |
+
# CSS properties permitted inside a sanitized ``style`` attribute; any
# declaration whose property is not listed here (and is not a recognised
# shorthand or SVG property) is dropped by Filter.sanitize_css.
allowed_css_properties = frozenset("""
    azimuth background-color border-bottom-color border-collapse
    border-color border-left-color border-right-color border-top-color
    clear color cursor direction display elevation float font
    font-family font-size font-style font-variant font-weight height
    letter-spacing line-height overflow pause pause-after pause-before
    pitch pitch-range richness speak speak-header speak-numeral
    speak-punctuation speech-rate stress text-align text-decoration
    text-indent unicode-bidi vertical-align voice-family volume
    white-space width
""".split())
|
620 |
+
|
621 |
+
# CSS value keywords (plus a small colour palette and '!important')
# accepted inside shorthand background/border/margin/padding values by
# Filter.sanitize_css.
allowed_css_keywords = frozenset("""
    auto aqua black block blue bold both bottom brown center collapse
    dashed dotted fuchsia gray green !important italic left lime maroon
    medium none navy normal nowrap olive pointer purple red right solid
    silver teal top transparent underline white yellow
""".split())
|
662 |
+
|
663 |
+
# SVG presentation properties allowed inside a sanitized ``style``
# attribute (checked after ``allowed_css_properties``).
allowed_svg_properties = frozenset("""
    fill fill-opacity fill-rule stroke stroke-width stroke-linecap
    stroke-linejoin stroke-opacity
""".split())
|
673 |
+
|
674 |
+
# URI schemes permitted in URI-valued attributes; an attribute whose value
# carries any other scheme is removed by Filter.allowed_token.
allowed_protocols = frozenset("""
    ed2k ftp http https irc mailto news gopher nntp telnet webcal xmpp
    callto feed urn aim rsync tag ssh sftp rtsp afs data
""".split())
|
699 |
+
|
700 |
+
# MIME types permitted inside ``data:`` URIs (checked against the
# ``content_type`` group captured by ``data_content_type``).
allowed_content_types = frozenset("""
    image/png image/jpeg image/gif image/webp image/bmp text/plain
""".split())
|
708 |
+
|
709 |
+
|
710 |
+
# Pre-compiled matcher for the path portion of a ``data:`` URI.  It
# captures the MIME type as the ``content_type`` group so
# Filter.allowed_token can check it against ``allowed_content_types``;
# anything that fails to match causes the attribute to be dropped.
data_content_type = re.compile(r'''
                                ^
                                # Match a content type <application>/<type>
                                (?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+)
                                # Match any character set and encoding
                                (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?)
                                  |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?)
                                # Assume the rest is data
                                ,.*
                                $
                                ''',
                               re.VERBOSE)
|
722 |
+
|
723 |
+
|
724 |
+
class Filter(base.Filter):
|
725 |
+
"""Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes"""
|
726 |
+
def __init__(self,
|
727 |
+
source,
|
728 |
+
allowed_elements=allowed_elements,
|
729 |
+
allowed_attributes=allowed_attributes,
|
730 |
+
allowed_css_properties=allowed_css_properties,
|
731 |
+
allowed_css_keywords=allowed_css_keywords,
|
732 |
+
allowed_svg_properties=allowed_svg_properties,
|
733 |
+
allowed_protocols=allowed_protocols,
|
734 |
+
allowed_content_types=allowed_content_types,
|
735 |
+
attr_val_is_uri=attr_val_is_uri,
|
736 |
+
svg_attr_val_allows_ref=svg_attr_val_allows_ref,
|
737 |
+
svg_allow_local_href=svg_allow_local_href):
|
738 |
+
"""Creates a Filter
|
739 |
+
|
740 |
+
:arg allowed_elements: set of elements to allow--everything else will
|
741 |
+
be escaped
|
742 |
+
|
743 |
+
:arg allowed_attributes: set of attributes to allow in
|
744 |
+
elements--everything else will be stripped
|
745 |
+
|
746 |
+
:arg allowed_css_properties: set of CSS properties to allow--everything
|
747 |
+
else will be stripped
|
748 |
+
|
749 |
+
:arg allowed_css_keywords: set of CSS keywords to allow--everything
|
750 |
+
else will be stripped
|
751 |
+
|
752 |
+
:arg allowed_svg_properties: set of SVG properties to allow--everything
|
753 |
+
else will be removed
|
754 |
+
|
755 |
+
:arg allowed_protocols: set of allowed protocols for URIs
|
756 |
+
|
757 |
+
:arg allowed_content_types: set of allowed content types for ``data`` URIs.
|
758 |
+
|
759 |
+
:arg attr_val_is_uri: set of attributes that have URI values--values
|
760 |
+
that have a scheme not listed in ``allowed_protocols`` are removed
|
761 |
+
|
762 |
+
:arg svg_attr_val_allows_ref: set of SVG attributes that can have
|
763 |
+
references
|
764 |
+
|
765 |
+
:arg svg_allow_local_href: set of SVG elements that can have local
|
766 |
+
hrefs--these are removed
|
767 |
+
|
768 |
+
"""
|
769 |
+
super(Filter, self).__init__(source)
|
770 |
+
|
771 |
+
warnings.warn(_deprecation_msg, DeprecationWarning)
|
772 |
+
|
773 |
+
self.allowed_elements = allowed_elements
|
774 |
+
self.allowed_attributes = allowed_attributes
|
775 |
+
self.allowed_css_properties = allowed_css_properties
|
776 |
+
self.allowed_css_keywords = allowed_css_keywords
|
777 |
+
self.allowed_svg_properties = allowed_svg_properties
|
778 |
+
self.allowed_protocols = allowed_protocols
|
779 |
+
self.allowed_content_types = allowed_content_types
|
780 |
+
self.attr_val_is_uri = attr_val_is_uri
|
781 |
+
self.svg_attr_val_allows_ref = svg_attr_val_allows_ref
|
782 |
+
self.svg_allow_local_href = svg_allow_local_href
|
783 |
+
|
784 |
+
def __iter__(self):
|
785 |
+
for token in base.Filter.__iter__(self):
|
786 |
+
token = self.sanitize_token(token)
|
787 |
+
if token:
|
788 |
+
yield token
|
789 |
+
|
790 |
+
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
|
791 |
+
# stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes
|
792 |
+
# are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and
|
793 |
+
# ALLOWED_CSS_KEYWORDS, are allowed through. attributes in ATTR_VAL_IS_URI
|
794 |
+
# are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are
|
795 |
+
# allowed.
|
796 |
+
#
|
797 |
+
# sanitize_html('<script> do_nasty_stuff() </script>')
|
798 |
+
# => <script> do_nasty_stuff() </script>
|
799 |
+
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
|
800 |
+
# => <a>Click here for $100</a>
|
801 |
+
def sanitize_token(self, token):
|
802 |
+
|
803 |
+
# accommodate filters which use token_type differently
|
804 |
+
token_type = token["type"]
|
805 |
+
if token_type in ("StartTag", "EndTag", "EmptyTag"):
|
806 |
+
name = token["name"]
|
807 |
+
namespace = token["namespace"]
|
808 |
+
if ((namespace, name) in self.allowed_elements or
|
809 |
+
(namespace is None and
|
810 |
+
(namespaces["html"], name) in self.allowed_elements)):
|
811 |
+
return self.allowed_token(token)
|
812 |
+
else:
|
813 |
+
return self.disallowed_token(token)
|
814 |
+
elif token_type == "Comment":
|
815 |
+
pass
|
816 |
+
else:
|
817 |
+
return token
|
818 |
+
|
819 |
+
def allowed_token(self, token):
|
820 |
+
if "data" in token:
|
821 |
+
attrs = token["data"]
|
822 |
+
attr_names = set(attrs.keys())
|
823 |
+
|
824 |
+
# Remove forbidden attributes
|
825 |
+
for to_remove in (attr_names - self.allowed_attributes):
|
826 |
+
del token["data"][to_remove]
|
827 |
+
attr_names.remove(to_remove)
|
828 |
+
|
829 |
+
# Remove attributes with disallowed URL values
|
830 |
+
for attr in (attr_names & self.attr_val_is_uri):
|
831 |
+
assert attr in attrs
|
832 |
+
# I don't have a clue where this regexp comes from or why it matches those
|
833 |
+
# characters, nor why we call unescape. I just know it's always been here.
|
834 |
+
# Should you be worried by this comment in a sanitizer? Yes. On the other hand, all
|
835 |
+
# this will do is remove *more* than it otherwise would.
|
836 |
+
val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '',
|
837 |
+
unescape(attrs[attr])).lower()
|
838 |
+
# remove replacement characters from unescaped characters
|
839 |
+
val_unescaped = val_unescaped.replace("\ufffd", "")
|
840 |
+
try:
|
841 |
+
uri = urlparse.urlparse(val_unescaped)
|
842 |
+
except ValueError:
|
843 |
+
uri = None
|
844 |
+
del attrs[attr]
|
845 |
+
if uri and uri.scheme:
|
846 |
+
if uri.scheme not in self.allowed_protocols:
|
847 |
+
del attrs[attr]
|
848 |
+
if uri.scheme == 'data':
|
849 |
+
m = data_content_type.match(uri.path)
|
850 |
+
if not m:
|
851 |
+
del attrs[attr]
|
852 |
+
elif m.group('content_type') not in self.allowed_content_types:
|
853 |
+
del attrs[attr]
|
854 |
+
|
855 |
+
for attr in self.svg_attr_val_allows_ref:
|
856 |
+
if attr in attrs:
|
857 |
+
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
|
858 |
+
' ',
|
859 |
+
unescape(attrs[attr]))
|
860 |
+
if (token["name"] in self.svg_allow_local_href and
|
861 |
+
(namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*',
|
862 |
+
attrs[(namespaces['xlink'], 'href')])):
|
863 |
+
del attrs[(namespaces['xlink'], 'href')]
|
864 |
+
if (None, 'style') in attrs:
|
865 |
+
attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')])
|
866 |
+
token["data"] = attrs
|
867 |
+
return token
|
868 |
+
|
869 |
+
def disallowed_token(self, token):
    """Turn a disallowed tag token into a literal-text Characters token.

    The tag is re-serialized as markup text so it shows up escaped in the
    output instead of being interpreted as an element.
    """
    kind = token["type"]
    if kind == "EndTag":
        markup = "</%s>" % token["name"]
    elif token["data"]:
        assert kind in ("StartTag", "EmptyTag")
        rendered = []
        for (ns, name), value in token["data"].items():
            qualified = name if ns is None else "%s:%s" % (prefixes[ns], name)
            rendered.append(' %s="%s"' % (qualified, escape(value)))
        markup = "<%s%s>" % (token["name"], ''.join(rendered))
    else:
        markup = "<%s>" % token["name"]
    if token.get("selfClosing"):
        # Re-insert the self-closing slash before the final ">".
        markup = markup[:-1] + "/>"

    token["data"] = markup
    token["type"] = "Characters"

    del token["name"]
    return token
|
888 |
+
|
889 |
+
def sanitize_css(self, style):
    """Sanitize an inline CSS declaration string.

    Returns the declaration with only allow-listed properties kept, or an
    empty string when the input fails the conservative syntax gauntlet.
    """
    # Strip url(...) references outright.
    style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)

    # Character-level gauntlet: reject anything outside a conservative
    # alphabet of CSS-ish characters, quoted words, and numeric tuples.
    if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
        return ''
    # Structural check: the whole string must be "name: value;" pairs.
    if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
        return ''

    accepted = []
    for name, val in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
        if not val:
            continue
        lowered = name.lower()
        if lowered in self.allowed_css_properties:
            accepted.append(name + ': ' + val + ';')
        elif name.split('-')[0].lower() in ['background', 'border', 'margin',
                                            'padding']:
            for keyword in val.split():
                if keyword not in self.allowed_css_keywords and \
                        not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):  # noqa
                    break
            else:
                # Every keyword passed the allow-list / colour / length check.
                accepted.append(name + ': ' + val + ';')
        elif lowered in self.allowed_svg_properties:
            accepted.append(name + ': ' + val + ';')

    return ' '.join(accepted)
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/filters/whitespace.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
import re
|
4 |
+
|
5 |
+
from . import base
|
6 |
+
from ..constants import rcdataElements, spaceCharacters
|
7 |
+
# Flatten the collection of space characters into a single string so it can
# be embedded in a regex character class below.
spaceCharacters = "".join(spaceCharacters)

# Matches any run of one or more HTML space characters.
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
|
10 |
+
|
11 |
+
|
12 |
+
class Filter(base.Filter):
    """Collapses whitespace except in pre, textarea, and script elements"""
    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Nesting depth of elements whose whitespace must be kept verbatim.
        depth = 0
        for token in base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type == "StartTag" \
                    and (depth or token["name"] in self.spacePreserveElements):
                depth += 1

            elif token_type == "EndTag" and depth:
                depth -= 1

            elif not depth and token_type == "SpaceCharacters" and token["data"]:
                # Only rewrite non-empty runs so no spaces are invented where
                # there were none.
                token["data"] = " "

            elif not depth and token_type == "Characters":
                token["data"] = collapse_spaces(token["data"])

            yield token
|
35 |
+
|
36 |
+
|
37 |
+
def collapse_spaces(text):
    """Replace every run of whitespace in *text* with a single space."""
    collapsed = SPACES_REGEX.sub(' ', text)
    return collapsed
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/html5parser.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/serializer.py
ADDED
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
from pip._vendor.six import text_type
|
3 |
+
|
4 |
+
import re
|
5 |
+
|
6 |
+
from codecs import register_error, xmlcharrefreplace_errors
|
7 |
+
|
8 |
+
from .constants import voidElements, booleanAttributes, spaceCharacters
|
9 |
+
from .constants import rcdataElements, entities, xmlEntities
|
10 |
+
from . import treewalkers, _utils
|
11 |
+
from xml.sax.saxutils import escape
|
12 |
+
|
13 |
+
# Characters that force attribute-value quoting under the HTML spec rules.
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
# Legacy mode additionally quotes on ASCII control characters and the various
# Unicode space characters, for compatibility with older browsers.
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
                                   "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
                                   "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
                                   "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                                   "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
                                   "\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
                                   "\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
                                   "\u3000]")
|
23 |
+
|
24 |
+
|
25 |
+
# Map each single-codepoint character to the shortest/lowercase-preferred
# named entity that encodes it.  Used by the htmlentityreplace codec error
# handler below.
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1  # narrow builds store astral chars as surrogate pairs
for k, v in list(entities.items()):
    # skip multi-character entities
    if ((_is_ucs4 and len(v) > 1) or
            (not _is_ucs4 and len(v) > 2)):
        continue
    if v != "&":
        if len(v) == 2:
            # Narrow build: fold the surrogate pair back into one codepoint.
            v = _utils.surrogatePairToCodepoint(v)
        else:
            v = ord(v)
        if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
            _encode_entity_map[v] = k
|
40 |
+
|
41 |
+
|
42 |
+
def htmlentityreplace_errors(exc):
    """Codec error handler replacing unencodable characters with HTML entities.

    For encode/translate errors, each offending character is emitted as a
    named entity when one exists in ``_encode_entity_map``, otherwise as a
    numeric ``&#x...;`` reference.  Other error types fall through to the
    stdlib ``xmlcharrefreplace`` handler.
    """
    if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
        res = []
        codepoints = []
        skip = False
        for i, c in enumerate(exc.object[exc.start:exc.end]):
            if skip:
                # Second half of a surrogate pair already consumed below.
                skip = False
                continue
            index = i + exc.start
            if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                skip = True
            else:
                codepoint = ord(c)
            codepoints.append(codepoint)
        for cp in codepoints:
            e = _encode_entity_map.get(cp)
            if e:
                res.append("&")
                res.append(e)
                # Some legacy entity names lack the trailing semicolon.
                if not e.endswith(";"):
                    res.append(";")
            else:
                # No named entity: fall back to a hexadecimal reference.
                res.append("&#x%s;" % (hex(cp)[2:]))
        return ("".join(res), exc.end)
    else:
        return xmlcharrefreplace_errors(exc)


# Make the handler available to str.encode(..., "htmlentityreplace").
register_error("htmlentityreplace", htmlentityreplace_errors)
|
73 |
+
|
74 |
+
|
75 |
+
def serialize(input, tree="etree", encoding=None, **serializer_opts):
    """Serializes the input token stream using the specified treewalker

    :arg input: the token stream to serialize

    :arg tree: the treewalker to use

    :arg encoding: the encoding to use

    :arg serializer_opts: any options to pass to the
        :py:class:`html5lib.serializer.HTMLSerializer` that gets created

    :returns: the tree serialized as a string

    Example:

    >>> from html5lib.html5parser import parse
    >>> from html5lib.serializer import serialize
    >>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
    >>> serialize(token_stream, omit_optional_tags=False)
    '<html><head></head><body><p>Hi!</p></body></html>'

    """
    # XXX: Should we cache this?
    tree_walker = treewalkers.getTreeWalker(tree)
    serializer = HTMLSerializer(**serializer_opts)
    return serializer.render(tree_walker(input), encoding)
|
102 |
+
|
103 |
+
|
104 |
+
class HTMLSerializer(object):
    """Serializes a treewalker token stream to (X)HTML text.

    NOTE: the entity-escape string literals in this class ("&amp;", "&lt;",
    "&#39;", "&quot;") were corrupted by HTML un-escaping in the source dump
    and have been restored per upstream html5lib.
    """

    # attribute quoting options
    quote_attr_values = "legacy"  # be secure by default
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    # Names of the keyword options accepted by __init__.
    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer

        :arg inject_meta_charset: Whether or not to inject the meta charset.

            Defaults to ``True``.

        :arg quote_attr_values: Whether to quote attribute values that don't
            require quoting per legacy browser behavior (``"legacy"``), when
            required by the standard (``"spec"``), or always (``"always"``).

            Defaults to ``"legacy"``.

        :arg quote_char: Use given quote character for attribute quoting.

            Defaults to ``"`` which will use double quotes unless attribute
            value contains a double quote, in which case single quotes are
            used.

        :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
            values.

            Defaults to ``False``.

        :arg escape_rcdata: Whether to escape characters that need to be
            escaped within normal elements within rcdata elements such as
            style.

            Defaults to ``False``.

        :arg resolve_entities: Whether to resolve named character entities that
            appear in the source tree. The XML predefined entities &lt; &gt;
            &amp; &quot; &apos; are unaffected by this setting.

            Defaults to ``True``.

        :arg strip_whitespace: Whether to remove semantically meaningless
            whitespace. (This compresses all whitespace to a single space
            except within ``pre``.)

            Defaults to ``False``.

        :arg minimize_boolean_attributes: Shortens boolean attributes to give
            just the attribute value, for example::

              <input disabled="disabled">

            becomes::

              <input disabled>

            Defaults to ``True``.

        :arg use_trailing_solidus: Includes a close-tag slash at the end of the
            start tag of void elements (empty elements whose end tag is
            forbidden). E.g. ``<hr/>``.

            Defaults to ``False``.

        :arg space_before_trailing_solidus: Places a space immediately before
            the closing slash in a tag using a trailing solidus. E.g.
            ``<hr />``. Requires ``use_trailing_solidus=True``.

            Defaults to ``True``.

        :arg sanitize: Strip all unsafe or unknown constructs from output.
            See :py:class:`html5lib.filters.sanitizer.Filter`.

            Defaults to ``False``.

        :arg omit_optional_tags: Omit start/end tags that are optional.

            Defaults to ``True``.

        :arg alphabetical_attributes: Reorder attributes to be in alphabetical order.

            Defaults to ``False``.

        """
        unexpected_args = frozenset(kwargs) - frozenset(self.options)
        if len(unexpected_args) > 0:
            raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
        if 'quote_char' in kwargs:
            # An explicit quote character disables best-quote selection.
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        """Encode *string*, replacing unencodable characters with entities."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "htmlentityreplace")
        else:
            return string

    def encodeStrict(self, string):
        """Encode *string* strictly; raises on unencodable characters."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Generate encoded chunks of serialized output for the token stream."""
        # pylint:disable=too-many-nested-blocks
        self.encoding = encoding
        in_cdata = False
        self.errors = []

        if encoding and self.inject_meta_charset:
            from .filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # Alphabetical attributes is here under the assumption that none of
        # the later filters add or change order of attributes; it needs to be
        # before the sanitizer so escaped elements come out correctly
        if self.alphabetical_attributes:
            from .filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiently of this latter filter
        if self.strip_whitespace:
            from .filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from .filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from .filters.optionaltags import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            type = token["type"]
            if type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError("System identifier contains both single and double quote characters")
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif type in ("Characters", "SpaceCharacters"):
                if type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError("Unexpected </ in CDATA")
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    # Content of rcdata elements (script, style, ...) is
                    # emitted unescaped unless escape_rcdata is set.
                    in_cdata = True
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                for (_, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple()) and
                         k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values == "always" or len(v) == 0:
                            quote_attr = True
                        elif self.quote_attr_values == "spec":
                            quote_attr = _quoteAttributeSpec.search(v) is not None
                        elif self.quote_attr_values == "legacy":
                            quote_attr = _quoteAttributeLegacy.search(v) is not None
                        else:
                            raise ValueError("quote_attr_values must be one of: "
                                             "'always', 'spec', or 'legacy'")
                        # Restored entity escapes (were garbled in the dump).
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                yield self.encodeStrict("</%s>" % name)

            elif type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    self.serializeError("Comment contains --")
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif type == "Entity":
                name = token["name"]
                key = name + ";"
                if key not in entities:
                    self.serializeError("Entity %s not recognized" % name)
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serializes the stream from the treewalker into a string

        :arg treewalker: the treewalker to serialize

        :arg encoding: the string encoding to use

        :returns: the serialized tree

        Example:

        >>> from html5lib import parse, getTreeWalker
        >>> from html5lib.serializer import HTMLSerializer
        >>> token_stream = parse('<html><body>Hi!</body></html>')
        >>> walker = getTreeWalker('etree')
        >>> serializer = HTMLSerializer(omit_optional_tags=False)
        >>> serializer.render(walker(token_stream))
        '<html><head></head><body>Hi!</body></html>'

        """
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        """Record a serialization error; raise immediately in strict mode."""
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError
|
405 |
+
|
406 |
+
|
407 |
+
class SerializeError(Exception):
    """Error in serialized tree"""
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tree adapters let you convert from one tree structure to another
|
2 |
+
|
3 |
+
Example:
|
4 |
+
|
5 |
+
.. code-block:: python
|
6 |
+
|
7 |
+
from pip._vendor import html5lib
|
8 |
+
from pip._vendor.html5lib.treeadapters import genshi
|
9 |
+
|
10 |
+
doc = '<html><body>Hi!</body></html>'
|
11 |
+
treebuilder = html5lib.getTreeBuilder('etree')
|
12 |
+
parser = html5lib.HTMLParser(tree=treebuilder)
|
13 |
+
tree = parser.parse(doc)
|
14 |
+
TreeWalker = html5lib.getTreeWalker('etree')
|
15 |
+
|
16 |
+
genshi_tree = genshi.to_genshi(TreeWalker(tree))
|
17 |
+
|
18 |
+
"""
|
19 |
+
from __future__ import absolute_import, division, unicode_literals
|
20 |
+
|
21 |
+
from . import sax
|
22 |
+
|
23 |
+
__all__ = ["sax"]
|
24 |
+
|
25 |
+
try:
|
26 |
+
from . import genshi # noqa
|
27 |
+
except ImportError:
|
28 |
+
pass
|
29 |
+
else:
|
30 |
+
__all__.append("genshi")
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (942 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-310.pyc
ADDED
Binary file (1.55 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-310.pyc
ADDED
Binary file (1.46 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from genshi.core import QName, Attrs
|
4 |
+
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
|
5 |
+
|
6 |
+
|
7 |
+
def to_genshi(walker):
    """Convert a tree to a genshi tree

    :arg walker: the treewalker to use to walk the tree to convert it

    :returns: generator of genshi nodes

    """
    # Adjacent text tokens are buffered and flushed as one TEXT event just
    # before the next non-text event (and once more at the end of the stream).
    text = []
    for token in walker:
        type = token["type"]
        if type in ("Characters", "SpaceCharacters"):
            text.append(token["data"])
        elif text:
            yield TEXT, "".join(text), (None, -1, -1)
            text = []

        if type in ("StartTag", "EmptyTag"):
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]
            # Namespaced attributes become Clark-notation QNames.
            attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                           for attr, value in token["data"].items()])
            yield (START, (QName(name), attrs), (None, -1, -1))
            if type == "EmptyTag":
                # Empty tags immediately fall through to emit the END event.
                type = "EndTag"

        if type == "EndTag":
            if token["namespace"]:
                name = "{%s}%s" % (token["namespace"], token["name"])
            else:
                name = token["name"]

            yield END, QName(name), (None, -1, -1)

        elif type == "Comment":
            yield COMMENT, token["data"], (None, -1, -1)

        elif type == "Doctype":
            yield DOCTYPE, (token["name"], token["publicId"],
                            token["systemId"]), (None, -1, -1)

        else:
            pass  # FIXME: What to do?

    if text:
        # Flush any trailing buffered text.
        yield TEXT, "".join(text), (None, -1, -1)
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
from xml.sax.xmlreader import AttributesNSImpl
|
4 |
+
|
5 |
+
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
|
6 |
+
|
7 |
+
# Build a prefix -> namespace URI map from the foreign-attribute adjustment
# table, for declaring prefix mappings on the SAX handler.
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
    if prefix is not None:
        prefix_mapping[prefix] = namespace
|
11 |
+
|
12 |
+
|
13 |
+
def to_sax(walker, handler):
    """Call SAX-like content handler based on treewalker walker

    :arg walker: the treewalker to use to walk the tree to convert it

    :arg handler: SAX handler to use

    """
    handler.startDocument()
    for prefix, namespace in prefix_mapping.items():
        handler.startPrefixMapping(prefix, namespace)

    for token in walker:
        token_type = token["type"]
        if token_type == "Doctype":
            # SAX has no doctype event here; skip it.
            continue
        elif token_type in ("StartTag", "EmptyTag"):
            qname = (token["namespace"], token["name"])
            attrs = AttributesNSImpl(token["data"],
                                     unadjustForeignAttributes)
            handler.startElementNS(qname, token["name"], attrs)
            if token_type == "EmptyTag":
                # Empty elements close immediately.
                handler.endElementNS(qname, token["name"])
        elif token_type == "EndTag":
            handler.endElementNS((token["namespace"], token["name"]),
                                 token["name"])
        elif token_type in ("Characters", "SpaceCharacters"):
            handler.characters(token["data"])
        elif token_type == "Comment":
            pass
        else:
            assert False, "Unknown token type"

    for prefix, namespace in prefix_mapping.items():
        handler.endPrefixMapping(prefix)
    handler.endDocument()
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""A collection of modules for building different kinds of trees from HTML
|
2 |
+
documents.
|
3 |
+
|
4 |
+
To create a treebuilder for a new type of tree, you need to do
|
5 |
+
implement several things:
|
6 |
+
|
7 |
+
1. A set of classes for various types of elements: Document, Doctype, Comment,
|
8 |
+
Element. These must implement the interface of ``base.treebuilders.Node``
|
9 |
+
(although comment nodes have a different signature for their constructor,
|
10 |
+
see ``treebuilders.etree.Comment``) Textual content may also be implemented
|
11 |
+
as another node type, or not, as your tree implementation requires.
|
12 |
+
|
13 |
+
2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
|
14 |
+
from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
|
15 |
+
|
16 |
+
* ``documentClass`` - the class to use for the bottommost node of a document
|
17 |
+
* ``elementClass`` - the class to use for HTML Elements
|
18 |
+
* ``commentClass`` - the class to use for comments
|
19 |
+
* ``doctypeClass`` - the class to use for doctypes
|
20 |
+
|
21 |
+
It also has one required method:
|
22 |
+
|
23 |
+
* ``getDocument`` - Returns the root node of the complete document tree
|
24 |
+
|
25 |
+
3. If you wish to run the unit tests, you must also create a ``testSerializer``
|
26 |
+
method on your treebuilder which accepts a node and returns a string
|
27 |
+
containing Node and its children serialized according to the format used in
|
28 |
+
the unittests
|
29 |
+
|
30 |
+
"""
|
31 |
+
|
32 |
+
from __future__ import absolute_import, division, unicode_literals
|
33 |
+
|
34 |
+
from .._utils import default_etree
|
35 |
+
|
36 |
+
treeBuilderCache = {}
|
37 |
+
|
38 |
+
|
39 |
+
def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of trees with built-in support

    :arg treeType: the name of the tree type required (case-insensitive). Supported
        values are:

        * "dom" - A generic builder for DOM implementations, defaulting to a
          xml.dom.minidom based implementation.
        * "etree" - A generic builder for tree implementations exposing an
          ElementTree-like interface, defaulting to xml.etree.cElementTree if
          available and xml.etree.ElementTree if not.
        * "lxml" - A etree-based builder for lxml.etree, handling limitations
          of lxml's implementation.

    :arg implementation: (Currently applies to the "etree" and "dom" tree
        types). A module implementing the tree type e.g. xml.etree.ElementTree
        or xml.etree.cElementTree.

    :arg kwargs: Any additional options to pass to the TreeBuilder when
        creating it.

    Example:

    >>> from html5lib.treebuilders import getTreeBuilder
    >>> builder = getTreeBuilder('etree')

    """

    treeType = treeType.lower()
    if treeType not in treeBuilderCache:
        if treeType == "dom":
            from . import dom
            # Come up with a sane default (pref. from the stdlib)
            if implementation is None:
                from xml.dom import minidom
                implementation = minidom
            # NEVER cache here, caching is done in the dom submodule
            return dom.getDomModule(implementation, **kwargs).TreeBuilder
        if treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
        if treeType == "lxml":
            from . import etree_lxml
            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        else:
            raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
    return treeBuilderCache.get(treeType)
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.33 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-310.pyc
ADDED
Binary file (11.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-310.pyc
ADDED
Binary file (9.41 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-310.pyc
ADDED
Binary file (11.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-310.pyc
ADDED
Binary file (13 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/base.py
ADDED
@@ -0,0 +1,417 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
from pip._vendor.six import text_type
|
3 |
+
|
4 |
+
from ..constants import scopingElements, tableInsertModeElements, namespaces
|
5 |
+
|
6 |
+
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None

# Maps the ``variant`` argument of TreeBuilder.elementInScope to a pair of
# (frozenset of (namespace, name) tuples, invert flag).  With invert False the
# set lists elements that terminate the scope search (scope boundaries); with
# invert True ("select") only elements in the set are allowed inside the scope.
listElementsMap = {
    None: (frozenset(scopingElements), False),
    "button": (frozenset(scopingElements | {(namespaces["html"], "button")}), False),
    "list": (frozenset(scopingElements | {(namespaces["html"], "ol"),
                                          (namespaces["html"], "ul")}), False),
    "table": (frozenset([(namespaces["html"], "html"),
                         (namespaces["html"], "table")]), False),
    "select": (frozenset([(namespaces["html"], "optgroup"),
                          (namespaces["html"], "option")]), True)
}
|
21 |
+
|
22 |
+
|
23 |
+
class Node(object):
    """Abstract base class for a single item in the tree.

    Concrete treebuilders subclass this and implement the structural
    operations (appendChild, insertText, insertBefore, removeChild,
    cloneNode, hasContent).
    """

    def __init__(self, name):
        """Create a Node.

        :arg name: The tag name associated with the node

        """
        # Tag name of this node.
        self.name = name
        # Parent node; None for the document node.
        self.parent = None
        # Node value (used by text nodes and comments).
        self.value = None
        # Mapping of attribute name -> attribute value.
        self.attributes = {}
        # Child nodes; must contain all element children, other node
        # types are optional.
        self.childNodes = []
        # Miscellaneous flags that can be set on the node.
        self._flags = []

    def __str__(self):
        rendered = " ".join('%s="%s"' % pair
                            for pair in self.attributes.items())
        if rendered:
            return "<%s %s>" % (self.name, rendered)
        return "<%s>" % (self.name)

    def __repr__(self):
        return "<%s>" % (self.name)

    def appendChild(self, node):
        """Insert node as a child of the current node

        :arg node: the node to insert

        """
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in the current node, positioned before the
        start of node insertBefore or to the end of the node's text.

        :arg data: the data to insert

        :arg insertBefore: True if you want to insert the text before the node
            and False if you want to insert it after the node

        """
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child of the current node, before refNode in the
        list of child nodes. Raises ValueError if refNode is not a child of
        the current node

        :arg node: the node to insert

        :arg refNode: the child node to insert the node before

        """
        raise NotImplementedError

    def removeChild(self, node):
        """Remove node from the children of the current node

        :arg node: the child node to remove

        """
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all the children of the current node to newParent.
        This is needed so that trees that don't store text as nodes move the
        text in the correct way

        :arg newParent: the node to move all this node's children to

        """
        # XXX - should this method be made more general?
        for node in self.childNodes:
            newParent.appendChild(node)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy of the current node i.e. a node with the same
        name and attributes but with no parent or child nodes
        """
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text, false otherwise
        """
        raise NotImplementedError
|
120 |
+
|
121 |
+
|
122 |
+
class ActiveFormattingElements(list):
    """List of active formatting elements.

    ``append`` enforces the limit of three identical entries between
    markers: before adding a non-marker node, the oldest of three
    matching entries after the last marker is dropped.
    """

    def append(self, node):
        if node != Marker:
            matches = 0
            # Walk back towards the last marker counting entries equal
            # to the incoming node (same name tuple and attributes).
            for candidate in reversed(self):
                if candidate == Marker:
                    break
                if self.nodesEqual(candidate, node):
                    matches += 1
                    if matches == 3:
                        # Third duplicate found: remove it to keep at
                        # most three equal entries after the marker.
                        self.remove(candidate)
                        break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        """Return True if the two nodes have equal name tuples and
        equal attribute dicts."""
        return (node1.nameTuple == node2.nameTuple and
                node1.attributes == node2.attributes)
|
144 |
+
|
145 |
+
|
146 |
+
class TreeBuilder(object):
    """Base treebuilder implementation

    * documentClass - the class to use for the bottommost node of a document
    * elementClass - the class to use for HTML Elements
    * commentClass - the class to use for comments
    * doctypeClass - the class to use for doctypes

    """
    # pylint:disable=not-callable

    # Document class
    documentClass = None

    # The class to use for creating a node
    elementClass = None

    # The class to use for creating comments
    commentClass = None

    # The class to use for creating doctypes
    doctypeClass = None

    # Fragment class
    fragmentClass = None

    def __init__(self, namespaceHTMLElements):
        """Create a TreeBuilder

        :arg namespaceHTMLElements: whether or not to namespace HTML elements

        """
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()

    def reset(self):
        """Reset all parse state so the builder can be reused."""
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()

        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None

        # Setting this property also selects which insertElement
        # implementation is used (see _setInsertFromTable below).
        self.insertFromTable = False

        self.document = self.documentClass()

    def elementInScope(self, target, variant=None):
        """Return True if *target* is in scope on the stack of open elements.

        :arg target: either a node (matched by identity) or a tag name
            string/tuple (matched against each node's nameTuple)

        :arg variant: key into listElementsMap selecting which element set
            bounds the scope (None, "button", "list", "table" or "select")

        """
        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")
        if not exactNode:
            if isinstance(target, text_type):
                # Bare names are assumed to be in the HTML namespace.
                target = (namespaces["html"], target)
            assert isinstance(target, tuple)

        listElements, invert = listElementsMap[variant]

        for node in reversed(self.openElements):
            if exactNode and node == target:
                return True
            elif not exactNode and node.nameTuple == target:
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                # Hit a scope boundary (or, for inverted variants, an
                # element outside the allowed set) before the target.
                return False

        assert False  # We should never reach this point

    def reconstructActiveFormattingElements(self):
        """Reopen formatting elements that were implicitly closed, by
        re-inserting clones of the entries after the last marker that are
        not on the stack of open elements."""
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.

        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return

        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return

        # Step 6
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]

        while True:
            # Step 7
            i += 1

            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes

            # Step 9
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})

            # Step 10: replace the list entry with the freshly inserted
            # element so later reconstructions clone the live node.
            self.activeFormattingElements[i] = element

            # Step 11
            if element == self.activeFormattingElements[-1]:
                break

    def clearActiveFormattingElements(self):
        """Pop entries off the active formatting elements list up to and
        including the last marker."""
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()

    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""

        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False

    def insertRoot(self, token):
        """Create the root element from *token* and attach it to the
        document, pushing it onto the stack of open elements."""
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)

    def insertDoctype(self, token):
        """Append a doctype node built from *token* to the document."""
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)

    def insertComment(self, token, parent=None):
        """Append a comment node; *parent* defaults to the current node
        (top of the open elements stack)."""
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))

    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element

    def _getInsertFromTable(self):
        return self._insertFromTable

    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal

    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)

    def insertElementNormal(self, token):
        """Create an element from *token* and append it to the current node,
        pushing it onto the stack of open elements."""
        name = token["name"]
        assert isinstance(name, text_type), "Element %s not unicode" % name
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element

    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        # NOTE(review): this element is discarded when the current node is
        # not a table-insert-mode element (insertElementNormal builds its
        # own) — kept as-is to preserve behavior exactly.
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element

    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]

        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)

    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                # Detached table: foster parent is the element just below
                # the table on the stack of open elements.
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            # No table on the stack: foster parent is the root element.
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore

    def generateImpliedEndTags(self, exclude=None):
        """Pop open elements whose end tags may be implied, recursively,
        stopping at (and keeping) the element named *exclude*."""
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and
                name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)

    def getDocument(self):
        """Return the final tree"""
        return self.document

    def getFragment(self):
        """Return the final fragment"""
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment

    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests

        :arg node: the node from which to start serializing

        """
        raise NotImplementedError
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/dom.py
ADDED
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
|
3 |
+
|
4 |
+
try:
|
5 |
+
from collections.abc import MutableMapping
|
6 |
+
except ImportError: # Python 2.7
|
7 |
+
from collections import MutableMapping
|
8 |
+
from xml.dom import minidom, Node
|
9 |
+
import weakref
|
10 |
+
|
11 |
+
from . import base
|
12 |
+
from .. import constants
|
13 |
+
from ..constants import namespaces
|
14 |
+
from .._utils import moduleFactoryFactory
|
15 |
+
|
16 |
+
|
17 |
+
def getDomBuilder(DomImplementation):
    """Build and return (via ``locals()``) the treebuilder namespace for a
    given ``xml.dom`` implementation.

    :arg DomImplementation: a DOM implementation module/object exposing
        ``getDOMImplementation()`` (e.g. ``xml.dom.minidom``)

    The returned mapping is consumed by ``moduleFactoryFactory`` to
    synthesize a per-implementation module containing ``TreeBuilder``,
    ``NodeBuilder``, ``AttrList`` and ``testSerializer``.
    """
    Dom = DomImplementation

    class AttrList(MutableMapping):
        """Mutable-mapping adapter over a DOM element's attributes.

        Namespaced (tuple) keys are not supported by this adapter.
        """
        def __init__(self, element):
            self.element = element

        def __iter__(self):
            return iter(self.element.attributes.keys())

        def __setitem__(self, name, value):
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                # Wrap the value in a proper Attr node before storing.
                attr = self.element.ownerDocument.createAttribute(name)
                attr.value = value
                self.element.attributes[name] = attr

        def __len__(self):
            return len(self.element.attributes)

        def items(self):
            return list(self.element.attributes.items())

        def values(self):
            return list(self.element.attributes.values())

        def __getitem__(self, name):
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                return self.element.attributes[name].value

        def __delitem__(self, name):
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                del self.element.attributes[name]

    class NodeBuilder(base.Node):
        """base.Node wrapper around a concrete DOM node."""
        def __init__(self, element):
            base.Node.__init__(self, element.nodeName)
            self.element = element

        # Some DOM node types lack namespaceURI; fall back to None.
        namespace = property(lambda self: hasattr(self.element, "namespaceURI") and
                             self.element.namespaceURI or None)

        def appendChild(self, node):
            node.parent = self
            self.element.appendChild(node.element)

        def insertText(self, data, insertBefore=None):
            text = self.element.ownerDocument.createTextNode(data)
            if insertBefore:
                self.element.insertBefore(text, insertBefore.element)
            else:
                self.element.appendChild(text)

        def insertBefore(self, node, refNode):
            self.element.insertBefore(node.element, refNode.element)
            node.parent = self

        def removeChild(self, node):
            # Only detach if the node is actually our child in the DOM.
            if node.element.parentNode == self.element:
                self.element.removeChild(node.element)
            node.parent = None

        def reparentChildren(self, newParent):
            # Move children at the DOM level; appendChild on an attached
            # node implicitly removes it from its old parent.
            while self.element.hasChildNodes():
                child = self.element.firstChild
                self.element.removeChild(child)
                newParent.element.appendChild(child)
            self.childNodes = []

        def getAttributes(self):
            return AttrList(self.element)

        def setAttributes(self, attributes):
            if attributes:
                for name, value in list(attributes.items()):
                    if isinstance(name, tuple):
                        # Tuple keys are (prefix, localName, namespaceURI).
                        if name[0] is not None:
                            qualifiedName = (name[0] + ":" + name[1])
                        else:
                            qualifiedName = name[1]
                        self.element.setAttributeNS(name[2], qualifiedName,
                                                    value)
                    else:
                        self.element.setAttribute(
                            name, value)
        attributes = property(getAttributes, setAttributes)

        def cloneNode(self):
            # Shallow clone (no children), matching base.Node.cloneNode.
            return NodeBuilder(self.element.cloneNode(False))

        def hasContent(self):
            return self.element.hasChildNodes()

        def getNameTuple(self):
            if self.namespace is None:
                return namespaces["html"], self.name
            else:
                return self.namespace, self.name

        nameTuple = property(getNameTuple)

    class TreeBuilder(base.TreeBuilder):  # pylint:disable=unused-variable
        def documentClass(self):
            # Called like a class by base.TreeBuilder.reset(); creates the
            # real DOM document and returns a weak proxy of self so the
            # builder itself stands in for the document node.
            self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
            return weakref.proxy(self)

        def insertDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]

            domimpl = Dom.getDOMImplementation()
            doctype = domimpl.createDocumentType(name, publicId, systemId)
            self.document.appendChild(NodeBuilder(doctype))
            if Dom == minidom:
                # minidom doctypes need an explicit owner document.
                doctype.ownerDocument = self.dom

        def elementClass(self, name, namespace=None):
            # Called like a class by base.TreeBuilder.createElement().
            if namespace is None and self.defaultNamespace is None:
                node = self.dom.createElement(name)
            else:
                node = self.dom.createElementNS(namespace, name)

            return NodeBuilder(node)

        def commentClass(self, data):
            return NodeBuilder(self.dom.createComment(data))

        def fragmentClass(self):
            return NodeBuilder(self.dom.createDocumentFragment())

        def appendChild(self, node):
            self.dom.appendChild(node.element)

        def testSerializer(self, element):
            return testSerializer(element)

        def getDocument(self):
            return self.dom

        def getFragment(self):
            return base.TreeBuilder.getFragment(self).element

        def insertText(self, data, parent=None):
            data = data  # no-op assignment, preserved as-is
            if parent != self:
                base.TreeBuilder.insertText(self, data, parent)
            else:
                # HACK: allow text nodes as children of the document node
                if hasattr(self.dom, '_child_node_types'):
                    # pylint:disable=protected-access
                    if Node.TEXT_NODE not in self.dom._child_node_types:
                        self.dom._child_node_types = list(self.dom._child_node_types)
                        self.dom._child_node_types.append(Node.TEXT_NODE)
                self.dom.appendChild(self.dom.createTextNode(data))

        implementation = DomImplementation
        name = None

    def testSerializer(element):
        """Serialize *element*'s subtree in the unit-test tree format."""
        element.normalize()
        rv = []

        def serializeElement(element, indent=0):
            if element.nodeType == Node.DOCUMENT_TYPE_NODE:
                if element.name:
                    if element.publicId or element.systemId:
                        publicId = element.publicId or ""
                        systemId = element.systemId or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, element.name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif element.nodeType == Node.DOCUMENT_NODE:
                rv.append("#document")
            elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
                rv.append("#document-fragment")
            elif element.nodeType == Node.COMMENT_NODE:
                rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
            elif element.nodeType == Node.TEXT_NODE:
                rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
            else:
                # Regular element: prefix the name with its namespace's
                # well-known prefix when one is present.
                if (hasattr(element, "namespaceURI") and
                        element.namespaceURI is not None):
                    name = "%s %s" % (constants.prefixes[element.namespaceURI],
                                      element.nodeName)
                else:
                    name = element.nodeName
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.hasAttributes():
                    attributes = []
                    for i in range(len(element.attributes)):
                        attr = element.attributes.item(i)
                        name = attr.nodeName
                        value = attr.value
                        ns = attr.namespaceURI
                        if ns:
                            name = "%s %s" % (constants.prefixes[ns], attr.localName)
                        else:
                            name = attr.nodeName
                        attributes.append((name, value))

                    # Attributes are emitted in sorted order for stable output.
                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
            indent += 2
            for child in element.childNodes:
                serializeElement(child, indent)
        serializeElement(element, 0)

        return "\n".join(rv)

    return locals()


# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
ADDED
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import absolute_import, division, unicode_literals
|
2 |
+
# pylint:disable=protected-access
|
3 |
+
|
4 |
+
from pip._vendor.six import text_type
|
5 |
+
|
6 |
+
import re
|
7 |
+
|
8 |
+
from copy import copy
|
9 |
+
|
10 |
+
from . import base
|
11 |
+
from .. import _ihatexml
|
12 |
+
from .. import constants
|
13 |
+
from ..constants import namespaces
|
14 |
+
from .._utils import moduleFactoryFactory
|
15 |
+
|
16 |
+
# Splits an ElementTree Clark-notation tag "{namespace}local" into its
# namespace and local-name groups.
tag_regexp = re.compile("{([^}]*)}(.*)")
|
17 |
+
|
18 |
+
|
19 |
+
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
|
20 |
+
ElementTree = ElementTreeImplementation
|
21 |
+
ElementTreeCommentType = ElementTree.Comment("asd").tag
|
22 |
+
|
23 |
+
class Element(base.Node):
|
24 |
+
def __init__(self, name, namespace=None):
|
25 |
+
self._name = name
|
26 |
+
self._namespace = namespace
|
27 |
+
self._element = ElementTree.Element(self._getETreeTag(name,
|
28 |
+
namespace))
|
29 |
+
if namespace is None:
|
30 |
+
self.nameTuple = namespaces["html"], self._name
|
31 |
+
else:
|
32 |
+
self.nameTuple = self._namespace, self._name
|
33 |
+
self.parent = None
|
34 |
+
self._childNodes = []
|
35 |
+
self._flags = []
|
36 |
+
|
37 |
+
def _getETreeTag(self, name, namespace):
|
38 |
+
if namespace is None:
|
39 |
+
etree_tag = name
|
40 |
+
else:
|
41 |
+
etree_tag = "{%s}%s" % (namespace, name)
|
42 |
+
return etree_tag
|
43 |
+
|
44 |
+
def _setName(self, name):
|
45 |
+
self._name = name
|
46 |
+
self._element.tag = self._getETreeTag(self._name, self._namespace)
|
47 |
+
|
48 |
+
def _getName(self):
|
49 |
+
return self._name
|
50 |
+
|
51 |
+
name = property(_getName, _setName)
|
52 |
+
|
53 |
+
def _setNamespace(self, namespace):
|
54 |
+
self._namespace = namespace
|
55 |
+
self._element.tag = self._getETreeTag(self._name, self._namespace)
|
56 |
+
|
57 |
+
def _getNamespace(self):
|
58 |
+
return self._namespace
|
59 |
+
|
60 |
+
namespace = property(_getNamespace, _setNamespace)
|
61 |
+
|
62 |
+
def _getAttributes(self):
|
63 |
+
return self._element.attrib
|
64 |
+
|
65 |
+
def _setAttributes(self, attributes):
|
66 |
+
el_attrib = self._element.attrib
|
67 |
+
el_attrib.clear()
|
68 |
+
if attributes:
|
69 |
+
# calling .items _always_ allocates, and the above truthy check is cheaper than the
|
70 |
+
# allocation on average
|
71 |
+
for key, value in attributes.items():
|
72 |
+
if isinstance(key, tuple):
|
73 |
+
name = "{%s}%s" % (key[2], key[1])
|
74 |
+
else:
|
75 |
+
name = key
|
76 |
+
el_attrib[name] = value
|
77 |
+
|
78 |
+
attributes = property(_getAttributes, _setAttributes)
|
79 |
+
|
80 |
+
def _getChildNodes(self):
|
81 |
+
return self._childNodes
|
82 |
+
|
83 |
+
def _setChildNodes(self, value):
|
84 |
+
del self._element[:]
|
85 |
+
self._childNodes = []
|
86 |
+
for element in value:
|
87 |
+
self.insertChild(element)
|
88 |
+
|
89 |
+
childNodes = property(_getChildNodes, _setChildNodes)
|
90 |
+
|
91 |
+
def hasContent(self):
|
92 |
+
"""Return true if the node has children or text"""
|
93 |
+
return bool(self._element.text or len(self._element))
|
94 |
+
|
95 |
+
def appendChild(self, node):
|
96 |
+
self._childNodes.append(node)
|
97 |
+
self._element.append(node._element)
|
98 |
+
node.parent = self
|
99 |
+
|
100 |
+
def insertBefore(self, node, refNode):
|
101 |
+
index = list(self._element).index(refNode._element)
|
102 |
+
self._element.insert(index, node._element)
|
103 |
+
node.parent = self
|
104 |
+
|
105 |
+
def removeChild(self, node):
|
106 |
+
self._childNodes.remove(node)
|
107 |
+
self._element.remove(node._element)
|
108 |
+
node.parent = None
|
109 |
+
|
110 |
+
def insertText(self, data, insertBefore=None):
    """Insert character data, honouring etree's text/tail model.

    etree stores text either as parent.text (before the first child)
    or as a child's .tail (after that child), so the insertion point
    determines which slot *data* is appended to.
    """
    if not(len(self._element)):
        # No children yet: all text belongs in .text.
        if not self._element.text:
            self._element.text = ""
        self._element.text += data
    elif insertBefore is None:
        # Insert the text as the tail of the last child element
        if not self._element[-1].tail:
            self._element[-1].tail = ""
        self._element[-1].tail += data
    else:
        # Insert the text before the specified node
        children = list(self._element)
        index = children.index(insertBefore._element)
        if index > 0:
            # Text goes after the preceding sibling, i.e. in its tail.
            if not self._element[index - 1].tail:
                self._element[index - 1].tail = ""
            self._element[index - 1].tail += data
        else:
            # insertBefore is the first child, so the text belongs to
            # the parent's .text slot.
            if not self._element.text:
                self._element.text = ""
            self._element.text += data
|
132 |
+
|
133 |
+
def cloneNode(self):
    """Return a shallow copy: same name/namespace/attributes, no children."""
    element = type(self)(self.name, self.namespace)
    if self._element.attrib:
        # Duplicate the attrib dict so the clone doesn't share it
        # (copy presumably imported at module level -- confirm).
        element._element.attrib = copy(self._element.attrib)
    return element
|
138 |
+
|
139 |
+
def reparentChildren(self, newParent):
    """Move this node's leading text and children onto *newParent*.

    The leading text must be relocated by hand because the base-class
    reparenting only moves child nodes.
    """
    if newParent.childNodes:
        # Our text continues after newParent's current last child.
        # NOTE(review): assumes that child's .tail and our .text are
        # both non-None (+= on None would raise) -- confirm the tree
        # builder guarantees this on this path.
        newParent.childNodes[-1]._element.tail += self._element.text
    else:
        if not newParent._element.text:
            newParent._element.text = ""
        if self._element.text is not None:
            newParent._element.text += self._element.text
    self._element.text = ""
    base.Node.reparentChildren(self, newParent)
|
149 |
+
|
150 |
+
class Comment(Element):
    """Wrapper for an etree comment node; .data maps to the comment text."""

    def __init__(self, data):
        # Deliberately bypasses Element.__init__: a comment has no
        # tag/namespace, so the wrapped node is created directly and the
        # wrapper bookkeeping attributes are set by hand.
        self._element = ElementTree.Comment(data)
        self.parent = None
        self._childNodes = []
        self._flags = []

    def _getData(self):
        return self._element.text

    def _setData(self, value):
        self._element.text = value

    data = property(_getData, _setData)
|
166 |
+
|
167 |
+
class DocumentType(Element):
    """Doctype node, stored as a sentinel element tagged "<!DOCTYPE>".

    The doctype name lives in the element's text; publicId/systemId are
    kept as element attributes.
    """

    def __init__(self, name, publicId, systemId):
        Element.__init__(self, "<!DOCTYPE>")
        self._element.text = name
        self.publicId = publicId
        self.systemId = systemId

    def _getPublicId(self):
        return self._element.get("publicId", "")

    def _setPublicId(self, value):
        # None is ignored rather than stored; the getter's "" default
        # then stands in for "absent".
        if value is not None:
            self._element.set("publicId", value)

    publicId = property(_getPublicId, _setPublicId)

    def _getSystemId(self):
        return self._element.get("systemId", "")

    def _setSystemId(self, value):
        if value is not None:
            self._element.set("systemId", value)

    systemId = property(_getSystemId, _setSystemId)
|
191 |
+
|
192 |
+
class Document(Element):
    """Document root, represented by a sentinel "DOCUMENT_ROOT" element."""

    def __init__(self):
        Element.__init__(self, "DOCUMENT_ROOT")
|
195 |
+
|
196 |
+
class DocumentFragment(Element):
    """Fragment container, represented by a sentinel "DOCUMENT_FRAGMENT" element."""

    def __init__(self):
        Element.__init__(self, "DOCUMENT_FRAGMENT")
|
199 |
+
|
200 |
+
def testSerializer(element):
    """Render *element* in the indented "|  <tag>" debug format.

    Understands the sentinel tags produced by this builder
    ("<!DOCTYPE>", "DOCUMENT_ROOT") as well as comments and ordinary
    elements; attributes are emitted sorted for stable output.
    """
    rv = []

    def serializeElement(element, indent=0):
        # An ElementTree wrapper (no .tag) is unwrapped to its root.
        if not(hasattr(element, "tag")):
            element = element.getroot()
        if element.tag == "<!DOCTYPE>":
            if element.get("publicId") or element.get("systemId"):
                publicId = element.get("publicId") or ""
                systemId = element.get("systemId") or ""
                rv.append("""<!DOCTYPE %s "%s" "%s">""" %
                          (element.text, publicId, systemId))
            else:
                rv.append("<!DOCTYPE %s>" % (element.text,))
        elif element.tag == "DOCUMENT_ROOT":
            rv.append("#document")
            if element.text is not None:
                rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
            # The document node itself may not carry tail text or
            # attributes; both indicate a malformed tree.
            if element.tail is not None:
                raise TypeError("Document node cannot have tail")
            if hasattr(element, "attrib") and len(element.attrib):
                raise TypeError("Document node cannot have attributes")
        elif element.tag == ElementTreeCommentType:
            rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
        else:
            assert isinstance(element.tag, text_type), \
                "Expected unicode, got %s, %s" % (type(element.tag), element.tag)
            # Split "{ns}name" tags and map the namespace to its short
            # prefix ("html", "svg", ...) for display.
            nsmatch = tag_regexp.match(element.tag)

            if nsmatch is None:
                name = element.tag
            else:
                ns, name = nsmatch.groups()
                prefix = constants.prefixes[ns]
                name = "%s %s" % (prefix, name)
            rv.append("|%s<%s>" % (' ' * indent, name))

            if hasattr(element, "attrib"):
                attributes = []
                for name, value in element.attrib.items():
                    nsmatch = tag_regexp.match(name)
                    if nsmatch is not None:
                        ns, name = nsmatch.groups()
                        prefix = constants.prefixes[ns]
                        attr_string = "%s %s" % (prefix, name)
                    else:
                        attr_string = name
                    attributes.append((attr_string, value))

                # Sorted so output is deterministic across dict orders.
                for name, value in sorted(attributes):
                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
        if element.text:
            rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
        indent += 2
        for child in element:
            serializeElement(child, indent)
        # Tail text belongs to the parent's indentation level.
        if element.tail:
            rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
    serializeElement(element, 0)

    return "\n".join(rv)
|
261 |
+
|
262 |
+
def tostring(element):  # pylint:disable=unused-variable
    """Serialize an element and its child nodes to a string.

    Accepts an ElementTree wrapper or a bare element and understands the
    sentinel "<!DOCTYPE>" and "DOCUMENT_ROOT" tags produced by this
    treebuilder. Names coerced during building are mapped back through
    InfosetFilter so the output matches the original input names.
    """
    rv = []
    filter = _ihatexml.InfosetFilter()

    def serializeElement(element):
        if isinstance(element, ElementTree.ElementTree):
            element = element.getroot()

        if element.tag == "<!DOCTYPE>":
            if element.get("publicId") or element.get("systemId"):
                publicId = element.get("publicId") or ""
                systemId = element.get("systemId") or ""
                rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
                          (element.text, publicId, systemId))
            else:
                rv.append("<!DOCTYPE %s>" % (element.text,))
        elif element.tag == "DOCUMENT_ROOT":
            if element.text is not None:
                rv.append(element.text)
            if element.tail is not None:
                raise TypeError("Document node cannot have tail")
            if hasattr(element, "attrib") and len(element.attrib):
                raise TypeError("Document node cannot have attributes")

            for child in element:
                serializeElement(child)

        elif element.tag == ElementTreeCommentType:
            rv.append("<!--%s-->" % (element.text,))
        else:
            # This is assumed to be an ordinary element.
            # Bug fix: the original only un-coerced the tag in the
            # attribute-less branch and emitted the raw coerced tag in
            # the open-with-attributes and closing tags, which produced
            # mismatched open/close tags for coerced names. Apply the
            # filter uniformly.
            tag = filter.fromXmlName(element.tag)
            if not element.attrib:
                rv.append("<%s>" % (tag,))
            else:
                attr = " ".join(["%s=\"%s\"" % (
                    filter.fromXmlName(name), value)
                    for name, value in element.attrib.items()])
                rv.append("<%s %s>" % (tag, attr))
            if element.text:
                rv.append(element.text)

            for child in element:
                serializeElement(child)

            rv.append("</%s>" % (tag,))

        if element.tail:
            rv.append(element.tail)

    serializeElement(element)

    return "".join(rv)
|
315 |
+
|
316 |
+
class TreeBuilder(base.TreeBuilder):  # pylint:disable=unused-variable
    """Concrete etree-backed tree builder wired to the classes above."""

    documentClass = Document
    doctypeClass = DocumentType
    elementClass = Element
    commentClass = Comment
    fragmentClass = DocumentFragment
    implementation = ElementTreeImplementation

    def testSerializer(self, element):
        # Delegate to the module-level debug serializer.
        return testSerializer(element)

    def getDocument(self):
        # fullTree is a closure variable of the enclosing factory: when
        # set, the whole document element is returned; otherwise just
        # the <html> root (namespaced if a default namespace is set).
        if fullTree:
            return self.document._element
        else:
            if self.defaultNamespace is not None:
                return self.document._element.find(
                    "{%s}html" % self.defaultNamespace)
            else:
                return self.document._element.find("html")

    def getFragment(self):
        # Unwrap the base-class fragment to the raw etree element.
        return base.TreeBuilder.getFragment(self)._element
|
339 |
+
|
340 |
+
return locals()
|
341 |
+
|
342 |
+
|
343 |
+
# Factory entry point: getETreeModule(implementation, ...) exposes the
# locals of getETreeBuilder for a given etree implementation
# (moduleFactoryFactory is defined elsewhere -- presumably it memoizes
# per implementation; confirm in _utils).
getETreeModule = moduleFactoryFactory(getETreeBuilder)
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py
ADDED
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Module for supporting the lxml.etree library. The idea here is to use as much
|
2 |
+
of the native library as possible, without using fragile hacks like custom element
|
3 |
+
names that break between releases. The downside of this is that we cannot represent
|
4 |
+
all possible trees; specifically the following are known to cause problems:
|
5 |
+
|
6 |
+
Text or comments as siblings of the root element
|
7 |
+
Doctypes with no name
|
8 |
+
|
9 |
+
When any of these things occur, we emit a DataLossWarning
|
10 |
+
"""
|
11 |
+
|
12 |
+
from __future__ import absolute_import, division, unicode_literals
|
13 |
+
# pylint:disable=protected-access
|
14 |
+
|
15 |
+
import warnings
|
16 |
+
import re
|
17 |
+
import sys
|
18 |
+
|
19 |
+
try:
|
20 |
+
from collections.abc import MutableMapping
|
21 |
+
except ImportError:
|
22 |
+
from collections import MutableMapping
|
23 |
+
|
24 |
+
from . import base
|
25 |
+
from ..constants import DataLossWarning
|
26 |
+
from .. import constants
|
27 |
+
from . import etree as etree_builders
|
28 |
+
from .. import _ihatexml
|
29 |
+
|
30 |
+
import lxml.etree as etree
|
31 |
+
from pip._vendor.six import PY3, binary_type
|
32 |
+
|
33 |
+
|
34 |
+
fullTree = True
|
35 |
+
tag_regexp = re.compile("{([^}]*)}(.*)")
|
36 |
+
|
37 |
+
comment_type = etree.Comment("asd").tag
|
38 |
+
|
39 |
+
|
40 |
+
class DocumentType(object):
    """Plain record describing a doctype: name, public id and system id."""

    def __init__(self, name, publicId, systemId):
        # Store the three doctype components verbatim.
        self.name, self.publicId, self.systemId = name, publicId, systemId
45 |
+
|
46 |
+
|
47 |
+
class Document(object):
    """Document wrapper around an lxml ElementTree.

    The tree itself (_elementTree) is created later by the builder's
    insertRoot; until then it is None.
    """

    def __init__(self):
        self._elementTree = None
        self._childNodes = []

    def appendChild(self, element):
        # lxml has no true document node, so root-level siblings
        # (comments etc.) are attached after the last existing sibling
        # of the root element. The loop walks to the last sibling;
        # if there are none, `last` stays as the root itself.
        last = self._elementTree.getroot()
        for last in self._elementTree.getroot().itersiblings():
            pass

        last.addnext(element._element)

    def _getChildNodes(self):
        return self._childNodes

    # Read-only: children are managed via appendChild / the builder.
    childNodes = property(_getChildNodes)
|
63 |
+
|
64 |
+
|
65 |
+
def testSerializer(element):
    """Render an lxml tree/fragment in the indented "|  <tag>" debug format.

    Accepts a full lxml ElementTree, a list-like fragment, or a bare
    element; names coerced during building are mapped back through the
    InfosetFilter for display.
    """
    rv = []
    infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)

    def serializeElement(element, indent=0):
        if not hasattr(element, "tag"):
            if hasattr(element, "getroot"):
                # Full tree case
                rv.append("#document")
                if element.docinfo.internalDTD:
                    if not (element.docinfo.public_id or
                            element.docinfo.system_url):
                        dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                    else:
                        dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
                            element.docinfo.root_name,
                            element.docinfo.public_id,
                            element.docinfo.system_url)
                    rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
                # Walk back to the first root-level sibling (comments may
                # precede the root element), then forward through all of
                # them in document order.
                next_element = element.getroot()
                while next_element.getprevious() is not None:
                    next_element = next_element.getprevious()
                while next_element is not None:
                    serializeElement(next_element, indent + 2)
                    next_element = next_element.getnext()
            elif isinstance(element, str) or isinstance(element, bytes):
                # Text in a fragment
                assert isinstance(element, str) or sys.version_info[0] == 2
                rv.append("|%s\"%s\"" % (' ' * indent, element))
            else:
                # Fragment case
                rv.append("#document-fragment")
                for next_element in element:
                    serializeElement(next_element, indent + 2)
        elif element.tag == comment_type:
            rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
            # Comments carry their own tail here; ordinary elements
            # handle tails in the branch below.
            if hasattr(element, "tail") and element.tail:
                rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
        else:
            assert isinstance(element, etree._Element)
            # Split "{ns}name" tags and display the short prefix.
            nsmatch = etree_builders.tag_regexp.match(element.tag)
            if nsmatch is not None:
                ns = nsmatch.group(1)
                tag = nsmatch.group(2)
                prefix = constants.prefixes[ns]
                rv.append("|%s<%s %s>" % (' ' * indent, prefix,
                                          infosetFilter.fromXmlName(tag)))
            else:
                rv.append("|%s<%s>" % (' ' * indent,
                                       infosetFilter.fromXmlName(element.tag)))

            if hasattr(element, "attrib"):
                attributes = []
                for name, value in element.attrib.items():
                    nsmatch = tag_regexp.match(name)
                    if nsmatch is not None:
                        ns, name = nsmatch.groups()
                        name = infosetFilter.fromXmlName(name)
                        prefix = constants.prefixes[ns]
                        attr_string = "%s %s" % (prefix, name)
                    else:
                        attr_string = infosetFilter.fromXmlName(name)
                    attributes.append((attr_string, value))

                # Sorted for deterministic output.
                for name, value in sorted(attributes):
                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))

            if element.text:
                rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
            indent += 2
            for child in element:
                serializeElement(child, indent)
            # Tail text belongs to the parent's indentation level.
            if hasattr(element, "tail") and element.tail:
                rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
    serializeElement(element, 0)

    return "\n".join(rv)
|
142 |
+
|
143 |
+
|
144 |
+
def tostring(element):
    """Serialize an element and its child nodes to a string"""
    rv = []

    def serializeElement(element):
        if not hasattr(element, "tag"):
            # A full ElementTree: emit the doctype (if any) from
            # docinfo, then recurse into the root element.
            if element.docinfo.internalDTD:
                if element.docinfo.doctype:
                    dtd_str = element.docinfo.doctype
                else:
                    dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                rv.append(dtd_str)
            serializeElement(element.getroot())

        elif element.tag == comment_type:
            rv.append("<!--%s-->" % (element.text,))

        else:
            # This is assumed to be an ordinary element
            if not element.attrib:
                rv.append("<%s>" % (element.tag,))
            else:
                attr = " ".join(["%s=\"%s\"" % (name, value)
                                 for name, value in element.attrib.items()])
                rv.append("<%s %s>" % (element.tag, attr))
            if element.text:
                rv.append(element.text)

            for child in element:
                serializeElement(child)

            rv.append("</%s>" % (element.tag,))

        # Tail text follows the serialized node itself.
        if hasattr(element, "tail") and element.tail:
            rv.append(element.tail)

    serializeElement(element)

    return "".join(rv)
|
183 |
+
|
184 |
+
|
185 |
+
class TreeBuilder(base.TreeBuilder):
    """lxml-backed tree builder.

    elementClass/commentClass are created per-instance in __init__ as
    closures over an InfosetFilter, so that every name and text chunk
    is coerced to something lxml/libxml2 can represent; data that
    cannot be represented triggers a DataLossWarning.
    """

    documentClass = Document
    doctypeClass = DocumentType
    elementClass = None   # set per-instance in __init__
    commentClass = None   # set per-instance in __init__
    fragmentClass = Document
    implementation = etree

    def __init__(self, namespaceHTMLElements, fullTree=False):
        # Reuse the generic etree builder's wrappers, specialised for
        # lxml, and layer name-coercion on top.
        builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
        infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
        self.namespaceHTMLElements = namespaceHTMLElements

        class Attributes(MutableMapping):
            # Dict-like view over the wrapped element's attrib that
            # coerces keys through the InfosetFilter on every access.
            def __init__(self, element):
                self._element = element

            def _coerceKey(self, key):
                # Tuple keys are (prefix, localName, namespaceURI);
                # stored in etree's "{uri}local" form.
                if isinstance(key, tuple):
                    name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
                else:
                    name = infosetFilter.coerceAttribute(key)
                return name

            def __getitem__(self, key):
                value = self._element._element.attrib[self._coerceKey(key)]
                # On Python 2 lxml may hand back bytes; normalise to text.
                if not PY3 and isinstance(value, binary_type):
                    value = value.decode("ascii")
                return value

            def __setitem__(self, key, value):
                self._element._element.attrib[self._coerceKey(key)] = value

            def __delitem__(self, key):
                del self._element._element.attrib[self._coerceKey(key)]

            def __iter__(self):
                return iter(self._element._element.attrib)

            def __len__(self):
                return len(self._element._element.attrib)

            def clear(self):
                return self._element._element.attrib.clear()

        class Element(builder.Element):
            def __init__(self, name, namespace):
                # Coerce the tag name before the generic wrapper sees it.
                name = infosetFilter.coerceElement(name)
                builder.Element.__init__(self, name, namespace=namespace)
                self._attributes = Attributes(self)

            def _setName(self, name):
                self._name = infosetFilter.coerceElement(name)
                self._element.tag = self._getETreeTag(
                    self._name, self._namespace)

            def _getName(self):
                # Report the original (un-coerced) name to callers.
                return infosetFilter.fromXmlName(self._name)

            name = property(_getName, _setName)

            def _getAttributes(self):
                return self._attributes

            def _setAttributes(self, value):
                attributes = self.attributes
                attributes.clear()
                attributes.update(value)

            attributes = property(_getAttributes, _setAttributes)

            def insertText(self, data, insertBefore=None):
                data = infosetFilter.coerceCharacters(data)
                builder.Element.insertText(self, data, insertBefore)

            def cloneNode(self):
                element = type(self)(self.name, self.namespace)
                if self._element.attrib:
                    element._element.attrib.update(self._element.attrib)
                return element

        class Comment(builder.Comment):
            def __init__(self, data):
                data = infosetFilter.coerceComment(data)
                builder.Comment.__init__(self, data)

            def _setData(self, data):
                data = infosetFilter.coerceComment(data)
                self._element.text = data

            def _getData(self):
                return self._element.text

            data = property(_getData, _setData)

        self.elementClass = Element
        self.commentClass = Comment
        # self.fragmentClass = builder.DocumentFragment
        base.TreeBuilder.__init__(self, namespaceHTMLElements)

    def reset(self):
        base.TreeBuilder.reset(self)
        # Comments seen before the root element exist are buffered and
        # attached later by insertRoot.
        self.insertComment = self.insertCommentInitial
        self.initial_comments = []
        self.doctype = None

    def testSerializer(self, element):
        return testSerializer(element)

    def getDocument(self):
        # fullTree is the module-level flag: full ElementTree vs root.
        if fullTree:
            return self.document._elementTree
        else:
            return self.document._elementTree.getroot()

    def getFragment(self):
        # A fragment is returned as a plain list: leading text, child
        # elements, trailing text.
        fragment = []
        element = self.openElements[0]._element
        if element.text:
            fragment.append(element.text)
        fragment.extend(list(element))
        if element.tail:
            fragment.append(element.tail)
        return fragment

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        if not name:
            # libxml2 cannot store a nameless doctype at all.
            warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
            self.doctype = None
        else:
            coercedName = self.infosetFilter.coerceElement(name)
            if coercedName != name:
                warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)

            doctype = self.doctypeClass(coercedName, publicId, systemId)
            self.doctype = doctype

    def insertCommentInitial(self, data, parent=None):
        # Before insertRoot runs there is no tree to attach to; buffer.
        assert parent is None or parent is self.document
        assert self.document._elementTree is None
        self.initial_comments.append(data)

    def insertCommentMain(self, data, parent=None):
        if (parent == self.document and
                self.document._elementTree.getroot()[-1].tag == comment_type):
            warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
        super(TreeBuilder, self).insertComment(data, parent)

    def insertRoot(self, token):
        # Because of the way libxml2 works, it doesn't seem to be possible to
        # alter information like the doctype after the tree has been parsed.
        # Therefore we need to use the built-in parser to create our initial
        # tree, after which we can add elements like normal
        docStr = ""
        if self.doctype:
            assert self.doctype.name
            docStr += "<!DOCTYPE %s" % self.doctype.name
            if (self.doctype.publicId is not None or
                    self.doctype.systemId is not None):
                docStr += (' PUBLIC "%s" ' %
                           (self.infosetFilter.coercePubid(self.doctype.publicId or "")))
                if self.doctype.systemId:
                    sysid = self.doctype.systemId
                    # A system id containing both quote characters cannot
                    # be quoted at all; degrade with a warning.
                    if sysid.find("'") >= 0 and sysid.find('"') >= 0:
                        warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
                        sysid = sysid.replace("'", 'U00027')
                    if sysid.find("'") >= 0:
                        docStr += '"%s"' % sysid
                    else:
                        docStr += "'%s'" % sysid
                else:
                    docStr += "''"
            docStr += ">"
            if self.doctype.name != token["name"]:
                warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
        # Placeholder root element; renamed to the real root tag below.
        docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
        root = etree.fromstring(docStr)

        # Append the initial comments:
        for comment_token in self.initial_comments:
            comment = self.commentClass(comment_token["data"])
            root.addprevious(comment._element)

        # Create the root document and add the ElementTree to it
        self.document = self.documentClass()
        self.document._elementTree = root.getroottree()

        # Give the root element the right name
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        if namespace is None:
            etree_tag = name
        else:
            etree_tag = "{%s}%s" % (namespace, name)
        root.tag = etree_tag

        # Add the root element to the internal child/open data structures
        root_element = self.elementClass(name, namespace)
        root_element._element = root
        self.document._childNodes.append(root_element)
        self.openElements.append(root_element)

        # Reset to the default insert comment function
        self.insertComment = self.insertCommentMain
|
llmeval-env/lib/python3.10/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
ADDED
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""A collection of modules for iterating through different kinds of
|
2 |
+
tree, generating tokens identical to those produced by the tokenizer
|
3 |
+
module.
|
4 |
+
|
5 |
+
To create a tree walker for a new type of tree, you need to
|
6 |
+
implement a tree walker object (called TreeWalker by convention) that
|
7 |
+
implements a 'serialize' method which takes a tree as sole argument and
|
8 |
+
returns an iterator which generates tokens.
|
9 |
+
"""
|
10 |
+
|
11 |
+
from __future__ import absolute_import, division, unicode_literals
|
12 |
+
|
13 |
+
from .. import constants
|
14 |
+
from .._utils import default_etree
|
15 |
+
|
16 |
+
__all__ = ["getTreeWalker", "pprint"]
|
17 |
+
|
18 |
+
treeWalkerCache = {}
|
19 |
+
|
20 |
+
|
21 |
+
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    :arg str treeType: the name of the tree type required (case-insensitive).
        Supported values are:

        * "dom": The xml.dom.minidom DOM implementation
        * "etree": A generic walker for tree implementations exposing an
          elementtree-like interface (known to work with ElementTree,
          cElementTree and lxml.etree).
        * "lxml": Optimized walker for lxml.etree
        * "genshi": a Genshi stream

    :arg implementation: A module implementing the tree type e.g.
        xml.etree.ElementTree or cElementTree (Currently applies to the "etree"
        tree type only).

    :arg kwargs: keyword arguments passed to the etree walker--for other
        walkers, this has no effect

    :returns: a TreeWalker class

    """

    treeType = treeType.lower()
    # Imports are deferred and the resolved class memoized, so only the
    # requested backend is ever loaded.
    if treeType not in treeWalkerCache:
        if treeType == "dom":
            from . import dom
            treeWalkerCache[treeType] = dom.TreeWalker
        elif treeType == "genshi":
            from . import genshi
            treeWalkerCache[treeType] = genshi.TreeWalker
        elif treeType == "lxml":
            from . import etree_lxml
            treeWalkerCache[treeType] = etree_lxml.TreeWalker
        elif treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # XXX: NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
    # Unknown tree types fall through to None rather than raising.
    return treeWalkerCache.get(treeType)
|
63 |
+
|
64 |
+
|
65 |
+
def concatenateCharacterTokens(tokens):
    """Yield *tokens* with adjacent character/space tokens merged.

    Consecutive "Characters" and "SpaceCharacters" tokens are folded
    into a single "Characters" token; all other tokens pass through
    unchanged, in order.
    """
    buffered = []
    for tok in tokens:
        if tok["type"] in ("Characters", "SpaceCharacters"):
            buffered.append(tok["data"])
            continue
        # A non-character token ends the current run: flush it first.
        if buffered:
            yield {"type": "Characters", "data": "".join(buffered)}
            del buffered[:]
        yield tok
    # Flush any trailing run of character data.
    if buffered:
        yield {"type": "Characters", "data": "".join(buffered)}
|
78 |
+
|
79 |
+
|
80 |
+
def pprint(walker):
    """Pretty printer for tree walkers

    Takes a TreeWalker instance and pretty prints the output of walking the tree.

    :arg walker: a TreeWalker instance

    """
    output = []
    indent = 0
    for token in concatenateCharacterTokens(walker):
        # NOTE(review): `type` shadows the builtin for the loop body.
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # tag name: non-HTML namespaces are shown as a prefix
            # ("svg circle"), falling back to the raw URI if unknown.
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing: no EndTag will follow, so dedent now
            if type == "EmptyTag":
                indent -= 2

        elif type == "EndTag":
            indent -= 2

        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))

        elif type == "Doctype":
            # Four shapes: name+publicId, name+systemId only, name
            # only, or completely empty.
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))

        elif type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))

        elif type == "SpaceCharacters":
            # concatenateCharacterTokens merges these into "Characters".
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"

        else:
            raise ValueError("Unknown token type, %s" % type)

    return "\n".join(output)
|