applied-ai-018 committed on
Commit
8d9bf4c
·
verified ·
1 Parent(s): e77b29a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/_multiprocess/__init__.py +8 -0
  2. llmeval-env/lib/python3.10/site-packages/_multiprocess/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so +0 -0
  4. llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.py +341 -0
  5. llmeval-env/lib/python3.10/site-packages/lxml/apihelpers.pxi +1793 -0
  6. llmeval-env/lib/python3.10/site-packages/lxml/classlookup.pxi +580 -0
  7. llmeval-env/lib/python3.10/site-packages/lxml/cssselect.py +101 -0
  8. llmeval-env/lib/python3.10/site-packages/lxml/etree.h +248 -0
  9. llmeval-env/lib/python3.10/site-packages/lxml/etree.pyx +0 -0
  10. llmeval-env/lib/python3.10/site-packages/lxml/extensions.pxi +833 -0
  11. llmeval-env/lib/python3.10/site-packages/lxml/lxml.etree.h +248 -0
  12. llmeval-env/lib/python3.10/site-packages/lxml/nsclasses.pxi +281 -0
  13. llmeval-env/lib/python3.10/site-packages/lxml/objectpath.pxi +332 -0
  14. llmeval-env/lib/python3.10/site-packages/lxml/parsertarget.pxi +180 -0
  15. llmeval-env/lib/python3.10/site-packages/lxml/proxy.pxi +619 -0
  16. llmeval-env/lib/python3.10/site-packages/lxml/pyclasslookup.py +3 -0
  17. llmeval-env/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so +0 -0
  18. llmeval-env/lib/python3.10/site-packages/lxml/saxparser.pxi +875 -0
  19. llmeval-env/lib/python3.10/site-packages/lxml/serializer.pxi +1871 -0
  20. llmeval-env/lib/python3.10/site-packages/lxml/usedoctest.py +13 -0
  21. llmeval-env/lib/python3.10/site-packages/lxml/xmlid.pxi +179 -0
  22. llmeval-env/lib/python3.10/site-packages/lxml/xmlschema.pxi +215 -0
  23. llmeval-env/lib/python3.10/site-packages/lxml/xpath.pxi +487 -0
  24. llmeval-env/lib/python3.10/site-packages/lxml/xslt.pxi +950 -0
  25. llmeval-env/lib/python3.10/site-packages/lxml/xsltext.pxi +242 -0
  26. llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/INSTALLER +1 -0
  27. llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/METADATA +282 -0
  28. llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/RECORD +0 -0
  29. llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/WHEEL +6 -0
  30. llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/top_level.txt +1 -0
  31. llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.py +100 -0
  32. llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.pyi +1200 -0
  33. llmeval-env/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.py +14 -0
  35. llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi +271 -0
  36. llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__init__.py +6 -0
  38. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/base_tokenizer.py +418 -0
  46. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/bert_wordpiece.py +151 -0
  47. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py +122 -0
  48. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py +150 -0
  49. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_bpe.py +103 -0
  50. llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py +196 -0
llmeval-env/lib/python3.10/site-packages/_multiprocess/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ #
3
+ # Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
4
+ # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation.
5
+ # License: 3-clause BSD. The full license text is available at:
6
+ # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
7
+
8
+ from _multiprocessing import *
llmeval-env/lib/python3.10/site-packages/_multiprocess/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (219 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (209 kB). View file
 
llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # cython: language_level=2
2
+
3
+ #
4
+ # ElementTree
5
+ # $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $
6
+ #
7
+ # limited xpath support for element trees
8
+ #
9
+ # history:
10
+ # 2003-05-23 fl created
11
+ # 2003-05-28 fl added support for // etc
12
+ # 2003-08-27 fl fixed parsing of periods in element names
13
+ # 2007-09-10 fl new selection engine
14
+ # 2007-09-12 fl fixed parent selector
15
+ # 2007-09-13 fl added iterfind; changed findall to return a list
16
+ # 2007-11-30 fl added namespaces support
17
+ # 2009-10-30 fl added child element value filter
18
+ #
19
+ # Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved.
20
+ #
21
22
+ # http://www.pythonware.com
23
+ #
24
+ # --------------------------------------------------------------------
25
+ # The ElementTree toolkit is
26
+ #
27
+ # Copyright (c) 1999-2009 by Fredrik Lundh
28
+ #
29
+ # By obtaining, using, and/or copying this software and/or its
30
+ # associated documentation, you agree that you have read, understood,
31
+ # and will comply with the following terms and conditions:
32
+ #
33
+ # Permission to use, copy, modify, and distribute this software and
34
+ # its associated documentation for any purpose and without fee is
35
+ # hereby granted, provided that the above copyright notice appears in
36
+ # all copies, and that both that copyright notice and this permission
37
+ # notice appear in supporting documentation, and that the name of
38
+ # Secret Labs AB or the author not be used in advertising or publicity
39
+ # pertaining to distribution of the software without specific, written
40
+ # prior permission.
41
+ #
42
+ # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
43
+ # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
44
+ # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
45
+ # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
46
+ # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
47
+ # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
48
+ # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
49
+ # OF THIS SOFTWARE.
50
+ # --------------------------------------------------------------------
51
+
52
+ ##
53
+ # Implementation module for XPath support. There's usually no reason
54
+ # to import this module directly; the <b>ElementTree</b> does this for
55
+ # you, if needed.
56
+ ##
57
+
58
+
59
+ import re
60
+
61
xpath_tokenizer_re = re.compile(
    "("
    "'[^']*'|\"[^\"]*\"|"
    "::|"
    "//?|"
    r"\.\.|"
    r"\(\)|"
    r"[/.*:\[\]\(\)@=])|"
    r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
    r"\s+"
    )

def xpath_tokenizer(pattern, namespaces=None, with_prefixes=True):
    """Generate (operator, tag) token pairs for an ElementPath expression.

    Prefixed names are expanded to '{uri}tag' form using 'namespaces'.
    A default namespace (key None or '') is applied to unprefixed tags,
    but never to attribute names (tokens following '@').
    """
    # ElementTree uses '' as the default-namespace key, lxml used None.
    default_namespace = (namespaces.get(None) or namespaces.get('')) if namespaces else None
    parsing_attribute = False
    for token in xpath_tokenizer_re.findall(pattern):
        ttype, tag = token
        if not tag or tag[0] == "{":
            # operator token, or a name already in '{uri}tag' form
            yield token
            parsing_attribute = ttype == '@'
            continue
        if ":" in tag and with_prefixes:
            prefix, rest = tag.split(":", 1)
            try:
                if not namespaces:
                    raise KeyError
                yield ttype, "{%s}%s" % (namespaces[prefix], rest)
            except KeyError:
                raise SyntaxError("prefix %r not found in prefix map" % prefix)
        elif default_namespace and not parsing_attribute:
            yield ttype, "{%s}%s" % (default_namespace, tag)
        else:
            yield token
        parsing_attribute = False
96
+
97
+
98
def prepare_child(next, token):
    """Select direct children matching the given tag (plain 'tag' path step)."""
    target = token[1]
    def select(nodes):
        for node in nodes:
            for child in node.iterchildren(target):
                yield child
    return select
104
+
105
def prepare_star(next, token):
    """Select all element children ('*' path step)."""
    def select(nodes):
        for node in nodes:
            for child in node.iterchildren('*'):
                yield child
    return select
110
+
111
def prepare_self(next, token):
    """Select the context node itself ('.' path step)."""
    def select(nodes):
        # '.' is the identity step: pass the node set through unchanged
        return nodes
    return select
115
+
116
def prepare_descendant(next, token):
    """Select all descendants matching the next tag token ('//' path step)."""
    ttype, tvalue = next()
    if ttype == "*":
        tag = "*"
    elif ttype:
        # anything other than a plain tag or '*' after '//' is malformed
        raise SyntaxError("invalid descendant")
    else:
        tag = tvalue
    def select(nodes):
        for node in nodes:
            for descendant in node.iterdescendants(tag):
                yield descendant
    return select
128
+
129
def prepare_parent(next, token):
    """Select the parent of each node ('..' path step); roots are dropped."""
    def select(nodes):
        for node in nodes:
            ancestor = node.getparent()
            if ancestor is not None:
                yield ancestor
    return select
136
+
137
def prepare_predicate(next, token):
    """Compile a '[...]' predicate into a selector function.

    Consumes tokens up to the closing ']' and builds a type 'signature'
    string from the operator parts ('-' stands for a name/number token),
    which is then matched against the small set of supported predicate
    forms below.
    """
    # FIXME: replace with real parser!!! refs:
    # http://effbot.org/zone/simple-iterator-parser.htm
    # http://javascript.crockford.com/tdop/tdop.html
    signature = ''
    predicate = []
    while 1:
        token = next()
        if token[0] == "]":
            break
        if token == ('', ''):
            # ignore whitespace
            continue
        if token[0] and token[0][:1] in "'\"":
            # quoted string literal: strip the quotes, mark it as "'"
            token = "'", token[0][1:-1]
        signature += token[0] or "-"
        predicate.append(token[1])

    # use signature to determine predicate type
    if signature == "@-":
        # [@attribute] predicate
        key = predicate[1]
        def select(result):
            for elem in result:
                if elem.get(key) is not None:
                    yield elem
        return select
    if signature == "@-='":
        # [@attribute='value']
        key = predicate[1]
        value = predicate[-1]
        def select(result):
            for elem in result:
                if elem.get(key) == value:
                    yield elem
        return select
    if signature == "-" and not re.match(r"-?\d+$", predicate[0]):
        # [tag] - name token that is not a pure integer (that would be [index])
        tag = predicate[0]
        def select(result):
            for elem in result:
                # one matching child is enough; break avoids duplicates
                for _ in elem.iterchildren(tag):
                    yield elem
                    break
        return select
    if signature == ".='" or (signature == "-='" and not re.match(r"-?\d+$", predicate[0])):
        # [.='value'] or [tag='value']
        tag = predicate[0]
        value = predicate[-1]
        if tag:
            def select(result):
                for elem in result:
                    for e in elem.iterchildren(tag):
                        if "".join(e.itertext()) == value:
                            yield elem
                            break
        else:
            def select(result):
                for elem in result:
                    if "".join(elem.itertext()) == value:
                        yield elem
        return select
    if signature == "-" or signature == "-()" or signature == "-()-":
        # [index] or [last()] or [last()-index]
        if signature == "-":
            # [index]
            index = int(predicate[0]) - 1
            if index < 0:
                if index == -1:
                    raise SyntaxError(
                        "indices in path predicates are 1-based, not 0-based")
                else:
                    raise SyntaxError("path index >= 1 expected")
        else:
            if predicate[0] != "last":
                raise SyntaxError("unsupported function")
            if signature == "-()-":
                try:
                    index = int(predicate[2]) - 1
                except ValueError:
                    raise SyntaxError("unsupported expression")
            else:
                index = -1
        def select(result):
            for elem in result:
                parent = elem.getparent()
                if parent is None:
                    continue
                try:
                    # FIXME: what if the selector is "*" ?
                    # positional check among same-tag siblings only
                    elems = list(parent.iterchildren(elem.tag))
                    if elems[index] is elem:
                        yield elem
                except IndexError:
                    pass
        return select
    raise SyntaxError("invalid predicate")
234
+
235
# Dispatch table: maps the operator token that starts a path step to the
# function that compiles that step into a selector.
ops = {
    "": prepare_child,
    "*": prepare_star,
    ".": prepare_self,
    "..": prepare_parent,
    "//": prepare_descendant,
    "[": prepare_predicate,
    }
243
+
244
+
245
# --------------------------------------------------------------------

# Compiled-selector cache, keyed by (path, *namespace items); cleared
# wholesale once it exceeds 100 entries (see _build_path_iterator).
_cache = {}
248
+
249
+
250
def _build_path_iterator(path, namespaces, with_prefixes=True):
    """Compile a path expression into a list of selector functions.

    Results are cached per (path, namespace mapping); the cache is
    cleared wholesale once it grows beyond 100 entries.

    Raises SyntaxError for empty, absolute or otherwise invalid paths.
    """
    if path[-1:] == "/":
        path += "*"  # implicit all (FIXME: keep this?)

    cache_key = (path,)
    if namespaces:
        # lxml originally used None for the default namespace but ElementTree uses the
        # more convenient (all-strings-dict) empty string, so we support both here,
        # preferring the more convenient '', as long as they aren't ambiguous.
        if None in namespaces:
            if '' in namespaces and namespaces[None] != namespaces['']:
                raise ValueError("Ambiguous default namespace provided: %r versus %r" % (
                    namespaces[None], namespaces['']))
            cache_key += (namespaces[None],) + tuple(sorted(
                item for item in namespaces.items() if item[0] is not None))
        else:
            cache_key += tuple(sorted(namespaces.items()))

    try:
        return _cache[cache_key]
    except KeyError:
        pass
    if len(_cache) > 100:
        _cache.clear()

    if path[:1] == "/":
        raise SyntaxError("cannot use absolute path on element")
    # NOTE: the old Python-2 fallback (try 'stream.next' first, catch
    # AttributeError) was dead code on Python 3; the bound __next__ is
    # all that is needed here.
    _next = iter(xpath_tokenizer(path, namespaces, with_prefixes=with_prefixes)).__next__
    try:
        token = _next()
    except StopIteration:
        raise SyntaxError("empty path expression")
    selector = []
    while 1:
        try:
            selector.append(ops[token[0]](_next, token))
        except StopIteration:
            raise SyntaxError("invalid path")
        try:
            token = _next()
            if token[0] == "/":
                token = _next()
        except StopIteration:
            break
    _cache[cache_key] = selector
    return selector
302
+
303
+
304
+ ##
305
+ # Iterate over the matching nodes
306
+
307
def iterfind(elem, path, namespaces=None, with_prefixes=True):
    """Iterate over all elements matching the path expression."""
    selectors = _build_path_iterator(path, namespaces, with_prefixes=with_prefixes)
    nodes = iter((elem,))
    # feed the output of each compiled step into the next one
    for select in selectors:
        nodes = select(nodes)
    return nodes
313
+
314
+
315
+ ##
316
+ # Find first matching object.
317
+
318
def find(elem, path, namespaces=None, with_prefixes=True):
    """Return the first matching element, or None if there is no match."""
    matches = iterfind(elem, path, namespaces, with_prefixes=with_prefixes)
    return next(matches, None)
324
+
325
+
326
+ ##
327
+ # Find all matching objects.
328
+
329
def findall(elem, path, namespaces=None, with_prefixes=True):
    """Return a list of all elements matching the path expression."""
    # Forward with_prefixes like find()/findtext() do; it was previously
    # accepted but silently ignored, so prefix handling could differ
    # between findall() and the other search functions.
    return list(iterfind(elem, path, namespaces, with_prefixes=with_prefixes))
331
+
332
+
333
+ ##
334
+ # Find text for first matching object.
335
+
336
def findtext(elem, path, default=None, namespaces=None, with_prefixes=True):
    """Return the text of the first matching element, or 'default' if none match.

    An element with no text content yields '' rather than None.
    """
    match = find(elem, path, namespaces, with_prefixes=with_prefixes)
    return default if match is None else (match.text or '')
llmeval-env/lib/python3.10/site-packages/lxml/apihelpers.pxi ADDED
@@ -0,0 +1,1793 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Private/public helper functions for API functions
2
+
3
+ from lxml.includes cimport uri
4
+
5
+
6
cdef void displayNode(xmlNode* c_node, indent) noexcept:
    # to help with debugging: recursively print the C pointer of each node
    # in the subtree, one line per node, indented by tree depth
    cdef xmlNode* c_child
    try:
        print(indent * ' ', <long>c_node)
        c_child = c_node.children
        while c_child is not NULL:
            displayNode(c_child, indent + 1)
            c_child = c_child.next
    finally:
        return  # swallow any exceptions
17
+
18
cdef inline bint _isHtmlDocument(_Element element) except -1:
    # True if the element's document carries the XML_DOC_HTML property flag,
    # i.e. it was parsed/created as an HTML document
    cdef xmlNode* c_node = element._c_node
    return (
        c_node is not NULL and c_node.doc is not NULL and
        c_node.doc.properties & tree.XML_DOC_HTML != 0
    )
24
+
25
cdef inline int _assertValidNode(_Element element) except -1:
    # guard against using a proxy whose underlying C node has gone away
    assert element._c_node is not NULL, "invalid Element proxy at %s" % id(element)
27
+
28
cdef inline int _assertValidDoc(_Document doc) except -1:
    # guard against using a proxy whose underlying xmlDoc has gone away
    assert doc._c_doc is not NULL, "invalid Document proxy at %s" % id(doc)
30
+
31
cdef _Document _documentOrRaise(object input):
    """Call this to get the document of a _Document, _ElementTree or _Element
    object, or to raise an exception if it can't be determined.

    Should be used in all API functions for consistency.

    Raises TypeError for unsupported input types and ValueError when the
    input has no document (e.g. an empty ElementTree).
    """
    cdef _Document doc
    if isinstance(input, _ElementTree):
        # an ElementTree without a root element has no document
        if (<_ElementTree>input)._context_node is not None:
            doc = (<_ElementTree>input)._context_node._doc
        else:
            doc = None
    elif isinstance(input, _Element):
        doc = (<_Element>input)._doc
    elif isinstance(input, _Document):
        doc = <_Document>input
    else:
        raise TypeError, f"Invalid input object: {python._fqtypename(input).decode('utf8')}"
    if doc is None:
        raise ValueError, f"Input object has no document: {python._fqtypename(input).decode('utf8')}"
    _assertValidDoc(doc)
    return doc
53
+
54
cdef _Element _rootNodeOrRaise(object input):
    """Call this to get the root node of a _Document, _ElementTree or
    _Element object, or to raise an exception if it can't be determined.

    Should be used in all API functions for consistency.

    Raises TypeError for unsupported input types and ValueError when no
    proper XML element node can be derived from the input.
    """
    cdef _Element node
    if isinstance(input, _ElementTree):
        node = (<_ElementTree>input)._context_node
    elif isinstance(input, _Element):
        node = <_Element>input
    elif isinstance(input, _Document):
        node = (<_Document>input).getroot()
    else:
        raise TypeError, f"Invalid input object: {python._fqtypename(input).decode('utf8')}"
    # reject non-element nodes (comments, PIs, ...) as well as missing ones
    if (node is None or not node._c_node or
            node._c_node.type != tree.XML_ELEMENT_NODE):
        raise ValueError, f"Input object is not an XML element: {python._fqtypename(input).decode('utf8')}"
    _assertValidNode(node)
    return node
74
+
75
cdef bint _isAncestorOrSame(xmlNode* c_ancestor, xmlNode* c_node) noexcept:
    # walk the parent chain of c_node; True if c_ancestor is c_node itself
    # or one of its ancestors
    while c_node:
        if c_node is c_ancestor:
            return True
        c_node = c_node.parent
    return False
81
+
82
cdef _Element _makeElement(tag, xmlDoc* c_doc, _Document doc,
                           _BaseParser parser, text, tail, attrib, nsmap,
                           dict extra_attrs):
    """Create a new element and initialize text content, namespaces and
    attributes.

    This helper function will reuse as much of the existing document as
    possible:

    If 'parser' is None, the parser will be inherited from 'doc' or the
    default parser will be used.

    If 'doc' is None, 'c_doc' is used to create a new _Document and the new
    element is made its root node.

    If 'c_doc' is also NULL, a new xmlDoc will be created.
    """
    cdef xmlNode* c_node
    if doc is not None:
        c_doc = doc._c_doc
    ns_utf, name_utf = _getNsTag(tag)
    # HTML and XML have different tag-name rules and document factories
    if parser is not None and parser._for_html:
        _htmlTagValidOrRaise(name_utf)
        if c_doc is NULL:
            c_doc = _newHTMLDoc()
    else:
        _tagValidOrRaise(name_utf)
        if c_doc is NULL:
            c_doc = _newXMLDoc()
    c_node = _createElement(c_doc, name_utf)
    if c_node is NULL:
        if doc is None and c_doc is not NULL:
            tree.xmlFreeDoc(c_doc)
        raise MemoryError()
    try:
        if doc is None:
            tree.xmlDocSetRootElement(c_doc, c_node)
            doc = _documentFactory(c_doc, parser)
        if text is not None:
            _setNodeText(c_node, text)
        if tail is not None:
            _setTailText(c_node, tail)
        # add namespaces to node if necessary
        _setNodeNamespaces(c_node, doc, ns_utf, nsmap)
        _initNodeAttributes(c_node, doc, attrib, extra_attrs)
        return _elementFactory(doc, c_node)
    except:
        # free allocated c_node/c_doc unless Python does it for us
        if c_node.doc is not c_doc:
            # node not yet in document => will not be freed by document
            if tail is not None:
                _removeText(c_node.next)  # tail
            tree.xmlFreeNode(c_node)
        if doc is None:
            # c_doc will not be freed by doc
            tree.xmlFreeDoc(c_doc)
        raise
139
+
140
cdef int _initNewElement(_Element element, bint is_html, name_utf, ns_utf,
                         _BaseParser parser, attrib, nsmap, dict extra_attrs) except -1:
    """Initialise a new Element object.

    This is used when users instantiate a Python Element subclass
    directly, without it being mapped to an existing XML node.

    Creates a fresh (HTML or XML) document with the new node as root,
    then registers the Python proxy for it.
    """
    cdef xmlDoc* c_doc
    cdef xmlNode* c_node
    cdef _Document doc
    if is_html:
        _htmlTagValidOrRaise(name_utf)
        c_doc = _newHTMLDoc()
    else:
        _tagValidOrRaise(name_utf)
        c_doc = _newXMLDoc()
    c_node = _createElement(c_doc, name_utf)
    if c_node is NULL:
        # avoid leaking the freshly created document on allocation failure
        if c_doc is not NULL:
            tree.xmlFreeDoc(c_doc)
        raise MemoryError()
    tree.xmlDocSetRootElement(c_doc, c_node)
    doc = _documentFactory(c_doc, parser)
    # add namespaces to node if necessary
    _setNodeNamespaces(c_node, doc, ns_utf, nsmap)
    _initNodeAttributes(c_node, doc, attrib, extra_attrs)
    _registerProxy(element, doc, c_node)
    element._init()
    return 0
169
+
170
cdef _Element _makeSubElement(_Element parent, tag, text, tail,
                              attrib, nsmap, dict extra_attrs):
    """Create a new child element and initialize text content, namespaces and
    attributes.

    Returns None if 'parent' is None or has no document.  The new node is
    appended as the last child of 'parent' and removed again on error.
    """
    cdef xmlNode* c_node
    cdef xmlDoc* c_doc
    if parent is None or parent._doc is None:
        return None
    _assertValidNode(parent)
    ns_utf, name_utf = _getNsTag(tag)
    c_doc = parent._doc._c_doc

    # tag-name validity depends on whether the document was parsed as HTML
    if parent._doc._parser is not None and parent._doc._parser._for_html:
        _htmlTagValidOrRaise(name_utf)
    else:
        _tagValidOrRaise(name_utf)

    c_node = _createElement(c_doc, name_utf)
    if c_node is NULL:
        raise MemoryError()
    tree.xmlAddChild(parent._c_node, c_node)

    try:
        if text is not None:
            _setNodeText(c_node, text)
        if tail is not None:
            _setTailText(c_node, tail)

        # add namespaces to node if necessary
        _setNodeNamespaces(c_node, parent._doc, ns_utf, nsmap)
        _initNodeAttributes(c_node, parent._doc, attrib, extra_attrs)
        return _elementFactory(parent._doc, c_node)
    except:
        # make sure we clean up in case of an error
        _removeNode(parent._doc, c_node)
        raise
207
+
208
+
209
cdef int _setNodeNamespaces(xmlNode* c_node, _Document doc,
                            object node_ns_utf, object nsmap) except -1:
    """Lookup current namespace prefixes, then set namespace structure for
    node (if 'node_ns_utf' was provided) and register new ns-prefix mappings.

    'node_ns_utf' should only be passed for a newly created node.
    """
    cdef xmlNs* c_ns
    cdef list nsdefs

    if nsmap:
        for prefix, href in _iter_nsmap(nsmap):
            href_utf = _utf8(href)
            _uriValidOrRaise(href_utf)
            c_href = _xcstr(href_utf)
            if prefix is not None:
                prefix_utf = _utf8(prefix)
                _prefixValidOrRaise(prefix_utf)
                c_prefix = _xcstr(prefix_utf)
            else:
                # None prefix => default namespace declaration
                c_prefix = <const_xmlChar*>NULL
            # add namespace with prefix if it is not already known
            c_ns = tree.xmlSearchNs(doc._c_doc, c_node, c_prefix)
            if c_ns is NULL or \
                    c_ns.href is NULL or \
                    tree.xmlStrcmp(c_ns.href, c_href) != 0:
                c_ns = tree.xmlNewNs(c_node, c_href, c_prefix)
            if href_utf == node_ns_utf:
                # the node's own namespace appears in nsmap => use that
                # declaration and skip the fallback lookup below
                tree.xmlSetNs(c_node, c_ns)
                node_ns_utf = None

    if node_ns_utf is not None:
        _uriValidOrRaise(node_ns_utf)
        doc._setNodeNs(c_node, _xcstr(node_ns_utf))
    return 0
244
+
245
+
246
cdef dict _build_nsmap(xmlNode* c_node):
    """
    Namespace prefix->URI mapping known in the context of this Element.
    This includes all namespace declarations of the parents.
    """
    cdef xmlNs* c_ns
    nsmap = {}
    while c_node is not NULL and c_node.type == tree.XML_ELEMENT_NODE:
        c_ns = c_node.nsDef
        while c_ns is not NULL:
            if c_ns.prefix or c_ns.href:
                prefix = funicodeOrNone(c_ns.prefix)
                # nearest declaration wins: don't overwrite a prefix that
                # was already seen closer to the start node
                if prefix not in nsmap:
                    nsmap[prefix] = funicodeOrNone(c_ns.href)
            c_ns = c_ns.next
        c_node = c_node.parent
    return nsmap
263
+
264
+
265
cdef _iter_nsmap(nsmap):
    """
    Create a reproducibly ordered iterable from an nsmap mapping.
    Tries to preserve an existing order and sorts if it assumes no order.

    The difference to _iter_attrib() is that None doesn't sort with strings
    in Py3.x.
    """
    if isinstance(nsmap, dict):
        # dicts are insertion-ordered in Py3.6+ => keep the user provided order.
        return nsmap.items()
    if len(nsmap) <= 1:
        return nsmap.items()
    # nsmap will usually be a plain unordered dict => avoid type checking overhead
    # NOTE(review): the isinstance(nsmap, dict) check above already returns
    # for OrderedDict (a dict subclass), so this branch looks unreachable —
    # confirm before relying on it
    if type(nsmap) is not dict and isinstance(nsmap, OrderedDict):
        return nsmap.items()  # keep existing order
    if None not in nsmap:
        return sorted(nsmap.items())

    # Move the default namespace to the end. This makes sure libxml2
    # prefers a prefix if the ns is defined redundantly on the same
    # element. That way, users can work around a problem themselves
    # where default namespace attributes on non-default namespaced
    # elements serialise without prefix (i.e. into the non-default
    # namespace).
    default_ns = nsmap[None]
    nsdefs = [(k, v) for k, v in nsmap.items() if k is not None]
    nsdefs.sort()
    nsdefs.append((None, default_ns))
    return nsdefs
295
+
296
+
297
cdef _iter_attrib(attrib):
    """
    Create a reproducibly ordered iterable from an attrib mapping.
    Tries to preserve an existing order and sorts if it assumes no order.
    """
    # dicts are insertion-ordered in Py3.6+ => keep the user provided order.
    if isinstance(attrib, (dict, _Attrib, OrderedDict)):
        return attrib.items()
    # assume it's an unordered mapping of some kind
    return sorted(attrib.items())
307
+
308
+
309
cdef _initNodeAttributes(xmlNode* c_node, _Document doc, attrib, dict extra):
    """Initialise the attributes of an element node.

    'extra' keyword attributes take precedence: they are added first and
    the 'seen' set makes _addAttributeToNode() skip duplicates from 'attrib'.
    """
    cdef bint is_html
    cdef xmlNs* c_ns
    if attrib is not None and not hasattr(attrib, 'items'):
        raise TypeError, f"Invalid attribute dictionary: {python._fqtypename(attrib).decode('utf8')}"
    if not attrib and not extra:
        return  # nothing to do
    is_html = doc._parser._for_html
    seen = set()
    if extra:
        for name, value in extra.items():
            _addAttributeToNode(c_node, doc, is_html, name, value, seen)
    if attrib:
        for name, value in _iter_attrib(attrib):
            _addAttributeToNode(c_node, doc, is_html, name, value, seen)
326
+
327
+
328
cdef int _addAttributeToNode(xmlNode* c_node, _Document doc, bint is_html,
                             name, value, set seen_tags) except -1:
    # Set a single attribute on c_node unless its (ns, name) tag was already
    # handled (recorded in seen_tags).  For non-HTML documents, the attribute
    # name and namespace URI are validated first.
    ns_utf, name_utf = tag = _getNsTag(name)
    if tag in seen_tags:
        return 0
    seen_tags.add(tag)
    if not is_html:
        _attributeValidOrRaise(name_utf)
    value_utf = _utf8(value)
    if ns_utf is None:
        tree.xmlNewProp(c_node, _xcstr(name_utf), _xcstr(value_utf))
    else:
        _uriValidOrRaise(ns_utf)
        # reuse an existing declaration of the namespace or create one
        c_ns = doc._findOrBuildNodeNs(c_node, _xcstr(ns_utf), NULL, 1)
        tree.xmlNewNsProp(c_node, c_ns,
                          _xcstr(name_utf), _xcstr(value_utf))
    return 0
345
+
346
+
347
# A namespace declaration together with the element node that declares it.
ctypedef struct _ns_node_ref:
    xmlNs* ns
    xmlNode* node
350
+
351
+
352
cdef int _collectNsDefs(xmlNode* c_element, _ns_node_ref **_c_ns_list,
                        size_t *_c_ns_list_len, size_t *_c_ns_list_size) except -1:
    # Append all namespace declarations of c_element to the dynamically
    # growing _ns_node_ref array *_c_ns_list.  Length and capacity are
    # passed (and updated) by pointer.  On allocation failure, the old
    # array is freed and MemoryError is raised.
    c_ns_list = _c_ns_list[0]
    cdef size_t c_ns_list_len = _c_ns_list_len[0]
    cdef size_t c_ns_list_size = _c_ns_list_size[0]

    c_nsdef = c_element.nsDef
    while c_nsdef is not NULL:
        if c_ns_list_len >= c_ns_list_size:
            # grow the array: start with 20 entries, then double
            if c_ns_list is NULL:
                c_ns_list_size = 20
            else:
                c_ns_list_size *= 2
            c_nsref_ptr = <_ns_node_ref*> python.lxml_realloc(
                c_ns_list, c_ns_list_size, sizeof(_ns_node_ref))
            if c_nsref_ptr is NULL:
                if c_ns_list is not NULL:
                    python.lxml_free(c_ns_list)
                    _c_ns_list[0] = NULL
                raise MemoryError()
            c_ns_list = c_nsref_ptr

        c_ns_list[c_ns_list_len] = _ns_node_ref(c_nsdef, c_element)
        c_ns_list_len += 1
        c_nsdef = c_nsdef.next

    # write back the updated array pointer, length and capacity
    _c_ns_list_size[0] = c_ns_list_size
    _c_ns_list_len[0] = c_ns_list_len
    _c_ns_list[0] = c_ns_list
381
+
382
+
383
cdef int _removeUnusedNamespaceDeclarations(xmlNode* c_element, set prefixes_to_keep) except -1:
    """Remove any namespace declarations from a subtree that are not used by
    any of its elements (or attributes).

    If a 'prefixes_to_keep' is provided, it must be a set of prefixes.
    Any corresponding namespace mappings will not be removed as part of the cleanup.
    """
    cdef xmlNode* c_node
    cdef _ns_node_ref* c_ns_list = NULL
    cdef size_t c_ns_list_size = 0
    cdef size_t c_ns_list_len = 0
    cdef size_t i

    if c_element.parent and c_element.parent.type == tree.XML_DOCUMENT_NODE:
        # include declarations on the document node
        _collectNsDefs(c_element.parent, &c_ns_list, &c_ns_list_len, &c_ns_list_size)

    tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_element, c_element, 1)
    # collect all new namespace declarations into the ns list
    if c_element.nsDef:
        _collectNsDefs(c_element, &c_ns_list, &c_ns_list_len, &c_ns_list_size)

    # remove all namespace declarations from the list that are referenced
    if c_ns_list_len and c_element.type == tree.XML_ELEMENT_NODE:
        c_node = c_element
        while c_node and c_ns_list_len:
            if c_node.ns:
                for i in range(c_ns_list_len):
                    if c_node.ns is c_ns_list[i].ns:
                        # swap-remove: overwrite with the last entry
                        c_ns_list_len -= 1
                        c_ns_list[i] = c_ns_list[c_ns_list_len]
                        #c_ns_list[c_ns_list_len] = _ns_node_ref(NULL, NULL)
                        break
            if c_node is c_element:
                # continue with attributes
                c_node = <xmlNode*>c_element.properties
            else:
                c_node = c_node.next
    tree.END_FOR_EACH_ELEMENT_FROM(c_element)

    if c_ns_list is NULL:
        return 0

    # free all namespace declarations that remained in the list,
    # except for those we should keep explicitly
    cdef xmlNs* c_nsdef
    for i in range(c_ns_list_len):
        if prefixes_to_keep is not None:
            if c_ns_list[i].ns.prefix and c_ns_list[i].ns.prefix in prefixes_to_keep:
                continue
        # unlink the declaration from the declaring node's nsDef chain
        c_node = c_ns_list[i].node
        c_nsdef = c_node.nsDef
        if c_nsdef is c_ns_list[i].ns:
            c_node.nsDef = c_node.nsDef.next
        else:
            while c_nsdef.next is not c_ns_list[i].ns:
                c_nsdef = c_nsdef.next
            c_nsdef.next = c_nsdef.next.next
        tree.xmlFreeNs(c_ns_list[i].ns)

    if c_ns_list is not NULL:
        python.lxml_free(c_ns_list)
    return 0
446
+
447
cdef xmlNs* _searchNsByHref(xmlNode* c_node, const_xmlChar* c_href, bint is_attribute) noexcept:
    """Search a namespace declaration that covers a node (element or
    attribute).

    For attributes, try to find a prefixed namespace declaration
    instead of the default namespaces. This helps in supporting
    round-trips for attributes on elements with a different namespace.

    Returns NULL if no covering declaration is found.
    """
    cdef xmlNs* c_ns
    cdef xmlNs* c_default_ns = NULL
    cdef xmlNode* c_element
    if c_href is NULL or c_node is NULL or c_node.type == tree.XML_ENTITY_REF_NODE:
        return NULL
    if tree.xmlStrcmp(c_href, tree.XML_XML_NAMESPACE) == 0:
        # no special cases here, let libxml2 handle this
        return tree.xmlSearchNsByHref(c_node.doc, c_node, c_href)
    if c_node.type == tree.XML_ATTRIBUTE_NODE:
        is_attribute = 1
    # start the search at the nearest enclosing element
    while c_node is not NULL and c_node.type != tree.XML_ELEMENT_NODE:
        c_node = c_node.parent
    c_element = c_node
    while c_node is not NULL:
        if c_node.type == tree.XML_ELEMENT_NODE:
            c_ns = c_node.nsDef
            while c_ns is not NULL:
                if c_ns.href is not NULL and tree.xmlStrcmp(c_href, c_ns.href) == 0:
                    if c_ns.prefix is NULL and is_attribute:
                        # for attributes, continue searching a named
                        # prefix, but keep the first default namespace
                        # declaration that we found
                        if c_default_ns is NULL:
                            c_default_ns = c_ns
                    elif tree.xmlSearchNs(
                            c_element.doc, c_element, c_ns.prefix) is c_ns:
                        # start node is in namespace scope => found!
                        return c_ns
                c_ns = c_ns.next
            if c_node is not c_element and c_node.ns is not NULL:
                # optimise: the node may have the namespace itself
                c_ns = c_node.ns
                if c_ns.href is not NULL and tree.xmlStrcmp(c_href, c_ns.href) == 0:
                    if c_ns.prefix is NULL and is_attribute:
                        # for attributes, continue searching a named
                        # prefix, but keep the first default namespace
                        # declaration that we found
                        if c_default_ns is NULL:
                            c_default_ns = c_ns
                    elif tree.xmlSearchNs(
                            c_element.doc, c_element, c_ns.prefix) is c_ns:
                        # start node is in namespace scope => found!
                        return c_ns
        c_node = c_node.parent
    # nothing found => use a matching default namespace or fail
    if c_default_ns is not NULL:
        if tree.xmlSearchNs(c_element.doc, c_element, NULL) is c_default_ns:
            return c_default_ns
    return NULL
504
+
505
cdef int _replaceNodeByChildren(_Document doc, xmlNode* c_node) except -1:
    # Replace c_node in its parent's child list by c_node's own children.
    # NOTE: this does not deallocate the node, just unlink it!
    cdef xmlNode* c_parent
    cdef xmlNode* c_child
    if c_node.children is NULL:
        tree.xmlUnlinkNode(c_node)
        return 0

    c_parent = c_node.parent
    # fix parent links of children
    c_child = c_node.children
    while c_child is not NULL:
        c_child.parent = c_parent
        c_child = c_child.next

    # fix namespace references of children if their parent's namespace
    # declarations get lost
    if c_node.nsDef is not NULL:
        c_child = c_node.children
        while c_child is not NULL:
            moveNodeToDocument(doc, doc._c_doc, c_child)
            c_child = c_child.next

    # fix sibling links to/from child slice
    if c_node.prev is NULL:
        c_parent.children = c_node.children
    else:
        c_node.prev.next = c_node.children
        c_node.children.prev = c_node.prev
    if c_node.next is NULL:
        c_parent.last = c_node.last
    else:
        c_node.next.prev = c_node.last
        c_node.last.next = c_node.next

    # unlink c_node
    c_node.children = c_node.last = NULL
    c_node.parent = c_node.next = c_node.prev = NULL
    return 0
544
+
545
cdef unicode _attributeValue(xmlNode* c_element, xmlAttr* c_attrib_node):
    # Return the value of the given attribute node as a unicode string.
    c_href = _getNs(<xmlNode*>c_attrib_node)
    value = tree.xmlGetNsProp(c_element, c_attrib_node.name, c_href)
    try:
        result = funicode(value)
    finally:
        # xmlGetNsProp() returns a freshly allocated copy => free it
        tree.xmlFree(value)
    return result
553
+
554
cdef unicode _attributeValueFromNsName(xmlNode* c_element,
                                       const_xmlChar* c_href, const_xmlChar* c_name):
    # Return the value of the attribute (c_href, c_name) on c_element as a
    # unicode string, or None if the attribute is not set.
    c_result = tree.xmlGetNsProp(c_element, c_name, c_href)
    if c_result is NULL:
        return None
    try:
        result = funicode(c_result)
    finally:
        # xmlGetNsProp() returns a freshly allocated copy => free it
        tree.xmlFree(c_result)
    return result
564
+
565
cdef object _getNodeAttributeValue(xmlNode* c_node, key, default):
    # Look up the attribute 'key' ("{ns}name" or plain name) on c_node and
    # return its value as unicode, or 'default' if it is not set.
    ns, tag = _getNsTag(key)
    c_href = <const_xmlChar*>NULL if ns is None else _xcstr(ns)
    c_result = tree.xmlGetNsProp(c_node, _xcstr(tag), c_href)
    if c_result is NULL:
        # XXX free namespace that is not in use..?
        return default
    try:
        result = funicode(c_result)
    finally:
        tree.xmlFree(c_result)
    return result
577
+
578
cdef inline object _getAttributeValue(_Element element, key, default):
    # Convenience wrapper: look up 'key' on the element's underlying C node.
    return _getNodeAttributeValue(element._c_node, key, default)
580
+
581
cdef int _setAttributeValue(_Element element, key, value) except -1:
    # Set the attribute 'key' ("{ns}name" or plain name) on the element.
    # For HTML documents, value None creates a boolean (value-less) attribute.
    # QName values are resolved to "prefix:localname" text.
    cdef const_xmlChar* c_value
    cdef xmlNs* c_ns
    ns, tag = _getNsTag(key)
    is_html = element._doc._parser._for_html
    if not is_html:
        _attributeValidOrRaise(tag)
    c_tag = _xcstr(tag)
    if value is None and is_html:
        c_value = NULL
    else:
        if isinstance(value, QName):
            value = _resolveQNameText(element, value)
        else:
            value = _utf8(value)
        c_value = _xcstr(value)
    if ns is None:
        c_ns = NULL
    else:
        # reuse an existing declaration of the namespace or create one
        c_ns = element._doc._findOrBuildNodeNs(element._c_node, _xcstr(ns), NULL, 1)
    tree.xmlSetNsProp(element._c_node, c_ns, c_tag, c_value)
    return 0
603
+
604
cdef int _delAttribute(_Element element, key) except -1:
    # Remove the attribute 'key' ("{ns}name" or plain name) from the element;
    # raises KeyError if the attribute does not exist.
    ns, tag = _getNsTag(key)
    c_href = <const_xmlChar*>NULL if ns is None else _xcstr(ns)
    if _delAttributeFromNsName(element._c_node, c_href, _xcstr(tag)):
        raise KeyError, key
    return 0
610
+
611
cdef int _delAttributeFromNsName(xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name) noexcept:
    # Remove the attribute (c_href, c_name) from c_node.
    # Returns 0 on success, -1 if no such attribute exists.
    c_attr = tree.xmlHasNsProp(c_node, c_name, c_href)
    if c_attr is NULL:
        # XXX free namespace that is not in use..?
        return -1
    tree.xmlRemoveProp(c_attr)
    return 0
618
+
619
cdef list _collectAttributes(xmlNode* c_node, int collecttype):
    """Collect all attributes of a node in a list. Depending on collecttype,
    it collects either the name (1), the value (2) or the name-value tuples.
    """
    cdef Py_ssize_t count
    # first pass: count the attributes so the result list can be pre-sized
    c_attr = c_node.properties
    count = 0
    while c_attr is not NULL:
        if c_attr.type == tree.XML_ATTRIBUTE_NODE:
            count += 1
        c_attr = c_attr.next

    if not count:
        return []

    # second pass: fill the pre-allocated list by index
    attributes = [None] * count
    c_attr = c_node.properties
    count = 0
    while c_attr is not NULL:
        if c_attr.type == tree.XML_ATTRIBUTE_NODE:
            if collecttype == 1:
                item = _namespacedName(<xmlNode*>c_attr)
            elif collecttype == 2:
                item = _attributeValue(c_node, c_attr)
            else:
                item = (_namespacedName(<xmlNode*>c_attr),
                        _attributeValue(c_node, c_attr))
            attributes[count] = item
            count += 1
        c_attr = c_attr.next
    return attributes
650
+
651
# Matches the encoding pseudo-attribute inside an XML declaration, e.g.
# <?xml version="1.0" encoding="UTF-8"?>.  Group 1 keeps the declaration
# prefix, group 2 the (optional) closing '?>'.
cdef object __RE_XML_ENCODING = re.compile(
    r'^(<\?xml[^>]+)\s+encoding\s*=\s*["\'][^"\']*["\'](\s*\?>|)', re.U)

# bound methods, hoisted for fast repeated use
cdef object __REPLACE_XML_ENCODING = __RE_XML_ENCODING.sub
cdef object __HAS_XML_ENCODING = __RE_XML_ENCODING.match
656
+
657
cdef object _stripEncodingDeclaration(object xml_string):
    # this is a hack to remove the XML encoding declaration from unicode
    # strings (keeps the rest of the XML declaration intact)
    return __REPLACE_XML_ENCODING(r'\g<1>\g<2>', xml_string)
660
+
661
cdef bint _hasEncodingDeclaration(object xml_string) except -1:
    # check if a (unicode) string has an XML encoding declaration
    return __HAS_XML_ENCODING(xml_string) is not None
664
+
665
cdef inline bint _hasText(xmlNode* c_node) noexcept:
    # True if the element starts with a (CDATA or plain) text child node.
    return c_node is not NULL and _textNodeOrSkip(c_node.children) is not NULL
667
+
668
cdef inline bint _hasTail(xmlNode* c_node) noexcept:
    # True if the node is directly followed by a (CDATA or plain) text node.
    return c_node is not NULL and _textNodeOrSkip(c_node.next) is not NULL
670
+
671
cdef inline bint _hasNonWhitespaceTail(xmlNode* c_node) except -1:
    # True if the node's tail text contains any non-whitespace character.
    return _hasNonWhitespaceText(c_node, tail=True)
673
+
674
cdef bint _hasNonWhitespaceText(xmlNode* c_node, bint tail=False) except -1:
    # True if the node's text (or tail, if requested) contains any
    # non-whitespace character across all adjacent text/CDATA nodes.
    c_text_node = c_node and _textNodeOrSkip(c_node.next if tail else c_node.children)
    if c_text_node is NULL:
        return False
    while c_text_node is not NULL:
        # cheap empty-content check before decoding to unicode
        if c_text_node.content[0] != c'\0' and not _collectText(c_text_node).isspace():
            return True
        c_text_node = _textNodeOrSkip(c_text_node.next)
    return False
683
+
684
cdef unicode _collectText(xmlNode* c_node):
    """Collect all text nodes and return them as a unicode string.

    Start collecting at c_node.

    If there was no text to collect, return None
    """
    cdef Py_ssize_t scount
    cdef xmlChar* c_text
    cdef xmlNode* c_node_cur
    # check for multiple text nodes
    scount = 0
    c_text = NULL
    c_node_cur = c_node = _textNodeOrSkip(c_node)
    while c_node_cur is not NULL:
        if c_node_cur.content[0] != c'\0':
            c_text = c_node_cur.content
            scount += 1
        c_node_cur = _textNodeOrSkip(c_node_cur.next)

    # handle two most common cases first
    if c_text is NULL:
        # scount > 0 means only empty text nodes were seen => empty string
        return '' if scount > 0 else None
    if scount == 1:
        # single non-empty text node => decode it directly
        return funicode(c_text)

    # the rest is not performance critical anymore
    # concatenate the raw bytes of all text nodes, then decode once
    result = b''
    while c_node is not NULL:
        result += <unsigned char*>c_node.content
        c_node = _textNodeOrSkip(c_node.next)
    return funicode(<const_xmlChar*><unsigned char*>result)
716
+
717
cdef void _removeText(xmlNode* c_node) noexcept:
    """Remove all text nodes.

    Start removing at c_node.
    """
    cdef xmlNode* c_next
    c_node = _textNodeOrSkip(c_node)
    while c_node is not NULL:
        # remember the next text node before freeing the current one
        c_next = _textNodeOrSkip(c_node.next)
        tree.xmlUnlinkNode(c_node)
        tree.xmlFreeNode(c_node)
        c_node = c_next
729
+
730
cdef xmlNode* _createTextNode(xmlDoc* doc, value) except NULL:
    # Create a new text node (or CDATA block for CDATA values) for 'value'
    # in 'doc'.  Raises MemoryError if libxml2 fails to allocate the node.
    cdef xmlNode* c_text_node
    if isinstance(value, CDATA):
        c_text_node = tree.xmlNewCDataBlock(
            doc, _xcstr((<CDATA>value)._utf8_data),
            python.PyBytes_GET_SIZE((<CDATA>value)._utf8_data))
    else:
        text = _utf8(value)
        c_text_node = tree.xmlNewDocText(doc, _xcstr(text))
    if not c_text_node:
        raise MemoryError()
    return c_text_node
742
+
743
cdef int _setNodeText(xmlNode* c_node, value) except -1:
    # Replace the .text content of an element: drop the leading text nodes
    # and insert a new one for 'value' (None just clears the text).
    # remove all text nodes at the start first
    _removeText(c_node.children)
    if value is None:
        return 0
    # now add new text node with value at start
    c_text_node = _createTextNode(c_node.doc, value)
    if c_node.children is NULL:
        tree.xmlAddChild(c_node, c_text_node)
    else:
        tree.xmlAddPrevSibling(c_node.children, c_text_node)
    return 0
755
+
756
cdef int _setTailText(xmlNode* c_node, value) except -1:
    # Replace the .tail content of a node: drop the trailing text nodes
    # and insert a new one for 'value' (None just clears the tail).
    # remove all text nodes at the start first
    _removeText(c_node.next)
    if value is None:
        return 0
    # now append new text node with value
    c_text_node = _createTextNode(c_node.doc, value)
    tree.xmlAddNextSibling(c_node, c_text_node)
    return 0
765
+
766
cdef bytes _resolveQNameText(_Element element, value):
    # Resolve a "{ns}name" value to a "prefix:name" byte string, creating
    # a namespace declaration on the element if needed.  Values without a
    # namespace are returned as the plain tag name.
    cdef xmlNs* c_ns
    ns, tag = _getNsTag(value)
    if ns is None:
        return tag
    else:
        c_ns = element._doc._findOrBuildNodeNs(
            element._c_node, _xcstr(ns), NULL, 0)
        return python.PyBytes_FromFormat('%s:%s', c_ns.prefix, _cstr(tag))
775
+
776
cdef inline bint _hasChild(xmlNode* c_node) noexcept:
    # True if the node has at least one element child.
    return c_node is not NULL and _findChildForwards(c_node, 0) is not NULL
778
+
779
cdef inline Py_ssize_t _countElements(xmlNode* c_node) noexcept:
    """Count c_node itself plus all of its following siblings that are elements."""
    cdef Py_ssize_t n_found = 0
    while c_node is not NULL:
        if _isElement(c_node):
            n_found += 1
        c_node = c_node.next
    return n_found
788
+
789
cdef int _findChildSlice(
    slice sliceobject, xmlNode* c_parent,
    xmlNode** c_start_node, Py_ssize_t* c_step, Py_ssize_t* c_length) except -1:
    """Resolve a children slice.

    Returns the start node, step size and the slice length in the
    pointer arguments.
    """
    cdef Py_ssize_t start = 0, stop = 0, childcount
    childcount = _countElements(c_parent.children)
    if childcount == 0:
        # no element children: empty slice, but still report the step size
        c_start_node[0] = NULL
        c_length[0] = 0
        if sliceobject.step is None:
            c_step[0] = 1
        else:
            python._PyEval_SliceIndex(sliceobject.step, c_step)
        return 0
    python.PySlice_GetIndicesEx(
        sliceobject, childcount, &start, &stop, c_step, c_length)
    # search from whichever end of the child list is closer to 'start'
    if start > childcount // 2:
        c_start_node[0] = _findChildBackwards(c_parent, childcount - start - 1)
    else:
        c_start_node[0] = _findChild(c_parent, start)
    return 0
814
+
815
cdef bint _isFullSlice(slice sliceobject) except -1:
    """Conservative guess if this slice is a full slice as in ``s[:]``.
    """
    cdef Py_ssize_t step = 0
    if sliceobject is None:
        return 0
    # any explicit start/stop bound disqualifies a full slice
    if sliceobject.start is not None or sliceobject.stop is not None:
        return 0
    if sliceobject.step is None:
        return 1
    python._PyEval_SliceIndex(sliceobject.step, &step)
    return step == 1
830
+
831
cdef _collectChildren(_Element element):
    # Return a list of proxy Elements for all element children of 'element',
    # skipping non-element nodes (text, comments, PIs, ...).
    cdef xmlNode* c_node
    cdef list result = []
    c_node = element._c_node.children
    if c_node is not NULL:
        if not _isElement(c_node):
            c_node = _nextElement(c_node)
        while c_node is not NULL:
            result.append(_elementFactory(element._doc, c_node))
            c_node = _nextElement(c_node)
    return result
842
+
843
cdef inline xmlNode* _findChild(xmlNode* c_node, Py_ssize_t index) noexcept:
    # Find the element child at 'index'; negative indices count from the end.
    if index < 0:
        return _findChildBackwards(c_node, -index - 1)
    else:
        return _findChildForwards(c_node, index)
848
+
849
cdef inline xmlNode* _findChildForwards(xmlNode* c_node, Py_ssize_t index) noexcept:
    """Return the element child of c_node at position ``index`` (counting
    element nodes only), or NULL if there are not enough element children.
    Searches from the start of the child list.
    """
    cdef xmlNode* c_sibling = c_node.children
    cdef Py_ssize_t seen = 0
    while c_sibling is not NULL:
        if _isElement(c_sibling):
            if seen == index:
                return c_sibling
            seen += 1
        c_sibling = c_sibling.next
    return NULL
863
+
864
cdef inline xmlNode* _findChildBackwards(xmlNode* c_node, Py_ssize_t index) noexcept:
    """Return the element child of c_node at position ``index`` counted from
    the end (counting element nodes only), or NULL if there are not enough
    element children.
    """
    cdef xmlNode* c_sibling = c_node.last
    cdef Py_ssize_t seen = 0
    while c_sibling is not NULL:
        if _isElement(c_sibling):
            if seen == index:
                return c_sibling
            seen += 1
        c_sibling = c_sibling.prev
    return NULL
879
+
880
cdef inline xmlNode* _textNodeOrSkip(xmlNode* c_node) noexcept nogil:
    """Return the node if it's a text node. Skip over ignorable nodes in a
    series of text nodes. Return NULL if a non-ignorable node is found.

    This is used to skip over XInclude nodes when collecting adjacent text
    nodes.
    """
    while c_node is not NULL:
        if c_node.type == tree.XML_TEXT_NODE or \
                c_node.type == tree.XML_CDATA_SECTION_NODE:
            return c_node
        elif c_node.type == tree.XML_XINCLUDE_START or \
                c_node.type == tree.XML_XINCLUDE_END:
            # ignorable XInclude marker => keep looking at the next sibling
            c_node = c_node.next
        else:
            return NULL
    return NULL
897
+
898
cdef inline xmlNode* _nextElement(xmlNode* c_node) noexcept:
    """Given a node, find the next following sibling that is an element.
    Returns NULL if there is none.
    """
    if c_node is NULL:
        return NULL
    c_node = c_node.next
    while c_node is not NULL and not _isElement(c_node):
        c_node = c_node.next
    return c_node
909
+
910
cdef inline xmlNode* _previousElement(xmlNode* c_node) noexcept:
    """Given a node, find the closest preceding sibling that is an element.
    Returns NULL if there is none.
    """
    if c_node is NULL:
        return NULL
    c_node = c_node.prev
    while c_node is not NULL and not _isElement(c_node):
        c_node = c_node.prev
    return c_node
921
+
922
cdef inline xmlNode* _parentElement(xmlNode* c_node) noexcept:
    """Given an element node, return its parent if that is also an element,
    otherwise NULL."""
    cdef xmlNode* c_parent
    if c_node is NULL or not _isElement(c_node):
        return NULL
    c_parent = c_node.parent
    if c_parent is NULL or not _isElement(c_parent):
        return NULL
    return c_parent
930
+
931
cdef inline bint _tagMatches(xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name) noexcept:
    """Tests if the node matches namespace URI and tag name.

    A node matches if it matches both c_href and c_name.

    A node matches c_href if any of the following is true:
    * c_href is NULL
    * its namespace is NULL and c_href is the empty string
    * its namespace string equals the c_href string

    A node matches c_name if any of the following is true:
    * c_name is NULL
    * its name string equals the c_name string
    """
    if c_node is NULL:
        return 0
    if c_node.type != tree.XML_ELEMENT_NODE:
        # not an element, only succeed if we match everything
        return c_name is NULL and c_href is NULL
    if c_name is NULL:
        if c_href is NULL:
            # always match
            return 1
        else:
            c_node_href = _getNs(c_node)
            if c_node_href is NULL:
                return c_href[0] == c'\0'
            else:
                return tree.xmlStrcmp(c_node_href, c_href) == 0
    elif c_href is NULL:
        if _getNs(c_node) is not NULL:
            return 0
        # pointer comparison first: names usually come from the doc dict
        return c_node.name == c_name or tree.xmlStrcmp(c_node.name, c_name) == 0
    elif c_node.name == c_name or tree.xmlStrcmp(c_node.name, c_name) == 0:
        c_node_href = _getNs(c_node)
        if c_node_href is NULL:
            return c_href[0] == c'\0'
        else:
            return tree.xmlStrcmp(c_node_href, c_href) == 0
    else:
        return 0
972
+
973
cdef inline bint _tagMatchesExactly(xmlNode* c_node, qname* c_qname) noexcept:
    """Tests if the node matches namespace URI and tag name.

    This differs from _tagMatches() in that it does not consider a
    NULL value in qname.href a wildcard, and that it expects the c_name
    to be taken from the doc dict, i.e. it only compares the names by
    address.

    A node matches if it matches both href and c_name of the qname.

    A node matches c_href if any of the following is true:
    * its namespace is NULL and c_href is the empty string
    * its namespace string equals the c_href string

    A node matches c_name if any of the following is true:
    * c_name is NULL
    * its name string points to the same address (!) as c_name
    """
    # delegate to the (ns, name) based comparison
    return _nsTagMatchesExactly(_getNs(c_node), c_node.name, c_qname)
992
+
993
cdef inline bint _nsTagMatchesExactly(const_xmlChar* c_node_href,
                                      const_xmlChar* c_node_name,
                                      qname* c_qname) noexcept:
    """Tests if name and namespace URI match those of c_qname.

    This differs from _tagMatches() in that it does not consider a
    NULL value in qname.href a wildcard, and that it expects the c_name
    to be taken from the doc dict, i.e. it only compares the names by
    address.

    A node matches if it matches both href and c_name of the qname.

    A node matches c_href if any of the following is true:
    * its namespace is NULL and c_href is the empty string
    * its namespace string equals the c_href string

    A node matches c_name if any of the following is true:
    * c_name is NULL
    * its name string points to the same address (!) as c_name
    """
    cdef char* c_href
    # name comparison by pointer identity only (doc dict interning)
    if c_qname.c_name is not NULL and c_qname.c_name is not c_node_name:
        return 0
    if c_qname.href is NULL:
        return 1
    c_href = python.__cstr(c_qname.href)
    if c_href[0] == b'\0':
        # empty href => only matches nodes without a namespace
        return c_node_href is NULL or c_node_href[0] == b'\0'
    elif c_node_href is NULL:
        return 0
    else:
        return tree.xmlStrcmp(<const_xmlChar*>c_href, c_node_href) == 0
1025
+
1026
cdef Py_ssize_t _mapTagsToQnameMatchArray(xmlDoc* c_doc, list ns_tags,
                                          qname* c_ns_tags, bint force_into_dict) except -1:
    """Map a sequence of (name, namespace) pairs to a qname array for efficient
    matching with _tagMatchesExactly() above.

    Note that each qname struct in the array owns its href byte string object
    if it is not NULL.

    Returns the number of entries written to c_ns_tags.
    """
    cdef Py_ssize_t count = 0, i
    cdef bytes ns, tag
    for ns, tag in ns_tags:
        if tag is None:
            c_tag = <const_xmlChar*>NULL
        elif force_into_dict:
            # intern the name in the doc dict so pointer comparison works
            c_tag = tree.xmlDictLookup(c_doc.dict, _xcstr(tag), len(tag))
            if c_tag is NULL:
                # clean up before raising the error
                for i in xrange(count):
                    cpython.ref.Py_XDECREF(c_ns_tags[i].href)
                raise MemoryError()
        else:
            c_tag = tree.xmlDictExists(c_doc.dict, _xcstr(tag), len(tag))
            if c_tag is NULL:
                # not in the dict => not in the document
                continue
        c_ns_tags[count].c_name = c_tag
        if ns is None:
            c_ns_tags[count].href = NULL
        else:
            cpython.ref.Py_INCREF(ns) # keep an owned reference!
            c_ns_tags[count].href = <python.PyObject*>ns
        count += 1
    return count
1059
+
1060
cdef int _removeNode(_Document doc, xmlNode* c_node) except -1:
    """Unlink and free a node and subnodes if possible. Otherwise, make sure
    it's self-contained.
    """
    cdef xmlNode* c_next
    c_next = c_node.next
    tree.xmlUnlinkNode(c_node)
    # keep the tail text with the removed node
    _moveTail(c_next, c_node)
    if not attemptDeallocation(c_node):
        # make namespaces absolute
        moveNodeToDocument(doc, c_node.doc, c_node)
    return 0
1072
+
1073
cdef int _removeSiblings(xmlNode* c_element, tree.xmlElementType node_type, bint with_tail) except -1:
    # Remove all siblings of c_element (in both directions) that have the
    # given node type, optionally dropping their tail text as well.
    cdef xmlNode* c_node
    cdef xmlNode* c_next
    # forward direction
    c_node = c_element.next
    while c_node is not NULL:
        c_next = _nextElement(c_node)
        if c_node.type == node_type:
            if with_tail:
                _removeText(c_node.next)
            tree.xmlUnlinkNode(c_node)
            attemptDeallocation(c_node)
        c_node = c_next
    # backward direction
    c_node = c_element.prev
    while c_node is not NULL:
        c_next = _previousElement(c_node)
        if c_node.type == node_type:
            if with_tail:
                _removeText(c_node.next)
            tree.xmlUnlinkNode(c_node)
            attemptDeallocation(c_node)
        c_node = c_next
    return 0
1095
+
1096
cdef void _moveTail(xmlNode* c_tail, xmlNode* c_target) noexcept:
    # Move the tail text nodes starting at c_tail behind c_target,
    # preserving their order.
    cdef xmlNode* c_next
    # tail support: look for any text nodes trailing this node and
    # move them too
    c_tail = _textNodeOrSkip(c_tail)
    while c_tail is not NULL:
        c_next = _textNodeOrSkip(c_tail.next)
        c_target = tree.xmlAddNextSibling(c_target, c_tail)
        c_tail = c_next
1105
+
1106
cdef int _copyTail(xmlNode* c_tail, xmlNode* c_target) except -1:
    # Copy the tail text nodes starting at c_tail behind c_target,
    # preserving their order.  Copies across documents when needed.
    cdef xmlNode* c_new_tail
    # tail copying support: look for any text nodes trailing this node and
    # copy it to the target node
    c_tail = _textNodeOrSkip(c_tail)
    while c_tail is not NULL:
        if c_target.doc is not c_tail.doc:
            # cross-document copy must go through xmlDocCopyNode()
            c_new_tail = tree.xmlDocCopyNode(c_tail, c_target.doc, 0)
        else:
            c_new_tail = tree.xmlCopyNode(c_tail, 0)
        if c_new_tail is NULL:
            raise MemoryError()
        c_target = tree.xmlAddNextSibling(c_target, c_new_tail)
        c_tail = _textNodeOrSkip(c_tail.next)
    return 0
1121
+
1122
cdef int _copyNonElementSiblings(xmlNode* c_node, xmlNode* c_target) except -1:
    # Copy the PI/comment/DTD siblings surrounding c_node (e.g. around a
    # document's root element) to the corresponding positions around c_target.
    cdef xmlNode* c_copy
    cdef xmlNode* c_sibling = c_node
    # rewind to the first preceding PI/comment/DTD sibling
    while c_sibling.prev != NULL and \
            (c_sibling.prev.type == tree.XML_PI_NODE or
             c_sibling.prev.type == tree.XML_COMMENT_NODE or
             c_sibling.prev.type == tree.XML_DTD_NODE):
        c_sibling = c_sibling.prev
    # copy everything up to (but excluding) c_node before c_target
    while c_sibling != c_node:
        if c_sibling.type == tree.XML_DTD_NODE:
            c_copy = <xmlNode*>_copyDtd(<tree.xmlDtd*>c_sibling)
            if c_sibling == <xmlNode*>c_node.doc.intSubset:
                c_target.doc.intSubset = <tree.xmlDtd*>c_copy
            else: # c_sibling == c_node.doc.extSubset
                c_target.doc.extSubset = <tree.xmlDtd*>c_copy
        else:
            c_copy = tree.xmlDocCopyNode(c_sibling, c_target.doc, 1)
            if c_copy is NULL:
                raise MemoryError()
            tree.xmlAddPrevSibling(c_target, c_copy)
        c_sibling = c_sibling.next
    # copy the following PI/comment siblings after c_target
    while c_sibling.next != NULL and \
            (c_sibling.next.type == tree.XML_PI_NODE or
             c_sibling.next.type == tree.XML_COMMENT_NODE):
        c_sibling = c_sibling.next
        c_copy = tree.xmlDocCopyNode(c_sibling, c_target.doc, 1)
        if c_copy is NULL:
            raise MemoryError()
        tree.xmlAddNextSibling(c_target, c_copy)
1151
+
1152
cdef int _deleteSlice(_Document doc, xmlNode* c_node,
                      Py_ssize_t count, Py_ssize_t step) except -1:
    """Delete slice, ``count`` items starting with ``c_node`` with a step
    width of ``step``.

    A negative step deletes in backwards direction.
    """
    cdef xmlNode* c_next
    cdef Py_ssize_t c, i
    cdef _node_to_node_function next_element
    if c_node is NULL:
        return 0
    if step > 0:
        next_element = _nextElement
    else:
        step = -step
        next_element = _previousElement
    # now start deleting nodes
    c = 0
    c_next = c_node
    while c_node is not NULL and c < count:
        # advance 'step' elements before removing, as removal invalidates
        # the current node's sibling links
        for i in range(step):
            c_next = next_element(c_next)
            if c_next is NULL:
                break
        _removeNode(doc, c_node)
        c += 1
        c_node = c_next
    return 0
1179
+
1180
cdef int _replaceSlice(_Element parent, xmlNode* c_node,
                       Py_ssize_t slicelength, Py_ssize_t step,
                       bint left_to_right, elements) except -1:
    """Replace the slice of ``count`` elements starting at ``c_node`` with
    positive step width ``step`` by the Elements in ``elements``. The
    direction is given by the boolean argument ``left_to_right``.

    ``c_node`` may be NULL to indicate the end of the children list.

    Raises ValueError for extended slices whose length does not match
    the replacement sequence.  Returns 0 on success.
    """
    cdef xmlNode* c_orig_neighbour
    cdef xmlNode* c_next
    cdef xmlDoc* c_source_doc
    cdef _Element element
    cdef Py_ssize_t seqlength, i, c
    cdef _node_to_node_function next_element
    assert step > 0
    if left_to_right:
        next_element = _nextElement
    else:
        next_element = _previousElement

    # materialise one-shot iterables so len() and re-iteration work below
    if not isinstance(elements, (list, tuple)):
        elements = list(elements)

    if step != 1 or not left_to_right:
        # *replacing* children stepwise with list => check size!
        seqlength = len(elements)
        if seqlength != slicelength:
            raise ValueError, f"attempt to assign sequence of size {seqlength} " \
                f"to extended slice of size {slicelength}"

    if c_node is NULL:
        # no children yet => add all elements straight away
        if left_to_right:
            for element in elements:
                assert element is not None, "Node must not be None"
                _appendChild(parent, element)
        else:
            for element in elements:
                assert element is not None, "Node must not be None"
                _prependChild(parent, element)
        return 0

    # remove the elements first as some might be re-added
    if left_to_right:
        # L->R, remember left neighbour
        c_orig_neighbour = _previousElement(c_node)
    else:
        # R->L, remember right neighbour
        c_orig_neighbour = _nextElement(c_node)

    # We remove the original slice elements one by one. Since we hold
    # a Python reference to all elements that we will insert, it is
    # safe to let _removeNode() try (and fail) to free them even if
    # the element itself or one of its descendents will be reinserted.
    c = 0
    c_next = c_node
    while c_node is not NULL and c < slicelength:
        # advance c_next by 'step' elements before removing c_node
        for i in range(step):
            c_next = next_element(c_next)
            if c_next is NULL:
                break
        _removeNode(parent._doc, c_node)
        c += 1
        c_node = c_next

    # make sure each element is inserted only once
    elements = iter(elements)

    # find the first node right of the new insertion point
    if left_to_right:
        if c_orig_neighbour is not NULL:
            c_node = next_element(c_orig_neighbour)
        else:
            # before the first element
            c_node = _findChildForwards(parent._c_node, 0)
    elif c_orig_neighbour is NULL:
        # at the end, but reversed stepping
        # append one element and go to the next insertion point
        for element in elements:
            assert element is not None, "Node must not be None"
            _appendChild(parent, element)
            c_node = element._c_node
            if slicelength > 0:
                slicelength -= 1
                for i in range(1, step):
                    c_node = next_element(c_node)
                    if c_node is NULL:
                        break
            break
    else:
        c_node = c_orig_neighbour

    if left_to_right:
        # adjust step size after removing slice as we are not stepping
        # over the newly inserted elements
        step -= 1

    # now insert elements where we removed them
    if c_node is not NULL:
        for element in elements:
            assert element is not None, "Node must not be None"
            _assertValidNode(element)
            # move element and tail over
            c_source_doc = element._c_node.doc
            c_next = element._c_node.next
            tree.xmlAddPrevSibling(c_node, element._c_node)
            _moveTail(c_next, element._c_node)

            # integrate element into new document
            moveNodeToDocument(parent._doc, c_source_doc, element._c_node)

            # stop at the end of the slice
            if slicelength > 0:
                slicelength -= 1
                for i in range(step):
                    c_node = next_element(c_node)
                    if c_node is NULL:
                        break
                if c_node is NULL:
                    break
            else:
                # everything inserted
                return 0

    # append the remaining elements at the respective end
    if left_to_right:
        for element in elements:
            assert element is not None, "Node must not be None"
            _assertValidNode(element)
            _appendChild(parent, element)
    else:
        for element in elements:
            assert element is not None, "Node must not be None"
            _assertValidNode(element)
            _prependChild(parent, element)

    return 0
1318
+
1319
+
1320
cdef int _linkChild(xmlNode* c_parent, xmlNode* c_node) except -1:
    """Adaptation of 'xmlAddChild()' that deep-fix the document links iteratively.

    Links an (already unlinked) element node as last child of ``c_parent``
    and fixes up the ``doc`` pointers of the moved subtree.
    """
    assert _isElement(c_node)
    c_node.parent = c_parent
    if c_parent.children is NULL:
        # first child: becomes both head and tail of the child list
        c_parent.children = c_parent.last = c_node
    else:
        # append behind the current last child
        c_node.prev = c_parent.last
        c_parent.last.next = c_node
        c_parent.last = c_node

    _setTreeDoc(c_node, c_parent.doc)
    return 0
1334
+
1335
+
1336
cdef int _appendChild(_Element parent, _Element child) except -1:
    """Append a new child to a parent element.

    Moves ``child`` (and its trailing tail text) out of its current tree
    into ``parent``.  Raises ValueError if that would create a cycle.
    """
    c_node = child._c_node
    c_source_doc = c_node.doc
    # prevent cycles
    if _isAncestorOrSame(c_node, parent._c_node):
        raise ValueError("cannot append parent to itself")
    # store possible text node
    c_next = c_node.next
    # move node itself
    tree.xmlUnlinkNode(c_node)
    # do not call xmlAddChild() here since it would deep-traverse the tree
    _linkChild(parent._c_node, c_node)
    _moveTail(c_next, c_node)
    # uh oh, elements may be pointing to different doc when
    # parent element has moved; change them too..
    moveNodeToDocument(parent._doc, c_source_doc, c_node)
    return 0
1355
+
1356
cdef int _prependChild(_Element parent, _Element child) except -1:
    """Prepend a new child to a parent element.

    Moves ``child`` (and its trailing tail text) before the first existing
    element child of ``parent``.  Raises ValueError on cycles.
    """
    c_node = child._c_node
    c_source_doc = c_node.doc
    # prevent cycles
    if _isAncestorOrSame(c_node, parent._c_node):
        raise ValueError("cannot append parent to itself")
    # store possible text node
    c_next = c_node.next
    # move node itself
    c_child = _findChildForwards(parent._c_node, 0)
    if c_child is NULL:
        # no element children yet => plain append
        tree.xmlUnlinkNode(c_node)
        # do not call xmlAddChild() here since it would deep-traverse the tree
        _linkChild(parent._c_node, c_node)
    else:
        tree.xmlAddPrevSibling(c_child, c_node)
    _moveTail(c_next, c_node)
    # uh oh, elements may be pointing to different doc when
    # parent element has moved; change them too..
    moveNodeToDocument(parent._doc, c_source_doc, c_node)
    return 0
1379
+
1380
cdef int _appendSibling(_Element element, _Element sibling) except -1:
    """Add a new sibling behind an element.

    Thin wrapper around _addSibling() with ``as_next=True``.
    """
    return _addSibling(element, sibling, as_next=True)
1384
+
1385
cdef int _prependSibling(_Element element, _Element sibling) except -1:
    """Add a new sibling before an element.

    Thin wrapper around _addSibling() with ``as_next=False``.
    """
    return _addSibling(element, sibling, as_next=False)
1389
+
1390
cdef int _addSibling(_Element element, _Element sibling, bint as_next) except -1:
    """Insert ``sibling`` next to ``element``, after it if ``as_next`` is true,
    before it otherwise.  Moves the sibling's tail text along and re-homes
    the subtree into the target document.
    """
    c_node = sibling._c_node
    c_source_doc = c_node.doc
    # prevent cycles
    if _isAncestorOrSame(c_node, element._c_node):
        if element._c_node is c_node:
            return 0  # nothing to do
        raise ValueError("cannot add ancestor as sibling, please break cycle first")
    # store possible text node
    c_next = c_node.next
    # move node itself
    if as_next:
        # must insert after any tail text
        c_next_node = _nextElement(element._c_node)
        if c_next_node is NULL:
            # no following element: append behind the trailing text nodes
            c_next_node = element._c_node
            while c_next_node.next:
                c_next_node = c_next_node.next
            tree.xmlAddNextSibling(c_next_node, c_node)
        else:
            tree.xmlAddPrevSibling(c_next_node, c_node)
    else:
        tree.xmlAddPrevSibling(element._c_node, c_node)
    _moveTail(c_next, c_node)
    # uh oh, elements may be pointing to different doc when
    # parent element has moved; change them too..
    moveNodeToDocument(element._doc, c_source_doc, c_node)
    return 0
1418
+
1419
cdef inline bint isutf8(const_xmlChar* s) noexcept:
    """Return True if the NUL-terminated string contains any non-ASCII byte."""
    # Any byte with the high bit set means multi-byte (non-ASCII) UTF-8 data.
    while s[0] != c'\0':
        if s[0] & 0x80:
            return True
        s += 1
    return False
1427
+
1428
cdef bint isutf8l(const_xmlChar* s, size_t length) noexcept:
    """
    Search for non-ASCII characters in the string, knowing its length in advance.

    Fast path: once the pointer is word-aligned, whole machine words are
    tested against a 0x8080... mask instead of single bytes.
    """
    cdef unsigned int i
    cdef unsigned long non_ascii_mask
    cdef const unsigned long *lptr = <const unsigned long*> s

    cdef const unsigned long *end = lptr + length // sizeof(unsigned long)
    if length >= sizeof(non_ascii_mask):
        # Build constant 0x80808080... mask (and let the C compiler fold it).
        non_ascii_mask = 0
        for i in range(sizeof(non_ascii_mask) // 2):
            non_ascii_mask = (non_ascii_mask << 16) | 0x8080

        # Advance to long-aligned character before we start reading longs.
        while (<size_t>s) % sizeof(unsigned long) and s < <const_xmlChar *>end:
            if s[0] & 0x80:
                return True
            s += 1

        # Read one long at a time
        lptr = <const unsigned long*> s
        while lptr < end:
            if lptr[0] & non_ascii_mask:
                return True
            lptr += 1
        s = <const_xmlChar *>lptr

    # scan the trailing bytes that did not fill a whole word
    while s < (<const_xmlChar *>end + length % sizeof(unsigned long)):
        if s[0] & 0x80:
            return True
        s += 1

    return False
1463
+
1464
cdef int _is_valid_xml_ascii(bytes pystring) except -1:
    """Check if a string is XML ascii content.

    Returns 1 if every byte is an XML-legal ASCII character, 0 otherwise.
    """
    cdef signed char ch
    # When ch is a *signed* char, non-ascii characters are negative integers
    # and xmlIsChar_ch does not accept them.
    for ch in pystring:
        if not tree.xmlIsChar_ch(ch):
            return 0
    return 1
1473
+
1474
cdef bint _is_valid_xml_utf8(bytes pystring) except -1:
    """Check if a string is like valid UTF-8 XML content.

    Walks the bytes with a rolling 3-byte window (``next3``) to reject the
    UTF-8 encodings of characters that are forbidden in XML.
    """
    cdef const_xmlChar* s = _xcstr(pystring)
    cdef const_xmlChar* c_end = s + len(pystring)
    cdef unsigned long next3 = 0
    if s < c_end - 2:
        # seed the window with the first two bytes; the loop shifts in the third
        next3 = (s[0] << 8) | (s[1])

    while s < c_end - 2:
        next3 = 0x00ffffff & ((next3 << 8) | s[2])
        if s[0] & 0x80:
            # 0xefbfbe and 0xefbfbf are utf-8 encodings of
            # forbidden characters \ufffe and \uffff
            if next3 == 0x00efbfbe or next3 == 0x00efbfbf:
                return 0
            # 0xeda080 and 0xedbfbf are utf-8 encodings of
            # \ud800 and \udfff. Anything between them (inclusive)
            # is forbidden, because they are surrogate blocks in utf-16.
            if 0x00eda080 <= next3 <= 0x00edbfbf:
                return 0
        elif not tree.xmlIsChar_ch(s[0]):
            return 0  # invalid ascii char
        s += 1

    # last (up to) two bytes cannot start a full 3-byte window: ASCII check only
    while s < c_end:
        if not s[0] & 0x80 and not tree.xmlIsChar_ch(s[0]):
            return 0  # invalid ascii char
        s += 1

    return 1
1504
+
1505
cdef inline unicode funicodeOrNone(const_xmlChar* s):
    """Decode a C string to unicode; a NULL pointer maps to None."""
    if s is NULL:
        return None
    return funicode(s)
1507
+
1508
cdef inline unicode funicodeOrEmpty(const_xmlChar* s):
    """Decode a C string to unicode; a NULL pointer maps to the empty string."""
    if s is NULL:
        return ''
    return funicode(s)
1510
+
1511
cdef unicode funicode(const_xmlChar* s):
    # Decode a NUL-terminated libxml2 string (always UTF-8) to a Python str.
    return s.decode('UTF-8')
1513
+
1514
cdef bytes _utf8(object s):
    """Test if a string is valid user input and encode it to UTF-8.
    Reject all bytes/unicode input that contains non-XML characters.
    Reject all bytes input that contains non-ASCII characters.

    Raises TypeError for non-string input and ValueError for strings
    containing characters that are not XML compatible.
    """
    cdef int valid
    cdef bytes utf8_string
    if isinstance(s, unicode):
        utf8_string = (<unicode>s).encode('utf8')
        valid = _is_valid_xml_utf8(utf8_string)
    elif isinstance(s, (bytes, bytearray)):
        # normalise bytearray (and bytes subclasses) to plain bytes
        utf8_string = s if type(s) is bytes else bytes(s)
        valid = _is_valid_xml_ascii(utf8_string)
    else:
        raise TypeError("Argument must be bytes or unicode, got '%.200s'" % type(s).__name__)
    if not valid:
        raise ValueError(
            "All strings must be XML compatible: Unicode or ASCII, no NULL bytes or control characters")
    return utf8_string
1533
+
1534
+
1535
cdef bytes _utf8orNone(object s):
    """Like _utf8(), but passes None through unchanged."""
    if s is None:
        return None
    return _utf8(s)
1537
+
1538
+
1539
# Classification values returned by _isFilePath() below.
cdef enum:
    NO_FILE_PATH = 0        # looks like a URL with a scheme:// prefix
    ABS_UNIX_FILE_PATH = 1  # absolute Unix path starting with '/'
    ABS_WIN_FILE_PATH = 2   # absolute Windows drive path, e.g. C: or C:\...
    REL_FILE_PATH = 3       # anything else: assumed to be a relative path
1544
+
1545
+
1546
cdef bint _isFilePath(const_xmlChar* c_path) noexcept:
    "simple heuristic to see if a path is a filename"
    # NOTE(review): declared 'bint' but returns the *_FILE_PATH enum values;
    # callers seem to use the result as a boolean (non-zero == file path) —
    # confirm before relying on the specific enum value.
    cdef xmlChar c
    # test if it looks like an absolute Unix path or a Windows network path
    if c_path[0] == c'/':
        return ABS_UNIX_FILE_PATH

    # test if it looks like an absolute Windows path or URL
    if c'a' <= c_path[0] <= c'z' or c'A' <= c_path[0] <= c'Z':
        c_path += 1
        if c_path[0] == c':' and c_path[1] in b'\0\\':
            return ABS_WIN_FILE_PATH  # C: or C:\...

    # test if it looks like a URL with scheme://
    while c'a' <= c_path[0] <= c'z' or c'A' <= c_path[0] <= c'Z':
        c_path += 1
    if c_path[0] == c':' and c_path[1] == c'/' and c_path[2] == c'/':
        return NO_FILE_PATH

    # assume it's a relative path
    return REL_FILE_PATH
1567
+
1568
+
1569
cdef object _getFSPathOrObject(object obj):
    """
    Get the __fspath__ attribute of an object if it exists.
    Otherwise, the original object is returned.
    """
    if not _isString(obj):
        try:
            obj = python.PyOS_FSPath(obj)
        except TypeError:
            pass
    return obj
1580
+
1581
+
1582
cdef object _encodeFilename(object filename):
    """Make sure a filename is 8-bit encoded (or None).

    Unicode filenames that look like file paths are preferably encoded
    with the C-level filesystem encoding, falling back to UTF-8.
    """
    if filename is None:
        return None
    elif isinstance(filename, bytes):
        return filename
    elif isinstance(filename, unicode):
        filename8 = (<unicode>filename).encode('utf8')
        if _isFilePath(<unsigned char*>filename8):
            try:
                return python.PyUnicode_AsEncodedString(
                    filename, _C_FILENAME_ENCODING, NULL)
            except UnicodeEncodeError:
                pass
        return filename8
    else:
        raise TypeError("Argument must be string or unicode.")
1600
+
1601
cdef object _decodeFilename(const_xmlChar* c_path):
    """Make the filename a unicode string if we are in Py3.

    Convenience wrapper that measures the string length itself.
    """
    return _decodeFilenameWithLength(c_path, tree.xmlStrlen(c_path))
1605
+
1606
cdef object _decodeFilenameWithLength(const_xmlChar* c_path, size_t c_len):
    """Make the filename a unicode string if we are in Py3.

    Tries the filesystem encoding for path-like strings, then UTF-8,
    then latin-1 with replacement as a last resort.
    """
    if _isFilePath(c_path):
        try:
            return python.PyUnicode_Decode(
                <const_char*>c_path, c_len, _C_FILENAME_ENCODING, NULL)
        except UnicodeDecodeError:
            pass
    try:
        return (<unsigned char*>c_path)[:c_len].decode('UTF-8')
    except UnicodeDecodeError:
        # this is a stupid fallback, but it might still work...
        return (<unsigned char*>c_path)[:c_len].decode('latin-1', 'replace')
1620
+
1621
cdef object _encodeFilenameUTF8(object filename):
    """Recode filename as UTF-8. Tries ASCII, local filesystem encoding and
    UTF-8 as source encoding.

    Returns None for None, bytes otherwise; raises TypeError for other types.
    """
    cdef char* c_filename
    if filename is None:
        return None
    elif isinstance(filename, bytes):
        if not isutf8l(<bytes>filename, len(<bytes>filename)):
            # plain ASCII!
            return filename
        c_filename = _cstr(<bytes>filename)
        try:
            # try to decode with default encoding
            filename = python.PyUnicode_Decode(
                c_filename, len(<bytes>filename),
                _C_FILENAME_ENCODING, NULL)
        except UnicodeDecodeError as decode_exc:
            try:
                # try if it's proper UTF-8
                (<bytes>filename).decode('utf8')
                return filename
            except UnicodeDecodeError:
                raise decode_exc  # otherwise re-raise original exception
    if isinstance(filename, unicode):
        return (<unicode>filename).encode('utf8')
    else:
        raise TypeError("Argument must be string or unicode.")
1649
+
1650
cdef tuple _getNsTag(tag):
    """Given a tag, find namespace URI and tag name.
    Return None for NS uri if no namespace URI provided.
    """
    return __getNsTag(tag, 0)
1655
+
1656
cdef tuple _getNsTagWithEmptyNs(tag):
    """Given a tag, find namespace URI and tag name. Return None for NS uri
    if no namespace URI provided, or the empty string if namespace
    part is '{}'.
    """
    return __getNsTag(tag, 1)
1662
+
1663
cdef tuple __getNsTag(tag, bint empty_ns):
    """Split a '{ns}tag' (Clark notation) string into (ns, tag) UTF-8 bytes.

    ``empty_ns`` controls whether '{}tag' yields b'' or None for the
    namespace part.  Raises ValueError on malformed or empty tag names.
    """
    cdef char* c_tag
    cdef char* c_ns_end
    cdef Py_ssize_t taglen
    cdef Py_ssize_t nslen
    cdef bytes ns = None
    # _isString() is much faster than isinstance()
    if not _isString(tag) and isinstance(tag, QName):
        tag = (<QName>tag).text
    tag = _utf8(tag)
    c_tag = _cstr(tag)
    if c_tag[0] == c'{':
        c_tag += 1
        c_ns_end = cstring_h.strchr(c_tag, c'}')
        if c_ns_end is NULL:
            raise ValueError, "Invalid tag name"
        nslen = c_ns_end - c_tag
        # total length minus namespace minus the two braces
        taglen = python.PyBytes_GET_SIZE(tag) - nslen - 2
        if taglen == 0:
            raise ValueError, "Empty tag name"
        if nslen > 0:
            ns = <bytes>c_tag[:nslen]
        elif empty_ns:
            ns = b''
        tag = <bytes>c_ns_end[1:taglen+1]
    elif python.PyBytes_GET_SIZE(tag) == 0:
        raise ValueError, "Empty tag name"
    return ns, tag
1691
+
1692
cdef inline int _pyXmlNameIsValid(name_utf8):
    # Valid XML name without a ':' (no prefixed names allowed here).
    return _xmlNameIsValid(_xcstr(name_utf8)) and b':' not in name_utf8
1694
+
1695
cdef inline int _pyHtmlNameIsValid(name_utf8):
    # Python-level wrapper around the C HTML name check.
    return _htmlNameIsValid(_xcstr(name_utf8))
1697
+
1698
cdef inline int _xmlNameIsValid(const_xmlChar* c_name) noexcept:
    # Delegate to libxml2's XML Name production check.
    return tree.xmlValidateNameValue(c_name)
1700
+
1701
cdef int _htmlNameIsValid(const_xmlChar* c_name) noexcept:
    # HTML tag names: non-empty and free of markup/quote/whitespace bytes.
    if c_name is NULL or c_name[0] == c'\0':
        return 0
    while c_name[0] != c'\0':
        if c_name[0] in b'&<>/"\'\t\n\x0B\x0C\r ':
            return 0
        c_name += 1
    return 1
1709
+
1710
cdef bint _characterReferenceIsValid(const_xmlChar* c_name) noexcept:
    """Validate the body of a character reference: decimal digits, or
    'x' followed by hex digits.  Empty bodies are invalid.
    """
    cdef bint is_hex
    if c_name[0] == c'x':
        c_name += 1
        is_hex = 1
    else:
        is_hex = 0
    if c_name[0] == c'\0':
        return 0
    while c_name[0] != c'\0':
        if c_name[0] < c'0' or c_name[0] > c'9':
            # non-decimal digit: only a-f/A-F allowed, and only in hex mode
            if not is_hex:
                return 0
            if not (c'a' <= c_name[0] <= c'f'):
                if not (c'A' <= c_name[0] <= c'F'):
                    return 0
        c_name += 1
    return 1
1728
+
1729
cdef int _tagValidOrRaise(tag_utf) except -1:
    # Raise ValueError if the UTF-8 tag name is not a valid XML name.
    if not _pyXmlNameIsValid(tag_utf):
        raise ValueError(f"Invalid tag name {(<bytes>tag_utf).decode('utf8')!r}")
    return 0
1733
+
1734
cdef int _htmlTagValidOrRaise(tag_utf) except -1:
    # Raise ValueError if the UTF-8 tag name is not a valid HTML name.
    if not _pyHtmlNameIsValid(tag_utf):
        raise ValueError(f"Invalid HTML tag name {(<bytes>tag_utf).decode('utf8')!r}")
    return 0
1738
+
1739
cdef int _attributeValidOrRaise(name_utf) except -1:
    # Raise ValueError if the UTF-8 attribute name is not a valid XML name.
    if not _pyXmlNameIsValid(name_utf):
        raise ValueError(f"Invalid attribute name {(<bytes>name_utf).decode('utf8')!r}")
    return 0
1743
+
1744
cdef int _prefixValidOrRaise(tag_utf) except -1:
    # Raise ValueError if the UTF-8 prefix is not a valid XML name.
    if not _pyXmlNameIsValid(tag_utf):
        raise ValueError(f"Invalid namespace prefix {(<bytes>tag_utf).decode('utf8')!r}")
    return 0
1748
+
1749
cdef int _uriValidOrRaise(uri_utf) except -1:
    # Validate the URI by parsing it with libxml2; raise ValueError on failure.
    cdef uri.xmlURI* c_uri = uri.xmlParseURI(_cstr(uri_utf))
    if c_uri is NULL:
        raise ValueError(f"Invalid namespace URI {(<bytes>uri_utf).decode('utf8')!r}")
    uri.xmlFreeURI(c_uri)
    return 0
1755
+
1756
cdef inline unicode _namespacedName(xmlNode* c_node):
    # Build the '{href}name' tag string for a libxml2 node.
    return _namespacedNameFromNsName(_getNs(c_node), c_node.name)
1758
+
1759
+
1760
cdef unicode _namespacedNameFromNsName(const_xmlChar* c_href, const_xmlChar* c_name):
    """Build the Clark-notation tag '{href}name', or just the plain name
    when no namespace href is given (NULL)."""
    if c_href is NULL:
        return funicode(c_name)
    return "{%s}%s" % (funicode(c_href), funicode(c_name))
1766
+
1767
+
1768
cdef _getFilenameForFile(source):
    """Given a Python File or Gzip object, give filename back.

    Returns None if not a file object.

    Each probe is deliberately best-effort; failures are ignored and the
    next candidate attribute is tried.
    """
    # urllib2 provides a geturl() method
    try:
        return source.geturl()
    # narrowed from a bare 'except:' so that KeyboardInterrupt/SystemExit
    # are no longer swallowed; best-effort behavior is otherwise unchanged
    except Exception:
        pass
    # file instances have a name attribute
    try:
        filename = source.name
        if _isString(filename):
            return os_path_abspath(filename)
    except Exception:
        pass
    # gzip file instances have a filename attribute (before Py3k)
    try:
        filename = source.filename
        if _isString(filename):
            return os_path_abspath(filename)
    except Exception:
        pass
    # can't determine filename
    return None
llmeval-env/lib/python3.10/site-packages/lxml/classlookup.pxi ADDED
@@ -0,0 +1,580 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Configurable Element class lookup
2
+
3
+ ################################################################################
4
+ # Custom Element classes
5
+
6
cdef public class ElementBase(_Element) [ type LxmlElementBaseType,
                                          object LxmlElementBase ]:
    """ElementBase(*children, attrib=None, nsmap=None, **_extra)

    The public Element class. All custom Element classes must inherit
    from this one. To create an Element, use the `Element()` factory.

    BIG FAT WARNING: Subclasses *must not* override __init__ or
    __new__ as it is absolutely undefined when these objects will be
    created or destroyed. All persistent state of Elements must be
    stored in the underlying XML. If you really need to initialize
    the object after creation, you can implement an ``_init(self)``
    method that will be called directly after object creation.

    Subclasses of this class can be instantiated to create a new
    Element. By default, the tag name will be the class name and the
    namespace will be empty. You can modify this with the following
    class attributes:

    * TAG - the tag name, possibly containing a namespace in Clark
      notation

    * NAMESPACE - the default namespace URI, unless provided as part
      of the TAG attribute.

    * HTML - flag if the class is an HTML tag, as opposed to an XML
      tag. This only applies to un-namespaced tags and defaults to
      false (i.e. XML).

    * PARSER - the parser that provides the configuration for the
      newly created document. Providing an HTML parser here will
      default to creating an HTML element.

    In user code, the latter three are commonly inherited in class
    hierarchies that implement a common namespace.
    """
    def __init__(self, *children, attrib=None, nsmap=None, **_extra):
        """ElementBase(*children, attrib=None, nsmap=None, **_extra)
        """
        cdef bint is_html = 0
        cdef _BaseParser parser
        cdef _Element last_child
        # don't use normal attribute access as it might be overridden
        _getattr = object.__getattribute__
        try:
            namespace = _utf8(_getattr(self, 'NAMESPACE'))
        except AttributeError:
            namespace = None
        try:
            # an explicit TAG (possibly '{ns}tag') overrides NAMESPACE
            ns, tag = _getNsTag(_getattr(self, 'TAG'))
            if ns is not None:
                namespace = ns
        except AttributeError:
            # no TAG: derive the tag name from the class name
            tag = _utf8(_getattr(_getattr(self, '__class__'), '__name__'))
            if b'.' in tag:
                tag = tag.split(b'.')[-1]
        try:
            parser = _getattr(self, 'PARSER')
        except AttributeError:
            parser = None
            # no PARSER: inherit the parser of the first Element child
            for child in children:
                if isinstance(child, _Element):
                    parser = (<_Element>child)._doc._parser
                    break
        if isinstance(parser, HTMLParser):
            is_html = 1
        if namespace is None:
            try:
                is_html = _getattr(self, 'HTML')
            except AttributeError:
                pass
        _initNewElement(self, is_html, tag, namespace, parser,
                        attrib, nsmap, _extra)
        last_child = None
        # children may be strings (text/tail), Elements, or ElementBase classes
        for child in children:
            if _isString(child):
                if last_child is None:
                    # before any element child: extend the element's text
                    _setNodeText(self._c_node,
                                 (_collectText(self._c_node.children) or '') + child)
                else:
                    # after an element child: extend that child's tail
                    _setTailText(last_child._c_node,
                                 (_collectText(last_child._c_node.next) or '') + child)
            elif isinstance(child, _Element):
                last_child = child
                _appendChild(self, last_child)
            elif isinstance(child, type) and issubclass(child, ElementBase):
                # an ElementBase subclass is instantiated and appended
                last_child = child()
                _appendChild(self, last_child)
            else:
                raise TypeError, f"Invalid child type: {type(child)!r}"
96
+
97
cdef class CommentBase(_Comment):
    """All custom Comment classes must inherit from this one.

    To create an XML Comment instance, use the ``Comment()`` factory.

    Subclasses *must not* override __init__ or __new__ as it is
    absolutely undefined when these objects will be created or
    destroyed. All persistent state of Comments must be stored in the
    underlying XML. If you really need to initialize the object after
    creation, you can implement an ``_init(self)`` method that will be
    called after object creation.
    """
    def __init__(self, text):
        # copied from Comment() factory
        cdef _Document doc
        cdef xmlDoc* c_doc
        if text is None:
            text = b''
        else:
            text = _utf8(text)
        # each comment gets its own single-node document
        c_doc = _newXMLDoc()
        doc = _documentFactory(c_doc, None)
        self._c_node = _createComment(c_doc, _xcstr(text))
        if self._c_node is NULL:
            raise MemoryError()
        tree.xmlAddChild(<xmlNode*>c_doc, self._c_node)
        _registerProxy(self, doc, self._c_node)
        self._init()
125
+
126
cdef class PIBase(_ProcessingInstruction):
    """All custom Processing Instruction classes must inherit from this one.

    To create an XML ProcessingInstruction instance, use the ``PI()``
    factory.

    Subclasses *must not* override __init__ or __new__ as it is
    absolutely undefined when these objects will be created or
    destroyed. All persistent state of PIs must be stored in the
    underlying XML. If you really need to initialize the object after
    creation, you can implement an ``_init(self)`` method that will be
    called after object creation.
    """
    def __init__(self, target, text=None):
        # copied from PI() factory
        cdef _Document doc
        cdef xmlDoc* c_doc
        target = _utf8(target)
        if text is None:
            text = b''
        else:
            text = _utf8(text)
        # each PI gets its own single-node document
        c_doc = _newXMLDoc()
        doc = _documentFactory(c_doc, None)
        self._c_node = _createPI(c_doc, _xcstr(target), _xcstr(text))
        if self._c_node is NULL:
            raise MemoryError()
        tree.xmlAddChild(<xmlNode*>c_doc, self._c_node)
        _registerProxy(self, doc, self._c_node)
        self._init()
156
+
157
cdef class EntityBase(_Entity):
    """All custom Entity classes must inherit from this one.

    To create an XML Entity instance, use the ``Entity()`` factory.

    Subclasses *must not* override __init__ or __new__ as it is
    absolutely undefined when these objects will be created or
    destroyed. All persistent state of Entities must be stored in the
    underlying XML. If you really need to initialize the object after
    creation, you can implement an ``_init(self)`` method that will be
    called after object creation.
    """
    def __init__(self, name):
        cdef _Document doc
        cdef xmlDoc* c_doc
        name_utf = _utf8(name)
        c_name = _xcstr(name_utf)
        if c_name[0] == c'#':
            # '#...' is a character reference like '#32' or '#x20'
            if not _characterReferenceIsValid(c_name + 1):
                raise ValueError, f"Invalid character reference: '{name}'"
        elif not _xmlNameIsValid(c_name):
            raise ValueError, f"Invalid entity reference: '{name}'"
        # each entity reference gets its own single-node document
        c_doc = _newXMLDoc()
        doc = _documentFactory(c_doc, None)
        self._c_node = _createEntity(c_doc, c_name)
        if self._c_node is NULL:
            raise MemoryError()
        tree.xmlAddChild(<xmlNode*>c_doc, self._c_node)
        _registerProxy(self, doc, self._c_node)
        self._init()
187
+
188
+
189
cdef int _validateNodeClass(xmlNode* c_node, cls) except -1:
    """Check that a class returned by an element class lookup is a subclass
    of the *Base class matching the libxml2 node type; raise TypeError
    otherwise.
    """
    if c_node.type == tree.XML_ELEMENT_NODE:
        expected = ElementBase
    elif c_node.type == tree.XML_COMMENT_NODE:
        expected = CommentBase
    elif c_node.type == tree.XML_ENTITY_REF_NODE:
        expected = EntityBase
    elif c_node.type == tree.XML_PI_NODE:
        expected = PIBase
    else:
        assert False, f"Unknown node type: {c_node.type}"

    if not (isinstance(cls, type) and issubclass(cls, expected)):
        # NOTE(review): 'type(expected)' always renders as "<class 'type'>"
        # here, so the message never names the expected base class —
        # looks unintended, but left unchanged in case tests match it.
        raise TypeError(
            f"result of class lookup must be subclass of {type(expected)}, got {type(cls)}")
    return 0
205
+
206
+
207
+ ################################################################################
208
+ # Element class lookup
209
+
210
# Signature of a per-node class lookup callback: given the lookup state
# object, the owning document and the libxml2 node, return the Python
# class to use for that node's proxy.
ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*)
211
+
212
+ # class to store element class lookup functions
213
cdef public class ElementClassLookup [ type LxmlElementClassLookupType,
                                       object LxmlElementClassLookup ]:
    """ElementClassLookup(self)
    Superclass of Element class lookups.
    """
    # C-level lookup callback; concrete subclasses install their own
    # function here (see ElementDefaultClassLookup.__cinit__).
    cdef _element_class_lookup_function _lookup_function
219
+
220
+
221
cdef public class FallbackElementClassLookup(ElementClassLookup) \
     [ type LxmlFallbackElementClassLookupType,
       object LxmlFallbackElementClassLookup ]:
    """FallbackElementClassLookup(self, fallback=None)

    Superclass of Element class lookups with additional fallback.
    """
    cdef readonly ElementClassLookup fallback
    # C-level callback invoked when this lookup does not resolve a class
    cdef _element_class_lookup_function _fallback_function
    def __cinit__(self):
        # fall back to default lookup
        self._fallback_function = _lookupDefaultElementClass

    def __init__(self, ElementClassLookup fallback=None):
        if fallback is not None:
            self._setFallback(fallback)
        else:
            self._fallback_function = _lookupDefaultElementClass

    cdef void _setFallback(self, ElementClassLookup lookup):
        """Sets the fallback scheme for this lookup method.
        """
        self.fallback = lookup
        self._fallback_function = lookup._lookup_function
        if self._fallback_function is NULL:
            # fallback object has no C callback of its own => use default
            self._fallback_function = _lookupDefaultElementClass

    def set_fallback(self, ElementClassLookup lookup not None):
        """set_fallback(self, lookup)

        Sets the fallback scheme for this lookup method.
        """
        self._setFallback(lookup)
254
+
255
cdef inline object _callLookupFallback(FallbackElementClassLookup lookup,
                                       _Document doc, xmlNode* c_node):
    # Dispatch to the fallback's C callback with the fallback as state.
    return lookup._fallback_function(lookup.fallback, doc, c_node)
258
+
259
+
260
+ ################################################################################
261
+ # default lookup scheme
262
+
263
cdef class ElementDefaultClassLookup(ElementClassLookup):
    """ElementDefaultClassLookup(self, element=None, comment=None, pi=None, entity=None)
    Element class lookup scheme that always returns the default Element
    class.

    The keyword arguments ``element``, ``comment``, ``pi`` and ``entity``
    accept the respective Element classes.
    """
    cdef readonly object element_class
    cdef readonly object comment_class
    cdef readonly object pi_class
    cdef readonly object entity_class
    def __cinit__(self):
        self._lookup_function = _lookupDefaultElementClass

    def __init__(self, element=None, comment=None, pi=None, entity=None):
        if element is None:
            self.element_class = _Element
        elif issubclass(element, ElementBase):
            self.element_class = element
        else:
            raise TypeError, "element class must be subclass of ElementBase"

        if comment is None:
            self.comment_class = _Comment
        elif issubclass(comment, CommentBase):
            self.comment_class = comment
        else:
            raise TypeError, "comment class must be subclass of CommentBase"

        if entity is None:
            self.entity_class = _Entity
        elif issubclass(entity, EntityBase):
            self.entity_class = entity
        else:
            raise TypeError, "Entity class must be subclass of EntityBase"

        if pi is None:
            self.pi_class = None  # special case, see below
        elif issubclass(pi, PIBase):
            self.pi_class = pi
        else:
            raise TypeError, "PI class must be subclass of PIBase"
306
+
307
+ cdef object _lookupDefaultElementClass(state, _Document _doc, xmlNode* c_node):
308
+ "Trivial class lookup function that always returns the default class."
309
+ if c_node.type == tree.XML_ELEMENT_NODE:
310
+ if state is not None:
311
+ return (<ElementDefaultClassLookup>state).element_class
312
+ else:
313
+ return _Element
314
+ elif c_node.type == tree.XML_COMMENT_NODE:
315
+ if state is not None:
316
+ return (<ElementDefaultClassLookup>state).comment_class
317
+ else:
318
+ return _Comment
319
+ elif c_node.type == tree.XML_ENTITY_REF_NODE:
320
+ if state is not None:
321
+ return (<ElementDefaultClassLookup>state).entity_class
322
+ else:
323
+ return _Entity
324
+ elif c_node.type == tree.XML_PI_NODE:
325
+ if state is None or (<ElementDefaultClassLookup>state).pi_class is None:
326
+ # special case XSLT-PI
327
+ if c_node.name is not NULL and c_node.content is not NULL:
328
+ if tree.xmlStrcmp(c_node.name, <unsigned char*>"xml-stylesheet") == 0:
329
+ if tree.xmlStrstr(c_node.content, <unsigned char*>"text/xsl") is not NULL or \
330
+ tree.xmlStrstr(c_node.content, <unsigned char*>"text/xml") is not NULL:
331
+ return _XSLTProcessingInstruction
332
+ return _ProcessingInstruction
333
+ else:
334
+ return (<ElementDefaultClassLookup>state).pi_class
335
+ else:
336
+ assert False, f"Unknown node type: {c_node.type}"
337
+
338
+
339
+ ################################################################################
340
+ # attribute based lookup scheme
341
+
342
+ cdef class AttributeBasedElementClassLookup(FallbackElementClassLookup):
343
+ """AttributeBasedElementClassLookup(self, attribute_name, class_mapping, fallback=None)
344
+ Checks an attribute of an Element and looks up the value in a
345
+ class dictionary.
346
+
347
+ Arguments:
348
+ - attribute name - '{ns}name' style string
349
+ - class mapping - Python dict mapping attribute values to Element classes
350
+ - fallback - optional fallback lookup mechanism
351
+
352
+ A None key in the class mapping will be checked if the attribute is
353
+ missing.
354
+ """
355
+ cdef object _class_mapping
356
+ cdef tuple _pytag
357
+ cdef const_xmlChar* _c_ns
358
+ cdef const_xmlChar* _c_name
359
+ def __cinit__(self):
360
+ self._lookup_function = _attribute_class_lookup
361
+
362
+ def __init__(self, attribute_name, class_mapping,
363
+ ElementClassLookup fallback=None):
364
+ self._pytag = _getNsTag(attribute_name)
365
+ ns, name = self._pytag
366
+ if ns is None:
367
+ self._c_ns = NULL
368
+ else:
369
+ self._c_ns = _xcstr(ns)
370
+ self._c_name = _xcstr(name)
371
+ self._class_mapping = dict(class_mapping)
372
+
373
+ FallbackElementClassLookup.__init__(self, fallback)
374
+
375
+ cdef object _attribute_class_lookup(state, _Document doc, xmlNode* c_node):
376
+ cdef AttributeBasedElementClassLookup lookup
377
+ cdef python.PyObject* dict_result
378
+
379
+ lookup = <AttributeBasedElementClassLookup>state
380
+ if c_node.type == tree.XML_ELEMENT_NODE:
381
+ value = _attributeValueFromNsName(
382
+ c_node, lookup._c_ns, lookup._c_name)
383
+ dict_result = python.PyDict_GetItem(lookup._class_mapping, value)
384
+ if dict_result is not NULL:
385
+ cls = <object>dict_result
386
+ _validateNodeClass(c_node, cls)
387
+ return cls
388
+ return _callLookupFallback(lookup, doc, c_node)
389
+
390
+
391
+ ################################################################################
392
+ # per-parser lookup scheme
393
+
394
+ cdef class ParserBasedElementClassLookup(FallbackElementClassLookup):
395
+ """ParserBasedElementClassLookup(self, fallback=None)
396
+ Element class lookup based on the XML parser.
397
+ """
398
+ def __cinit__(self):
399
+ self._lookup_function = _parser_class_lookup
400
+
401
+ cdef object _parser_class_lookup(state, _Document doc, xmlNode* c_node):
402
+ if doc._parser._class_lookup is not None:
403
+ return doc._parser._class_lookup._lookup_function(
404
+ doc._parser._class_lookup, doc, c_node)
405
+ return _callLookupFallback(<FallbackElementClassLookup>state, doc, c_node)
406
+
407
+
408
+ ################################################################################
409
+ # custom class lookup based on node type, namespace, name
410
+
411
+ cdef class CustomElementClassLookup(FallbackElementClassLookup):
412
+ """CustomElementClassLookup(self, fallback=None)
413
+ Element class lookup based on a subclass method.
414
+
415
+ You can inherit from this class and override the method::
416
+
417
+ lookup(self, type, doc, namespace, name)
418
+
419
+ to lookup the element class for a node. Arguments of the method:
420
+ * type: one of 'element', 'comment', 'PI', 'entity'
421
+ * doc: document that the node is in
422
+ * namespace: namespace URI of the node (or None for comments/PIs/entities)
423
+ * name: name of the element/entity, None for comments, target for PIs
424
+
425
+ If you return None from this method, the fallback will be called.
426
+ """
427
+ def __cinit__(self):
428
+ self._lookup_function = _custom_class_lookup
429
+
430
+ def lookup(self, type, doc, namespace, name):
431
+ "lookup(self, type, doc, namespace, name)"
432
+ return None
433
+
434
+ cdef object _custom_class_lookup(state, _Document doc, xmlNode* c_node):
435
+ cdef CustomElementClassLookup lookup
436
+
437
+ lookup = <CustomElementClassLookup>state
438
+
439
+ if c_node.type == tree.XML_ELEMENT_NODE:
440
+ element_type = "element"
441
+ elif c_node.type == tree.XML_COMMENT_NODE:
442
+ element_type = "comment"
443
+ elif c_node.type == tree.XML_PI_NODE:
444
+ element_type = "PI"
445
+ elif c_node.type == tree.XML_ENTITY_REF_NODE:
446
+ element_type = "entity"
447
+ else:
448
+ element_type = "element"
449
+ if c_node.name is NULL:
450
+ name = None
451
+ else:
452
+ name = funicode(c_node.name)
453
+ c_str = tree._getNs(c_node)
454
+ ns = funicode(c_str) if c_str is not NULL else None
455
+
456
+ cls = lookup.lookup(element_type, doc, ns, name)
457
+ if cls is not None:
458
+ _validateNodeClass(c_node, cls)
459
+ return cls
460
+ return _callLookupFallback(lookup, doc, c_node)
461
+
462
+
463
+ ################################################################################
464
+ # read-only tree based class lookup
465
+
466
+ cdef class PythonElementClassLookup(FallbackElementClassLookup):
467
+ """PythonElementClassLookup(self, fallback=None)
468
+ Element class lookup based on a subclass method.
469
+
470
+ This class lookup scheme allows access to the entire XML tree in
471
+ read-only mode. To use it, re-implement the ``lookup(self, doc,
472
+ root)`` method in a subclass::
473
+
474
+ from lxml import etree, pyclasslookup
475
+
476
+ class MyElementClass(etree.ElementBase):
477
+ honkey = True
478
+
479
+ class MyLookup(pyclasslookup.PythonElementClassLookup):
480
+ def lookup(self, doc, root):
481
+ if root.tag == "sometag":
482
+ return MyElementClass
483
+ else:
484
+ for child in root:
485
+ if child.tag == "someothertag":
486
+ return MyElementClass
487
+ # delegate to default
488
+ return None
489
+
490
+ If you return None from this method, the fallback will be called.
491
+
492
+ The first argument is the opaque document instance that contains
493
+ the Element. The second argument is a lightweight Element proxy
494
+ implementation that is only valid during the lookup. Do not try
495
+ to keep a reference to it. Once the lookup is done, the proxy
496
+ will be invalid.
497
+
498
+ Also, you cannot wrap such a read-only Element in an ElementTree,
499
+ and you must take care not to keep a reference to them outside of
500
+ the `lookup()` method.
501
+
502
+ Note that the API of the Element objects is not complete. It is
503
+ purely read-only and does not support all features of the normal
504
+ `lxml.etree` API (such as XPath, extended slicing or some
505
+ iteration methods).
506
+
507
+ See https://lxml.de/element_classes.html
508
+ """
509
+ def __cinit__(self):
510
+ self._lookup_function = _python_class_lookup
511
+
512
+ def lookup(self, doc, element):
513
+ """lookup(self, doc, element)
514
+
515
+ Override this method to implement your own lookup scheme.
516
+ """
517
+ return None
518
+
519
+ cdef object _python_class_lookup(state, _Document doc, tree.xmlNode* c_node):
520
+ cdef PythonElementClassLookup lookup
521
+ cdef _ReadOnlyProxy proxy
522
+ lookup = <PythonElementClassLookup>state
523
+
524
+ proxy = _newReadOnlyProxy(None, c_node)
525
+ cls = lookup.lookup(doc, proxy)
526
+ _freeReadOnlyProxies(proxy)
527
+
528
+ if cls is not None:
529
+ _validateNodeClass(c_node, cls)
530
+ return cls
531
+ return _callLookupFallback(lookup, doc, c_node)
532
+
533
+ ################################################################################
534
+ # Global setup
535
+
536
+ cdef _element_class_lookup_function LOOKUP_ELEMENT_CLASS
537
+ cdef object ELEMENT_CLASS_LOOKUP_STATE
538
+
539
+ cdef void _setElementClassLookupFunction(
540
+ _element_class_lookup_function function, object state):
541
+ global LOOKUP_ELEMENT_CLASS, ELEMENT_CLASS_LOOKUP_STATE
542
+ if function is NULL:
543
+ state = DEFAULT_ELEMENT_CLASS_LOOKUP
544
+ function = DEFAULT_ELEMENT_CLASS_LOOKUP._lookup_function
545
+
546
+ ELEMENT_CLASS_LOOKUP_STATE = state
547
+ LOOKUP_ELEMENT_CLASS = function
548
+
549
+ def set_element_class_lookup(ElementClassLookup lookup = None):
550
+ """set_element_class_lookup(lookup = None)
551
+
552
+ Set the global element class lookup method.
553
+
554
+ This defines the main entry point for looking up element implementations.
555
+ The standard implementation uses the :class:`ParserBasedElementClassLookup`
556
+ to delegate to different lookup schemes for each parser.
557
+
558
+ .. warning::
559
+
560
+ This should only be changed by applications, not by library packages.
561
+ In most cases, parser specific lookups should be preferred,
562
+ which can be configured via
563
+ :meth:`~lxml.etree.XMLParser.set_element_class_lookup`
564
+ (and the same for HTML parsers).
565
+
566
+ Globally replacing the element class lookup by something other than a
567
+ :class:`ParserBasedElementClassLookup` will prevent parser specific lookup
568
+ schemes from working. Several tools rely on parser specific lookups,
569
+ including :mod:`lxml.html` and :mod:`lxml.objectify`.
570
+ """
571
+ if lookup is None or lookup._lookup_function is NULL:
572
+ _setElementClassLookupFunction(NULL, None)
573
+ else:
574
+ _setElementClassLookupFunction(lookup._lookup_function, lookup)
575
+
576
+ # default setup: parser delegation
577
+ cdef ParserBasedElementClassLookup DEFAULT_ELEMENT_CLASS_LOOKUP
578
+ DEFAULT_ELEMENT_CLASS_LOOKUP = ParserBasedElementClassLookup()
579
+
580
+ set_element_class_lookup(DEFAULT_ELEMENT_CLASS_LOOKUP)
llmeval-env/lib/python3.10/site-packages/lxml/cssselect.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """CSS Selectors based on XPath.
2
+
3
+ This module supports selecting XML/HTML tags based on CSS selectors.
4
+ See the `CSSSelector` class for details.
5
+
6
+ This is a thin wrapper around cssselect 0.7 or later.
7
+ """
8
+
9
+
10
+ from . import etree
11
+ try:
12
+ import cssselect as external_cssselect
13
+ except ImportError:
14
+ raise ImportError(
15
+ 'cssselect does not seem to be installed. '
16
+ 'See https://pypi.org/project/cssselect/')
17
+
18
+
19
+ SelectorSyntaxError = external_cssselect.SelectorSyntaxError
20
+ ExpressionError = external_cssselect.ExpressionError
21
+ SelectorError = external_cssselect.SelectorError
22
+
23
+
24
+ __all__ = ['SelectorSyntaxError', 'ExpressionError', 'SelectorError',
25
+ 'CSSSelector']
26
+
27
+
28
+ class LxmlTranslator(external_cssselect.GenericTranslator):
29
+ """
30
+ A custom CSS selector to XPath translator with lxml-specific extensions.
31
+ """
32
+ def xpath_contains_function(self, xpath, function):
33
+ # Defined there, removed in later drafts:
34
+ # http://www.w3.org/TR/2001/CR-css3-selectors-20011113/#content-selectors
35
+ if function.argument_types() not in (['STRING'], ['IDENT']):
36
+ raise ExpressionError(
37
+ "Expected a single string or ident for :contains(), got %r"
38
+ % function.arguments)
39
+ value = function.arguments[0].value
40
+ return xpath.add_condition(
41
+ 'contains(__lxml_internal_css:lower-case(string(.)), %s)'
42
+ % self.xpath_literal(value.lower()))
43
+
44
+
45
+ class LxmlHTMLTranslator(LxmlTranslator, external_cssselect.HTMLTranslator):
46
+ """
47
+ lxml extensions + HTML support.
48
+ """
49
+
50
+
51
+ def _make_lower_case(context, s):
52
+ return s.lower()
53
+
54
+ ns = etree.FunctionNamespace('http://codespeak.net/lxml/css/')
55
+ ns.prefix = '__lxml_internal_css'
56
+ ns['lower-case'] = _make_lower_case
57
+
58
+
59
+ class CSSSelector(etree.XPath):
60
+ """A CSS selector.
61
+
62
+ Usage::
63
+
64
+ >>> from lxml import etree, cssselect
65
+ >>> select = cssselect.CSSSelector("a tag > child")
66
+
67
+ >>> root = etree.XML("<a><b><c/><tag><child>TEXT</child></tag></b></a>")
68
+ >>> [ el.tag for el in select(root) ]
69
+ ['child']
70
+
71
+ To use CSS namespaces, you need to pass a prefix-to-namespace
72
+ mapping as ``namespaces`` keyword argument::
73
+
74
+ >>> rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
75
+ >>> select_ns = cssselect.CSSSelector('root > rdf|Description',
76
+ ... namespaces={'rdf': rdfns})
77
+
78
+ >>> rdf = etree.XML((
79
+ ... '<root xmlns:rdf="%s">'
80
+ ... '<rdf:Description>blah</rdf:Description>'
81
+ ... '</root>') % rdfns)
82
+ >>> [(el.tag, el.text) for el in select_ns(rdf)]
83
+ [('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description', 'blah')]
84
+
85
+ """
86
+ def __init__(self, css, namespaces=None, translator='xml'):
87
+ if translator == 'xml':
88
+ translator = LxmlTranslator()
89
+ elif translator == 'html':
90
+ translator = LxmlHTMLTranslator()
91
+ elif translator == 'xhtml':
92
+ translator = LxmlHTMLTranslator(xhtml=True)
93
+ path = translator.css_to_xpath(css)
94
+ super().__init__(path, namespaces=namespaces)
95
+ self.css = css
96
+
97
+ def __repr__(self):
98
+ return '<%s %x for %r>' % (
99
+ self.__class__.__name__,
100
+ abs(id(self)),
101
+ self.css)
llmeval-env/lib/python3.10/site-packages/lxml/etree.h ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Generated by Cython 3.0.10 */
2
+
3
+ #ifndef __PYX_HAVE__lxml__etree
4
+ #define __PYX_HAVE__lxml__etree
5
+
6
+ #include "Python.h"
7
+ struct LxmlDocument;
8
+ struct LxmlElement;
9
+ struct LxmlElementTree;
10
+ struct LxmlElementTagMatcher;
11
+ struct LxmlElementIterator;
12
+ struct LxmlElementBase;
13
+ struct LxmlElementClassLookup;
14
+ struct LxmlFallbackElementClassLookup;
15
+
16
+ /* "lxml/etree.pyx":333
17
+ *
18
+ * # type of a function that steps from node to node
19
+ * ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*) # <<<<<<<<<<<<<<
20
+ *
21
+ *
22
+ */
23
+ typedef xmlNode *(*_node_to_node_function)(xmlNode *);
24
+
25
+ /* "lxml/etree.pyx":349
26
+ * @cython.final
27
+ * @cython.freelist(8)
28
+ * cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]: # <<<<<<<<<<<<<<
29
+ * """Internal base class to reference a libxml document.
30
+ *
31
+ */
32
+ struct LxmlDocument {
33
+ PyObject_HEAD
34
+ struct __pyx_vtabstruct_4lxml_5etree__Document *__pyx_vtab;
35
+ int _ns_counter;
36
+ PyObject *_prefix_tail;
37
+ xmlDoc *_c_doc;
38
+ struct __pyx_obj_4lxml_5etree__BaseParser *_parser;
39
+ };
40
+
41
+ /* "lxml/etree.pyx":698
42
+ *
43
+ * @cython.no_gc_clear
44
+ * cdef public class _Element [ type LxmlElementType, object LxmlElement ]: # <<<<<<<<<<<<<<
45
+ * """Element class.
46
+ *
47
+ */
48
+ struct LxmlElement {
49
+ PyObject_HEAD
50
+ struct LxmlDocument *_doc;
51
+ xmlNode *_c_node;
52
+ PyObject *_tag;
53
+ };
54
+
55
+ /* "lxml/etree.pyx":1872
56
+ *
57
+ *
58
+ * cdef public class _ElementTree [ type LxmlElementTreeType, # <<<<<<<<<<<<<<
59
+ * object LxmlElementTree ]:
60
+ * cdef _Document _doc
61
+ */
62
+ struct LxmlElementTree {
63
+ PyObject_HEAD
64
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTree *__pyx_vtab;
65
+ struct LxmlDocument *_doc;
66
+ struct LxmlElement *_context_node;
67
+ };
68
+
69
+ /* "lxml/etree.pyx":2646
70
+ *
71
+ *
72
+ * cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher, # <<<<<<<<<<<<<<
73
+ * type LxmlElementTagMatcherType ]:
74
+ * """
75
+ */
76
+ struct LxmlElementTagMatcher {
77
+ PyObject_HEAD
78
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTagMatcher *__pyx_vtab;
79
+ PyObject *_pystrings;
80
+ int _node_type;
81
+ char *_href;
82
+ char *_name;
83
+ };
84
+
85
+ /* "lxml/etree.pyx":2677
86
+ * self._name = NULL
87
+ *
88
+ * cdef public class _ElementIterator(_ElementTagMatcher) [ # <<<<<<<<<<<<<<
89
+ * object LxmlElementIterator, type LxmlElementIteratorType ]:
90
+ * """
91
+ */
92
+ struct LxmlElementIterator {
93
+ struct LxmlElementTagMatcher __pyx_base;
94
+ struct LxmlElement *_node;
95
+ _node_to_node_function _next_element;
96
+ };
97
+
98
+ /* "src/lxml/classlookup.pxi":6
99
+ * # Custom Element classes
100
+ *
101
+ * cdef public class ElementBase(_Element) [ type LxmlElementBaseType, # <<<<<<<<<<<<<<
102
+ * object LxmlElementBase ]:
103
+ * """ElementBase(*children, attrib=None, nsmap=None, **_extra)
104
+ */
105
+ struct LxmlElementBase {
106
+ struct LxmlElement __pyx_base;
107
+ };
108
+
109
+ /* "src/lxml/classlookup.pxi":210
110
+ * # Element class lookup
111
+ *
112
+ * ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*) # <<<<<<<<<<<<<<
113
+ *
114
+ * # class to store element class lookup functions
115
+ */
116
+ typedef PyObject *(*_element_class_lookup_function)(PyObject *, struct LxmlDocument *, xmlNode *);
117
+
118
+ /* "src/lxml/classlookup.pxi":213
119
+ *
120
+ * # class to store element class lookup functions
121
+ * cdef public class ElementClassLookup [ type LxmlElementClassLookupType, # <<<<<<<<<<<<<<
122
+ * object LxmlElementClassLookup ]:
123
+ * """ElementClassLookup(self)
124
+ */
125
+ struct LxmlElementClassLookup {
126
+ PyObject_HEAD
127
+ _element_class_lookup_function _lookup_function;
128
+ };
129
+
130
+ /* "src/lxml/classlookup.pxi":221
131
+ *
132
+ *
133
+ * cdef public class FallbackElementClassLookup(ElementClassLookup) \ # <<<<<<<<<<<<<<
134
+ * [ type LxmlFallbackElementClassLookupType,
135
+ * object LxmlFallbackElementClassLookup ]:
136
+ */
137
+ struct LxmlFallbackElementClassLookup {
138
+ struct LxmlElementClassLookup __pyx_base;
139
+ struct __pyx_vtabstruct_4lxml_5etree_FallbackElementClassLookup *__pyx_vtab;
140
+ struct LxmlElementClassLookup *fallback;
141
+ _element_class_lookup_function _fallback_function;
142
+ };
143
+
144
+ #ifndef __PYX_HAVE_API__lxml__etree
145
+
146
+ #ifdef CYTHON_EXTERN_C
147
+ #undef __PYX_EXTERN_C
148
+ #define __PYX_EXTERN_C CYTHON_EXTERN_C
149
+ #elif defined(__PYX_EXTERN_C)
150
+ #ifdef _MSC_VER
151
+ #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
152
+ #else
153
+ #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
154
+ #endif
155
+ #else
156
+ #ifdef __cplusplus
157
+ #define __PYX_EXTERN_C extern "C"
158
+ #else
159
+ #define __PYX_EXTERN_C extern
160
+ #endif
161
+ #endif
162
+
163
+ #ifndef DL_IMPORT
164
+ #define DL_IMPORT(_T) _T
165
+ #endif
166
+
167
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlDocumentType;
168
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementType;
169
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTreeType;
170
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTagMatcherType;
171
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementIteratorType;
172
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementBaseType;
173
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementClassLookupType;
174
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlFallbackElementClassLookupType;
175
+
176
+ __PYX_EXTERN_C struct LxmlElement *deepcopyNodeToDocument(struct LxmlDocument *, xmlNode *);
177
+ __PYX_EXTERN_C struct LxmlElementTree *elementTreeFactory(struct LxmlElement *);
178
+ __PYX_EXTERN_C struct LxmlElementTree *newElementTree(struct LxmlElement *, PyObject *);
179
+ __PYX_EXTERN_C struct LxmlElementTree *adoptExternalDocument(xmlDoc *, PyObject *, int);
180
+ __PYX_EXTERN_C struct LxmlElement *elementFactory(struct LxmlDocument *, xmlNode *);
181
+ __PYX_EXTERN_C struct LxmlElement *makeElement(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
182
+ __PYX_EXTERN_C struct LxmlElement *makeSubElement(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
183
+ __PYX_EXTERN_C void setElementClassLookupFunction(_element_class_lookup_function, PyObject *);
184
+ __PYX_EXTERN_C PyObject *lookupDefaultElementClass(PyObject *, PyObject *, xmlNode *);
185
+ __PYX_EXTERN_C PyObject *lookupNamespaceElementClass(PyObject *, PyObject *, xmlNode *);
186
+ __PYX_EXTERN_C PyObject *callLookupFallback(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *);
187
+ __PYX_EXTERN_C int tagMatches(xmlNode *, const xmlChar *, const xmlChar *);
188
+ __PYX_EXTERN_C struct LxmlDocument *documentOrRaise(PyObject *);
189
+ __PYX_EXTERN_C struct LxmlElement *rootNodeOrRaise(PyObject *);
190
+ __PYX_EXTERN_C int hasText(xmlNode *);
191
+ __PYX_EXTERN_C int hasTail(xmlNode *);
192
+ __PYX_EXTERN_C PyObject *textOf(xmlNode *);
193
+ __PYX_EXTERN_C PyObject *tailOf(xmlNode *);
194
+ __PYX_EXTERN_C int setNodeText(xmlNode *, PyObject *);
195
+ __PYX_EXTERN_C int setTailText(xmlNode *, PyObject *);
196
+ __PYX_EXTERN_C PyObject *attributeValue(xmlNode *, xmlAttr *);
197
+ __PYX_EXTERN_C PyObject *attributeValueFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
198
+ __PYX_EXTERN_C PyObject *getAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
199
+ __PYX_EXTERN_C PyObject *iterattributes(struct LxmlElement *, int);
200
+ __PYX_EXTERN_C PyObject *collectAttributes(xmlNode *, int);
201
+ __PYX_EXTERN_C int setAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
202
+ __PYX_EXTERN_C int delAttribute(struct LxmlElement *, PyObject *);
203
+ __PYX_EXTERN_C int delAttributeFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
204
+ __PYX_EXTERN_C int hasChild(xmlNode *);
205
+ __PYX_EXTERN_C xmlNode *findChild(xmlNode *, Py_ssize_t);
206
+ __PYX_EXTERN_C xmlNode *findChildForwards(xmlNode *, Py_ssize_t);
207
+ __PYX_EXTERN_C xmlNode *findChildBackwards(xmlNode *, Py_ssize_t);
208
+ __PYX_EXTERN_C xmlNode *nextElement(xmlNode *);
209
+ __PYX_EXTERN_C xmlNode *previousElement(xmlNode *);
210
+ __PYX_EXTERN_C void appendChild(struct LxmlElement *, struct LxmlElement *);
211
+ __PYX_EXTERN_C int appendChildToElement(struct LxmlElement *, struct LxmlElement *);
212
+ __PYX_EXTERN_C PyObject *pyunicode(const xmlChar *);
213
+ __PYX_EXTERN_C PyObject *utf8(PyObject *);
214
+ __PYX_EXTERN_C PyObject *getNsTag(PyObject *);
215
+ __PYX_EXTERN_C PyObject *getNsTagWithEmptyNs(PyObject *);
216
+ __PYX_EXTERN_C PyObject *namespacedName(xmlNode *);
217
+ __PYX_EXTERN_C PyObject *namespacedNameFromNsName(const xmlChar *, const xmlChar *);
218
+ __PYX_EXTERN_C void iteratorStoreNext(struct LxmlElementIterator *, struct LxmlElement *);
219
+ __PYX_EXTERN_C void initTagMatch(struct LxmlElementTagMatcher *, PyObject *);
220
+ __PYX_EXTERN_C xmlNs *findOrBuildNodeNsPrefix(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *);
221
+
222
+ #endif /* !__PYX_HAVE_API__lxml__etree */
223
+
224
+ /* WARNING: the interface of the module init function changed in CPython 3.5. */
225
+ /* It now returns a PyModuleDef instance instead of a PyModule instance. */
226
+
227
+ #if PY_MAJOR_VERSION < 3
228
+ PyMODINIT_FUNC initetree(void);
229
+ #else
230
+ /* WARNING: Use PyImport_AppendInittab("etree", PyInit_etree) instead of calling PyInit_etree directly from Python 3.5 */
231
+ PyMODINIT_FUNC PyInit_etree(void);
232
+
233
+ #if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L))
234
+ #if defined(__cplusplus) && __cplusplus >= 201402L
235
+ [[deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")]] inline
236
+ #elif defined(__GNUC__) || defined(__clang__)
237
+ __attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly."), __unused__)) __inline__
238
+ #elif defined(_MSC_VER)
239
+ __declspec(deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")) __inline
240
+ #endif
241
+ static PyObject* __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyObject* res) {
242
+ return res;
243
+ }
244
+ #define PyInit_etree() __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyInit_etree())
245
+ #endif
246
+ #endif
247
+
248
+ #endif /* !__PYX_HAVE__lxml__etree */
llmeval-env/lib/python3.10/site-packages/lxml/etree.pyx ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/lxml/extensions.pxi ADDED
@@ -0,0 +1,833 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # support for extension functions in XPath and XSLT
2
+
3
+ cdef class XPathError(LxmlError):
4
+ """Base class of all XPath errors.
5
+ """
6
+
7
+ cdef class XPathEvalError(XPathError):
8
+ """Error during XPath evaluation.
9
+ """
10
+
11
+ cdef class XPathFunctionError(XPathEvalError):
12
+ """Internal error looking up an XPath extension function.
13
+ """
14
+
15
+ cdef class XPathResultError(XPathEvalError):
16
+ """Error handling an XPath result.
17
+ """
18
+
19
+
20
+ # forward declarations
21
+
22
+ ctypedef int (*_register_function)(void* ctxt, name_utf, ns_uri_utf)
23
+ cdef class _ExsltRegExp
24
+
25
+ ################################################################################
26
+ # Base class for XSLT and XPath evaluation contexts: functions, namespaces, ...
27
+
28
+ @cython.internal
29
+ cdef class _BaseContext:
30
+ cdef xpath.xmlXPathContext* _xpathCtxt
31
+ cdef _Document _doc
32
+ cdef dict _extensions
33
+ cdef list _namespaces
34
+ cdef list _global_namespaces
35
+ cdef dict _utf_refs
36
+ cdef dict _function_cache
37
+ cdef dict _eval_context_dict
38
+ cdef bint _build_smart_strings
39
+ # for exception handling and temporary reference keeping:
40
+ cdef _TempStore _temp_refs
41
+ cdef set _temp_documents
42
+ cdef _ExceptionContext _exc
43
+ cdef _ErrorLog _error_log
44
+
45
+ def __cinit__(self):
46
+ self._xpathCtxt = NULL
47
+
48
+ def __init__(self, namespaces, extensions, error_log, enable_regexp,
49
+ build_smart_strings):
50
+ cdef _ExsltRegExp _regexp
51
+ cdef dict new_extensions
52
+ cdef list ns
53
+ self._utf_refs = {}
54
+ self._global_namespaces = []
55
+ self._function_cache = {}
56
+ self._eval_context_dict = None
57
+ self._error_log = error_log
58
+
59
+ if extensions is not None:
60
+ # convert extensions to UTF-8
61
+ if isinstance(extensions, dict):
62
+ extensions = (extensions,)
63
+ # format: [ {(ns, name):function} ] -> {(ns_utf, name_utf):function}
64
+ new_extensions = {}
65
+ for extension in extensions:
66
+ for (ns_uri, name), function in extension.items():
67
+ if name is None:
68
+ raise ValueError, "extensions must have non empty names"
69
+ ns_utf = self._to_utf(ns_uri)
70
+ name_utf = self._to_utf(name)
71
+ new_extensions[(ns_utf, name_utf)] = function
72
+ extensions = new_extensions or None
73
+
74
+ if namespaces is not None:
75
+ if isinstance(namespaces, dict):
76
+ namespaces = namespaces.items()
77
+ if namespaces:
78
+ ns = []
79
+ for prefix, ns_uri in namespaces:
80
+ if prefix is None or not prefix:
81
+ raise TypeError, \
82
+ "empty namespace prefix is not supported in XPath"
83
+ if ns_uri is None or not ns_uri:
84
+ raise TypeError, \
85
+ "setting default namespace is not supported in XPath"
86
+ prefix_utf = self._to_utf(prefix)
87
+ ns_uri_utf = self._to_utf(ns_uri)
88
+ ns.append( (prefix_utf, ns_uri_utf) )
89
+ namespaces = ns
90
+ else:
91
+ namespaces = None
92
+
93
+ self._doc = None
94
+ self._exc = _ExceptionContext()
95
+ self._extensions = extensions
96
+ self._namespaces = namespaces
97
+ self._temp_refs = _TempStore()
98
+ self._temp_documents = set()
99
+ self._build_smart_strings = build_smart_strings
100
+
101
+ if enable_regexp:
102
+ _regexp = _ExsltRegExp()
103
+ _regexp._register_in_context(self)
104
+
105
+ cdef _BaseContext _copy(self):
106
+ cdef _BaseContext context
107
+ if self._namespaces is not None:
108
+ namespaces = self._namespaces[:]
109
+ else:
110
+ namespaces = None
111
+ context = self.__class__(namespaces, None, self._error_log, False,
112
+ self._build_smart_strings)
113
+ if self._extensions is not None:
114
+ context._extensions = self._extensions.copy()
115
+ return context
116
+
117
+ cdef bytes _to_utf(self, s):
118
+ "Convert to UTF-8 and keep a reference to the encoded string"
119
+ cdef python.PyObject* dict_result
120
+ if s is None:
121
+ return None
122
+ dict_result = python.PyDict_GetItem(self._utf_refs, s)
123
+ if dict_result is not NULL:
124
+ return <bytes>dict_result
125
+ utf = _utf8(s)
126
+ self._utf_refs[s] = utf
127
+ if python.IS_PYPY:
128
+ # use C level refs, PyPy refs are not enough!
129
+ python.Py_INCREF(utf)
130
+ return utf
131
+
132
+ cdef void _set_xpath_context(self, xpath.xmlXPathContext* xpathCtxt) noexcept:
133
+ self._xpathCtxt = xpathCtxt
134
+ xpathCtxt.userData = <void*>self
135
+ # Need a cast here because older libxml2 releases do not use 'const' in the functype.
136
+ xpathCtxt.error = <xmlerror.xmlStructuredErrorFunc> _receiveXPathError
137
+
138
+ @cython.final
139
+ cdef _register_context(self, _Document doc):
140
+ self._doc = doc
141
+ self._exc.clear()
142
+
143
+ @cython.final
144
+ cdef _cleanup_context(self):
145
+ #xpath.xmlXPathRegisteredNsCleanup(self._xpathCtxt)
146
+ #self.unregisterGlobalNamespaces()
147
+ if python.IS_PYPY:
148
+ # clean up double refs in PyPy (see "_to_utf()" method)
149
+ for ref in self._utf_refs.itervalues():
150
+ python.Py_DECREF(ref)
151
+ self._utf_refs.clear()
152
+ self._eval_context_dict = None
153
+ self._doc = None
154
+
155
+ @cython.final
156
+ cdef _release_context(self):
157
+ if self._xpathCtxt is not NULL:
158
+ self._xpathCtxt.userData = NULL
159
+ self._xpathCtxt = NULL
160
+
161
+ # namespaces (internal UTF-8 methods with leading '_')
162
+
163
+ cdef addNamespace(self, prefix, ns_uri):
164
+ cdef list namespaces
165
+ if prefix is None:
166
+ raise TypeError, "empty prefix is not supported in XPath"
167
+ prefix_utf = self._to_utf(prefix)
168
+ ns_uri_utf = self._to_utf(ns_uri)
169
+ new_item = (prefix_utf, ns_uri_utf)
170
+ if self._namespaces is None:
171
+ self._namespaces = [new_item]
172
+ else:
173
+ namespaces = []
174
+ for item in self._namespaces:
175
+ if item[0] == prefix_utf:
176
+ item = new_item
177
+ new_item = None
178
+ namespaces.append(item)
179
+ if new_item is not None:
180
+ namespaces.append(new_item)
181
+ self._namespaces = namespaces
182
+ if self._xpathCtxt is not NULL:
183
+ xpath.xmlXPathRegisterNs(
184
+ self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf))
185
+
186
+ cdef registerNamespace(self, prefix, ns_uri):
187
+ if prefix is None:
188
+ raise TypeError, "empty prefix is not supported in XPath"
189
+ prefix_utf = self._to_utf(prefix)
190
+ ns_uri_utf = self._to_utf(ns_uri)
191
+ self._global_namespaces.append(prefix_utf)
192
+ xpath.xmlXPathRegisterNs(self._xpathCtxt,
193
+ _xcstr(prefix_utf), _xcstr(ns_uri_utf))
194
+
195
+ cdef registerLocalNamespaces(self):
196
+ if self._namespaces is None:
197
+ return
198
+ for prefix_utf, ns_uri_utf in self._namespaces:
199
+ xpath.xmlXPathRegisterNs(
200
+ self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf))
201
+
202
+ cdef registerGlobalNamespaces(self):
203
+ cdef list ns_prefixes = _find_all_extension_prefixes()
204
+ if python.PyList_GET_SIZE(ns_prefixes) > 0:
205
+ for prefix_utf, ns_uri_utf in ns_prefixes:
206
+ self._global_namespaces.append(prefix_utf)
207
+ xpath.xmlXPathRegisterNs(
208
+ self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf))
209
+
210
+ cdef unregisterGlobalNamespaces(self):
211
+ if python.PyList_GET_SIZE(self._global_namespaces) > 0:
212
+ for prefix_utf in self._global_namespaces:
213
+ xpath.xmlXPathRegisterNs(self._xpathCtxt,
214
+ _xcstr(prefix_utf), NULL)
215
+ del self._global_namespaces[:]
216
+
217
+ cdef void _unregisterNamespace(self, prefix_utf) noexcept:
218
+ xpath.xmlXPathRegisterNs(self._xpathCtxt,
219
+ _xcstr(prefix_utf), NULL)
220
+
221
+ # extension functions
222
+
223
+ cdef int _addLocalExtensionFunction(self, ns_utf, name_utf, function) except -1:
224
+ if self._extensions is None:
225
+ self._extensions = {}
226
+ self._extensions[(ns_utf, name_utf)] = function
227
+ return 0
228
+
229
+ cdef registerGlobalFunctions(self, void* ctxt,
230
+ _register_function reg_func):
231
+ cdef python.PyObject* dict_result
232
+ cdef dict d
233
+ for ns_utf, ns_functions in __FUNCTION_NAMESPACE_REGISTRIES.iteritems():
234
+ dict_result = python.PyDict_GetItem(
235
+ self._function_cache, ns_utf)
236
+ if dict_result is not NULL:
237
+ d = <dict>dict_result
238
+ else:
239
+ d = {}
240
+ self._function_cache[ns_utf] = d
241
+ for name_utf, function in ns_functions.iteritems():
242
+ d[name_utf] = function
243
+ reg_func(ctxt, name_utf, ns_utf)
244
+
245
+ cdef registerLocalFunctions(self, void* ctxt,
246
+ _register_function reg_func):
247
+ cdef python.PyObject* dict_result
248
+ cdef dict d
249
+ if self._extensions is None:
250
+ return # done
251
+ last_ns = None
252
+ d = None
253
+ for (ns_utf, name_utf), function in self._extensions.iteritems():
254
+ if ns_utf is not last_ns or d is None:
255
+ last_ns = ns_utf
256
+ dict_result = python.PyDict_GetItem(
257
+ self._function_cache, ns_utf)
258
+ if dict_result is not NULL:
259
+ d = <dict>dict_result
260
+ else:
261
+ d = {}
262
+ self._function_cache[ns_utf] = d
263
+ d[name_utf] = function
264
+ reg_func(ctxt, name_utf, ns_utf)
265
+
266
+ cdef unregisterAllFunctions(self, void* ctxt,
267
+ _register_function unreg_func):
268
+ for ns_utf, functions in self._function_cache.iteritems():
269
+ for name_utf in functions:
270
+ unreg_func(ctxt, name_utf, ns_utf)
271
+
272
+ cdef unregisterGlobalFunctions(self, void* ctxt,
273
+ _register_function unreg_func):
274
+ for ns_utf, functions in self._function_cache.items():
275
+ for name_utf in functions:
276
+ if self._extensions is None or \
277
+ (ns_utf, name_utf) not in self._extensions:
278
+ unreg_func(ctxt, name_utf, ns_utf)
279
+
280
+ @cython.final
281
+ cdef _find_cached_function(self, const_xmlChar* c_ns_uri, const_xmlChar* c_name):
282
+ """Lookup an extension function in the cache and return it.
283
+
284
+ Parameters: c_ns_uri may be NULL, c_name must not be NULL
285
+ """
286
+ cdef python.PyObject* c_dict
287
+ cdef python.PyObject* dict_result
288
+ c_dict = python.PyDict_GetItem(
289
+ self._function_cache, None if c_ns_uri is NULL else c_ns_uri)
290
+ if c_dict is not NULL:
291
+ dict_result = python.PyDict_GetItem(
292
+ <object>c_dict, <unsigned char*>c_name)
293
+ if dict_result is not NULL:
294
+ return <object>dict_result
295
+ return None
296
+
297
+ # Python access to the XPath context for extension functions
298
+
299
+ @property
300
+ def context_node(self):
301
+ cdef xmlNode* c_node
302
+ if self._xpathCtxt is NULL:
303
+ raise XPathError, \
304
+ "XPath context is only usable during the evaluation"
305
+ c_node = self._xpathCtxt.node
306
+ if c_node is NULL:
307
+ raise XPathError, "no context node"
308
+ if c_node.doc != self._xpathCtxt.doc:
309
+ raise XPathError, \
310
+ "document-external context nodes are not supported"
311
+ if self._doc is None:
312
+ raise XPathError, "document context is missing"
313
+ return _elementFactory(self._doc, c_node)
314
+
315
+ @property
316
+ def eval_context(self):
317
+ if self._eval_context_dict is None:
318
+ self._eval_context_dict = {}
319
+ return self._eval_context_dict
320
+
321
+ # Python reference keeping during XPath function evaluation
322
+
323
+ @cython.final
324
+ cdef _release_temp_refs(self):
325
+ "Free temporarily referenced objects from this context."
326
+ self._temp_refs.clear()
327
+ self._temp_documents.clear()
328
+
329
+ @cython.final
330
+ cdef _hold(self, obj):
331
+ """A way to temporarily hold references to nodes in the evaluator.
332
+
333
+ This is needed because otherwise nodes created in XPath extension
334
+ functions would be reference counted too soon, during the XPath
335
+ evaluation. This is most important in the case of exceptions.
336
+ """
337
+ cdef _Element element
338
+ if isinstance(obj, _Element):
339
+ self._temp_refs.add(obj)
340
+ self._temp_documents.add((<_Element>obj)._doc)
341
+ return
342
+ elif _isString(obj) or not python.PySequence_Check(obj):
343
+ return
344
+ for o in obj:
345
+ if isinstance(o, _Element):
346
+ #print "Holding element:", <int>element._c_node
347
+ self._temp_refs.add(o)
348
+ #print "Holding document:", <int>element._doc._c_doc
349
+ self._temp_documents.add((<_Element>o)._doc)
350
+
351
+ @cython.final
352
+ cdef _Document _findDocumentForNode(self, xmlNode* c_node):
353
+ """If an XPath expression returns an element from a different
354
+ document than the current context document, we call this to
355
+ see if it was possibly created by an extension and is a known
356
+ document instance.
357
+ """
358
+ cdef _Document doc
359
+ for doc in self._temp_documents:
360
+ if doc is not None and doc._c_doc is c_node.doc:
361
+ return doc
362
+ return None
363
+
364
+
365
+ # libxml2 keeps these error messages in a static array in its code
366
+ # and doesn't give us access to them ...
367
+
368
+ cdef tuple LIBXML2_XPATH_ERROR_MESSAGES = (
369
+ b"Ok",
370
+ b"Number encoding",
371
+ b"Unfinished literal",
372
+ b"Start of literal",
373
+ b"Expected $ for variable reference",
374
+ b"Undefined variable",
375
+ b"Invalid predicate",
376
+ b"Invalid expression",
377
+ b"Missing closing curly brace",
378
+ b"Unregistered function",
379
+ b"Invalid operand",
380
+ b"Invalid type",
381
+ b"Invalid number of arguments",
382
+ b"Invalid context size",
383
+ b"Invalid context position",
384
+ b"Memory allocation error",
385
+ b"Syntax error",
386
+ b"Resource error",
387
+ b"Sub resource error",
388
+ b"Undefined namespace prefix",
389
+ b"Encoding error",
390
+ b"Char out of XML range",
391
+ b"Invalid or incomplete context",
392
+ b"Stack usage error",
393
+ b"Forbidden variable\n",
394
+ b"?? Unknown error ??\n",
395
+ )
396
+
397
+ cdef void _forwardXPathError(void* c_ctxt, const xmlerror.xmlError* c_error) noexcept with gil:
398
+ cdef xmlerror.xmlError error
399
+ cdef int xpath_code
400
+ if c_error.message is not NULL:
401
+ error.message = c_error.message
402
+ else:
403
+ xpath_code = c_error.code - xmlerror.XML_XPATH_EXPRESSION_OK
404
+ if 0 <= xpath_code < len(LIBXML2_XPATH_ERROR_MESSAGES):
405
+ error.message = _cstr(LIBXML2_XPATH_ERROR_MESSAGES[xpath_code])
406
+ else:
407
+ error.message = b"unknown error"
408
+ error.domain = c_error.domain
409
+ error.code = c_error.code
410
+ error.level = c_error.level
411
+ error.line = c_error.line
412
+ error.int2 = c_error.int1 # column
413
+ error.file = c_error.file
414
+ error.node = NULL
415
+
416
+ (<_BaseContext>c_ctxt)._error_log._receive(&error)
417
+
418
+ cdef void _receiveXPathError(void* c_context, const xmlerror.xmlError* error) noexcept nogil:
419
+ if not __DEBUG:
420
+ return
421
+ if c_context is NULL:
422
+ _forwardError(NULL, error)
423
+ else:
424
+ _forwardXPathError(c_context, error)
425
+
426
+
427
+ def Extension(module, function_mapping=None, *, ns=None):
428
+ """Extension(module, function_mapping=None, ns=None)
429
+
430
+ Build a dictionary of extension functions from the functions
431
+ defined in a module or the methods of an object.
432
+
433
+ As second argument, you can pass an additional mapping of
434
+ attribute names to XPath function names, or a list of function
435
+ names that should be taken.
436
+
437
+ The ``ns`` keyword argument accepts a namespace URI for the XPath
438
+ functions.
439
+ """
440
+ cdef dict functions = {}
441
+ if isinstance(function_mapping, dict):
442
+ for function_name, xpath_name in function_mapping.items():
443
+ functions[(ns, xpath_name)] = getattr(module, function_name)
444
+ else:
445
+ if function_mapping is None:
446
+ function_mapping = [ name for name in dir(module)
447
+ if not name.startswith('_') ]
448
+ for function_name in function_mapping:
449
+ functions[(ns, function_name)] = getattr(module, function_name)
450
+ return functions
451
+
452
+ ################################################################################
453
+ # EXSLT regexp implementation
454
+
455
+ @cython.final
456
+ @cython.internal
457
+ cdef class _ExsltRegExp:
458
+ cdef dict _compile_map
459
+ def __cinit__(self):
460
+ self._compile_map = {}
461
+
462
+ cdef _make_string(self, value):
463
+ if _isString(value):
464
+ return value
465
+ elif isinstance(value, list):
466
+ # node set: take recursive text concatenation of first element
467
+ if python.PyList_GET_SIZE(value) == 0:
468
+ return ''
469
+ firstnode = value[0]
470
+ if _isString(firstnode):
471
+ return firstnode
472
+ elif isinstance(firstnode, _Element):
473
+ c_text = tree.xmlNodeGetContent((<_Element>firstnode)._c_node)
474
+ if c_text is NULL:
475
+ raise MemoryError()
476
+ try:
477
+ return funicode(c_text)
478
+ finally:
479
+ tree.xmlFree(c_text)
480
+ else:
481
+ return unicode(firstnode)
482
+ else:
483
+ return unicode(value)
484
+
485
+ cdef _compile(self, rexp, ignore_case):
486
+ cdef python.PyObject* c_result
487
+ rexp = self._make_string(rexp)
488
+ key = (rexp, ignore_case)
489
+ c_result = python.PyDict_GetItem(self._compile_map, key)
490
+ if c_result is not NULL:
491
+ return <object>c_result
492
+ py_flags = re.UNICODE
493
+ if ignore_case:
494
+ py_flags = py_flags | re.IGNORECASE
495
+ rexp_compiled = re.compile(rexp, py_flags)
496
+ self._compile_map[key] = rexp_compiled
497
+ return rexp_compiled
498
+
499
+ def test(self, ctxt, s, rexp, flags=''):
500
+ flags = self._make_string(flags)
501
+ s = self._make_string(s)
502
+ rexpc = self._compile(rexp, 'i' in flags)
503
+ if rexpc.search(s) is None:
504
+ return False
505
+ else:
506
+ return True
507
+
508
+ def match(self, ctxt, s, rexp, flags=''):
509
+ cdef list result_list
510
+ flags = self._make_string(flags)
511
+ s = self._make_string(s)
512
+ rexpc = self._compile(rexp, 'i' in flags)
513
+ if 'g' in flags:
514
+ results = rexpc.findall(s)
515
+ if not results:
516
+ return ()
517
+ else:
518
+ result = rexpc.search(s)
519
+ if not result:
520
+ return ()
521
+ results = [ result.group() ]
522
+ results.extend( result.groups('') )
523
+ result_list = []
524
+ root = Element('matches')
525
+ for s_match in results:
526
+ if python.PyTuple_CheckExact(s_match):
527
+ s_match = ''.join(s_match)
528
+ elem = SubElement(root, 'match')
529
+ elem.text = s_match
530
+ result_list.append(elem)
531
+ return result_list
532
+
533
+ def replace(self, ctxt, s, rexp, flags, replacement):
534
+ replacement = self._make_string(replacement)
535
+ flags = self._make_string(flags)
536
+ s = self._make_string(s)
537
+ rexpc = self._compile(rexp, 'i' in flags)
538
+ count: object = 0 if 'g' in flags else 1
539
+ return rexpc.sub(replacement, s, count)
540
+
541
+ cdef _register_in_context(self, _BaseContext context):
542
+ ns = b"http://exslt.org/regular-expressions"
543
+ context._addLocalExtensionFunction(ns, b"test", self.test)
544
+ context._addLocalExtensionFunction(ns, b"match", self.match)
545
+ context._addLocalExtensionFunction(ns, b"replace", self.replace)
546
+
547
+
548
+ ################################################################################
549
+ # helper functions
550
+
551
+ cdef xpath.xmlXPathObject* _wrapXPathObject(object obj, _Document doc,
552
+ _BaseContext context) except NULL:
553
+ cdef xpath.xmlNodeSet* resultSet
554
+ cdef _Element fake_node = None
555
+ cdef xmlNode* c_node
556
+
557
+ if isinstance(obj, unicode):
558
+ obj = _utf8(obj)
559
+ if isinstance(obj, bytes):
560
+ # libxml2 copies the string value
561
+ return xpath.xmlXPathNewCString(_cstr(obj))
562
+ if isinstance(obj, bool):
563
+ return xpath.xmlXPathNewBoolean(obj)
564
+ if python.PyNumber_Check(obj):
565
+ return xpath.xmlXPathNewFloat(obj)
566
+ if obj is None:
567
+ resultSet = xpath.xmlXPathNodeSetCreate(NULL)
568
+ elif isinstance(obj, _Element):
569
+ resultSet = xpath.xmlXPathNodeSetCreate((<_Element>obj)._c_node)
570
+ elif python.PySequence_Check(obj):
571
+ resultSet = xpath.xmlXPathNodeSetCreate(NULL)
572
+ try:
573
+ for value in obj:
574
+ if isinstance(value, _Element):
575
+ if context is not None:
576
+ context._hold(value)
577
+ xpath.xmlXPathNodeSetAdd(resultSet, (<_Element>value)._c_node)
578
+ else:
579
+ if context is None or doc is None:
580
+ raise XPathResultError, \
581
+ f"Non-Element values not supported at this point - got {value!r}"
582
+ # support strings by appending text nodes to an Element
583
+ if isinstance(value, unicode):
584
+ value = _utf8(value)
585
+ if isinstance(value, bytes):
586
+ if fake_node is None:
587
+ fake_node = _makeElement("text-root", NULL, doc, None,
588
+ None, None, None, None, None)
589
+ context._hold(fake_node)
590
+ else:
591
+ # append a comment node to keep the text nodes separate
592
+ c_node = tree.xmlNewDocComment(doc._c_doc, <unsigned char*>"")
593
+ if c_node is NULL:
594
+ raise MemoryError()
595
+ tree.xmlAddChild(fake_node._c_node, c_node)
596
+ context._hold(value)
597
+ c_node = tree.xmlNewDocText(doc._c_doc, _xcstr(value))
598
+ if c_node is NULL:
599
+ raise MemoryError()
600
+ tree.xmlAddChild(fake_node._c_node, c_node)
601
+ xpath.xmlXPathNodeSetAdd(resultSet, c_node)
602
+ else:
603
+ raise XPathResultError, \
604
+ f"This is not a supported node-set result: {value!r}"
605
+ except:
606
+ xpath.xmlXPathFreeNodeSet(resultSet)
607
+ raise
608
+ else:
609
+ raise XPathResultError, f"Unknown return type: {python._fqtypename(obj).decode('utf8')}"
610
+ return xpath.xmlXPathWrapNodeSet(resultSet)
611
+
612
+ cdef object _unwrapXPathObject(xpath.xmlXPathObject* xpathObj,
613
+ _Document doc, _BaseContext context):
614
+ if xpathObj.type == xpath.XPATH_UNDEFINED:
615
+ raise XPathResultError, "Undefined xpath result"
616
+ elif xpathObj.type == xpath.XPATH_NODESET:
617
+ return _createNodeSetResult(xpathObj, doc, context)
618
+ elif xpathObj.type == xpath.XPATH_BOOLEAN:
619
+ return xpathObj.boolval
620
+ elif xpathObj.type == xpath.XPATH_NUMBER:
621
+ return xpathObj.floatval
622
+ elif xpathObj.type == xpath.XPATH_STRING:
623
+ stringval = funicode(xpathObj.stringval)
624
+ if context._build_smart_strings:
625
+ stringval = _elementStringResultFactory(
626
+ stringval, None, None, False)
627
+ return stringval
628
+ elif xpathObj.type == xpath.XPATH_POINT:
629
+ raise NotImplementedError, "XPATH_POINT"
630
+ elif xpathObj.type == xpath.XPATH_RANGE:
631
+ raise NotImplementedError, "XPATH_RANGE"
632
+ elif xpathObj.type == xpath.XPATH_LOCATIONSET:
633
+ raise NotImplementedError, "XPATH_LOCATIONSET"
634
+ elif xpathObj.type == xpath.XPATH_USERS:
635
+ raise NotImplementedError, "XPATH_USERS"
636
+ elif xpathObj.type == xpath.XPATH_XSLT_TREE:
637
+ return _createNodeSetResult(xpathObj, doc, context)
638
+ else:
639
+ raise XPathResultError, f"Unknown xpath result {xpathObj.type}"
640
+
641
+ cdef object _createNodeSetResult(xpath.xmlXPathObject* xpathObj, _Document doc,
642
+ _BaseContext context):
643
+ cdef xmlNode* c_node
644
+ cdef int i
645
+ cdef list result
646
+ result = []
647
+ if xpathObj.nodesetval is NULL:
648
+ return result
649
+ for i in range(xpathObj.nodesetval.nodeNr):
650
+ c_node = xpathObj.nodesetval.nodeTab[i]
651
+ _unpackNodeSetEntry(result, c_node, doc, context,
652
+ xpathObj.type == xpath.XPATH_XSLT_TREE)
653
+ return result
654
+
655
+ cdef _unpackNodeSetEntry(list results, xmlNode* c_node, _Document doc,
656
+ _BaseContext context, bint is_fragment):
657
+ cdef xmlNode* c_child
658
+ if _isElement(c_node):
659
+ if c_node.doc != doc._c_doc and c_node.doc._private is NULL:
660
+ # XXX: works, but maybe not always the right thing to do?
661
+ # XPath: only runs when extensions create or copy trees
662
+ # -> we store Python refs to these, so that is OK
663
+ # XSLT: can it leak when merging trees from multiple sources?
664
+ c_node = tree.xmlDocCopyNode(c_node, doc._c_doc, 1)
665
+ # FIXME: call _instantiateElementFromXPath() instead?
666
+ results.append(
667
+ _fakeDocElementFactory(doc, c_node))
668
+ elif c_node.type == tree.XML_TEXT_NODE or \
669
+ c_node.type == tree.XML_CDATA_SECTION_NODE or \
670
+ c_node.type == tree.XML_ATTRIBUTE_NODE:
671
+ results.append(
672
+ _buildElementStringResult(doc, c_node, context))
673
+ elif c_node.type == tree.XML_NAMESPACE_DECL:
674
+ results.append( (funicodeOrNone((<xmlNs*>c_node).prefix),
675
+ funicodeOrNone((<xmlNs*>c_node).href)) )
676
+ elif c_node.type == tree.XML_DOCUMENT_NODE or \
677
+ c_node.type == tree.XML_HTML_DOCUMENT_NODE:
678
+ # ignored for everything but result tree fragments
679
+ if is_fragment:
680
+ c_child = c_node.children
681
+ while c_child is not NULL:
682
+ _unpackNodeSetEntry(results, c_child, doc, context, 0)
683
+ c_child = c_child.next
684
+ elif c_node.type == tree.XML_XINCLUDE_START or \
685
+ c_node.type == tree.XML_XINCLUDE_END:
686
+ pass
687
+ else:
688
+ raise NotImplementedError, \
689
+ f"Not yet implemented result node type: {c_node.type}"
690
+
691
+ cdef void _freeXPathObject(xpath.xmlXPathObject* xpathObj) noexcept:
692
+ """Free the XPath object, but *never* free the *content* of node sets.
693
+ Python dealloc will do that for us.
694
+ """
695
+ if xpathObj.nodesetval is not NULL:
696
+ xpath.xmlXPathFreeNodeSet(xpathObj.nodesetval)
697
+ xpathObj.nodesetval = NULL
698
+ xpath.xmlXPathFreeObject(xpathObj)
699
+
700
+ cdef _Element _instantiateElementFromXPath(xmlNode* c_node, _Document doc,
701
+ _BaseContext context):
702
+ # NOTE: this may copy the element - only call this when it can't leak
703
+ if c_node.doc != doc._c_doc and c_node.doc._private is NULL:
704
+ # not from the context document and not from a fake document
705
+ # either => may still be from a known document, e.g. one
706
+ # created by an extension function
707
+ node_doc = context._findDocumentForNode(c_node)
708
+ if node_doc is None:
709
+ # not from a known document at all! => can only make a
710
+ # safety copy here
711
+ c_node = tree.xmlDocCopyNode(c_node, doc._c_doc, 1)
712
+ else:
713
+ doc = node_doc
714
+ return _fakeDocElementFactory(doc, c_node)
715
+
716
+ ################################################################################
717
+ # special str/unicode subclasses
718
+
719
+ @cython.final
720
+ cdef class _ElementUnicodeResult(unicode):
721
+ cdef _Element _parent
722
+ cdef readonly object attrname
723
+ cdef readonly bint is_tail
724
+
725
+ def getparent(self):
726
+ return self._parent
727
+
728
+ @property
729
+ def is_text(self):
730
+ return self._parent is not None and not (self.is_tail or self.attrname is not None)
731
+
732
+ @property
733
+ def is_attribute(self):
734
+ return self.attrname is not None
735
+
736
+ cdef object _elementStringResultFactory(string_value, _Element parent,
737
+ attrname, bint is_tail):
738
+ result = _ElementUnicodeResult(string_value)
739
+ result._parent = parent
740
+ result.is_tail = is_tail
741
+ result.attrname = attrname
742
+ return result
743
+
744
+ cdef object _buildElementStringResult(_Document doc, xmlNode* c_node,
745
+ _BaseContext context):
746
+ cdef _Element parent = None
747
+ cdef object attrname = None
748
+ cdef xmlNode* c_element
749
+ cdef bint is_tail
750
+
751
+ if c_node.type == tree.XML_ATTRIBUTE_NODE:
752
+ attrname = _namespacedName(c_node)
753
+ is_tail = 0
754
+ s = tree.xmlNodeGetContent(c_node)
755
+ try:
756
+ value = funicode(s)
757
+ finally:
758
+ tree.xmlFree(s)
759
+ c_element = NULL
760
+ else:
761
+ #assert c_node.type == tree.XML_TEXT_NODE or c_node.type == tree.XML_CDATA_SECTION_NODE, "invalid node type"
762
+ # may be tail text or normal text
763
+ value = funicode(c_node.content)
764
+ c_element = _previousElement(c_node)
765
+ is_tail = c_element is not NULL
766
+
767
+ if not context._build_smart_strings:
768
+ return value
769
+
770
+ if c_element is NULL:
771
+ # non-tail text or attribute text
772
+ c_element = c_node.parent
773
+ while c_element is not NULL and not _isElement(c_element):
774
+ c_element = c_element.parent
775
+
776
+ if c_element is not NULL:
777
+ parent = _instantiateElementFromXPath(c_element, doc, context)
778
+
779
+ return _elementStringResultFactory(
780
+ value, parent, attrname, is_tail)
781
+
782
+ ################################################################################
783
+ # callbacks for XPath/XSLT extension functions
784
+
785
+ cdef void _extension_function_call(_BaseContext context, function,
786
+ xpath.xmlXPathParserContext* ctxt, int nargs) noexcept:
787
+ cdef _Document doc
788
+ cdef xpath.xmlXPathObject* obj
789
+ cdef list args
790
+ cdef int i
791
+ doc = context._doc
792
+ try:
793
+ args = []
794
+ for i in range(nargs):
795
+ obj = xpath.valuePop(ctxt)
796
+ o = _unwrapXPathObject(obj, doc, context)
797
+ _freeXPathObject(obj)
798
+ args.append(o)
799
+ args.reverse()
800
+
801
+ res = function(context, *args)
802
+ # wrap result for XPath consumption
803
+ obj = _wrapXPathObject(res, doc, context)
804
+ # prevent Python from deallocating elements handed to libxml2
805
+ context._hold(res)
806
+ xpath.valuePush(ctxt, obj)
807
+ except:
808
+ xpath.xmlXPathErr(ctxt, xpath.XPATH_EXPR_ERROR)
809
+ context._exc._store_raised()
810
+ finally:
811
+ return # swallow any further exceptions
812
+
813
+ # lookup the function by name and call it
814
+
815
+ cdef void _xpath_function_call(xpath.xmlXPathParserContext* ctxt,
816
+ int nargs) noexcept with gil:
817
+ cdef _BaseContext context
818
+ cdef xpath.xmlXPathContext* rctxt = ctxt.context
819
+ context = <_BaseContext> rctxt.userData
820
+ try:
821
+ function = context._find_cached_function(rctxt.functionURI, rctxt.function)
822
+ if function is not None:
823
+ _extension_function_call(context, function, ctxt, nargs)
824
+ else:
825
+ xpath.xmlXPathErr(ctxt, xpath.XPATH_UNKNOWN_FUNC_ERROR)
826
+ context._exc._store_exception(XPathFunctionError(
827
+ f"XPath function '{_namespacedNameFromNsName(rctxt.functionURI, rctxt.function)}' not found"))
828
+ except:
829
+ # may not be the right error, but we need to tell libxml2 *something*
830
+ xpath.xmlXPathErr(ctxt, xpath.XPATH_UNKNOWN_FUNC_ERROR)
831
+ context._exc._store_raised()
832
+ finally:
833
+ return # swallow any further exceptions
llmeval-env/lib/python3.10/site-packages/lxml/lxml.etree.h ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Generated by Cython 3.0.10 */
2
+
3
+ #ifndef __PYX_HAVE__lxml__etree
4
+ #define __PYX_HAVE__lxml__etree
5
+
6
+ #include "Python.h"
7
+ struct LxmlDocument;
8
+ struct LxmlElement;
9
+ struct LxmlElementTree;
10
+ struct LxmlElementTagMatcher;
11
+ struct LxmlElementIterator;
12
+ struct LxmlElementBase;
13
+ struct LxmlElementClassLookup;
14
+ struct LxmlFallbackElementClassLookup;
15
+
16
+ /* "lxml/etree.pyx":333
17
+ *
18
+ * # type of a function that steps from node to node
19
+ * ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*) # <<<<<<<<<<<<<<
20
+ *
21
+ *
22
+ */
23
+ typedef xmlNode *(*_node_to_node_function)(xmlNode *);
24
+
25
+ /* "lxml/etree.pyx":349
26
+ * @cython.final
27
+ * @cython.freelist(8)
28
+ * cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]: # <<<<<<<<<<<<<<
29
+ * """Internal base class to reference a libxml document.
30
+ *
31
+ */
32
+ struct LxmlDocument {
33
+ PyObject_HEAD
34
+ struct __pyx_vtabstruct_4lxml_5etree__Document *__pyx_vtab;
35
+ int _ns_counter;
36
+ PyObject *_prefix_tail;
37
+ xmlDoc *_c_doc;
38
+ struct __pyx_obj_4lxml_5etree__BaseParser *_parser;
39
+ };
40
+
41
+ /* "lxml/etree.pyx":698
42
+ *
43
+ * @cython.no_gc_clear
44
+ * cdef public class _Element [ type LxmlElementType, object LxmlElement ]: # <<<<<<<<<<<<<<
45
+ * """Element class.
46
+ *
47
+ */
48
+ struct LxmlElement {
49
+ PyObject_HEAD
50
+ struct LxmlDocument *_doc;
51
+ xmlNode *_c_node;
52
+ PyObject *_tag;
53
+ };
54
+
55
+ /* "lxml/etree.pyx":1872
56
+ *
57
+ *
58
+ * cdef public class _ElementTree [ type LxmlElementTreeType, # <<<<<<<<<<<<<<
59
+ * object LxmlElementTree ]:
60
+ * cdef _Document _doc
61
+ */
62
+ struct LxmlElementTree {
63
+ PyObject_HEAD
64
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTree *__pyx_vtab;
65
+ struct LxmlDocument *_doc;
66
+ struct LxmlElement *_context_node;
67
+ };
68
+
69
+ /* "lxml/etree.pyx":2646
70
+ *
71
+ *
72
+ * cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher, # <<<<<<<<<<<<<<
73
+ * type LxmlElementTagMatcherType ]:
74
+ * """
75
+ */
76
+ struct LxmlElementTagMatcher {
77
+ PyObject_HEAD
78
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTagMatcher *__pyx_vtab;
79
+ PyObject *_pystrings;
80
+ int _node_type;
81
+ char *_href;
82
+ char *_name;
83
+ };
84
+
85
+ /* "lxml/etree.pyx":2677
86
+ * self._name = NULL
87
+ *
88
+ * cdef public class _ElementIterator(_ElementTagMatcher) [ # <<<<<<<<<<<<<<
89
+ * object LxmlElementIterator, type LxmlElementIteratorType ]:
90
+ * """
91
+ */
92
+ struct LxmlElementIterator {
93
+ struct LxmlElementTagMatcher __pyx_base;
94
+ struct LxmlElement *_node;
95
+ _node_to_node_function _next_element;
96
+ };
97
+
98
+ /* "src/lxml/classlookup.pxi":6
99
+ * # Custom Element classes
100
+ *
101
+ * cdef public class ElementBase(_Element) [ type LxmlElementBaseType, # <<<<<<<<<<<<<<
102
+ * object LxmlElementBase ]:
103
+ * """ElementBase(*children, attrib=None, nsmap=None, **_extra)
104
+ */
105
+ struct LxmlElementBase {
106
+ struct LxmlElement __pyx_base;
107
+ };
108
+
109
+ /* "src/lxml/classlookup.pxi":210
110
+ * # Element class lookup
111
+ *
112
+ * ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*) # <<<<<<<<<<<<<<
113
+ *
114
+ * # class to store element class lookup functions
115
+ */
116
+ typedef PyObject *(*_element_class_lookup_function)(PyObject *, struct LxmlDocument *, xmlNode *);
117
+
118
+ /* "src/lxml/classlookup.pxi":213
119
+ *
120
+ * # class to store element class lookup functions
121
+ * cdef public class ElementClassLookup [ type LxmlElementClassLookupType, # <<<<<<<<<<<<<<
122
+ * object LxmlElementClassLookup ]:
123
+ * """ElementClassLookup(self)
124
+ */
125
+ struct LxmlElementClassLookup {
126
+ PyObject_HEAD
127
+ _element_class_lookup_function _lookup_function;
128
+ };
129
+
130
+ /* "src/lxml/classlookup.pxi":221
131
+ *
132
+ *
133
+ * cdef public class FallbackElementClassLookup(ElementClassLookup) \ # <<<<<<<<<<<<<<
134
+ * [ type LxmlFallbackElementClassLookupType,
135
+ * object LxmlFallbackElementClassLookup ]:
136
+ */
137
+ struct LxmlFallbackElementClassLookup {
138
+ struct LxmlElementClassLookup __pyx_base;
139
+ struct __pyx_vtabstruct_4lxml_5etree_FallbackElementClassLookup *__pyx_vtab;
140
+ struct LxmlElementClassLookup *fallback;
141
+ _element_class_lookup_function _fallback_function;
142
+ };
143
+
144
+ #ifndef __PYX_HAVE_API__lxml__etree
145
+
146
+ #ifdef CYTHON_EXTERN_C
147
+ #undef __PYX_EXTERN_C
148
+ #define __PYX_EXTERN_C CYTHON_EXTERN_C
149
+ #elif defined(__PYX_EXTERN_C)
150
+ #ifdef _MSC_VER
151
+ #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
152
+ #else
153
+ #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
154
+ #endif
155
+ #else
156
+ #ifdef __cplusplus
157
+ #define __PYX_EXTERN_C extern "C"
158
+ #else
159
+ #define __PYX_EXTERN_C extern
160
+ #endif
161
+ #endif
162
+
163
+ #ifndef DL_IMPORT
164
+ #define DL_IMPORT(_T) _T
165
+ #endif
166
+
167
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlDocumentType;
168
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementType;
169
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTreeType;
170
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTagMatcherType;
171
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementIteratorType;
172
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementBaseType;
173
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementClassLookupType;
174
+ __PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlFallbackElementClassLookupType;
175
+
176
+ __PYX_EXTERN_C struct LxmlElement *deepcopyNodeToDocument(struct LxmlDocument *, xmlNode *);
177
+ __PYX_EXTERN_C struct LxmlElementTree *elementTreeFactory(struct LxmlElement *);
178
+ __PYX_EXTERN_C struct LxmlElementTree *newElementTree(struct LxmlElement *, PyObject *);
179
+ __PYX_EXTERN_C struct LxmlElementTree *adoptExternalDocument(xmlDoc *, PyObject *, int);
180
+ __PYX_EXTERN_C struct LxmlElement *elementFactory(struct LxmlDocument *, xmlNode *);
181
+ __PYX_EXTERN_C struct LxmlElement *makeElement(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
182
+ __PYX_EXTERN_C struct LxmlElement *makeSubElement(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
183
+ __PYX_EXTERN_C void setElementClassLookupFunction(_element_class_lookup_function, PyObject *);
184
+ __PYX_EXTERN_C PyObject *lookupDefaultElementClass(PyObject *, PyObject *, xmlNode *);
185
+ __PYX_EXTERN_C PyObject *lookupNamespaceElementClass(PyObject *, PyObject *, xmlNode *);
186
+ __PYX_EXTERN_C PyObject *callLookupFallback(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *);
187
+ __PYX_EXTERN_C int tagMatches(xmlNode *, const xmlChar *, const xmlChar *);
188
+ __PYX_EXTERN_C struct LxmlDocument *documentOrRaise(PyObject *);
189
+ __PYX_EXTERN_C struct LxmlElement *rootNodeOrRaise(PyObject *);
190
+ __PYX_EXTERN_C int hasText(xmlNode *);
191
+ __PYX_EXTERN_C int hasTail(xmlNode *);
192
+ __PYX_EXTERN_C PyObject *textOf(xmlNode *);
193
+ __PYX_EXTERN_C PyObject *tailOf(xmlNode *);
194
+ __PYX_EXTERN_C int setNodeText(xmlNode *, PyObject *);
195
+ __PYX_EXTERN_C int setTailText(xmlNode *, PyObject *);
196
+ __PYX_EXTERN_C PyObject *attributeValue(xmlNode *, xmlAttr *);
197
+ __PYX_EXTERN_C PyObject *attributeValueFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
198
+ __PYX_EXTERN_C PyObject *getAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
199
+ __PYX_EXTERN_C PyObject *iterattributes(struct LxmlElement *, int);
200
+ __PYX_EXTERN_C PyObject *collectAttributes(xmlNode *, int);
201
+ __PYX_EXTERN_C int setAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
202
+ __PYX_EXTERN_C int delAttribute(struct LxmlElement *, PyObject *);
203
+ __PYX_EXTERN_C int delAttributeFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
204
+ __PYX_EXTERN_C int hasChild(xmlNode *);
205
+ __PYX_EXTERN_C xmlNode *findChild(xmlNode *, Py_ssize_t);
206
+ __PYX_EXTERN_C xmlNode *findChildForwards(xmlNode *, Py_ssize_t);
207
+ __PYX_EXTERN_C xmlNode *findChildBackwards(xmlNode *, Py_ssize_t);
208
+ __PYX_EXTERN_C xmlNode *nextElement(xmlNode *);
209
+ __PYX_EXTERN_C xmlNode *previousElement(xmlNode *);
210
+ __PYX_EXTERN_C void appendChild(struct LxmlElement *, struct LxmlElement *);
211
+ __PYX_EXTERN_C int appendChildToElement(struct LxmlElement *, struct LxmlElement *);
212
+ __PYX_EXTERN_C PyObject *pyunicode(const xmlChar *);
213
+ __PYX_EXTERN_C PyObject *utf8(PyObject *);
214
+ __PYX_EXTERN_C PyObject *getNsTag(PyObject *);
215
+ __PYX_EXTERN_C PyObject *getNsTagWithEmptyNs(PyObject *);
216
+ __PYX_EXTERN_C PyObject *namespacedName(xmlNode *);
217
+ __PYX_EXTERN_C PyObject *namespacedNameFromNsName(const xmlChar *, const xmlChar *);
218
+ __PYX_EXTERN_C void iteratorStoreNext(struct LxmlElementIterator *, struct LxmlElement *);
219
+ __PYX_EXTERN_C void initTagMatch(struct LxmlElementTagMatcher *, PyObject *);
220
+ __PYX_EXTERN_C xmlNs *findOrBuildNodeNsPrefix(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *);
221
+
222
+ #endif /* !__PYX_HAVE_API__lxml__etree */
223
+
224
+ /* WARNING: the interface of the module init function changed in CPython 3.5. */
225
+ /* It now returns a PyModuleDef instance instead of a PyModule instance. */
226
+
227
+ #if PY_MAJOR_VERSION < 3
228
+ PyMODINIT_FUNC initetree(void);
229
+ #else
230
+ /* WARNING: Use PyImport_AppendInittab("etree", PyInit_etree) instead of calling PyInit_etree directly from Python 3.5 */
231
+ PyMODINIT_FUNC PyInit_etree(void);
232
+
233
+ #if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L))
234
+ #if defined(__cplusplus) && __cplusplus >= 201402L
235
+ [[deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")]] inline
236
+ #elif defined(__GNUC__) || defined(__clang__)
237
+ __attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly."), __unused__)) __inline__
238
+ #elif defined(_MSC_VER)
239
+ __declspec(deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")) __inline
240
+ #endif
241
+ static PyObject* __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyObject* res) {
242
+ return res;
243
+ }
244
+ #define PyInit_etree() __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyInit_etree())
245
+ #endif
246
+ #endif
247
+
248
+ #endif /* !__PYX_HAVE__lxml__etree */
llmeval-env/lib/python3.10/site-packages/lxml/nsclasses.pxi ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # module-level API for namespace implementations
2
+
3
# Root of the exception hierarchy for the registry machinery below.
cdef class LxmlRegistryError(LxmlError):
    """Base class of lxml registry errors.
    """
6
+
7
# Raised when an object cannot be registered in a namespace registry,
# e.g. a non-ElementBase class or a non-callable extension function
# (see _ClassNamespaceRegistry / _FunctionNamespaceRegistry below).
cdef class NamespaceRegistryError(LxmlRegistryError):
    """Error registering a namespace extension.
    """
10
+
11
+
12
@cython.internal
cdef class _NamespaceRegistry:
    "Dictionary-like namespace registry"
    cdef object _ns_uri        # original namespace URI object (or None)
    cdef bytes _ns_uri_utf     # UTF-8 encoded URI; keeps the C buffer alive
    cdef dict _entries         # maps UTF-8 name (or None) -> registered object
    cdef char* _c_ns_uri_utf   # borrows the buffer of _ns_uri_utf (NULL == empty ns)

    def __cinit__(self, ns_uri):
        self._ns_uri = ns_uri
        if ns_uri is None:
            # empty namespace
            self._ns_uri_utf = None
            self._c_ns_uri_utf = NULL
        else:
            self._ns_uri_utf = _utf8(ns_uri)
            self._c_ns_uri_utf = _cstr(self._ns_uri_utf)
        self._entries = {}

    def update(self, class_dict_iterable):
        """update(self, class_dict_iterable)

        Forgivingly update the registry.

        ``class_dict_iterable`` may be a dict or some other iterable
        that yields (name, value) pairs.

        If a value does not match the required type for this registry,
        or if the name starts with '_', it will be silently discarded.
        This allows registrations at the module or class level using
        vars(), globals() etc."""
        if hasattr(class_dict_iterable, 'items'):
            class_dict_iterable = class_dict_iterable.items()
        for name, item in class_dict_iterable:
            # skip private names; type checking is left to __setitem__
            # of the concrete subclass
            if (name is None or name[:1] != '_') and callable(item):
                self[name] = item

    def __getitem__(self, name):
        if name is not None:
            # keys are stored UTF-8 encoded; None selects the default entry
            name = _utf8(name)
        return self._get(name)

    def __delitem__(self, name):
        if name is not None:
            name = _utf8(name)
        del self._entries[name]

    cdef object _get(self, object name):
        # C-level lookup by (already UTF-8 encoded) name.
        # PyDict_GetItem returns a borrowed reference or NULL (no exception).
        cdef python.PyObject* dict_result
        dict_result = python.PyDict_GetItem(self._entries, name)
        if dict_result is NULL:
            raise KeyError, "Name not registered."
        return <object>dict_result

    cdef object _getForString(self, char* name):
        # same as _get() but takes a C string key
        cdef python.PyObject* dict_result
        dict_result = python.PyDict_GetItem(self._entries, name)
        if dict_result is NULL:
            raise KeyError, "Name not registered."
        return <object>dict_result

    def __iter__(self):
        return iter(self._entries)

    def items(self):
        return list(self._entries.items())

    def iteritems(self):
        return iter(self._entries.items())

    def clear(self):
        self._entries.clear()

    def __call__(self, obj):
        # Usage as decorator:
        #   ns = lookup.get_namespace("...")
        #   @ns('abc')
        #   class element(ElementBase): pass
        #
        #   @ns
        #   class elementname(ElementBase): pass
        if obj is None or python._isString(obj):
            # @ns(None) or @ns('tag'): defer registration to the wrapper
            return partial(self.__deco, obj)
        # plain @ns decorator: register under the object's own name
        self[obj.__name__] = obj
        return obj

    def __deco(self, name, obj):
        # helper for the parametrised decorator form of __call__()
        self[name] = obj
        return obj
102
+
103
+
104
@cython.final
@cython.internal
cdef class _ClassNamespaceRegistry(_NamespaceRegistry):
    "Dictionary-like registry for namespace implementation classes"
    def __setitem__(self, name, item):
        # only ElementBase subclasses may serve as element implementations
        if not isinstance(item, type) or not issubclass(item, ElementBase):
            raise NamespaceRegistryError, \
                "Registered element classes must be subtypes of ElementBase"
        if name is not None:
            # keys are stored UTF-8 encoded; None registers the default class
            name = _utf8(name)
        self._entries[name] = item

    def __repr__(self):
        return "Namespace(%r)" % self._ns_uri
118
+
119
+
120
cdef class ElementNamespaceClassLookup(FallbackElementClassLookup):
    """ElementNamespaceClassLookup(self, fallback=None)

    Element class lookup scheme that searches the Element class in the
    Namespace registry.

    Usage:

    >>> lookup = ElementNamespaceClassLookup()
    >>> ns_elements = lookup.get_namespace("http://schema.org/Movie")

    >>> @ns_elements
    ... class movie(ElementBase):
    ...     "Element implementation for 'movie' tag (using class name) in schema namespace."

    >>> @ns_elements("movie")
    ... class MovieElement(ElementBase):
    ...     "Element implementation for 'movie' tag (explicit tag name) in schema namespace."
    """
    cdef dict _namespace_registries   # UTF-8 ns URI (or None) -> _ClassNamespaceRegistry

    def __cinit__(self):
        self._namespace_registries = {}

    def __init__(self, ElementClassLookup fallback=None):
        FallbackElementClassLookup.__init__(self, fallback)
        # C-level hook invoked for every element during tree building
        self._lookup_function = _find_nselement_class

    def get_namespace(self, ns_uri):
        """get_namespace(self, ns_uri)

        Retrieve the namespace object associated with the given URI.
        Pass None for the empty namespace.

        Creates a new namespace object if it does not yet exist."""
        if ns_uri:
            ns_utf = _utf8(ns_uri)
        else:
            # '' and None both mean the empty namespace
            ns_utf = None
        try:
            return self._namespace_registries[ns_utf]
        except KeyError:
            registry = self._namespace_registries[ns_utf] = \
                _ClassNamespaceRegistry(ns_uri)
            return registry
164
+
165
cdef object _find_nselement_class(state, _Document doc, xmlNode* c_node):
    # C-level lookup function installed by ElementNamespaceClassLookup;
    # 'state' is the lookup instance itself (or None before initialisation).
    cdef python.PyObject* dict_result
    cdef ElementNamespaceClassLookup lookup
    cdef _NamespaceRegistry registry
    if state is None:
        return _lookupDefaultElementClass(None, doc, c_node)

    lookup = <ElementNamespaceClassLookup>state
    if c_node.type != tree.XML_ELEMENT_NODE:
        # comments, PIs etc. never come from a namespace registry
        return _callLookupFallback(lookup, doc, c_node)

    # find the registry for the node's namespace (None == empty namespace);
    # PyDict_GetItem returns a borrowed reference or NULL without raising
    c_namespace_utf = _getNs(c_node)
    if c_namespace_utf is not NULL:
        dict_result = python.PyDict_GetItem(
            lookup._namespace_registries, <unsigned char*>c_namespace_utf)
    else:
        dict_result = python.PyDict_GetItem(
            lookup._namespace_registries, None)
    if dict_result is not NULL:
        registry = <_NamespaceRegistry>dict_result
        classes = registry._entries

        # first try the exact tag name ...
        if c_node.name is not NULL:
            dict_result = python.PyDict_GetItem(
                classes, <unsigned char*>c_node.name)
        else:
            dict_result = NULL

        # ... then the registry's default entry (registered under None)
        if dict_result is NULL:
            dict_result = python.PyDict_GetItem(classes, None)

        if dict_result is not NULL:
            return <object>dict_result
    return _callLookupFallback(lookup, doc, c_node)
199
+
200
+
201
################################################################################
# XPath extension functions

# global registry: UTF-8 ns URI (or None) -> _XPathFunctionNamespaceRegistry
cdef dict __FUNCTION_NAMESPACE_REGISTRIES
__FUNCTION_NAMESPACE_REGISTRIES = {}

def FunctionNamespace(ns_uri):
    """FunctionNamespace(ns_uri)

    Retrieve the function namespace object associated with the given
    URI.

    Creates a new one if it does not yet exist. A function namespace
    can only be used to register extension functions.

    Usage:

    >>> ns_functions = FunctionNamespace("http://schema.org/Movie")

    >>> @ns_functions # uses function name
    ... def add2(x):
    ...     return x + 2

    >>> @ns_functions("add3") # uses explicit name
    ... def add_three(x):
    ...     return x + 3
    """
    # '' and None both select the empty namespace
    ns_utf = _utf8(ns_uri) if ns_uri else None
    try:
        return __FUNCTION_NAMESPACE_REGISTRIES[ns_utf]
    except KeyError:
        registry = __FUNCTION_NAMESPACE_REGISTRIES[ns_utf] = \
            _XPathFunctionNamespaceRegistry(ns_uri)
        return registry
235
+
236
@cython.internal
cdef class _FunctionNamespaceRegistry(_NamespaceRegistry):
    # Registry base class for extension functions; the XPath subclass
    # below adds prefix handling.
    def __setitem__(self, name, item):
        if not callable(item):
            raise NamespaceRegistryError, \
                "Registered functions must be callable."
        if not name:
            # unlike element classes, functions have no default (None) entry
            raise ValueError, \
                "extensions must have non empty names"
        # keys are stored UTF-8 encoded
        self._entries[_utf8(name)] = item

    def __repr__(self):
        return "FunctionNamespace(%r)" % self._ns_uri
249
+
250
@cython.final
@cython.internal
cdef class _XPathFunctionNamespaceRegistry(_FunctionNamespaceRegistry):
    cdef object _prefix       # prefix as configured by the user (or None)
    cdef bytes _prefix_utf    # UTF-8 encoded prefix (or None)

    property prefix:
        "Namespace prefix for extension functions."
        def __del__(self):
            self._prefix = None # no prefix configured
            self._prefix_utf = None
        def __get__(self):
            # unset prefix is reported as the empty string
            if self._prefix is None:
                return ''
            else:
                return self._prefix
        def __set__(self, prefix):
            if prefix == '':
                prefix = None # empty prefix
            self._prefix_utf = _utf8(prefix) if prefix is not None else None
            self._prefix = prefix
271
+
272
cdef list _find_all_extension_prefixes():
    """Collect all (prefix, ns_uri) UTF-8 pairs of function namespaces
    that have both a prefix and a URI configured, for use by XSLT/XPath.
    """
    cdef _XPathFunctionNamespaceRegistry registry
    cdef list ns_prefixes = []
    for registry in __FUNCTION_NAMESPACE_REGISTRIES.itervalues():
        if registry._prefix_utf is not None and registry._ns_uri_utf is not None:
            ns_prefixes.append((registry._prefix_utf, registry._ns_uri_utf))
    return ns_prefixes
llmeval-env/lib/python3.10/site-packages/lxml/objectpath.pxi ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
################################################################################
# ObjectPath

# One compiled path segment: namespace href, tag name, sibling index.
# The pointers borrow the buffers of the Python byte strings in the
# compiled path list, which ObjectPath keeps alive in self._path.
ctypedef struct _ObjectPath:
    const_xmlChar* href
    const_xmlChar* name
    Py_ssize_t index


# sentinel: distinguishes "no default passed" from a default of None
cdef object _NO_DEFAULT = object()
11
+
12
+
13
cdef class ObjectPath:
    """ObjectPath(path)
    Immutable object that represents a compiled object path.

    Example for a path: 'root.child[1].{other}child[25]'
    """
    cdef readonly object find    # bound alias for __call__()
    cdef list _path              # parsed [(ns, name, index), ...] segments
    cdef object _path_str        # the path as a string, for __str__()
    cdef _ObjectPath* _c_path    # malloc'ed C array of segments
    cdef Py_ssize_t _path_len    # number of segments

    def __init__(self, path):
        if python._isString(path):
            self._path = _parse_object_path_string(path)
            self._path_str = path
        else:
            self._path = _parse_object_path_list(path)
            self._path_str = '.'.join(path)
        self._path_len = len(self._path)
        # the C array borrows string buffers kept alive by self._path
        self._c_path = _build_object_path_segments(self._path)
        self.find = self.__call__

    def __dealloc__(self):
        if self._c_path is not NULL:
            python.lxml_free(self._c_path)

    def __str__(self):
        return self._path_str

    def __call__(self, _Element root not None, *_default):
        """Follow the attribute path in the object structure and return the
        target attribute value.

        If it is not found, either returns a default value (if one was passed
        as second argument) or raises AttributeError.
        """
        if _default:
            if len(_default) > 1:
                raise TypeError, "invalid number of arguments: needs one or two"
            default = _default[0]
        else:
            default = _NO_DEFAULT
        return _find_object_path(root, self._c_path, self._path_len, default)

    def hasattr(self, _Element root not None):
        "hasattr(self, root)"
        try:
            _find_object_path(root, self._c_path, self._path_len, _NO_DEFAULT)
        except AttributeError:
            return False
        return True

    def setattr(self, _Element root not None, value):
        """setattr(self, root, value)

        Set the value of the target element in a subtree.

        If any of the children on the path does not exist, it is created.
        """
        # replace=1: overwrite an existing target element
        _create_object_path(root, self._c_path, self._path_len, 1, value)

    def addattr(self, _Element root not None, value):
        """addattr(self, root, value)

        Append a value to the target element in a subtree.

        If any of the children on the path does not exist, it is created.
        """
        # replace=0: append as an additional sibling
        _create_object_path(root, self._c_path, self._path_len, 0, value)
82
+
83
+
84
# Matches one path segment; groups: (dot, ns, name, index) for an
# optional leading '.', an optional '{namespace}' part, the tag name,
# and an optional '[index]' suffix (index may be negative).
cdef object __MATCH_PATH_SEGMENT = re.compile(
    r"(\.?)\s*(?:\{([^}]*)\})?\s*([^.{}\[\]\s]+)\s*(?:\[\s*([-0-9]+)\s*\])?",
    re.U).match

# segment used for a leading '.' => ignore the root element's own tag
cdef tuple _RELATIVE_PATH_SEGMENT = (None, None, 0)
89
+
90
+
91
cdef list _parse_object_path_string(_path):
    """Parse object path string into a (ns, name, index) list.
    """
    cdef bint has_dot
    cdef unicode path
    new_path = []
    # normalise the input to a unicode string first
    if isinstance(_path, bytes):
        path = (<bytes>_path).decode('ascii')
    elif type(_path) is not unicode:
        path = unicode(_path)
    else:
        path = _path
    path = path.strip()
    if path == '.':
        # path '.' addresses the root element itself
        return [_RELATIVE_PATH_SEGMENT]
    path_pos = 0
    while path:
        match = __MATCH_PATH_SEGMENT(path, path_pos)
        if match is None:
            break

        dot, ns, name, index = match.groups()
        index = int(index) if index else 0
        has_dot = dot == '.'
        if not new_path:
            if has_dot:
                # path '.child' => ignore root
                new_path.append(_RELATIVE_PATH_SEGMENT)
            elif index:
                raise ValueError, "index not allowed on root node"
        elif not has_dot:
            # every segment after the first must be '.'-separated
            raise ValueError, "invalid path"
        # store namespace and name UTF-8 encoded
        if ns is not None:
            ns = python.PyUnicode_AsUTF8String(ns)
        name = python.PyUnicode_AsUTF8String(name)
        new_path.append( (ns, name, index) )

        path_pos = match.end()
    if not new_path or len(path) > path_pos:
        # nothing parsed, or trailing garbage after the last segment
        raise ValueError, "invalid path"
    return new_path
132
+
133
+
134
cdef list _parse_object_path_list(path):
    """Parse object path sequence into a (ns, name, index) list.
    """
    new_path = []
    for item in path:
        item = item.strip()
        if not new_path and item == '':
            # leading empty item (path '.child') => ignore root
            ns = name = None
            index = 0
        else:
            ns, name = cetree.getNsTag(item)
            c_name = _xcstr(name)
            # split an optional trailing '[index]' off the tag name
            index_pos = tree.xmlStrchr(c_name, c'[')
            if index_pos is NULL:
                index = 0
            else:
                index_end = tree.xmlStrchr(index_pos + 1, c']')
                if index_end is NULL:
                    raise ValueError, "index must be enclosed in []"
                index = int(index_pos[1:index_end - index_pos])
                if not new_path and index != 0:
                    raise ValueError, "index not allowed on root node"
                name = <bytes>c_name[:index_pos - c_name]
        new_path.append( (ns, name, index) )
    if not new_path:
        raise ValueError, "invalid path"
    return new_path
162
+
163
+
164
cdef _ObjectPath* _build_object_path_segments(list path_list) except NULL:
    # Convert the parsed (ns, name, index) list into a malloc'ed C array.
    # The stored pointers borrow the byte string buffers inside path_list,
    # so the caller must keep that list alive (ObjectPath stores it) and
    # free the array with python.lxml_free() when done.
    cdef _ObjectPath* c_path
    cdef _ObjectPath* c_path_segments
    c_path_segments = <_ObjectPath*>python.lxml_malloc(len(path_list), sizeof(_ObjectPath))
    if c_path_segments is NULL:
        raise MemoryError()
    c_path = c_path_segments
    for href, name, index in path_list:
        c_path[0].href = _xcstr(href) if href is not None else NULL
        c_path[0].name = _xcstr(name) if name is not None else NULL
        c_path[0].index = index
        c_path += 1
    return c_path_segments
177
+
178
+
179
cdef _find_object_path(_Element root, _ObjectPath* c_path, Py_ssize_t c_path_len, default_value):
    """Follow the path to find the target element.

    Returns the target element, or 'default_value' if the path does not
    resolve and a default other than the _NO_DEFAULT sentinel was given.
    """
    cdef tree.xmlNode* c_node
    cdef Py_ssize_t c_index
    c_node = root._c_node
    c_name = c_path[0].name
    c_href = c_path[0].href
    if c_href is NULL or c_href[0] == c'\0':
        # no namespace in the first segment => use the root's namespace
        c_href = tree._getNs(c_node)
    if not cetree.tagMatches(c_node, c_href, c_name):
        if default_value is not _NO_DEFAULT:
            return default_value
        else:
            raise ValueError(
                f"root element does not match: need {cetree.namespacedNameFromNsName(c_href, c_name)}, got {root.tag}")

    while c_node is not NULL:
        c_path_len -= 1
        if c_path_len <= 0:
            break  # all segments consumed => c_node is the target

        c_path += 1
        if c_path[0].href is not NULL:
            c_href = c_path[0].href # otherwise: keep parent namespace
        # tag names are interned in the document dict; a miss there means
        # the child cannot exist in this document at all
        c_name = tree.xmlDictExists(c_node.doc.dict, c_path[0].name, -1)
        if c_name is NULL:
            c_name = c_path[0].name
            c_node = NULL
            break
        c_index = c_path[0].index
        # negative index => search backwards from the last child
        c_node = c_node.last if c_index < 0 else c_node.children
        c_node = _findFollowingSibling(c_node, c_href, c_name, c_index)

    if c_node is not NULL:
        return cetree.elementFactory(root._doc, c_node)
    elif default_value is not _NO_DEFAULT:
        return default_value
    else:
        tag = cetree.namespacedNameFromNsName(c_href, c_name)
        raise AttributeError, f"no such child: {tag}"
220
+
221
+
222
cdef _create_object_path(_Element root, _ObjectPath* c_path,
                         Py_ssize_t c_path_len, int replace, value):
    """Follow the path to find the target element, build the missing children
    as needed and set the target element to 'value'. If replace is true, an
    existing value is replaced, otherwise the new value is added.
    """
    cdef _Element child
    cdef tree.xmlNode* c_node
    cdef tree.xmlNode* c_child
    cdef Py_ssize_t c_index
    if c_path_len == 1:
        raise TypeError, "cannot update root node"

    c_node = root._c_node
    c_name = c_path[0].name
    c_href = c_path[0].href
    if c_href is NULL or c_href[0] == c'\0':
        # no namespace in the first segment => use the root's namespace
        c_href = tree._getNs(c_node)
    if not cetree.tagMatches(c_node, c_href, c_name):
        raise ValueError(
            f"root element does not match: need {cetree.namespacedNameFromNsName(c_href, c_name)}, got {root.tag}")

    while c_path_len > 1:
        c_path_len -= 1
        c_path += 1
        if c_path[0].href is not NULL:
            c_href = c_path[0].href # otherwise: keep parent namespace
        c_index = c_path[0].index
        # tag names are interned in the document dict; a miss there means
        # the child does not exist yet in this document
        c_name = tree.xmlDictExists(c_node.doc.dict, c_path[0].name, -1)
        if c_name is NULL:
            c_name = c_path[0].name
            c_child = NULL
        else:
            # negative index => search backwards from the last child
            c_child = c_node.last if c_index < 0 else c_node.children
            c_child = _findFollowingSibling(c_child, c_href, c_name, c_index)

        if c_child is not NULL:
            c_node = c_child
        elif c_index != 0:
            raise TypeError, "creating indexed path attributes is not supported"
        elif c_path_len == 1:
            # missing last segment => append the value right here
            _appendValue(cetree.elementFactory(root._doc, c_node),
                         cetree.namespacedNameFromNsName(c_href, c_name),
                         value)
            return
        else:
            # missing intermediate element => create it empty and descend
            child = cetree.makeSubElement(
                cetree.elementFactory(root._doc, c_node),
                cetree.namespacedNameFromNsName(c_href, c_name),
                None, None, None, None)
            c_node = child._c_node

    # if we get here, the entire path was already there
    if replace:
        element = cetree.elementFactory(root._doc, c_node)
        _replaceElement(element, value)
    else:
        _appendValue(cetree.elementFactory(root._doc, c_node.parent),
                     cetree.namespacedName(c_node), value)
281
+
282
+
283
cdef list _build_descendant_paths(tree.xmlNode* c_node, prefix_string):
    """Collect the object paths of all descendants of ``c_node``.

    ``prefix_string`` (if non-empty) is prepended to every path, with a
    separating '.' inserted unless it already ends in one.
    """
    cdef list initial_path, collected
    root_tag = cetree.namespacedName(c_node)
    if not prefix_string:
        full_prefix = root_tag
    elif prefix_string[-1] == '.':
        full_prefix = prefix_string + root_tag
    else:
        full_prefix = prefix_string + '.' + root_tag
    initial_path = [full_prefix]
    collected = []
    _recursive_build_descendant_paths(c_node, initial_path, collected)
    return collected
298
+
299
+
300
cdef int _recursive_build_descendant_paths(tree.xmlNode* c_node,
                                           list path, list path_list) except -1:
    """Fills the list 'path_list' with all descendant paths, initial prefix
    being in the list 'path'.
    """
    cdef tree.xmlNode* c_child
    tags = {}  # tag string -> number of same-named siblings seen so far
    path_list.append('.'.join(path))
    c_href = tree._getNs(c_node)
    c_child = c_node.children
    while c_child is not NULL:
        while c_child.type != tree.XML_ELEMENT_NODE:
            # skip non-element nodes (text, comments, PIs, ...)
            c_child = c_child.next
            if c_child is NULL:
                return 0
        # NOTE(review): pointer identity comparison of the ns href strings
        # below relies on them being shared/interned per document - confirm
        if c_href is tree._getNs(c_child):
            tag = pyunicode(c_child.name)
        elif c_href is not NULL and tree._getNs(c_child) is NULL:
            # special case: parent has namespace, child does not
            tag = '{}' + pyunicode(c_child.name)
        else:
            tag = cetree.namespacedName(c_child)
        count = tags.get(tag)
        if count is None:
            tags[tag] = 1
        else:
            # repeated sibling tag => disambiguate with an explicit index
            tags[tag] = count + 1
            tag += f'[{count}]'
        path.append(tag)
        _recursive_build_descendant_paths(c_child, path, path_list)
        del path[-1]
        c_child = c_child.next
    return 0
llmeval-env/lib/python3.10/site-packages/lxml/parsertarget.pxi ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Parser target context (ET target interface)
2
+
3
# Prefer inspect.getfullargspec where available; fall back to the
# legacy inspect.getargspec on old Python versions that lack it.
cdef object inspect_getargspec
try:
    from inspect import getfullargspec as inspect_getargspec
except ImportError:
    from inspect import getargspec as inspect_getargspec
8
+
9
+
10
class _TargetParserResult(Exception):
    """Internal control-flow exception.

    Admittedly somewhat ugly, but it is the easiest way to transport the
    Python-level parser target's result through the C parser machinery
    up to the API-level functions, which catch it and unwrap ``result``.
    """
    def __init__(self, result):
        self.result = result
16
+
17
+
18
@cython.final
@cython.internal
cdef class _PythonSaxParserTarget(_SaxParserTarget):
    """Adapter that wraps an arbitrary Python target object (ET target
    interface) and forwards C-level SAX events to its methods.

    Only the events for which the target provides a method are enabled
    in the _sax_event_filter bit mask.
    """
    cdef object _target_start
    cdef object _target_end
    cdef object _target_data
    cdef object _target_start_ns
    cdef object _target_end_ns
    cdef object _target_doctype
    cdef object _target_pi
    cdef object _target_comment
    cdef bint _start_takes_nsmap   # True if target.start() accepts an nsmap argument

    def __cinit__(self, target):
        cdef int event_filter
        event_filter = 0
        self._start_takes_nsmap = 0
        try:
            self._target_start = target.start
            if self._target_start is not None:
                event_filter |= SAX_EVENT_START
        except AttributeError:
            pass
        else:
            try:
                # detect the extended start(tag, attrib, nsmap) signature:
                # more than 3 named arguments (incl. self) or a *args slot
                arguments = inspect_getargspec(self._target_start)
                if len(arguments[0]) > 3 or arguments[1] is not None:
                    self._start_takes_nsmap = 1
            except TypeError:
                # not introspectable => keep the 2-argument call form
                pass
        try:
            self._target_end = target.end
            if self._target_end is not None:
                event_filter |= SAX_EVENT_END
        except AttributeError:
            pass
        try:
            self._target_start_ns = target.start_ns
            if self._target_start_ns is not None:
                event_filter |= SAX_EVENT_START_NS
        except AttributeError:
            pass
        try:
            self._target_end_ns = target.end_ns
            if self._target_end_ns is not None:
                event_filter |= SAX_EVENT_END_NS
        except AttributeError:
            pass
        try:
            self._target_data = target.data
            if self._target_data is not None:
                event_filter |= SAX_EVENT_DATA
        except AttributeError:
            pass
        try:
            self._target_doctype = target.doctype
            if self._target_doctype is not None:
                event_filter |= SAX_EVENT_DOCTYPE
        except AttributeError:
            pass
        try:
            self._target_pi = target.pi
            if self._target_pi is not None:
                event_filter |= SAX_EVENT_PI
        except AttributeError:
            pass
        try:
            self._target_comment = target.comment
            if self._target_comment is not None:
                event_filter |= SAX_EVENT_COMMENT
        except AttributeError:
            pass
        self._sax_event_filter = event_filter

    cdef _handleSaxStart(self, tag, attrib, nsmap):
        # pass nsmap only to targets whose start() was detected to take it
        if self._start_takes_nsmap:
            return self._target_start(tag, attrib, nsmap)
        else:
            return self._target_start(tag, attrib)

    cdef _handleSaxEnd(self, tag):
        return self._target_end(tag)

    cdef _handleSaxStartNs(self, prefix, uri):
        return self._target_start_ns(prefix, uri)

    cdef _handleSaxEndNs(self, prefix):
        return self._target_end_ns(prefix)

    cdef int _handleSaxData(self, data) except -1:
        self._target_data(data)

    cdef int _handleSaxDoctype(self, root_tag, public_id, system_id) except -1:
        self._target_doctype(root_tag, public_id, system_id)

    cdef _handleSaxPi(self, target, data):
        return self._target_pi(target, data)

    cdef _handleSaxComment(self, comment):
        return self._target_comment(comment)
118
+
119
+
120
@cython.final
@cython.internal
@cython.no_gc_clear # Required because parent class uses it - Cython bug.
cdef class _TargetParserContext(_SaxParserContext):
    """This class maps SAX2 events to the ET parser target interface.
    """
    cdef object _python_target   # the user-provided target object

    cdef int _setTarget(self, target) except -1:
        self._python_target = target
        # wrap plain Python targets (or _SaxParserTarget instances with a
        # __dict__) in the Python forwarding adapter
        if not isinstance(target, _SaxParserTarget) or \
                hasattr(target, '__dict__'):
            target = _PythonSaxParserTarget(target)
        self._setSaxParserTarget(target)
        return 0

    cdef _ParserContext _copy(self):
        cdef _TargetParserContext context
        context = _ParserContext._copy(self)
        context._setTarget(self._python_target)
        return context

    cdef void _cleanupTargetParserContext(self, xmlDoc* result) noexcept:
        if self._c_ctxt.myDoc is not NULL:
            if self._c_ctxt.myDoc is not result and \
                    self._c_ctxt.myDoc._private is NULL:
                # no _Document proxy => orphan document, free it here
                tree.xmlFreeDoc(self._c_ctxt.myDoc)
            self._c_ctxt.myDoc = NULL

    cdef object _handleParseResult(self, _BaseParser parser, xmlDoc* result,
                                   filename):
        cdef bint recover
        recover = parser._parse_options & xmlparser.XML_PARSE_RECOVER
        try:
            if self._has_raised():
                self._cleanupTargetParserContext(result)
                self._raise_if_stored()
            if not self._c_ctxt.wellFormed and not recover:
                _raiseParseError(self._c_ctxt, filename, self._error_log)
        except:
            # the target must see a close() call even on parse failure
            self._python_target.close()
            raise
        # the target's close() result is the parser result
        return self._python_target.close()

    cdef xmlDoc* _handleParseResultDoc(self, _BaseParser parser,
                                       xmlDoc* result, filename) except NULL:
        cdef bint recover
        recover = parser._parse_options & xmlparser.XML_PARSE_RECOVER
        if result is not NULL and result._private is NULL:
            # no _Document proxy => orphan document, free it here
            tree.xmlFreeDoc(result)
        try:
            self._cleanupTargetParserContext(result)
            self._raise_if_stored()
            if not self._c_ctxt.wellFormed and not recover:
                _raiseParseError(self._c_ctxt, filename, self._error_log)
        except:
            self._python_target.close()
            raise
        # transport the Python-level result through the C call chain
        # by raising the dedicated control-flow exception
        parse_result = self._python_target.close()
        raise _TargetParserResult(parse_result)
llmeval-env/lib/python3.10/site-packages/lxml/proxy.pxi ADDED
@@ -0,0 +1,619 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Proxy functions and low level node allocation stuff

# Proxies represent elements, their reference is stored in the C
# structure of the respective node (the '_private' slot) to avoid
# multiple instantiation of the Python class for the same node.

@cython.linetrace(False)
@cython.profile(False)
cdef inline _Element getProxy(xmlNode* c_node):
    """Return the _Element proxy registered for ``c_node``, or None when
    the node is NULL or has no proxy yet.
    """
    if c_node is not NULL and c_node._private is not NULL:
        return <_Element>c_node._private
    else:
        return None


@cython.linetrace(False)
@cython.profile(False)
cdef inline bint hasProxy(xmlNode* c_node):
    # True if a Python proxy object is stored in the node's '_private' slot.
    if c_node._private is NULL:
        return False
    return True


@cython.linetrace(False)
@cython.profile(False)
cdef inline int _registerProxy(_Element proxy, _Document doc,
                               xmlNode* c_node) except -1:
    """Register a proxy and type for the node it's proxying for.
    A node must never carry more than one proxy at a time.
    """
    assert not hasProxy(c_node), "double registering proxy!"
    proxy._doc = doc
    proxy._c_node = c_node
    c_node._private = <void*>proxy
    return 0


@cython.linetrace(False)
@cython.profile(False)
cdef inline int _unregisterProxy(_Element proxy) except -1:
    """Unregister a proxy for the node it's proxying for.
    """
    cdef xmlNode* c_node = proxy._c_node
    assert c_node._private is <void*>proxy, "Tried to unregister unknown proxy"
    c_node._private = NULL
    return 0
50
+
51
+
52
################################################################################
# temporarily make a node the root node of its document

cdef xmlDoc* _fakeRootDoc(xmlDoc* c_base_doc, xmlNode* c_node) except NULL:
    # Convenience wrapper: build the fake document including siblings.
    return _plainFakeRootDoc(c_base_doc, c_node, 1)

cdef xmlDoc* _plainFakeRootDoc(xmlDoc* c_base_doc, xmlNode* c_node,
                               bint with_siblings) except NULL:
    # build a temporary document that has the given node as root node
    # note that copy and original must not be modified during its lifetime!!
    # always call _destroyFakeDoc() after use!
    cdef xmlNode* c_child
    cdef xmlNode* c_root
    cdef xmlNode* c_new_root
    cdef xmlDoc* c_doc
    if with_siblings or (c_node.prev is NULL and c_node.next is NULL):
        c_root = tree.xmlDocGetRootElement(c_base_doc)
        if c_root is c_node:
            # already the root node, no siblings
            return c_base_doc

    c_doc = _copyDoc(c_base_doc, 0)                     # non recursive!
    c_new_root = tree.xmlDocCopyNode(c_node, c_doc, 2)  # non recursive!
    tree.xmlDocSetRootElement(c_doc, c_new_root)
    _copyParentNamespaces(c_node, c_new_root)

    # the copied root *borrows* the original node's children (not copied!)
    c_new_root.children = c_node.children
    c_new_root.last = c_node.last
    c_new_root.next = c_new_root.prev = NULL

    # store original node so _destroyFakeDoc() can restore parent links
    c_doc._private = c_node

    # divert parent pointers of children
    c_child = c_new_root.children
    while c_child is not NULL:
        c_child.parent = c_new_root
        c_child = c_child.next

    c_doc.children = c_new_root
    return c_doc
93
+
94
cdef void _destroyFakeDoc(xmlDoc* c_base_doc, xmlDoc* c_doc) noexcept:
    # Delete a temporary document created by _plainFakeRootDoc().
    # Re-attaches the borrowed children to their original parent before
    # freeing the copied document shell.  No-op if no copy was made.
    cdef xmlNode* c_child
    cdef xmlNode* c_parent
    cdef xmlNode* c_root
    if c_doc is c_base_doc:
        return
    c_root = tree.xmlDocGetRootElement(c_doc)

    # restore parent pointers of children
    c_parent = <xmlNode*>c_doc._private
    c_child = c_root.children
    while c_child is not NULL:
        c_child.parent = c_parent
        c_child = c_child.next

    # prevent recursive removal of children
    c_root.children = c_root.last = NULL
    tree.xmlFreeDoc(c_doc)

cdef _Element _fakeDocElementFactory(_Document doc, xmlNode* c_element):
    """Special element factory for cases where we need to create a fake
    root document, but still need to instantiate arbitrary nodes from
    it.  If we instantiate the fake root node, things will turn bad
    when it's destroyed.

    Instead, if we are asked to instantiate the fake root node, we
    instantiate the original node instead.
    """
    if c_element.doc is not doc._c_doc:
        if c_element.doc._private is not NULL:
            if c_element is c_element.doc.children:
                # fake root requested => substitute the stored original node
                c_element = <xmlNode*>c_element.doc._private
                #assert c_element.type == tree.XML_ELEMENT_NODE
    return _elementFactory(doc, c_element)
129
+
130
################################################################################
# support for freeing tree elements when proxy objects are destroyed

cdef int attemptDeallocation(xmlNode* c_node) noexcept:
    """Attempt deallocation of c_node (or higher up in tree).
    Returns 1 if a subtree was actually freed, 0 otherwise.
    """
    cdef xmlNode* c_top
    # could be we actually aren't referring to the tree at all
    if c_node is NULL:
        return 0
    c_top = getDeallocationTop(c_node)
    if c_top is not NULL:
        _removeText(c_top.next)  # tail text nodes belong to the subtree
        tree.xmlFreeNode(c_top)
        return 1
    return 0

cdef xmlNode* getDeallocationTop(xmlNode* c_node) noexcept:
    """Return the top of the tree that can be deallocated, or NULL.

    A subtree may only be freed when it is detached from any document and
    no node in it (or in its sibling subtrees) still has a live proxy.
    """
    cdef xmlNode* c_next
    if hasProxy(c_node):
        # a live proxy still references this node
        return NULL
    # walk up to the highest detached ancestor
    while c_node.parent is not NULL:
        c_node = c_node.parent
        if c_node.type == tree.XML_DOCUMENT_NODE or \
                c_node.type == tree.XML_HTML_DOCUMENT_NODE:
            # still attached to a document => not ours to free
            return NULL
        if hasProxy(c_node):
            # an ancestor still has a live proxy
            return NULL
    # see whether we have children to deallocate
    if not canDeallocateChildNodes(c_node):
        return NULL
    # see whether we have siblings to deallocate
    c_next = c_node.prev
    while c_next:
        if _isElement(c_next):
            if hasProxy(c_next) or not canDeallocateChildNodes(c_next):
                return NULL
        c_next = c_next.prev
    c_next = c_node.next
    while c_next:
        if _isElement(c_next):
            if hasProxy(c_next) or not canDeallocateChildNodes(c_next):
                return NULL
        c_next = c_next.next
    return c_node

cdef int canDeallocateChildNodes(xmlNode* c_parent) noexcept:
    # 1 if no descendant element of c_parent still carries a proxy.
    cdef xmlNode* c_node
    c_node = c_parent.children
    tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_parent, c_node, 1)
    if hasProxy(c_node):
        return 0
    tree.END_FOR_EACH_ELEMENT_FROM(c_node)
    return 1
194
+
195
################################################################################
# fix _Document references and namespaces when a node changes documents

cdef void _copyParentNamespaces(xmlNode* c_from_node, xmlNode* c_to_node) noexcept nogil:
    """Copy the namespaces of all ancestors of c_from_node to c_to_node.
    """
    cdef xmlNode* c_parent
    cdef xmlNs* c_ns
    cdef xmlNs* c_new_ns
    cdef int prefix_known  # (currently unused)
    c_parent = c_from_node.parent
    while c_parent and (tree._isElementOrXInclude(c_parent) or
                        c_parent.type == tree.XML_DOCUMENT_NODE):
        c_new_ns = c_parent.nsDef
        while c_new_ns:
            # libxml2 will check if the prefix is already defined
            tree.xmlNewNs(c_to_node, c_new_ns.href, c_new_ns.prefix)
            c_new_ns = c_new_ns.next
        c_parent = c_parent.parent


# one entry of the old->new namespace mapping collected while a subtree
# is adapted to a new document
ctypedef struct _ns_update_map:
    xmlNs* old
    xmlNs* new


# growable array of _ns_update_map entries; 'last' is the used count,
# 'size' the allocated capacity
ctypedef struct _nscache:
    _ns_update_map* ns_map
    size_t size
    size_t last


cdef int _growNsCache(_nscache* c_ns_cache) except -1:
    # Grow the cache: first allocation gets 20 entries, afterwards the
    # capacity doubles.  On allocation failure the old buffer is freed
    # and MemoryError is raised.
    cdef _ns_update_map* ns_map_ptr
    if c_ns_cache.size == 0:
        c_ns_cache.size = 20
    else:
        c_ns_cache.size *= 2
    ns_map_ptr = <_ns_update_map*> python.lxml_realloc(
        c_ns_cache.ns_map, c_ns_cache.size, sizeof(_ns_update_map))
    if not ns_map_ptr:
        python.lxml_free(c_ns_cache.ns_map)
        c_ns_cache.ns_map = NULL
        raise MemoryError()
    c_ns_cache.ns_map = ns_map_ptr
    return 0


cdef inline int _appendToNsCache(_nscache* c_ns_cache,
                                 xmlNs* c_old_ns, xmlNs* c_new_ns) except -1:
    # Record an old->new namespace mapping, growing the cache on demand.
    if c_ns_cache.last >= c_ns_cache.size:
        _growNsCache(c_ns_cache)
    c_ns_cache.ns_map[c_ns_cache.last] = _ns_update_map(old=c_old_ns, new=c_new_ns)
    c_ns_cache.last += 1
249
+
250
+
251
cdef int _stripRedundantNamespaceDeclarations(xmlNode* c_element, _nscache* c_ns_cache,
                                              xmlNs** c_del_ns_list) except -1:
    """Removes namespace declarations from an element that are already
    defined in its parents.  Does not free the xmlNs's, just prepends
    them to the c_del_ns_list.
    """
    cdef xmlNs* c_ns
    cdef xmlNs* c_ns_next
    cdef xmlNs** c_nsdef
    # use a xmlNs** to handle assignments to "c_element.nsDef" correctly
    c_nsdef = &c_element.nsDef
    while c_nsdef[0] is not NULL:
        c_ns = tree.xmlSearchNsByHref(
            c_element.doc, c_element.parent, c_nsdef[0].href)
        if c_ns is NULL:
            # new namespace href => keep and cache the ns declaration
            _appendToNsCache(c_ns_cache, c_nsdef[0], c_nsdef[0])
            c_nsdef = &c_nsdef[0].next
        else:
            # known namespace href => cache mapping and strip old ns
            _appendToNsCache(c_ns_cache, c_nsdef[0], c_ns)
            # cut out c_nsdef.next and prepend it to garbage chain
            c_ns_next = c_nsdef[0].next
            c_nsdef[0].next = c_del_ns_list[0]
            c_del_ns_list[0] = c_nsdef[0]
            c_nsdef[0] = c_ns_next
    return 0


cdef void _cleanUpFromNamespaceAdaptation(xmlNode* c_start_node,
                                          _nscache* c_ns_cache, xmlNs* c_del_ns_list) noexcept:
    # Try to recover from exceptions with really bad timing.  We were in the middle
    # of ripping out xmlNS-es and likely ran out of memory.  Try to fix up the tree
    # by re-adding the original xmlNs declarations (which might still be used in some
    # places).
    if c_ns_cache.ns_map:
        python.lxml_free(c_ns_cache.ns_map)
    if c_del_ns_list:
        if not c_start_node.nsDef:
            c_start_node.nsDef = c_del_ns_list
        else:
            # append the stripped declarations to the end of the nsDef chain
            c_ns = c_start_node.nsDef
            while c_ns.next:
                c_ns = c_ns.next
            c_ns.next = c_del_ns_list
296
+
297
+
298
cdef int moveNodeToDocument(_Document doc, xmlDoc* c_source_doc,
                            xmlNode* c_element) except -1:
    """Fix the xmlNs pointers of a node and its subtree that were moved.

    Originally copied from libxml2's xmlReconciliateNs().  Expects
    libxml2 doc pointers of node to be correct already, but fixes
    _Document references.

    For each node in the subtree, we do this:

    1) Remove redundant declarations of namespace that are already
       defined in its parents.

    2) Replace namespaces that are *not* defined on the node or its
       parents by the equivalent namespace declarations that *are*
       defined on the node or its parents (possibly using a different
       prefix).  If a namespace is unknown, declare a new one on the
       node.

    3) Reassign the names of tags and attribute from the dict of the
       target document *iff* it is different from the dict used in the
       source subtree.

    4) Set the Document reference to the new Document (if different).
       This is done on backtracking to keep the original Document
       alive as long as possible, until all its elements are updated.

    Note that the namespace declarations are removed from the tree in
    step 1), but freed only after the complete subtree was traversed
    and all occurrences were replaced by tree-internal pointers.
    """
    cdef xmlNode* c_start_node
    cdef xmlNode* c_node
    cdef xmlDoc* c_doc = doc._c_doc
    cdef tree.xmlAttr* c_attr
    cdef char* c_name
    cdef _nscache c_ns_cache = [NULL, 0, 0]
    cdef xmlNs* c_del_ns_list = NULL
    cdef proxy_count = 0

    if not tree._isElementOrXInclude(c_element):
        return 0

    c_start_node = c_element

    tree.BEGIN_FOR_EACH_FROM(c_element, c_element, 1)
    if tree._isElementOrXInclude(c_element):
        if hasProxy(c_element):
            proxy_count += 1

        # 1) cut out namespaces defined here that are already known by
        #    the ancestors
        if c_element.nsDef is not NULL:
            try:
                _stripRedundantNamespaceDeclarations(c_element, &c_ns_cache, &c_del_ns_list)
            except:
                _cleanUpFromNamespaceAdaptation(c_start_node, &c_ns_cache, c_del_ns_list)
                raise

        # 2) make sure the namespaces of an element and its attributes
        #    are declared in this document (i.e. on the node or its parents)
        if c_element.ns is not NULL:
            _fixCNs(doc, c_start_node, c_element, &c_ns_cache, c_del_ns_list)

        c_node = <xmlNode*>c_element.properties
        while c_node is not NULL:
            if c_node.ns is not NULL:
                _fixCNs(doc, c_start_node, c_node, &c_ns_cache, c_del_ns_list)
            c_node = c_node.next

    tree.END_FOR_EACH_FROM(c_element)

    # free now unused namespace declarations
    if c_del_ns_list is not NULL:
        tree.xmlFreeNsList(c_del_ns_list)

    # cleanup
    if c_ns_cache.ns_map is not NULL:
        python.lxml_free(c_ns_cache.ns_map)

    # 3) fix the names in the tree if we moved it from a different thread
    if doc._c_doc.dict is not c_source_doc.dict:
        fixThreadDictNames(c_start_node, c_source_doc.dict, doc._c_doc.dict)

    # 4) fix _Document references
    #    (and potentially deallocate the source document)
    if proxy_count > 0:
        if proxy_count == 1 and c_start_node._private is not NULL:
            # fast path: the only proxy sits on the subtree root
            proxy = getProxy(c_start_node)
            if proxy is not None:
                if proxy._doc is not doc:
                    proxy._doc = doc
            else:
                fixElementDocument(c_start_node, doc, proxy_count)
        else:
            fixElementDocument(c_start_node, doc, proxy_count)

    return 0
396
+
397
+
398
cdef void _setTreeDoc(xmlNode* c_node, xmlDoc* c_doc) noexcept:
    """Adaptation of 'xmlSetTreeDoc()' that deep-fixes the document links iteratively.
    It avoids https://gitlab.gnome.org/GNOME/libxml2/issues/42
    """
    tree.BEGIN_FOR_EACH_FROM(c_node, c_node, 1)
    if c_node.type == tree.XML_ELEMENT_NODE:
        c_attr = <tree.xmlAttr*>c_node.properties
        while c_attr:
            if c_attr.atype == tree.XML_ATTRIBUTE_ID:
                # unregister ID attributes from the old document's ID table
                tree.xmlRemoveID(c_node.doc, c_attr)
            c_attr.doc = c_doc
            _fixDocChildren(c_attr.children, c_doc)
            c_attr = c_attr.next
    # Set doc link for all nodes, not only elements.
    c_node.doc = c_doc
    tree.END_FOR_EACH_FROM(c_node)


cdef inline void _fixDocChildren(xmlNode* c_child, xmlDoc* c_doc) noexcept:
    # Recursively re-link a sibling chain (attribute value nodes) to the
    # new document.
    while c_child:
        c_child.doc = c_doc
        if c_child.children:
            _fixDocChildren(c_child.children, c_doc)
        c_child = c_child.next
422
+
423
+
424
cdef int _fixCNs(_Document doc, xmlNode* c_start_node, xmlNode* c_node,
                 _nscache* c_ns_cache, xmlNs* c_del_ns_list) except -1:
    # Re-point c_node.ns at a namespace declared in the target document,
    # using the old->new cache first and falling back to (re)declaring a
    # namespace via the document.
    cdef xmlNs* c_ns = NULL
    cdef bint is_prefixed_attr = (c_node.type == tree.XML_ATTRIBUTE_NODE and c_node.ns.prefix)

    for ns_map in c_ns_cache.ns_map[:c_ns_cache.last]:
        if c_node.ns is ns_map.old:
            if is_prefixed_attr and not ns_map.new.prefix:
                # avoid dropping prefix from attributes
                continue
            c_ns = ns_map.new
            break

    if c_ns:
        c_node.ns = c_ns
    else:
        # not in cache or not acceptable
        # => find a replacement from this document
        try:
            c_ns = doc._findOrBuildNodeNs(
                c_start_node, c_node.ns.href, c_node.ns.prefix,
                c_node.type == tree.XML_ATTRIBUTE_NODE)
            c_node.ns = c_ns
            # NOTE(review): this caches the mapping *after* c_node.ns was
            # reassigned, i.e. it records (new ns -> new ns) rather than
            # (old ns -> new ns) — presumably intentional; confirm upstream.
            _appendToNsCache(c_ns_cache, c_node.ns, c_ns)
        except:
            _cleanUpFromNamespaceAdaptation(c_start_node, c_ns_cache, c_del_ns_list)
            raise
    return 0
452
+
453
+
454
cdef int fixElementDocument(xmlNode* c_element, _Document doc,
                            size_t proxy_count) except -1:
    # Walk the subtree and point every live proxy's _doc at the target
    # document; stops early once 'proxy_count' proxies have been updated.
    cdef xmlNode* c_node = c_element
    cdef _Element proxy = None  # init-to-None required due to fake-loop below
    tree.BEGIN_FOR_EACH_FROM(c_element, c_node, 1)
    if c_node._private is not NULL:
        proxy = getProxy(c_node)
        if proxy is not None:
            if proxy._doc is not doc:
                proxy._doc = doc
            proxy_count -= 1
            if proxy_count == 0:
                return 0
    tree.END_FOR_EACH_FROM(c_node)


cdef void fixThreadDictNames(xmlNode* c_element,
                             tree.xmlDict* c_src_dict,
                             tree.xmlDict* c_dict) noexcept nogil:
    # re-assign the names of tags and attributes
    #
    # this should only be called when the element is based on a
    # different libxml2 tag name dictionary
    if c_element.type == tree.XML_DOCUMENT_NODE or \
            c_element.type == tree.XML_HTML_DOCUMENT_NODE:
        # may define "xml" namespace
        fixThreadDictNsForNode(c_element, c_src_dict, c_dict)
        if c_element.doc.extSubset:
            fixThreadDictNamesForDtd(c_element.doc.extSubset, c_src_dict, c_dict)
        if c_element.doc.intSubset:
            fixThreadDictNamesForDtd(c_element.doc.intSubset, c_src_dict, c_dict)
        c_element = c_element.children
        while c_element is not NULL:
            fixThreadDictNamesForNode(c_element, c_src_dict, c_dict)
            c_element = c_element.next
    elif tree._isElementOrXInclude(c_element):
        fixThreadDictNamesForNode(c_element, c_src_dict, c_dict)
491
+
492
+
493
cdef inline void _fixThreadDictPtr(const_xmlChar** c_ptr,
                                   tree.xmlDict* c_src_dict,
                                   tree.xmlDict* c_dict) noexcept nogil:
    # If the string behind c_ptr is owned by the source dict, re-intern it
    # in the target dict and update the pointer.
    c_str = c_ptr[0]
    if c_str and c_src_dict and tree.xmlDictOwns(c_src_dict, c_str):
        # return value can be NULL on memory error, but we don't handle that here
        c_str = tree.xmlDictLookup(c_dict, c_str, -1)
        if c_str:
            c_ptr[0] = c_str


cdef void fixThreadDictNamesForNode(xmlNode* c_element,
                                    tree.xmlDict* c_src_dict,
                                    tree.xmlDict* c_dict) noexcept nogil:
    # Re-intern all dict-owned strings (names, ns hrefs/prefixes, interned
    # text content) of a subtree into the target dict.
    cdef xmlNode* c_node = c_element
    tree.BEGIN_FOR_EACH_FROM(c_element, c_node, 1)
    if c_node.type in (tree.XML_ELEMENT_NODE, tree.XML_XINCLUDE_START):
        fixThreadDictNamesForAttributes(
            c_node.properties, c_src_dict, c_dict)
        fixThreadDictNsForNode(c_node, c_src_dict, c_dict)
        _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict)
    elif c_node.type == tree.XML_TEXT_NODE:
        # libxml2's SAX2 parser interns some indentation space
        fixThreadDictContentForNode(c_node, c_src_dict, c_dict)
    elif c_node.type == tree.XML_COMMENT_NODE:
        pass  # don't touch c_node.name
    else:
        _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict)
    tree.END_FOR_EACH_FROM(c_node)


cdef inline void fixThreadDictNamesForAttributes(tree.xmlAttr* c_attr,
                                                 tree.xmlDict* c_src_dict,
                                                 tree.xmlDict* c_dict) noexcept nogil:
    # Re-intern attribute names and (dict-owned) attribute value children.
    cdef xmlNode* c_child
    cdef xmlNode* c_node = <xmlNode*>c_attr
    while c_node is not NULL:
        if c_node.type not in (tree.XML_TEXT_NODE, tree.XML_COMMENT_NODE):
            _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict)
        # libxml2 keeps some (!) attribute values in the dict
        c_child = c_node.children
        while c_child is not NULL:
            fixThreadDictContentForNode(c_child, c_src_dict, c_dict)
            c_child = c_child.next
        c_node = c_node.next


cdef inline void fixThreadDictContentForNode(xmlNode* c_node,
                                             tree.xmlDict* c_src_dict,
                                             tree.xmlDict* c_dict) noexcept nogil:
    # Re-intern dict-owned node content; the '&c_node.properties' address
    # marks in-structure (non-dict) short text storage and is skipped.
    if c_node.content is not NULL and \
            c_node.content is not <xmlChar*>&c_node.properties:
        if tree.xmlDictOwns(c_src_dict, c_node.content):
            # result can be NULL on memory error, but we don't handle that here
            c_node.content = <xmlChar*>tree.xmlDictLookup(c_dict, c_node.content, -1)


cdef inline void fixThreadDictNsForNode(xmlNode* c_node,
                                        tree.xmlDict* c_src_dict,
                                        tree.xmlDict* c_dict) noexcept nogil:
    # Re-intern href/prefix of all namespace declarations on the node.
    cdef xmlNs* c_ns = c_node.nsDef
    while c_ns is not NULL:
        _fixThreadDictPtr(&c_ns.href, c_src_dict, c_dict)
        _fixThreadDictPtr(&c_ns.prefix, c_src_dict, c_dict)
        c_ns = c_ns.next
558
+
559
+
560
cdef void fixThreadDictNamesForDtd(tree.xmlDtd* c_dtd,
                                   tree.xmlDict* c_src_dict,
                                   tree.xmlDict* c_dict) noexcept nogil:
    # Re-intern dict-owned strings of DTD element/attribute/entity
    # declarations into the target dict.
    cdef xmlNode* c_node
    cdef tree.xmlElement* c_element
    cdef tree.xmlAttribute* c_attribute
    cdef tree.xmlEntity* c_entity

    c_node = c_dtd.children
    while c_node:
        if c_node.type == tree.XML_ELEMENT_DECL:
            c_element = <tree.xmlElement*>c_node
            if c_element.content:
                _fixThreadDictPtr(&c_element.content.name, c_src_dict, c_dict)
                _fixThreadDictPtr(&c_element.content.prefix, c_src_dict, c_dict)
            c_attribute = c_element.attributes
            while c_attribute:
                _fixThreadDictPtr(&c_attribute.defaultValue, c_src_dict, c_dict)
                _fixThreadDictPtr(&c_attribute.name, c_src_dict, c_dict)
                _fixThreadDictPtr(&c_attribute.prefix, c_src_dict, c_dict)
                _fixThreadDictPtr(&c_attribute.elem, c_src_dict, c_dict)
                c_attribute = c_attribute.nexth
        elif c_node.type == tree.XML_ENTITY_DECL:
            c_entity = <tree.xmlEntity*>c_node
            _fixThreadDictPtr(&c_entity.name, c_src_dict, c_dict)
            _fixThreadDictPtr(&c_entity.ExternalID, c_src_dict, c_dict)
            _fixThreadDictPtr(&c_entity.SystemID, c_src_dict, c_dict)
            _fixThreadDictPtr(<const_xmlChar**>&c_entity.content, c_src_dict, c_dict)
        c_node = c_node.next
589
+
590
+
591
################################################################################
# adopt an xmlDoc from an external libxml2 document source

cdef _Document _adoptForeignDoc(xmlDoc* c_doc, _BaseParser parser=None, bint is_owned=True):
    """Convert and wrap an externally produced xmlDoc for use in lxml.
    Assures that all '_private' pointers are NULL to prevent accidental
    dereference into lxml proxy objects.
    """
    if c_doc is NULL:
        raise ValueError("Illegal document provided: NULL")
    if c_doc.type not in (tree.XML_DOCUMENT_NODE, tree.XML_HTML_DOCUMENT_NODE):
        doc_type = c_doc.type
        if is_owned:
            tree.xmlFreeDoc(c_doc)
        raise ValueError(f"Illegal document provided: expected XML or HTML, found {doc_type}")

    cdef xmlNode* c_node = <xmlNode*>c_doc

    if is_owned:
        # take over the document as-is, but wipe all '_private' slots
        tree.BEGIN_FOR_EACH_FROM(<xmlNode*>c_doc, c_node, 1)
        c_node._private = NULL
        tree.END_FOR_EACH_FROM(c_node)
    else:
        # create a fresh copy that lxml owns
        c_doc = tree.xmlCopyDoc(c_doc, 1)
        if c_doc is NULL:
            raise MemoryError()

    return _documentFactory(c_doc, parser)
llmeval-env/lib/python3.10/site-packages/lxml/pyclasslookup.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Dummy module kept for backwards compatibility: re-exports
# PythonElementClassLookup from lxml.etree under its historical location.

from lxml.etree import PythonElementClassLookup
llmeval-env/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (186 kB). View file
 
llmeval-env/lib/python3.10/site-packages/lxml/saxparser.pxi ADDED
@@ -0,0 +1,875 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# SAX-like interfaces

class XMLSyntaxAssertionError(XMLSyntaxError, AssertionError):
    """
    An XMLSyntaxError that additionally inherits from AssertionError for
    ElementTree / backwards compatibility reasons.

    This class may get replaced by a plain XMLSyntaxError in a future version.
    """
    def __init__(self, message):
        # (message, code, line, column): no error code, position line 0 / col 1
        XMLSyntaxError.__init__(self, message, None, 0, 1)
12
+
13
+
14
# bit flags describing which SAX callbacks a parser target wants to receive
ctypedef enum _SaxParserEvents:
    SAX_EVENT_START    = 1 << 0
    SAX_EVENT_END      = 1 << 1
    SAX_EVENT_DATA     = 1 << 2
    SAX_EVENT_DOCTYPE  = 1 << 3
    SAX_EVENT_PI       = 1 << 4
    SAX_EVENT_COMMENT  = 1 << 5
    SAX_EVENT_START_NS = 1 << 6
    SAX_EVENT_END_NS   = 1 << 7

# bit flags for the parse events requested from iterparse()/iterwalk()
ctypedef enum _ParseEventFilter:
    PARSE_EVENT_FILTER_START    = 1 << 0
    PARSE_EVENT_FILTER_END      = 1 << 1
    PARSE_EVENT_FILTER_START_NS = 1 << 2
    PARSE_EVENT_FILTER_END_NS   = 1 << 3
    PARSE_EVENT_FILTER_COMMENT  = 1 << 4
    PARSE_EVENT_FILTER_PI       = 1 << 5
31
+
32
+
33
cdef int _buildParseEventFilter(events) except -1:
    """Translate an iterable of event names ('start', 'end', 'start-ns',
    'end-ns', 'comment', 'pi') into a _ParseEventFilter bit mask.

    Raises ValueError for an unknown event name.
    """
    cdef int event_filter = 0
    for event in events:
        if event == 'start':
            event_filter |= PARSE_EVENT_FILTER_START
        elif event == 'end':
            event_filter |= PARSE_EVENT_FILTER_END
        elif event == 'start-ns':
            event_filter |= PARSE_EVENT_FILTER_START_NS
        elif event == 'end-ns':
            event_filter |= PARSE_EVENT_FILTER_END_NS
        elif event == 'comment':
            event_filter |= PARSE_EVENT_FILTER_COMMENT
        elif event == 'pi':
            event_filter |= PARSE_EVENT_FILTER_PI
        else:
            raise ValueError, f"invalid event name '{event}'"
    return event_filter
51
+
52
+
53
cdef class _SaxParserTarget:
    """Base class for SAX-driven parser targets.  Subclasses declare the
    events they want via '_sax_event_filter' and override the matching
    handlers; the default implementations are no-ops.
    """
    # bit mask of SAX_EVENT_* flags requested by the concrete target
    cdef int _sax_event_filter

    cdef _handleSaxStart(self, tag, attrib, nsmap):
        return None
    cdef _handleSaxEnd(self, tag):
        return None
    cdef int _handleSaxData(self, data) except -1:
        return 0
    cdef int _handleSaxDoctype(self, root_tag, public_id, system_id) except -1:
        return 0
    cdef _handleSaxPi(self, target, data):
        return None
    cdef _handleSaxComment(self, comment):
        return None
    cdef _handleSaxStartNs(self, prefix, uri):
        return None
    cdef _handleSaxEndNs(self, prefix):
        return None
72
+
73
+
74
+ #@cython.final
75
+ @cython.internal
76
+ @cython.no_gc_clear # Required because parent class uses it - Cython bug.
77
+ cdef class _SaxParserContext(_ParserContext):
78
+ """This class maps SAX2 events to parser target events.
79
+ """
80
+ cdef _SaxParserTarget _target
81
+ cdef _BaseParser _parser
82
+ cdef xmlparser.startElementNsSAX2Func _origSaxStart
83
+ cdef xmlparser.endElementNsSAX2Func _origSaxEnd
84
+ cdef xmlparser.startElementSAXFunc _origSaxStartNoNs
85
+ cdef xmlparser.endElementSAXFunc _origSaxEndNoNs
86
+ cdef xmlparser.charactersSAXFunc _origSaxData
87
+ cdef xmlparser.cdataBlockSAXFunc _origSaxCData
88
+ cdef xmlparser.internalSubsetSAXFunc _origSaxDoctype
89
+ cdef xmlparser.commentSAXFunc _origSaxComment
90
+ cdef xmlparser.processingInstructionSAXFunc _origSaxPI
91
+ cdef xmlparser.startDocumentSAXFunc _origSaxStartDocument
92
+
93
+ # for event collecting
94
+ cdef int _event_filter
95
+ cdef list _ns_stack
96
+ cdef list _node_stack
97
+ cdef _ParseEventsIterator events_iterator
98
+
99
+ # for iterparse
100
+ cdef _Element _root
101
+ cdef _MultiTagMatcher _matcher
102
+
103
+ def __cinit__(self, _BaseParser parser):
104
+ self._ns_stack = []
105
+ self._node_stack = []
106
+ self._parser = parser
107
+ self.events_iterator = _ParseEventsIterator()
108
+
109
+ cdef void _setSaxParserTarget(self, _SaxParserTarget target) noexcept:
110
+ self._target = target
111
+
112
+ cdef void _initParserContext(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
113
+ _ParserContext._initParserContext(self, c_ctxt)
114
+ if self._target is not None:
115
+ self._connectTarget(c_ctxt)
116
+ elif self._event_filter:
117
+ self._connectEvents(c_ctxt)
118
+
119
+ cdef void _connectTarget(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
120
+ """Wrap original SAX2 callbacks to call into parser target.
121
+ """
122
+ sax = c_ctxt.sax
123
+ self._origSaxStart = sax.startElementNs = NULL
124
+ self._origSaxStartNoNs = sax.startElement = NULL
125
+ if self._target._sax_event_filter & (SAX_EVENT_START |
126
+ SAX_EVENT_START_NS |
127
+ SAX_EVENT_END_NS):
128
+ # intercept => overwrite orig callback
129
+ # FIXME: also intercept on when collecting END events
130
+ if sax.initialized == xmlparser.XML_SAX2_MAGIC:
131
+ sax.startElementNs = _handleSaxTargetStart
132
+ if self._target._sax_event_filter & SAX_EVENT_START:
133
+ sax.startElement = _handleSaxTargetStartNoNs
134
+
135
+ self._origSaxEnd = sax.endElementNs = NULL
136
+ self._origSaxEndNoNs = sax.endElement = NULL
137
+ if self._target._sax_event_filter & (SAX_EVENT_END |
138
+ SAX_EVENT_END_NS):
139
+ if sax.initialized == xmlparser.XML_SAX2_MAGIC:
140
+ sax.endElementNs = _handleSaxEnd
141
+ if self._target._sax_event_filter & SAX_EVENT_END:
142
+ sax.endElement = _handleSaxEndNoNs
143
+
144
+ self._origSaxData = sax.characters = sax.cdataBlock = NULL
145
+ if self._target._sax_event_filter & SAX_EVENT_DATA:
146
+ sax.characters = sax.cdataBlock = _handleSaxData
147
+
148
+ # doctype propagation is always required for entity replacement
149
+ self._origSaxDoctype = sax.internalSubset
150
+ if self._target._sax_event_filter & SAX_EVENT_DOCTYPE:
151
+ sax.internalSubset = _handleSaxTargetDoctype
152
+
153
+ self._origSaxPI = sax.processingInstruction = NULL
154
+ if self._target._sax_event_filter & SAX_EVENT_PI:
155
+ sax.processingInstruction = _handleSaxTargetPI
156
+
157
+ self._origSaxComment = sax.comment = NULL
158
+ if self._target._sax_event_filter & SAX_EVENT_COMMENT:
159
+ sax.comment = _handleSaxTargetComment
160
+
161
+ # enforce entity replacement
162
+ sax.reference = NULL
163
+ c_ctxt.replaceEntities = 1
164
+
165
+ cdef void _connectEvents(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
166
+ """Wrap original SAX2 callbacks to collect parse events without parser target.
167
+ """
168
+ sax = c_ctxt.sax
169
+ self._origSaxStartDocument = sax.startDocument
170
+ sax.startDocument = _handleSaxStartDocument
171
+
172
+ # only override "start" event handler if needed
173
+ self._origSaxStart = sax.startElementNs
174
+ if self._event_filter == 0 or c_ctxt.html or \
175
+ self._event_filter & (PARSE_EVENT_FILTER_START |
176
+ PARSE_EVENT_FILTER_END |
177
+ PARSE_EVENT_FILTER_START_NS |
178
+ PARSE_EVENT_FILTER_END_NS):
179
+ sax.startElementNs = <xmlparser.startElementNsSAX2Func>_handleSaxStart
180
+
181
+ self._origSaxStartNoNs = sax.startElement
182
+ if self._event_filter == 0 or c_ctxt.html or \
183
+ self._event_filter & (PARSE_EVENT_FILTER_START |
184
+ PARSE_EVENT_FILTER_END):
185
+ sax.startElement = <xmlparser.startElementSAXFunc>_handleSaxStartNoNs
186
+
187
+ # only override "end" event handler if needed
188
+ self._origSaxEnd = sax.endElementNs
189
+ if self._event_filter == 0 or \
190
+ self._event_filter & (PARSE_EVENT_FILTER_END |
191
+ PARSE_EVENT_FILTER_END_NS):
192
+ sax.endElementNs = <xmlparser.endElementNsSAX2Func>_handleSaxEnd
193
+
194
+ self._origSaxEndNoNs = sax.endElement
195
+ if self._event_filter == 0 or \
196
+ self._event_filter & PARSE_EVENT_FILTER_END:
197
+ sax.endElement = <xmlparser.endElementSAXFunc>_handleSaxEndNoNs
198
+
199
+ self._origSaxComment = sax.comment
200
+ if self._event_filter & PARSE_EVENT_FILTER_COMMENT:
201
+ sax.comment = <xmlparser.commentSAXFunc>_handleSaxComment
202
+
203
+ self._origSaxPI = sax.processingInstruction
204
+ if self._event_filter & PARSE_EVENT_FILTER_PI:
205
+ sax.processingInstruction = <xmlparser.processingInstructionSAXFunc>_handleSaxPIEvent
206
+
207
+ cdef _setEventFilter(self, events, tag):
208
+ self._event_filter = _buildParseEventFilter(events)
209
+ if not self._event_filter or tag is None or tag == '*':
210
+ self._matcher = None
211
+ else:
212
+ self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag)
213
+
214
+ cdef int startDocument(self, xmlDoc* c_doc) except -1:
215
+ try:
216
+ self._doc = _documentFactory(c_doc, self._parser)
217
+ finally:
218
+ self._parser = None # clear circular reference ASAP
219
+ if self._matcher is not None:
220
+ self._matcher.cacheTags(self._doc, True) # force entry in libxml2 dict
221
+ return 0
222
+
223
+ cdef int pushEvent(self, event, xmlNode* c_node) except -1:
224
+ cdef _Element root
225
+ if self._root is None:
226
+ root = self._doc.getroot()
227
+ if root is not None and root._c_node.type == tree.XML_ELEMENT_NODE:
228
+ self._root = root
229
+ node = _elementFactory(self._doc, c_node)
230
+ self.events_iterator._events.append( (event, node) )
231
+ return 0
232
+
233
+ cdef int flushEvents(self) except -1:
234
+ events = self.events_iterator._events
235
+ while self._node_stack:
236
+ events.append( ('end', self._node_stack.pop()) )
237
+ _pushSaxNsEndEvents(self)
238
+ while self._ns_stack:
239
+ _pushSaxNsEndEvents(self)
240
+
241
+ cdef void _handleSaxException(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
242
+ if c_ctxt.errNo == xmlerror.XML_ERR_OK:
243
+ c_ctxt.errNo = xmlerror.XML_ERR_INTERNAL_ERROR
244
+ # stop parsing immediately
245
+ c_ctxt.wellFormed = 0
246
+ c_ctxt.disableSAX = 1
247
+ c_ctxt.instate = xmlparser.XML_PARSER_EOF
248
+ self._store_raised()
249
+
250
+
251
+ @cython.final
252
+ @cython.internal
253
+ cdef class _ParseEventsIterator:
254
+ """A reusable parse events iterator"""
255
+ cdef list _events
256
+ cdef int _event_index
257
+
258
+ def __cinit__(self):
259
+ self._events = []
260
+ self._event_index = 0
261
+
262
+ def __iter__(self):
263
+ return self
264
+
265
+ def __next__(self):
266
+ cdef int event_index = self._event_index
267
+ events = self._events
268
+ if event_index >= 2**10 or event_index * 2 >= len(events):
269
+ if event_index:
270
+ # clean up from time to time
271
+ del events[:event_index]
272
+ self._event_index = event_index = 0
273
+ if event_index >= len(events):
274
+ raise StopIteration
275
+ item = events[event_index]
276
+ self._event_index = event_index + 1
277
+ return item
278
+
279
+
280
+ cdef list _build_prefix_uri_list(_SaxParserContext context, int c_nb_namespaces,
281
+ const_xmlChar** c_namespaces):
282
+ "Build [(prefix, uri)] list of declared namespaces."
283
+ cdef int i
284
+ namespaces = []
285
+ for i in xrange(c_nb_namespaces):
286
+ namespaces.append((funicodeOrEmpty(c_namespaces[0]), funicode(c_namespaces[1])))
287
+ c_namespaces += 2
288
+ return namespaces
289
+
290
+
291
+ cdef void _handleSaxStart(
292
+ void* ctxt, const_xmlChar* c_localname, const_xmlChar* c_prefix,
293
+ const_xmlChar* c_namespace, int c_nb_namespaces,
294
+ const_xmlChar** c_namespaces,
295
+ int c_nb_attributes, int c_nb_defaulted,
296
+ const_xmlChar** c_attributes) noexcept with gil:
297
+ cdef int i
298
+ cdef size_t c_len
299
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
300
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
301
+ return
302
+ context = <_SaxParserContext>c_ctxt._private
303
+ cdef int event_filter = context._event_filter
304
+ try:
305
+ if (c_nb_namespaces and
306
+ event_filter & (PARSE_EVENT_FILTER_START_NS |
307
+ PARSE_EVENT_FILTER_END_NS)):
308
+ declared_namespaces = _build_prefix_uri_list(
309
+ context, c_nb_namespaces, c_namespaces)
310
+ if event_filter & PARSE_EVENT_FILTER_START_NS:
311
+ for prefix_uri_tuple in declared_namespaces:
312
+ context.events_iterator._events.append(("start-ns", prefix_uri_tuple))
313
+ else:
314
+ declared_namespaces = None
315
+
316
+ context._origSaxStart(c_ctxt, c_localname, c_prefix, c_namespace,
317
+ c_nb_namespaces, c_namespaces, c_nb_attributes,
318
+ c_nb_defaulted, c_attributes)
319
+ if c_ctxt.html:
320
+ _fixHtmlDictNodeNames(c_ctxt.dict, c_ctxt.node)
321
+ # The HTML parser in libxml2 reports the missing opening tags when it finds
322
+ # misplaced ones, but with tag names from C string constants that ignore the
323
+ # parser dict. Thus, we need to intern the name ourselves.
324
+ c_localname = tree.xmlDictLookup(c_ctxt.dict, c_localname, -1)
325
+ if c_localname is NULL:
326
+ raise MemoryError()
327
+
328
+ if event_filter & PARSE_EVENT_FILTER_END_NS:
329
+ context._ns_stack.append(declared_namespaces)
330
+ if event_filter & (PARSE_EVENT_FILTER_END |
331
+ PARSE_EVENT_FILTER_START):
332
+ _pushSaxStartEvent(context, c_ctxt, c_namespace, c_localname, None)
333
+ except:
334
+ context._handleSaxException(c_ctxt)
335
+ finally:
336
+ return # swallow any further exceptions
337
+
338
+
339
+ cdef void _handleSaxTargetStart(
340
+ void* ctxt, const_xmlChar* c_localname, const_xmlChar* c_prefix,
341
+ const_xmlChar* c_namespace, int c_nb_namespaces,
342
+ const_xmlChar** c_namespaces,
343
+ int c_nb_attributes, int c_nb_defaulted,
344
+ const_xmlChar** c_attributes) noexcept with gil:
345
+ cdef int i
346
+ cdef size_t c_len
347
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
348
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
349
+ return
350
+ context = <_SaxParserContext>c_ctxt._private
351
+
352
+ cdef int event_filter = context._event_filter
353
+ cdef int sax_event_filter = context._target._sax_event_filter
354
+ try:
355
+ if c_nb_namespaces:
356
+ declared_namespaces = _build_prefix_uri_list(
357
+ context, c_nb_namespaces, c_namespaces)
358
+
359
+ if event_filter & PARSE_EVENT_FILTER_START_NS:
360
+ for prefix_uri_tuple in declared_namespaces:
361
+ context.events_iterator._events.append(("start-ns", prefix_uri_tuple))
362
+
363
+ if sax_event_filter & SAX_EVENT_START_NS:
364
+ for prefix, uri in declared_namespaces:
365
+ context._target._handleSaxStartNs(prefix, uri)
366
+ else:
367
+ declared_namespaces = None
368
+
369
+ if sax_event_filter & SAX_EVENT_START:
370
+ if c_nb_defaulted > 0:
371
+ # only add default attributes if we asked for them
372
+ if c_ctxt.loadsubset & xmlparser.XML_COMPLETE_ATTRS == 0:
373
+ c_nb_attributes -= c_nb_defaulted
374
+ if c_nb_attributes == 0:
375
+ attrib = IMMUTABLE_EMPTY_MAPPING
376
+ else:
377
+ attrib = {}
378
+ for i in xrange(c_nb_attributes):
379
+ name = _namespacedNameFromNsName(
380
+ c_attributes[2], c_attributes[0])
381
+ if c_attributes[3] is NULL:
382
+ value = ''
383
+ else:
384
+ c_len = c_attributes[4] - c_attributes[3]
385
+ value = c_attributes[3][:c_len].decode('utf8')
386
+ attrib[name] = value
387
+ c_attributes += 5
388
+
389
+ nsmap = dict(declared_namespaces) if c_nb_namespaces else IMMUTABLE_EMPTY_MAPPING
390
+
391
+ element = _callTargetSaxStart(
392
+ context, c_ctxt,
393
+ _namespacedNameFromNsName(c_namespace, c_localname),
394
+ attrib, nsmap)
395
+ else:
396
+ element = None
397
+
398
+ if (event_filter & PARSE_EVENT_FILTER_END_NS or
399
+ sax_event_filter & SAX_EVENT_END_NS):
400
+ context._ns_stack.append(declared_namespaces)
401
+ if event_filter & (PARSE_EVENT_FILTER_END |
402
+ PARSE_EVENT_FILTER_START):
403
+ _pushSaxStartEvent(context, c_ctxt, c_namespace,
404
+ c_localname, element)
405
+ except:
406
+ context._handleSaxException(c_ctxt)
407
+ finally:
408
+ return # swallow any further exceptions
409
+
410
+
411
+ cdef void _handleSaxStartNoNs(void* ctxt, const_xmlChar* c_name,
412
+ const_xmlChar** c_attributes) noexcept with gil:
413
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
414
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
415
+ return
416
+ context = <_SaxParserContext>c_ctxt._private
417
+ try:
418
+ context._origSaxStartNoNs(c_ctxt, c_name, c_attributes)
419
+ if c_ctxt.html:
420
+ _fixHtmlDictNodeNames(c_ctxt.dict, c_ctxt.node)
421
+ # The HTML parser in libxml2 reports the missing opening tags when it finds
422
+ # misplaced ones, but with tag names from C string constants that ignore the
423
+ # parser dict. Thus, we need to intern the name ourselves.
424
+ c_name = tree.xmlDictLookup(c_ctxt.dict, c_name, -1)
425
+ if c_name is NULL:
426
+ raise MemoryError()
427
+ if context._event_filter & (PARSE_EVENT_FILTER_END |
428
+ PARSE_EVENT_FILTER_START):
429
+ _pushSaxStartEvent(context, c_ctxt, NULL, c_name, None)
430
+ except:
431
+ context._handleSaxException(c_ctxt)
432
+ finally:
433
+ return # swallow any further exceptions
434
+
435
+
436
+ cdef void _handleSaxTargetStartNoNs(void* ctxt, const_xmlChar* c_name,
437
+ const_xmlChar** c_attributes) noexcept with gil:
438
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
439
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
440
+ return
441
+ context = <_SaxParserContext>c_ctxt._private
442
+ try:
443
+ if c_attributes is NULL:
444
+ attrib = IMMUTABLE_EMPTY_MAPPING
445
+ else:
446
+ attrib = {}
447
+ while c_attributes[0] is not NULL:
448
+ name = funicode(c_attributes[0])
449
+ attrib[name] = funicodeOrEmpty(c_attributes[1])
450
+ c_attributes += 2
451
+ element = _callTargetSaxStart(
452
+ context, c_ctxt, funicode(c_name),
453
+ attrib, IMMUTABLE_EMPTY_MAPPING)
454
+ if context._event_filter & (PARSE_EVENT_FILTER_END |
455
+ PARSE_EVENT_FILTER_START):
456
+ _pushSaxStartEvent(context, c_ctxt, NULL, c_name, element)
457
+ except:
458
+ context._handleSaxException(c_ctxt)
459
+ finally:
460
+ return # swallow any further exceptions
461
+
462
+
463
+ cdef _callTargetSaxStart(_SaxParserContext context,
464
+ xmlparser.xmlParserCtxt* c_ctxt,
465
+ tag, attrib, nsmap):
466
+ element = context._target._handleSaxStart(tag, attrib, nsmap)
467
+ if element is not None and c_ctxt.input is not NULL:
468
+ if isinstance(element, _Element):
469
+ (<_Element>element)._c_node.line = (
470
+ <unsigned short>c_ctxt.input.line
471
+ if c_ctxt.input.line < 65535 else 65535)
472
+ return element
473
+
474
+
475
+ cdef int _pushSaxStartEvent(_SaxParserContext context,
476
+ xmlparser.xmlParserCtxt* c_ctxt,
477
+ const_xmlChar* c_href,
478
+ const_xmlChar* c_name, node) except -1:
479
+ if (context._matcher is None or
480
+ context._matcher.matchesNsTag(c_href, c_name)):
481
+ if node is None and context._target is None:
482
+ assert context._doc is not None
483
+ node = _elementFactory(context._doc, c_ctxt.node)
484
+ if context._event_filter & PARSE_EVENT_FILTER_START:
485
+ context.events_iterator._events.append(('start', node))
486
+ if (context._target is None and
487
+ context._event_filter & PARSE_EVENT_FILTER_END):
488
+ context._node_stack.append(node)
489
+ return 0
490
+
491
+
492
+ cdef void _handleSaxEnd(void* ctxt, const_xmlChar* c_localname,
493
+ const_xmlChar* c_prefix,
494
+ const_xmlChar* c_namespace) noexcept with gil:
495
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
496
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
497
+ return
498
+ context = <_SaxParserContext>c_ctxt._private
499
+ try:
500
+ if context._target is not None:
501
+ if context._target._sax_event_filter & SAX_EVENT_END:
502
+ node = context._target._handleSaxEnd(
503
+ _namespacedNameFromNsName(c_namespace, c_localname))
504
+ else:
505
+ node = None
506
+ else:
507
+ context._origSaxEnd(c_ctxt, c_localname, c_prefix, c_namespace)
508
+ node = None
509
+ _pushSaxEndEvent(context, c_namespace, c_localname, node)
510
+ _pushSaxNsEndEvents(context)
511
+ except:
512
+ context._handleSaxException(c_ctxt)
513
+ finally:
514
+ return # swallow any further exceptions
515
+
516
+
517
+ cdef void _handleSaxEndNoNs(void* ctxt, const_xmlChar* c_name) noexcept with gil:
518
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
519
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
520
+ return
521
+ context = <_SaxParserContext>c_ctxt._private
522
+ try:
523
+ if context._target is not None:
524
+ node = context._target._handleSaxEnd(funicode(c_name))
525
+ else:
526
+ context._origSaxEndNoNs(c_ctxt, c_name)
527
+ node = None
528
+ _pushSaxEndEvent(context, NULL, c_name, node)
529
+ except:
530
+ context._handleSaxException(c_ctxt)
531
+ finally:
532
+ return # swallow any further exceptions
533
+
534
+
535
+ cdef int _pushSaxNsEndEvents(_SaxParserContext context) except -1:
536
+ cdef bint build_events = context._event_filter & PARSE_EVENT_FILTER_END_NS
537
+ cdef bint call_target = (
538
+ context._target is not None
539
+ and context._target._sax_event_filter & SAX_EVENT_END_NS)
540
+ if not build_events and not call_target:
541
+ return 0
542
+
543
+ cdef list declared_namespaces = context._ns_stack.pop()
544
+ if declared_namespaces is None:
545
+ return 0
546
+
547
+ cdef tuple prefix_uri
548
+ for prefix_uri in reversed(declared_namespaces):
549
+ if call_target:
550
+ context._target._handleSaxEndNs(prefix_uri[0])
551
+ if build_events:
552
+ context.events_iterator._events.append(('end-ns', None))
553
+
554
+ return 0
555
+
556
+
557
+ cdef int _pushSaxEndEvent(_SaxParserContext context,
558
+ const_xmlChar* c_href,
559
+ const_xmlChar* c_name, node) except -1:
560
+ if context._event_filter & PARSE_EVENT_FILTER_END:
561
+ if (context._matcher is None or
562
+ context._matcher.matchesNsTag(c_href, c_name)):
563
+ if context._target is None:
564
+ node = context._node_stack.pop()
565
+ context.events_iterator._events.append(('end', node))
566
+ return 0
567
+
568
+
569
+ cdef void _handleSaxData(void* ctxt, const_xmlChar* c_data, int data_len) noexcept with gil:
570
+ # can only be called if parsing with a target
571
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
572
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
573
+ return
574
+ context = <_SaxParserContext>c_ctxt._private
575
+ try:
576
+ context._target._handleSaxData(
577
+ c_data[:data_len].decode('utf8'))
578
+ except:
579
+ context._handleSaxException(c_ctxt)
580
+ finally:
581
+ return # swallow any further exceptions
582
+
583
+
584
+ cdef void _handleSaxTargetDoctype(void* ctxt, const_xmlChar* c_name,
585
+ const_xmlChar* c_public,
586
+ const_xmlChar* c_system) noexcept with gil:
587
+ # can only be called if parsing with a target
588
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
589
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
590
+ return
591
+ context = <_SaxParserContext>c_ctxt._private
592
+ try:
593
+ context._target._handleSaxDoctype(
594
+ funicodeOrNone(c_name),
595
+ funicodeOrNone(c_public),
596
+ funicodeOrNone(c_system))
597
+ except:
598
+ context._handleSaxException(c_ctxt)
599
+ finally:
600
+ return # swallow any further exceptions
601
+
602
+
603
+ cdef void _handleSaxStartDocument(void* ctxt) noexcept with gil:
604
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
605
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
606
+ return
607
+ context = <_SaxParserContext>c_ctxt._private
608
+ context._origSaxStartDocument(ctxt)
609
+ c_doc = c_ctxt.myDoc
610
+ try:
611
+ context.startDocument(c_doc)
612
+ except:
613
+ context._handleSaxException(c_ctxt)
614
+ finally:
615
+ return # swallow any further exceptions
616
+
617
+
618
+ cdef void _handleSaxTargetPI(void* ctxt, const_xmlChar* c_target,
619
+ const_xmlChar* c_data) noexcept with gil:
620
+ # can only be called if parsing with a target
621
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
622
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
623
+ return
624
+ context = <_SaxParserContext>c_ctxt._private
625
+ try:
626
+ pi = context._target._handleSaxPi(
627
+ funicodeOrNone(c_target),
628
+ funicodeOrEmpty(c_data))
629
+ if context._event_filter & PARSE_EVENT_FILTER_PI:
630
+ context.events_iterator._events.append(('pi', pi))
631
+ except:
632
+ context._handleSaxException(c_ctxt)
633
+ finally:
634
+ return # swallow any further exceptions
635
+
636
+
637
+ cdef void _handleSaxPIEvent(void* ctxt, const_xmlChar* target,
638
+ const_xmlChar* data) noexcept with gil:
639
+ # can only be called when collecting pi events
640
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
641
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
642
+ return
643
+ context = <_SaxParserContext>c_ctxt._private
644
+ context._origSaxPI(ctxt, target, data)
645
+ c_node = _findLastEventNode(c_ctxt)
646
+ if c_node is NULL:
647
+ return
648
+ try:
649
+ context.pushEvent('pi', c_node)
650
+ except:
651
+ context._handleSaxException(c_ctxt)
652
+ finally:
653
+ return # swallow any further exceptions
654
+
655
+
656
+ cdef void _handleSaxTargetComment(void* ctxt, const_xmlChar* c_data) noexcept with gil:
657
+ # can only be called if parsing with a target
658
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
659
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
660
+ return
661
+ context = <_SaxParserContext>c_ctxt._private
662
+ try:
663
+ comment = context._target._handleSaxComment(funicodeOrEmpty(c_data))
664
+ if context._event_filter & PARSE_EVENT_FILTER_COMMENT:
665
+ context.events_iterator._events.append(('comment', comment))
666
+ except:
667
+ context._handleSaxException(c_ctxt)
668
+ finally:
669
+ return # swallow any further exceptions
670
+
671
+
672
+ cdef void _handleSaxComment(void* ctxt, const_xmlChar* text) noexcept with gil:
673
+ # can only be called when collecting comment events
674
+ c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
675
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
676
+ return
677
+ context = <_SaxParserContext>c_ctxt._private
678
+ context._origSaxComment(ctxt, text)
679
+ c_node = _findLastEventNode(c_ctxt)
680
+ if c_node is NULL:
681
+ return
682
+ try:
683
+ context.pushEvent('comment', c_node)
684
+ except:
685
+ context._handleSaxException(c_ctxt)
686
+ finally:
687
+ return # swallow any further exceptions
688
+
689
+
690
+ cdef inline xmlNode* _findLastEventNode(xmlparser.xmlParserCtxt* c_ctxt):
691
+ # this mimics what libxml2 creates for comments/PIs
692
+ if c_ctxt.inSubset == 1:
693
+ return c_ctxt.myDoc.intSubset.last
694
+ elif c_ctxt.inSubset == 2:
695
+ return c_ctxt.myDoc.extSubset.last
696
+ elif c_ctxt.node is NULL:
697
+ return c_ctxt.myDoc.last
698
+ elif c_ctxt.node.type == tree.XML_ELEMENT_NODE:
699
+ return c_ctxt.node.last
700
+ else:
701
+ return c_ctxt.node.next
702
+
703
+
704
+ ############################################################
705
+ ## ET compatible XML tree builder
706
+ ############################################################
707
+
708
+ cdef class TreeBuilder(_SaxParserTarget):
709
+ """TreeBuilder(self, element_factory=None, parser=None,
710
+ comment_factory=None, pi_factory=None,
711
+ insert_comments=True, insert_pis=True)
712
+
713
+ Parser target that builds a tree from parse event callbacks.
714
+
715
+ The factory arguments can be used to influence the creation of
716
+ elements, comments and processing instructions.
717
+
718
+ By default, comments and processing instructions are inserted into
719
+ the tree, but they can be ignored by passing the respective flags.
720
+
721
+ The final tree is returned by the ``close()`` method.
722
+ """
723
+ cdef _BaseParser _parser
724
+ cdef object _factory
725
+ cdef object _comment_factory
726
+ cdef object _pi_factory
727
+ cdef list _data
728
+ cdef list _element_stack
729
+ cdef object _element_stack_pop
730
+ cdef _Element _last # may be None
731
+ cdef bint _in_tail
732
+ cdef bint _insert_comments
733
+ cdef bint _insert_pis
734
+
735
+ def __init__(self, *, element_factory=None, parser=None,
736
+ comment_factory=None, pi_factory=None,
737
+ bint insert_comments=True, bint insert_pis=True):
738
+ self._sax_event_filter = \
739
+ SAX_EVENT_START | SAX_EVENT_END | SAX_EVENT_DATA | \
740
+ SAX_EVENT_PI | SAX_EVENT_COMMENT
741
+ self._data = [] # data collector
742
+ self._element_stack = [] # element stack
743
+ self._element_stack_pop = self._element_stack.pop
744
+ self._last = None # last element
745
+ self._in_tail = 0 # true if we're after an end tag
746
+ self._factory = element_factory
747
+ self._comment_factory = comment_factory if comment_factory is not None else Comment
748
+ self._pi_factory = pi_factory if pi_factory is not None else ProcessingInstruction
749
+ self._insert_comments = insert_comments
750
+ self._insert_pis = insert_pis
751
+ self._parser = parser
752
+
753
+ @cython.final
754
+ cdef int _flush(self) except -1:
755
+ if self._data:
756
+ if self._last is not None:
757
+ text = "".join(self._data)
758
+ if self._in_tail:
759
+ assert self._last.tail is None, "internal error (tail)"
760
+ self._last.tail = text
761
+ else:
762
+ assert self._last.text is None, "internal error (text)"
763
+ self._last.text = text
764
+ del self._data[:]
765
+ return 0
766
+
767
+ # internal SAX event handlers
768
+
769
+ @cython.final
770
+ cdef _handleSaxStart(self, tag, attrib, nsmap):
771
+ self._flush()
772
+ if self._factory is not None:
773
+ self._last = self._factory(tag, attrib)
774
+ if self._element_stack:
775
+ _appendChild(self._element_stack[-1], self._last)
776
+ elif self._element_stack:
777
+ self._last = _makeSubElement(
778
+ self._element_stack[-1], tag, None, None, attrib, nsmap, None)
779
+ else:
780
+ self._last = _makeElement(
781
+ tag, NULL, None, self._parser, None, None, attrib, nsmap, None)
782
+ self._element_stack.append(self._last)
783
+ self._in_tail = 0
784
+ return self._last
785
+
786
+ @cython.final
787
+ cdef _handleSaxEnd(self, tag):
788
+ self._flush()
789
+ self._last = self._element_stack_pop()
790
+ self._in_tail = 1
791
+ return self._last
792
+
793
+ @cython.final
794
+ cdef int _handleSaxData(self, data) except -1:
795
+ self._data.append(data)
796
+
797
+ @cython.final
798
+ cdef _handleSaxPi(self, target, data):
799
+ elem = self._pi_factory(target, data)
800
+ if self._insert_pis:
801
+ self._flush()
802
+ self._last = elem
803
+ if self._element_stack:
804
+ _appendChild(self._element_stack[-1], self._last)
805
+ self._in_tail = 1
806
+ return self._last
807
+
808
+ @cython.final
809
+ cdef _handleSaxComment(self, comment):
810
+ elem = self._comment_factory(comment)
811
+ if self._insert_comments:
812
+ self._flush()
813
+ self._last = elem
814
+ if self._element_stack:
815
+ _appendChild(self._element_stack[-1], self._last)
816
+ self._in_tail = 1
817
+ return elem
818
+
819
+ # Python level event handlers
820
+
821
+ def close(self):
822
+ """close(self)
823
+
824
+ Flushes the builder buffers, and returns the toplevel document
825
+ element. Raises XMLSyntaxError on inconsistencies.
826
+ """
827
+ if self._element_stack:
828
+ raise XMLSyntaxAssertionError("missing end tags")
829
+ # TODO: this does not necessarily seem like an error case. Why not just return None?
830
+ if self._last is None:
831
+ raise XMLSyntaxAssertionError("missing toplevel element")
832
+ return self._last
833
+
834
+ def data(self, data):
835
+ """data(self, data)
836
+
837
+ Adds text to the current element. The value should be either an
838
+ 8-bit string containing ASCII text, or a Unicode string.
839
+ """
840
+ self._handleSaxData(data)
841
+
842
+ def start(self, tag, attrs, nsmap=None):
843
+ """start(self, tag, attrs, nsmap=None)
844
+
845
+ Opens a new element.
846
+ """
847
+ if nsmap is None:
848
+ nsmap = IMMUTABLE_EMPTY_MAPPING
849
+ return self._handleSaxStart(tag, attrs, nsmap)
850
+
851
+ def end(self, tag):
852
+ """end(self, tag)
853
+
854
+ Closes the current element.
855
+ """
856
+ element = self._handleSaxEnd(tag)
857
+ assert self._last.tag == tag,\
858
+ f"end tag mismatch (expected {self._last.tag}, got {tag})"
859
+ return element
860
+
861
+ def pi(self, target, data=None):
862
+ """pi(self, target, data=None)
863
+
864
+ Creates a processing instruction using the factory, appends it
865
+ (unless disabled) and returns it.
866
+ """
867
+ return self._handleSaxPi(target, data)
868
+
869
+ def comment(self, comment):
870
+ """comment(self, comment)
871
+
872
+ Creates a comment using the factory, appends it (unless disabled)
873
+ and returns it.
874
+ """
875
+ return self._handleSaxComment(comment)
llmeval-env/lib/python3.10/site-packages/lxml/serializer.pxi ADDED
@@ -0,0 +1,1871 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # XML serialization and output functions
2
+
3
+ cdef object GzipFile
4
+ from gzip import GzipFile
5
+
6
+
7
+ cdef class SerialisationError(LxmlError):
8
+ """A libxml2 error that occurred during serialisation.
9
+ """
10
+
11
+
12
+ cdef enum _OutputMethods:
13
+ OUTPUT_METHOD_XML
14
+ OUTPUT_METHOD_HTML
15
+ OUTPUT_METHOD_TEXT
16
+
17
+
18
+ cdef int _findOutputMethod(method) except -1:
19
+ if method is None:
20
+ return OUTPUT_METHOD_XML
21
+ method = method.lower()
22
+ if method == "xml":
23
+ return OUTPUT_METHOD_XML
24
+ if method == "html":
25
+ return OUTPUT_METHOD_HTML
26
+ if method == "text":
27
+ return OUTPUT_METHOD_TEXT
28
+ raise ValueError(f"unknown output method {method!r}")
29
+
30
+
31
cdef _textToString(xmlNode* c_node, encoding, bint with_tail):
    # Serialise the text content of c_node (method="text"), optionally
    # followed by its tail text.  Returns a unicode string when
    # 'encoding' is the unicode type, otherwise an encoded bytes object.
    cdef bint needs_conversion
    cdef const_xmlChar* c_text
    cdef xmlNode* c_text_node
    cdef tree.xmlBuffer* c_buffer
    cdef int error_result

    c_buffer = tree.xmlBufferCreate()
    if c_buffer is NULL:
        raise MemoryError()

    # collect the node's text content (and trailing text siblings) in a
    # plain libxml2 buffer; pure C work, so the GIL can be released
    with nogil:
        error_result = tree.xmlNodeBufGetContent(c_buffer, c_node)
        if with_tail:
            c_text_node = _textNodeOrSkip(c_node.next)
            while c_text_node is not NULL:
                tree.xmlBufferWriteChar(c_buffer, <const_char*>c_text_node.content)
                c_text_node = _textNodeOrSkip(c_text_node.next)
        c_text = tree.xmlBufferContent(c_buffer)

    if error_result < 0 or c_text is NULL:
        tree.xmlBufferFree(c_buffer)
        raise SerialisationError, "Error during serialisation (out of memory?)"

    try:
        # libxml2 stores text as UTF-8 internally; decide whether we can
        # return the raw bytes or have to recode via a Python unicode object
        needs_conversion = 0
        if encoding is unicode:
            needs_conversion = 1
        elif encoding is not None:
            # Python prefers lower case encoding names
            encoding = encoding.lower()
            if encoding not in ('utf8', 'utf-8'):
                if encoding == 'ascii':
                    if isutf8l(c_text, tree.xmlBufferLength(c_buffer)):
                        # will raise a decode error below
                        needs_conversion = 1
                else:
                    needs_conversion = 1

        if needs_conversion:
            text = (<const_char*>c_text)[:tree.xmlBufferLength(c_buffer)].decode('utf8')
            if encoding is not unicode:
                encoding = _utf8(encoding)
                text = python.PyUnicode_AsEncodedString(
                    text, encoding, 'strict')
        else:
            # UTF-8 (or ASCII-clean) content can be returned as-is
            text = (<unsigned char*>c_text)[:tree.xmlBufferLength(c_buffer)]
    finally:
        tree.xmlBufferFree(c_buffer)
    return text
81
+
82
+
83
cdef _tostring(_Element element, encoding, doctype, method,
               bint write_xml_declaration, bint write_complete_document,
               bint pretty_print, bint with_tail, int standalone):
    """Serialize an element to an encoded string representation of its XML
    tree.

    Returns a unicode string when 'encoding' is the unicode type, a bytes
    object otherwise.  Returns None for a None element.
    """
    cdef tree.xmlOutputBuffer* c_buffer
    cdef tree.xmlBuf* c_result_buffer
    cdef tree.xmlCharEncodingHandler* enchandler
    cdef const_char* c_enc
    cdef const_xmlChar* c_version
    cdef const_xmlChar* c_doctype
    cdef int c_method
    cdef int error_result
    if element is None:
        return None
    _assertValidNode(element)
    c_method = _findOutputMethod(method)
    if c_method == OUTPUT_METHOD_TEXT:
        # text serialisation bypasses the output buffer machinery entirely
        return _textToString(element._c_node, encoding, with_tail)
    if encoding is None or encoding is unicode:
        c_enc = NULL
    else:
        encoding = _utf8(encoding)
        c_enc = _cstr(encoding)
    if doctype is None:
        c_doctype = NULL
    else:
        doctype = _utf8(doctype)
        c_doctype = _xcstr(doctype)
    # it is necessary to *and* find the encoding handler *and* use
    # encoding during output
    enchandler = tree.xmlFindCharEncodingHandler(c_enc)
    if enchandler is NULL and c_enc is not NULL:
        if encoding is not None:
            encoding = encoding.decode('UTF-8')
        raise LookupError, f"unknown encoding: '{encoding}'"
    c_buffer = tree.xmlAllocOutputBuffer(enchandler)
    if c_buffer is NULL:
        tree.xmlCharEncCloseFunc(enchandler)
        raise MemoryError()

    with nogil:
        _writeNodeToBuffer(c_buffer, element._c_node, c_enc, c_doctype, c_method,
                           write_xml_declaration, write_complete_document,
                           pretty_print, with_tail, standalone)
        tree.xmlOutputBufferFlush(c_buffer)
        # when an encoder is installed, the recoded output lives in 'conv',
        # otherwise the raw output is in 'buffer'
        if c_buffer.conv is not NULL:
            c_result_buffer = c_buffer.conv
        else:
            c_result_buffer = c_buffer.buffer

    error_result = c_buffer.error
    if error_result != xmlerror.XML_ERR_OK:
        tree.xmlOutputBufferClose(c_buffer)
        _raiseSerialisationError(error_result)

    try:
        if encoding is unicode:
            result = (<unsigned char*>tree.xmlBufContent(
                c_result_buffer))[:tree.xmlBufUse(c_result_buffer)].decode('UTF-8')
        else:
            result = <bytes>(<unsigned char*>tree.xmlBufContent(
                c_result_buffer))[:tree.xmlBufUse(c_result_buffer)]
    finally:
        error_result = tree.xmlOutputBufferClose(c_buffer)
        if error_result == -1:
            _raiseSerialisationError(error_result)
    return result
152
+
153
cdef bytes _tostringC14N(element_or_tree, bint exclusive, bint with_comments, inclusive_ns_prefixes):
    # Serialise an Element or ElementTree to its C14N 1.x canonical bytes
    # form.  For a plain Element, a temporary fake root document is used so
    # that the subtree canonicalises like a standalone document.
    cdef xmlDoc* c_doc
    cdef xmlChar* c_buffer = NULL
    cdef int byte_count = -1
    cdef bytes result
    cdef _Document doc
    cdef _Element element
    cdef xmlChar **c_inclusive_ns_prefixes

    if isinstance(element_or_tree, _Element):
        _assertValidNode(<_Element>element_or_tree)
        doc = (<_Element>element_or_tree)._doc
        c_doc = _plainFakeRootDoc(doc._c_doc, (<_Element>element_or_tree)._c_node, 0)
    else:
        doc = _documentOrRaise(element_or_tree)
        _assertValidDoc(doc)
        c_doc = doc._c_doc

    c_inclusive_ns_prefixes = _convert_ns_prefixes(c_doc.dict, inclusive_ns_prefixes) if inclusive_ns_prefixes else NULL
    try:
        with nogil:
            byte_count = c14n.xmlC14NDocDumpMemory(
                c_doc, NULL, exclusive, c_inclusive_ns_prefixes, with_comments, &c_buffer)

    finally:
        # always drop the fake root doc and the prefix array again
        _destroyFakeDoc(doc._c_doc, c_doc)
        if c_inclusive_ns_prefixes is not NULL:
            python.lxml_free(c_inclusive_ns_prefixes)

    if byte_count < 0 or c_buffer is NULL:
        if c_buffer is not NULL:
            tree.xmlFree(c_buffer)
        raise C14NError, "C14N failed"
    try:
        result = c_buffer[:byte_count]
    finally:
        tree.xmlFree(c_buffer)
    return result
191
+
192
cdef _raiseSerialisationError(int error_result):
    # Translate a libxml2 error code into the matching Python exception:
    # MemoryError for OOM, SerialisationError (with the libxml2 error name
    # if known) for everything else.
    if error_result == xmlerror.XML_ERR_NO_MEMORY:
        raise MemoryError()
    message = ErrorTypes._getName(error_result)
    if message is None:
        message = f"unknown error {error_result}"
    raise SerialisationError, message
199
+
200
+ ############################################################
201
+ # low-level serialisation functions
202
+
203
cdef void _writeDoctype(tree.xmlOutputBuffer* c_buffer,
                        const_xmlChar* c_doctype) noexcept nogil:
    # Write a user-provided DOCTYPE string verbatim, followed by a newline.
    tree.xmlOutputBufferWrite(c_buffer, tree.xmlStrlen(c_doctype),
                              <const_char*>c_doctype)
    tree.xmlOutputBufferWriteString(c_buffer, "\n")
208
+
209
cdef void _writeNodeToBuffer(tree.xmlOutputBuffer* c_buffer,
                             xmlNode* c_node, const_char* encoding, const_xmlChar* c_doctype,
                             int c_method, bint write_xml_declaration,
                             bint write_complete_document,
                             bint pretty_print, bint with_tail,
                             int standalone) noexcept nogil:
    # Core serialisation routine: writes declaration, doctype, preceding
    # siblings, the node itself and (optionally) tail/trailing siblings
    # into the output buffer.  Errors are reported via c_buffer.error.
    cdef xmlNode* c_nsdecl_node
    cdef xmlDoc* c_doc = c_node.doc
    if write_xml_declaration and c_method == OUTPUT_METHOD_XML:
        _writeDeclarationToBuffer(c_buffer, c_doc.version, encoding, standalone)

    # comments/processing instructions before doctype declaration
    if write_complete_document and not c_buffer.error and c_doc.intSubset:
        _writePrevSiblings(c_buffer, <xmlNode*>c_doc.intSubset, encoding, pretty_print)

    if c_doctype:
        _writeDoctype(c_buffer, c_doctype)
    # write internal DTD subset, preceding PIs/comments, etc.
    if write_complete_document and not c_buffer.error:
        if c_doctype is NULL:
            _writeDtdToBuffer(c_buffer, c_doc, c_node.name, c_method, encoding)
        _writePrevSiblings(c_buffer, c_node, encoding, pretty_print)

    c_nsdecl_node = c_node
    if not c_node.parent or c_node.parent.type != tree.XML_DOCUMENT_NODE:
        # copy the node and add namespaces from parents
        # this is required to make libxml write them
        c_nsdecl_node = tree.xmlCopyNode(c_node, 2)
        if not c_nsdecl_node:
            c_buffer.error = xmlerror.XML_ERR_NO_MEMORY
            return
        _copyParentNamespaces(c_node, c_nsdecl_node)

        # the copy shares the original's children for serialisation only
        c_nsdecl_node.parent = c_node.parent
        c_nsdecl_node.children = c_node.children
        c_nsdecl_node.last = c_node.last

    # write node
    if c_method == OUTPUT_METHOD_HTML:
        tree.htmlNodeDumpFormatOutput(
            c_buffer, c_doc, c_nsdecl_node, encoding, pretty_print)
    else:
        tree.xmlNodeDumpOutput(
            c_buffer, c_doc, c_nsdecl_node, 0, pretty_print, encoding)

    if c_nsdecl_node is not c_node:
        # clean up: detach the borrowed children before freeing the copy
        c_nsdecl_node.children = c_nsdecl_node.last = NULL
        tree.xmlFreeNode(c_nsdecl_node)

    if c_buffer.error:
        return

    # write tail, trailing comments, etc.
    if with_tail:
        _writeTail(c_buffer, c_node, encoding, c_method, pretty_print)
    if write_complete_document:
        _writeNextSiblings(c_buffer, c_node, encoding, pretty_print)
    if pretty_print:
        tree.xmlOutputBufferWrite(c_buffer, 1, "\n")
269
+
270
cdef void _writeDeclarationToBuffer(tree.xmlOutputBuffer* c_buffer,
                                    const_xmlChar* version, const_char* encoding,
                                    int standalone) noexcept nogil:
    # Write the "<?xml ...?>" declaration.  'standalone' is tri-state:
    # 0 -> standalone='no', 1 -> standalone='yes', anything else -> omitted.
    # The integer literals are the exact byte lengths of the C strings.
    if version is NULL:
        version = <unsigned char*>"1.0"
    tree.xmlOutputBufferWrite(c_buffer, 15, "<?xml version='")
    tree.xmlOutputBufferWriteString(c_buffer, <const_char*>version)
    tree.xmlOutputBufferWrite(c_buffer, 12, "' encoding='")
    tree.xmlOutputBufferWriteString(c_buffer, encoding)
    if standalone == 0:
        tree.xmlOutputBufferWrite(c_buffer, 20, "' standalone='no'?>\n")
    elif standalone == 1:
        tree.xmlOutputBufferWrite(c_buffer, 21, "' standalone='yes'?>\n")
    else:
        tree.xmlOutputBufferWrite(c_buffer, 4, "'?>\n")
285
+
286
cdef void _writeDtdToBuffer(tree.xmlOutputBuffer* c_buffer,
                            xmlDoc* c_doc, const_xmlChar* c_root_name,
                            int c_method, const_char* encoding) noexcept nogil:
    # Write the document's internal DTD subset as a "<!DOCTYPE ...>"
    # declaration, including PUBLIC/SYSTEM ids and the internal subset
    # ("[...]") with notations and other DTD children if present.
    cdef tree.xmlDtd* c_dtd
    cdef xmlNode* c_node
    cdef char* quotechar
    c_dtd = c_doc.intSubset
    if not c_dtd or not c_dtd.name:
        return

    # Name in document type declaration must match the root element tag.
    # For XML, case sensitive match, for HTML insensitive.
    if c_method == OUTPUT_METHOD_HTML:
        if tree.xmlStrcasecmp(c_root_name, c_dtd.name) != 0:
            return
    else:
        if tree.xmlStrcmp(c_root_name, c_dtd.name) != 0:
            return

    tree.xmlOutputBufferWrite(c_buffer, 10, "<!DOCTYPE ")
    tree.xmlOutputBufferWriteString(c_buffer, <const_char*>c_dtd.name)

    # treat empty ID strings like missing ones
    cdef const_xmlChar* public_id = c_dtd.ExternalID
    cdef const_xmlChar* sys_url = c_dtd.SystemID
    if public_id and public_id[0] == b'\0':
        public_id = NULL
    if sys_url and sys_url[0] == b'\0':
        sys_url = NULL

    if public_id:
        tree.xmlOutputBufferWrite(c_buffer, 9, ' PUBLIC "')
        tree.xmlOutputBufferWriteString(c_buffer, <const_char*>public_id)
        if sys_url:
            tree.xmlOutputBufferWrite(c_buffer, 2, '" ')
        else:
            tree.xmlOutputBufferWrite(c_buffer, 1, '"')
    elif sys_url:
        tree.xmlOutputBufferWrite(c_buffer, 8, ' SYSTEM ')

    if sys_url:
        # quote the system URL with whichever quote char it does not contain
        if tree.xmlStrchr(sys_url, b'"'):
            quotechar = '\''
        else:
            quotechar = '"'
        tree.xmlOutputBufferWrite(c_buffer, 1, quotechar)
        tree.xmlOutputBufferWriteString(c_buffer, <const_char*>sys_url)
        tree.xmlOutputBufferWrite(c_buffer, 1, quotechar)

    if (not c_dtd.entities and not c_dtd.elements and
           not c_dtd.attributes and not c_dtd.notations and
           not c_dtd.pentities):
        # no internal subset to write
        tree.xmlOutputBufferWrite(c_buffer, 2, '>\n')
        return

    tree.xmlOutputBufferWrite(c_buffer, 3, ' [\n')
    if c_dtd.notations and not c_buffer.error:
        # notations are dumped via a temporary buffer
        c_buf = tree.xmlBufferCreate()
        if not c_buf:
            c_buffer.error = xmlerror.XML_ERR_NO_MEMORY
            return
        tree.xmlDumpNotationTable(c_buf, <tree.xmlNotationTable*>c_dtd.notations)
        tree.xmlOutputBufferWrite(
            c_buffer, tree.xmlBufferLength(c_buf),
            <const_char*>tree.xmlBufferContent(c_buf))
        tree.xmlBufferFree(c_buf)
    c_node = c_dtd.children
    while c_node and not c_buffer.error:
        tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_node, 0, 0, encoding)
        c_node = c_node.next
    tree.xmlOutputBufferWrite(c_buffer, 3, "]>\n")
356
+
357
cdef void _writeTail(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node,
                     const_char* encoding, int c_method, bint pretty_print) noexcept nogil:
    "Write the element tail."
    # Dump all directly following text/CDATA siblings of c_node, stopping
    # at the first non-text sibling or on a buffer error.
    c_node = c_node.next
    while c_node and not c_buffer.error and c_node.type in (
            tree.XML_TEXT_NODE, tree.XML_CDATA_SECTION_NODE):
        if c_method == OUTPUT_METHOD_HTML:
            tree.htmlNodeDumpFormatOutput(
                c_buffer, c_node.doc, c_node, encoding, pretty_print)
        else:
            tree.xmlNodeDumpOutput(
                c_buffer, c_node.doc, c_node, 0, pretty_print, encoding)
        c_node = c_node.next
370
+
371
cdef void _writePrevSiblings(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node,
                             const_char* encoding, bint pretty_print) noexcept nogil:
    # Write PI/comment siblings that precede a root-level node (e.g.
    # top-level comments before the root element).  No-op for nodes
    # inside an element.
    cdef xmlNode* c_sibling
    if c_node.parent and _isElement(c_node.parent):
        return
    # we are at a root node, so add PI and comment siblings
    c_sibling = c_node
    # walk back to the first PI/comment in the run ...
    while c_sibling.prev and \
            (c_sibling.prev.type == tree.XML_PI_NODE or
             c_sibling.prev.type == tree.XML_COMMENT_NODE):
        c_sibling = c_sibling.prev
    # ... then dump them forward up to (excluding) c_node itself
    while c_sibling is not c_node and not c_buffer.error:
        tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_sibling, 0,
                               pretty_print, encoding)
        if pretty_print:
            tree.xmlOutputBufferWriteString(c_buffer, "\n")
        c_sibling = c_sibling.next
388
+
389
cdef void _writeNextSiblings(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node,
                             const_char* encoding, bint pretty_print) noexcept nogil:
    # Write PI/comment siblings that follow a root-level node (e.g.
    # trailing comments after the root element).  No-op for nodes
    # inside an element.
    cdef xmlNode* c_sibling
    if c_node.parent and _isElement(c_node.parent):
        return
    # we are at a root node, so add PI and comment siblings
    c_sibling = c_node.next
    while not c_buffer.error and c_sibling and \
            (c_sibling.type == tree.XML_PI_NODE or
             c_sibling.type == tree.XML_COMMENT_NODE):
        if pretty_print:
            tree.xmlOutputBufferWriteString(c_buffer, "\n")
        tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_sibling, 0,
                               pretty_print, encoding)
        c_sibling = c_sibling.next
404
+
405
+
406
+ # copied and adapted from libxml2
407
# copied and adapted from libxml2
cdef unsigned char *xmlSerializeHexCharRef(unsigned char *out, int val) noexcept:
    # Write a hexadecimal character reference ("&#xHHHH;") for 'val' into
    # 'out' and NUL-terminate it.  Returns a pointer past the ';'.
    cdef xmlChar *ptr
    cdef const xmlChar* hexdigits = b"0123456789ABCDEF"

    out[0] = b'&'
    out += 1
    out[0] = b'#'
    out += 1
    out[0] = b'x'
    out += 1

    # position 'ptr' at the last hex digit, based on the magnitude of 'val'
    if val < 0x10:
        ptr = out
    elif val < 0x100:
        ptr = out + 1
    elif val < 0x1000:
        ptr = out + 2
    elif val < 0x10000:
        ptr = out + 3
    elif val < 0x100000:
        ptr = out + 4
    else:
        ptr = out + 5

    # fill in the digits from least to most significant
    out = ptr + 1
    while val > 0:
        ptr[0] = hexdigits[val & 0xF]
        ptr -= 1
        val >>= 4

    out[0] = b';'
    out += 1
    out[0] = 0

    return out
442
+
443
+
444
+ # copied and adapted from libxml2 (xmlBufAttrSerializeTxtContent())
445
+ cdef _write_attr_string(tree.xmlOutputBuffer* buf, const char *string):
446
+ cdef const char *base
447
+ cdef const char *cur
448
+ cdef const unsigned char *ucur
449
+
450
+ cdef unsigned char tmp[12]
451
+ cdef int val = 0
452
+ cdef int l
453
+
454
+ if string == NULL:
455
+ return
456
+
457
+ base = cur = <const char*>string
458
+ while cur[0] != 0:
459
+ if cur[0] == b'\n':
460
+ if base != cur:
461
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
462
+
463
+ tree.xmlOutputBufferWrite(buf, 5, "&#10;")
464
+ cur += 1
465
+ base = cur
466
+
467
+ elif cur[0] == b'\r':
468
+ if base != cur:
469
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
470
+
471
+ tree.xmlOutputBufferWrite(buf, 5, "&#13;")
472
+ cur += 1
473
+ base = cur
474
+
475
+ elif cur[0] == b'\t':
476
+ if base != cur:
477
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
478
+
479
+ tree.xmlOutputBufferWrite(buf, 4, "&#9;")
480
+ cur += 1
481
+ base = cur
482
+
483
+ elif cur[0] == b'"':
484
+ if base != cur:
485
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
486
+
487
+ tree.xmlOutputBufferWrite(buf, 6, "&quot;")
488
+ cur += 1
489
+ base = cur
490
+
491
+ elif cur[0] == b'<':
492
+ if base != cur:
493
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
494
+
495
+ tree.xmlOutputBufferWrite(buf, 4, "&lt;")
496
+ cur += 1
497
+ base = cur
498
+
499
+ elif cur[0] == b'>':
500
+ if base != cur:
501
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
502
+
503
+ tree.xmlOutputBufferWrite(buf, 4, "&gt;")
504
+ cur += 1
505
+ base = cur
506
+ elif cur[0] == b'&':
507
+ if base != cur:
508
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
509
+
510
+ tree.xmlOutputBufferWrite(buf, 5, "&amp;")
511
+ cur += 1
512
+ base = cur
513
+
514
+ elif (<const unsigned char>cur[0] >= 0x80) and (cur[1] != 0):
515
+
516
+ if base != cur:
517
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
518
+
519
+ ucur = <const unsigned char *>cur
520
+
521
+ if ucur[0] < 0xC0:
522
+ # invalid UTF-8 sequence
523
+ val = ucur[0]
524
+ l = 1
525
+
526
+ elif ucur[0] < 0xE0:
527
+ val = (ucur[0]) & 0x1F
528
+ val <<= 6
529
+ val |= (ucur[1]) & 0x3F
530
+ l = 2
531
+
532
+ elif (ucur[0] < 0xF0) and (ucur[2] != 0):
533
+ val = (ucur[0]) & 0x0F
534
+ val <<= 6
535
+ val |= (ucur[1]) & 0x3F
536
+ val <<= 6
537
+ val |= (ucur[2]) & 0x3F
538
+ l = 3
539
+
540
+ elif (ucur[0] < 0xF8) and (ucur[2] != 0) and (ucur[3] != 0):
541
+ val = (ucur[0]) & 0x07
542
+ val <<= 6
543
+ val |= (ucur[1]) & 0x3F
544
+ val <<= 6
545
+ val |= (ucur[2]) & 0x3F
546
+ val <<= 6
547
+ val |= (ucur[3]) & 0x3F
548
+ l = 4
549
+ else:
550
+ # invalid UTF-8 sequence
551
+ val = ucur[0]
552
+ l = 1
553
+
554
+ if (l == 1) or (not tree.xmlIsCharQ(val)):
555
+ raise ValueError(f"Invalid character: {val:X}")
556
+
557
+ # We could do multiple things here. Just save
558
+ # as a char ref
559
+ xmlSerializeHexCharRef(tmp, val)
560
+ tree.xmlOutputBufferWrite(buf, len(tmp), <const char*> tmp)
561
+ cur += l
562
+ base = cur
563
+
564
+ else:
565
+ cur += 1
566
+
567
+ if base != cur:
568
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
569
+
570
+
571
+ ############################################################
572
+ # output to file-like objects
573
+
574
# File-output helper objects.  NOTE: the imports must bind the declared
# module-level names ('io_open', 'gzip', ...); a bare "from io import open"
# would bind the name 'open' instead and leave 'io_open' unset, making
# _open_utf8_file() fail at runtime.
cdef object io_open
from io import open as io_open

cdef object gzip
import gzip

cdef object getwriter
from codecs import getwriter
cdef object utf8_writer = getwriter('utf8')

cdef object contextmanager
from contextlib import contextmanager

cdef object _open_utf8_file

@contextmanager
def _open_utf8_file(file, compression=0):
    """Context manager that yields a UTF-8 text writer for 'file'.

    'file' may be a filename/path (opened here, closed on exit) or a
    file-like object (wrapped, not closed).  With a non-zero 'compression'
    level, output is gzip-compressed.
    """
    file = _getFSPathOrObject(file)
    if _isString(file):
        if compression:
            with gzip.GzipFile(file, mode='wb', compresslevel=compression) as zf:
                yield utf8_writer(zf)
        else:
            with io_open(file, 'w', encoding='utf8') as f:
                yield f
    else:
        if compression:
            with gzip.GzipFile(fileobj=file, mode='wb', compresslevel=compression) as zf:
                yield utf8_writer(zf)
        else:
            yield utf8_writer(file)
605
+
606
+
607
@cython.final
@cython.internal
cdef class _FilelikeWriter:
    """Adapter that lets libxml2 output buffers write into a Python
    file-like object.  Exceptions raised by the file object are stored
    in an _ExceptionContext and re-raised later, since the C callbacks
    must not propagate Python exceptions.
    """
    cdef object _filelike
    cdef object _close_filelike
    cdef _ExceptionContext _exc_context
    cdef _ErrorLog error_log
    def __cinit__(self, filelike, exc_context=None, compression=None, close=False):
        if compression is not None and compression > 0:
            # wrap in gzip; the wrapper must always be closed by us
            filelike = GzipFile(
                fileobj=filelike, mode='wb', compresslevel=compression)
            self._close_filelike = filelike.close
        elif close:
            self._close_filelike = filelike.close
        self._filelike = filelike
        if exc_context is None:
            self._exc_context = _ExceptionContext()
        else:
            self._exc_context = exc_context
        self.error_log = _ErrorLog()

    cdef tree.xmlOutputBuffer* _createOutputBuffer(
            self, tree.xmlCharEncodingHandler* enchandler) except NULL:
        # Create a libxml2 output buffer whose write/close callbacks
        # forward to this writer instance.
        cdef tree.xmlOutputBuffer* c_buffer
        c_buffer = tree.xmlOutputBufferCreateIO(
            <tree.xmlOutputWriteCallback>_writeFilelikeWriter, _closeFilelikeWriter,
            <python.PyObject*>self, enchandler)
        if c_buffer is NULL:
            raise IOError, "Could not create I/O writer context."
        return c_buffer

    cdef int write(self, char* c_buffer, int size) noexcept:
        # libxml2 write callback body: returns the number of bytes
        # written, or -1 on error (with the exception stored for later).
        try:
            if self._filelike is None:
                raise IOError, "File is already closed"
            py_buffer = <bytes>c_buffer[:size]
            self._filelike.write(py_buffer)
        except:
            size = -1
            self._exc_context._store_raised()
        finally:
            return size  # and swallow any further exceptions

    cdef int close(self) noexcept:
        # libxml2 close callback body: returns 0 on success, -1 on error.
        retval = 0
        try:
            if self._close_filelike is not None:
                self._close_filelike()
            # we should not close the file here as we didn't open it
            self._filelike = None
        except:
            retval = -1
            self._exc_context._store_raised()
        finally:
            return retval  # and swallow any further exceptions
662
+
663
cdef int _writeFilelikeWriter(void* ctxt, char* c_buffer, int length) noexcept:
    # C write callback passed to xmlOutputBufferCreateIO(); 'ctxt' is the
    # _FilelikeWriter instance.
    return (<_FilelikeWriter>ctxt).write(c_buffer, length)
665
+
666
cdef int _closeFilelikeWriter(void* ctxt) noexcept:
    # C close callback passed to xmlOutputBufferCreateIO(); 'ctxt' is the
    # _FilelikeWriter instance.
    return (<_FilelikeWriter>ctxt).close()
668
+
669
cdef _tofilelike(f, _Element element, encoding, doctype, method,
                 bint write_xml_declaration, bint write_doctype,
                 bint pretty_print, bint with_tail, int standalone,
                 int compression):
    # Serialise 'element' to a filename/path or file-like object 'f',
    # with optional gzip compression.  Text output (method="text") is
    # handled separately from the libxml2 output-buffer path.
    cdef _FilelikeWriter writer = None
    cdef tree.xmlOutputBuffer* c_buffer
    cdef tree.xmlCharEncodingHandler* enchandler
    cdef const_char* c_enc
    cdef const_xmlChar* c_doctype
    cdef int error_result

    c_method = _findOutputMethod(method)
    if c_method == OUTPUT_METHOD_TEXT:
        # text output: build the bytes in memory, then write them out
        data = _textToString(element._c_node, encoding, with_tail)
        if compression:
            bytes_out = BytesIO()
            with GzipFile(fileobj=bytes_out, mode='wb', compresslevel=compression) as gzip_file:
                gzip_file.write(data)
            data = bytes_out.getvalue()
        f = _getFSPathOrObject(f)
        if _isString(f):
            filename8 = _encodeFilename(f)
            with open(filename8, 'wb') as f:
                f.write(data)
        else:
            f.write(data)
        return

    if encoding is None:
        c_enc = NULL
    else:
        encoding = _utf8(encoding)
        c_enc = _cstr(encoding)
    if doctype is None:
        c_doctype = NULL
    else:
        doctype = _utf8(doctype)
        c_doctype = _xcstr(doctype)

    writer = _create_output_buffer(f, c_enc, compression, &c_buffer, close=False)
    if writer is None:
        # writing to a real file: no Python callbacks involved, so the
        # GIL can be released for the whole serialisation
        with nogil:
            error_result = _serialise_node(
                c_buffer, c_doctype, c_enc, element._c_node, c_method,
                write_xml_declaration, write_doctype, pretty_print, with_tail, standalone)
    else:
        # writing through a Python file-like object: callbacks need the GIL
        error_result = _serialise_node(
            c_buffer, c_doctype, c_enc, element._c_node, c_method,
            write_xml_declaration, write_doctype, pretty_print, with_tail, standalone)

    if writer is not None:
        writer._exc_context._raise_if_stored()
    if error_result != xmlerror.XML_ERR_OK:
        _raiseSerialisationError(error_result)
723
+
724
+
725
cdef int _serialise_node(tree.xmlOutputBuffer* c_buffer, const_xmlChar* c_doctype,
                         const_char* c_enc, xmlNode* c_node, int c_method,
                         bint write_xml_declaration, bint write_doctype, bint pretty_print,
                         bint with_tail, int standalone) noexcept nogil:
    # Serialise the node into the output buffer and close the buffer,
    # folding any error into a single libxml2 error code (XML_ERR_OK on
    # success).  The buffer is closed on all paths.
    _writeNodeToBuffer(
        c_buffer, c_node, c_enc, c_doctype, c_method,
        write_xml_declaration, write_doctype, pretty_print, with_tail, standalone)
    error_result = c_buffer.error
    if error_result == xmlerror.XML_ERR_OK:
        error_result = tree.xmlOutputBufferClose(c_buffer)
        if error_result != -1:
            error_result = xmlerror.XML_ERR_OK
    else:
        tree.xmlOutputBufferClose(c_buffer)
    return error_result
740
+
741
+
742
cdef _FilelikeWriter _create_output_buffer(
        f, const_char* c_enc, int c_compression,
        tree.xmlOutputBuffer** c_buffer_ret, bint close):
    # Create a libxml2 output buffer for 'f' (filename/path or file-like
    # object).  Returns the _FilelikeWriter adapter for file-like objects,
    # or None when libxml2 writes to the file directly.  The created
    # buffer is returned through 'c_buffer_ret'.
    cdef tree.xmlOutputBuffer* c_buffer
    cdef _FilelikeWriter writer
    cdef bytes filename8
    enchandler = tree.xmlFindCharEncodingHandler(c_enc)
    if enchandler is NULL:
        raise LookupError(
            f"unknown encoding: '{c_enc.decode('UTF-8') if c_enc is not NULL else u''}'")
    try:
        f = _getFSPathOrObject(f)
        if _isString(f):
            filename8 = _encodeFilename(f)
            if b'%' in filename8 and (
                    # Exclude absolute Windows paths and file:// URLs.
                    _isFilePath(<const xmlChar*>filename8) not in (NO_FILE_PATH, ABS_WIN_FILE_PATH)
                    or filename8[:7].lower() == b'file://'):
                # A file path (not a URL) containing the '%' URL escape character.
                # libxml2 uses URL-unescaping on these, so escape the path before passing it in.
                filename8 = filename8.replace(b'%', b'%25')
            c_buffer = tree.xmlOutputBufferCreateFilename(
                _cstr(filename8), enchandler, c_compression)
            if c_buffer is NULL:
                python.PyErr_SetFromErrno(IOError)  # raises IOError
            writer = None
        elif hasattr(f, 'write'):
            writer = _FilelikeWriter(f, compression=c_compression, close=close)
            c_buffer = writer._createOutputBuffer(enchandler)
        else:
            raise TypeError(
                f"File or filename expected, got '{python._fqtypename(f).decode('UTF-8')}'")
    except:
        # the encoding handler is owned by the buffer once created; free
        # it ourselves only on failure
        tree.xmlCharEncCloseFunc(enchandler)
        raise
    c_buffer_ret[0] = c_buffer
    return writer
779
+
780
cdef xmlChar **_convert_ns_prefixes(tree.xmlDict* c_dict, ns_prefixes) except NULL:
    # Convert a Python sequence of namespace prefixes into a NULL-terminated
    # C array of dict-interned xmlChar* strings for the C14N functions.
    # Prefixes not present in the document dict are silently dropped.
    # The caller must free the array with python.lxml_free().
    cdef size_t i, num_ns_prefixes = len(ns_prefixes)
    # Need to allocate one extra memory block to handle last NULL entry
    c_ns_prefixes = <xmlChar **>python.lxml_malloc(num_ns_prefixes + 1, sizeof(xmlChar*))
    if not c_ns_prefixes:
        raise MemoryError()
    i = 0
    try:
        for prefix in ns_prefixes:
            prefix_utf = _utf8(prefix)
            c_prefix = tree.xmlDictExists(c_dict, _xcstr(prefix_utf), len(prefix_utf))
            if c_prefix:
                # unknown prefixes do not need to get serialised
                c_ns_prefixes[i] = <xmlChar*>c_prefix
                i += 1
    except:
        python.lxml_free(c_ns_prefixes)
        raise

    c_ns_prefixes[i] = NULL  # append end marker
    return c_ns_prefixes
801
+
802
cdef _tofilelikeC14N(f, _Element element, bint exclusive, bint with_comments,
                     int compression, inclusive_ns_prefixes):
    # Write the C14N 1.x canonical form of 'element' to a filename/path
    # or file-like object.  A fake root document is used so that the
    # element's subtree canonicalises like a standalone document.
    cdef _FilelikeWriter writer = None
    cdef tree.xmlOutputBuffer* c_buffer
    cdef xmlChar **c_inclusive_ns_prefixes = NULL
    cdef char* c_filename
    cdef xmlDoc* c_base_doc
    cdef xmlDoc* c_doc
    cdef int bytes_count, error = 0

    c_base_doc = element._c_node.doc
    c_doc = _fakeRootDoc(c_base_doc, element._c_node)
    try:
        c_inclusive_ns_prefixes = (
            _convert_ns_prefixes(c_doc.dict, inclusive_ns_prefixes)
            if inclusive_ns_prefixes else NULL)

        f = _getFSPathOrObject(f)
        if _isString(f):
            # libxml2 writes (and optionally compresses) the file directly
            filename8 = _encodeFilename(f)
            c_filename = _cstr(filename8)
            with nogil:
                error = c14n.xmlC14NDocSave(
                    c_doc, NULL, exclusive, c_inclusive_ns_prefixes,
                    with_comments, c_filename, compression)
        elif hasattr(f, 'write'):
            # route the output through a _FilelikeWriter adapter
            writer = _FilelikeWriter(f, compression=compression)
            c_buffer = writer._createOutputBuffer(NULL)
            try:
                with writer.error_log:
                    bytes_count = c14n.xmlC14NDocSaveTo(
                        c_doc, NULL, exclusive, c_inclusive_ns_prefixes,
                        with_comments, c_buffer)
            finally:
                error = tree.xmlOutputBufferClose(c_buffer)
            if bytes_count < 0:
                error = bytes_count
            elif error != -1:
                error = xmlerror.XML_ERR_OK
        else:
            raise TypeError(f"File or filename expected, got '{python._fqtypename(f).decode('UTF-8')}'")
    finally:
        _destroyFakeDoc(c_base_doc, c_doc)
        if c_inclusive_ns_prefixes is not NULL:
            python.lxml_free(c_inclusive_ns_prefixes)

    if writer is not None:
        writer._exc_context._raise_if_stored()

    if error < 0:
        message = "C14N failed"
        if writer is not None:
            errors = writer.error_log
            if len(errors):
                message = errors[0].message
        raise C14NError(message)
858
+
859
+
860
+ # C14N 2.0
861
+
862
def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
    """Convert XML to its C14N 2.0 serialised form.

    If *out* is provided, it must be a file or file-like object that receives
    the serialised canonical XML output (text, not bytes) through its ``.write()``
    method. To write to a file, open it in text mode with encoding "utf-8".
    If *out* is not provided, this function returns the output as text string.

    Either *xml_data* (an XML string, tree or Element) or *file*
    (a file path or file-like object) must be provided as input.

    The configuration options are the same as for the ``C14NWriterTarget``.
    """
    if xml_data is None and from_file is None:
        raise ValueError("Either 'xml_data' or 'from_file' must be provided as input")

    sio = None
    if out is None:
        # no output target given: collect into a StringIO and return its value
        sio = out = StringIO()

    target = C14NWriterTarget(out.write, **options)

    if xml_data is not None and not isinstance(xml_data, basestring):
        # already-parsed tree/Element input: replay its events directly
        _tree_to_target(xml_data, target)
        return sio.getvalue() if sio is not None else None

    cdef _FeedParser parser = XMLParser(
        target=target,
        attribute_defaults=True,
        collect_ids=False,
    )

    if xml_data is not None:
        parser.feed(xml_data)
        parser.close()
    elif from_file is not None:
        try:
            _parseDocument(from_file, parser, base_url=None)
        except _TargetParserResult:
            # the target parser signals completion via this exception
            pass

    return sio.getvalue() if sio is not None else None
904
+
905
+
906
cdef _tree_to_target(element, target):
    # Replay an in-memory tree as parser events (start/end/start-ns/
    # comment/pi plus text data) into a parser target, and return the
    # target's close() result.
    for event, elem in iterwalk(element, events=('start', 'end', 'start-ns', 'comment', 'pi')):
        text = None
        if event == 'start':
            target.start(elem.tag, elem.attrib)
            text = elem.text
        elif event == 'end':
            target.end(elem.tag)
            text = elem.tail
        elif event == 'start-ns':
            target.start_ns(*elem)
            continue
        elif event == 'comment':
            target.comment(elem.text)
            text = elem.tail
        elif event == 'pi':
            target.pi(elem.target, elem.text)
            text = elem.tail
        if text:
            target.data(text)
    return target.close()
927
+
928
+
929
# Matcher for "prefix:localname" QName-like strings (used by the C14N 2.0
# writer to detect prefixed names in text content).
cdef object _looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match
930
+
931
+
932
+ cdef class C14NWriterTarget:
933
+ """
934
+ Canonicalization writer target for the XMLParser.
935
+
936
+ Serialises parse events to XML C14N 2.0.
937
+
938
+ Configuration options:
939
+
940
+ - *with_comments*: set to true to include comments
941
+ - *strip_text*: set to true to strip whitespace before and after text content
942
+ - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}"
943
+ - *qname_aware_tags*: a set of qname aware tag names in which prefixes
944
+ should be replaced in text content
945
+ - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes
946
+ should be replaced in text content
947
+ - *exclude_attrs*: a set of attribute names that should not be serialised
948
+ - *exclude_tags*: a set of tag names that should not be serialised
949
+ """
950
+ cdef object _write
951
+ cdef list _data
952
+ cdef set _qname_aware_tags
953
+ cdef object _find_qname_aware_attrs
954
+ cdef list _declared_ns_stack
955
+ cdef list _ns_stack
956
+ cdef dict _prefix_map
957
+ cdef list _preserve_space
958
+ cdef tuple _pending_start
959
+ cdef set _exclude_tags
960
+ cdef set _exclude_attrs
961
+ cdef Py_ssize_t _ignored_depth
962
+ cdef bint _with_comments
963
+ cdef bint _strip_text
964
+ cdef bint _rewrite_prefixes
965
+ cdef bint _root_seen
966
+ cdef bint _root_done
967
+
968
+ def __init__(self, write, *,
969
+ with_comments=False, strip_text=False, rewrite_prefixes=False,
970
+ qname_aware_tags=None, qname_aware_attrs=None,
971
+ exclude_attrs=None, exclude_tags=None):
972
+ self._write = write
973
+ self._data = []
974
+ self._with_comments = with_comments
975
+ self._strip_text = strip_text
976
+ self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None
977
+ self._exclude_tags = set(exclude_tags) if exclude_tags else None
978
+
979
+ self._rewrite_prefixes = rewrite_prefixes
980
+ if qname_aware_tags:
981
+ self._qname_aware_tags = set(qname_aware_tags)
982
+ else:
983
+ self._qname_aware_tags = None
984
+ if qname_aware_attrs:
985
+ self._find_qname_aware_attrs = set(qname_aware_attrs).intersection
986
+ else:
987
+ self._find_qname_aware_attrs = None
988
+
989
+ # Stack with globally and newly declared namespaces as (uri, prefix) pairs.
990
+ self._declared_ns_stack = [[
991
+ ("http://www.w3.org/XML/1998/namespace", "xml"),
992
+ ]]
993
+ # Stack with user declared namespace prefixes as (uri, prefix) pairs.
994
+ self._ns_stack = []
995
+ if not rewrite_prefixes:
996
+ self._ns_stack.append(_DEFAULT_NAMESPACE_PREFIXES_ITEMS)
997
+ self._ns_stack.append([])
998
+ self._prefix_map = {}
999
+ self._preserve_space = [False]
1000
+ self._pending_start = None
1001
+ self._ignored_depth = 0
1002
+ self._root_seen = False
1003
+ self._root_done = False
1004
+
1005
+ def _iter_namespaces(self, ns_stack):
1006
+ for namespaces in reversed(ns_stack):
1007
+ if namespaces: # almost no element declares new namespaces
1008
+ yield from namespaces
1009
+
1010
+ cdef _resolve_prefix_name(self, prefixed_name):
1011
+ prefix, name = prefixed_name.split(':', 1)
1012
+ for uri, p in self._iter_namespaces(self._ns_stack):
1013
+ if p == prefix:
1014
+ return f'{{{uri}}}{name}'
1015
+ raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope')
1016
+
1017
+ cdef _qname(self, qname, uri=None):
1018
+ if uri is None:
1019
+ uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname)
1020
+ else:
1021
+ tag = qname
1022
+
1023
+ prefixes_seen = set()
1024
+ for u, prefix in self._iter_namespaces(self._declared_ns_stack):
1025
+ if u == uri and prefix not in prefixes_seen:
1026
+ return f'{prefix}:{tag}' if prefix else tag, tag, uri
1027
+ prefixes_seen.add(prefix)
1028
+
1029
+ # Not declared yet => add new declaration.
1030
+ if self._rewrite_prefixes:
1031
+ if uri in self._prefix_map:
1032
+ prefix = self._prefix_map[uri]
1033
+ else:
1034
+ prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}'
1035
+ self._declared_ns_stack[-1].append((uri, prefix))
1036
+ return f'{prefix}:{tag}', tag, uri
1037
+
1038
+ if not uri and '' not in prefixes_seen:
1039
+ # No default namespace declared => no prefix needed.
1040
+ return tag, tag, uri
1041
+
1042
+ for u, prefix in self._iter_namespaces(self._ns_stack):
1043
+ if u == uri:
1044
+ self._declared_ns_stack[-1].append((uri, prefix))
1045
+ return f'{prefix}:{tag}' if prefix else tag, tag, uri
1046
+
1047
+ if not uri:
1048
+ # As soon as a default namespace is defined,
1049
+ # anything that has no namespace (and thus, no prefix) goes there.
1050
+ return tag, tag, uri
1051
+
1052
+ raise ValueError(f'Namespace "{uri}" of name "{tag}" is not declared in scope')
1053
+
1054
+ def data(self, data):
1055
+ if not self._ignored_depth:
1056
+ self._data.append(data)
1057
+
1058
+ cdef _flush(self):
1059
+ cdef unicode data = ''.join(self._data)
1060
+ del self._data[:]
1061
+ if self._strip_text and not self._preserve_space[-1]:
1062
+ data = data.strip()
1063
+ if self._pending_start is not None:
1064
+ (tag, attrs, new_namespaces), self._pending_start = self._pending_start, None
1065
+ qname_text = data if ':' in data and _looks_like_prefix_name(data) else None
1066
+ self._start(tag, attrs, new_namespaces, qname_text)
1067
+ if qname_text is not None:
1068
+ return
1069
+ if data and self._root_seen:
1070
+ self._write(_escape_cdata_c14n(data))
1071
+
1072
+ def start_ns(self, prefix, uri):
1073
+ if self._ignored_depth:
1074
+ return
1075
+ # we may have to resolve qnames in text content
1076
+ if self._data:
1077
+ self._flush()
1078
+ self._ns_stack[-1].append((uri, prefix))
1079
+
1080
+ def start(self, tag, attrs):
1081
+ if self._exclude_tags is not None and (
1082
+ self._ignored_depth or tag in self._exclude_tags):
1083
+ self._ignored_depth += 1
1084
+ return
1085
+ if self._data:
1086
+ self._flush()
1087
+
1088
+ new_namespaces = []
1089
+ self._declared_ns_stack.append(new_namespaces)
1090
+
1091
+ if self._qname_aware_tags is not None and tag in self._qname_aware_tags:
1092
+ # Need to parse text first to see if it requires a prefix declaration.
1093
+ self._pending_start = (tag, attrs, new_namespaces)
1094
+ return
1095
+ self._start(tag, attrs, new_namespaces)
1096
+
1097
+ cdef _start(self, tag, attrs, new_namespaces, qname_text=None):
1098
+ if self._exclude_attrs is not None and attrs:
1099
+ attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs}
1100
+
1101
+ qnames = {tag, *attrs}
1102
+ resolved_names = {}
1103
+
1104
+ # Resolve prefixes in attribute and tag text.
1105
+ if qname_text is not None:
1106
+ qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
1107
+ qnames.add(qname)
1108
+ if self._find_qname_aware_attrs is not None and attrs:
1109
+ qattrs = self._find_qname_aware_attrs(attrs)
1110
+ if qattrs:
1111
+ for attr_name in qattrs:
1112
+ value = attrs[attr_name]
1113
+ if _looks_like_prefix_name(value):
1114
+ qname = resolved_names[value] = self._resolve_prefix_name(value)
1115
+ qnames.add(qname)
1116
+ else:
1117
+ qattrs = None
1118
+ else:
1119
+ qattrs = None
1120
+
1121
+ # Assign prefixes in lexicographical order of used URIs.
1122
+ parsed_qnames = {n: self._qname(n) for n in sorted(
1123
+ qnames, key=lambda n: n.split('}', 1))}
1124
+
1125
+ # Write namespace declarations in prefix order ...
1126
+ if new_namespaces:
1127
+ attr_list = [
1128
+ ('xmlns:' + prefix if prefix else 'xmlns', uri)
1129
+ for uri, prefix in new_namespaces
1130
+ ]
1131
+ attr_list.sort()
1132
+ else:
1133
+ # almost always empty
1134
+ attr_list = []
1135
+
1136
+ # ... followed by attributes in URI+name order
1137
+ if attrs:
1138
+ for k, v in sorted(attrs.items()):
1139
+ if qattrs is not None and k in qattrs and v in resolved_names:
1140
+ v = parsed_qnames[resolved_names[v]][0]
1141
+ attr_qname, attr_name, uri = parsed_qnames[k]
1142
+ # No prefix for attributes in default ('') namespace.
1143
+ attr_list.append((attr_qname if uri else attr_name, v))
1144
+
1145
+ # Honour xml:space attributes.
1146
+ space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
1147
+ self._preserve_space.append(
1148
+ space_behaviour == 'preserve' if space_behaviour
1149
+ else self._preserve_space[-1])
1150
+
1151
+ # Write the tag.
1152
+ write = self._write
1153
+ write('<' + parsed_qnames[tag][0])
1154
+ if attr_list:
1155
+ write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
1156
+ write('>')
1157
+
1158
+ # Write the resolved qname text content.
1159
+ if qname_text is not None:
1160
+ write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
1161
+
1162
+ self._root_seen = True
1163
+ self._ns_stack.append([])
1164
+
1165
+ def end(self, tag):
1166
+ if self._ignored_depth:
1167
+ self._ignored_depth -= 1
1168
+ return
1169
+ if self._data:
1170
+ self._flush()
1171
+ self._write(f'</{self._qname(tag)[0]}>')
1172
+ self._preserve_space.pop()
1173
+ self._root_done = len(self._preserve_space) == 1
1174
+ self._declared_ns_stack.pop()
1175
+ self._ns_stack.pop()
1176
+
1177
+ def comment(self, text):
1178
+ if not self._with_comments:
1179
+ return
1180
+ if self._ignored_depth:
1181
+ return
1182
+ if self._root_done:
1183
+ self._write('\n')
1184
+ elif self._root_seen and self._data:
1185
+ self._flush()
1186
+ self._write(f'<!--{_escape_cdata_c14n(text)}-->')
1187
+ if not self._root_seen:
1188
+ self._write('\n')
1189
+
1190
+ def pi(self, target, data):
1191
+ if self._ignored_depth:
1192
+ return
1193
+ if self._root_done:
1194
+ self._write('\n')
1195
+ elif self._root_seen and self._data:
1196
+ self._flush()
1197
+ self._write(
1198
+ f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>')
1199
+ if not self._root_seen:
1200
+ self._write('\n')
1201
+
1202
+ def close(self):
1203
+ return None
1204
+
1205
+
1206
+ cdef _raise_serialization_error(text):
1207
+ raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
1208
+
1209
+
1210
+ cdef unicode _escape_cdata_c14n(stext):
1211
+ # escape character data
1212
+ cdef unicode text
1213
+ cdef Py_UCS4 ch
1214
+ cdef Py_ssize_t start = 0, pos = 0
1215
+ cdef list substrings = None
1216
+ try:
1217
+ text = unicode(stext)
1218
+ except (TypeError, AttributeError):
1219
+ return _raise_serialization_error(stext)
1220
+
1221
+ for pos, ch in enumerate(text):
1222
+ if ch == '&':
1223
+ escape = '&amp;'
1224
+ elif ch == '<':
1225
+ escape = '&lt;'
1226
+ elif ch == '>':
1227
+ escape = '&gt;'
1228
+ elif ch == '\r':
1229
+ escape = '&#xD;'
1230
+ else:
1231
+ continue
1232
+
1233
+ if substrings is None:
1234
+ substrings = []
1235
+ if pos > start:
1236
+ substrings.append(text[start:pos])
1237
+ substrings.append(escape)
1238
+ start = pos + 1
1239
+
1240
+ if substrings is None:
1241
+ return text
1242
+ if pos >= start:
1243
+ substrings.append(text[start:pos+1])
1244
+ return ''.join(substrings)
1245
+
1246
+
1247
+ cdef unicode _escape_attrib_c14n(stext):
1248
+ # escape attribute value
1249
+ cdef unicode text
1250
+ cdef Py_UCS4 ch
1251
+ cdef Py_ssize_t start = 0, pos = 0
1252
+ cdef list substrings = None
1253
+ try:
1254
+ text = unicode(stext)
1255
+ except (TypeError, AttributeError):
1256
+ return _raise_serialization_error(stext)
1257
+
1258
+ for pos, ch in enumerate(text):
1259
+ if ch == '&':
1260
+ escape = '&amp;'
1261
+ elif ch == '<':
1262
+ escape = '&lt;'
1263
+ elif ch == '"':
1264
+ escape = '&quot;'
1265
+ elif ch == '\t':
1266
+ escape = '&#x9;'
1267
+ elif ch == '\n':
1268
+ escape = '&#xA;'
1269
+ elif ch == '\r':
1270
+ escape = '&#xD;'
1271
+ else:
1272
+ continue
1273
+
1274
+ if substrings is None:
1275
+ substrings = []
1276
+ if pos > start:
1277
+ substrings.append(text[start:pos])
1278
+ substrings.append(escape)
1279
+ start = pos + 1
1280
+
1281
+ if substrings is None:
1282
+ return text
1283
+ if pos >= start:
1284
+ substrings.append(text[start:pos+1])
1285
+ return ''.join(substrings)
1286
+
1287
+
1288
+ # incremental serialisation
1289
+
1290
+ cdef class xmlfile:
1291
+ """xmlfile(self, output_file, encoding=None, compression=None, close=False, buffered=True)
1292
+
1293
+ A simple mechanism for incremental XML serialisation.
1294
+
1295
+ Usage example::
1296
+
1297
+ with xmlfile("somefile.xml", encoding='utf-8') as xf:
1298
+ xf.write_declaration(standalone=True)
1299
+ xf.write_doctype('<!DOCTYPE root SYSTEM "some.dtd">')
1300
+
1301
+ # generate an element (the root element)
1302
+ with xf.element('root'):
1303
+ # write a complete Element into the open root element
1304
+ xf.write(etree.Element('test'))
1305
+
1306
+ # generate and write more Elements, e.g. through iterparse
1307
+ for element in generate_some_elements():
1308
+ # serialise generated elements into the XML file
1309
+ xf.write(element)
1310
+
1311
+ # or write multiple Elements or strings at once
1312
+ xf.write(etree.Element('start'), "text", etree.Element('end'))
1313
+
1314
+ If 'output_file' is a file(-like) object, passing ``close=True`` will
1315
+ close it when exiting the context manager. By default, it is left
1316
+ to the owner to do that. When a file path is used, lxml will take care
1317
+ of opening and closing the file itself. Also, when a compression level
1318
+ is set, lxml will deliberately close the file to make sure all data gets
1319
+ compressed and written.
1320
+
1321
+ Setting ``buffered=False`` will flush the output after each operation,
1322
+ such as opening or closing an ``xf.element()`` block or calling
1323
+ ``xf.write()``. Alternatively, calling ``xf.flush()`` can be used to
1324
+ explicitly flush any pending output when buffering is enabled.
1325
+ """
1326
+ cdef object output_file
1327
+ cdef bytes encoding
1328
+ cdef _IncrementalFileWriter writer
1329
+ cdef _AsyncIncrementalFileWriter async_writer
1330
+ cdef int compresslevel
1331
+ cdef bint close
1332
+ cdef bint buffered
1333
+ cdef int method
1334
+
1335
+ def __init__(self, output_file not None, encoding=None, compression=None,
1336
+ close=False, buffered=True):
1337
+ self.output_file = output_file
1338
+ self.encoding = _utf8orNone(encoding)
1339
+ self.compresslevel = compression or 0
1340
+ self.close = close
1341
+ self.buffered = buffered
1342
+ self.method = OUTPUT_METHOD_XML
1343
+
1344
+ def __enter__(self):
1345
+ assert self.output_file is not None
1346
+ self.writer = _IncrementalFileWriter(
1347
+ self.output_file, self.encoding, self.compresslevel,
1348
+ self.close, self.buffered, self.method)
1349
+ return self.writer
1350
+
1351
+ def __exit__(self, exc_type, exc_val, exc_tb):
1352
+ if self.writer is not None:
1353
+ old_writer, self.writer = self.writer, None
1354
+ raise_on_error = exc_type is None
1355
+ old_writer._close(raise_on_error)
1356
+ if self.close:
1357
+ self.output_file = None
1358
+
1359
+ async def __aenter__(self):
1360
+ assert self.output_file is not None
1361
+ if isinstance(self.output_file, basestring):
1362
+ raise TypeError("Cannot asynchronously write to a plain file")
1363
+ if not hasattr(self.output_file, 'write'):
1364
+ raise TypeError("Output file needs an async .write() method")
1365
+ self.async_writer = _AsyncIncrementalFileWriter(
1366
+ self.output_file, self.encoding, self.compresslevel,
1367
+ self.close, self.buffered, self.method)
1368
+ return self.async_writer
1369
+
1370
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
1371
+ if self.async_writer is not None:
1372
+ old_writer, self.async_writer = self.async_writer, None
1373
+ raise_on_error = exc_type is None
1374
+ await old_writer._close(raise_on_error)
1375
+ if self.close:
1376
+ self.output_file = None
1377
+
1378
+
1379
+ cdef class htmlfile(xmlfile):
1380
+ """htmlfile(self, output_file, encoding=None, compression=None, close=False, buffered=True)
1381
+
1382
+ A simple mechanism for incremental HTML serialisation. Works the same as
1383
+ xmlfile.
1384
+ """
1385
+ def __init__(self, *args, **kwargs):
1386
+ super().__init__(*args, **kwargs)
1387
+ self.method = OUTPUT_METHOD_HTML
1388
+
1389
+
1390
+ cdef enum _IncrementalFileWriterStatus:
1391
+ WRITER_STARTING = 0
1392
+ WRITER_DECL_WRITTEN = 1
1393
+ WRITER_DTD_WRITTEN = 2
1394
+ WRITER_IN_ELEMENT = 3
1395
+ WRITER_FINISHED = 4
1396
+
1397
+
1398
+ @cython.final
1399
+ @cython.internal
1400
+ cdef class _IncrementalFileWriter:
1401
+ cdef tree.xmlOutputBuffer* _c_out
1402
+ cdef bytes _encoding
1403
+ cdef const_char* _c_encoding
1404
+ cdef _FilelikeWriter _target
1405
+ cdef list _element_stack
1406
+ cdef int _status
1407
+ cdef int _method
1408
+ cdef bint _buffered
1409
+
1410
+ def __cinit__(self, outfile, bytes encoding, int compresslevel, bint close,
1411
+ bint buffered, int method):
1412
+ self._status = WRITER_STARTING
1413
+ self._element_stack = []
1414
+ if encoding is None:
1415
+ encoding = b'ASCII'
1416
+ self._encoding = encoding
1417
+ self._c_encoding = _cstr(encoding) if encoding is not None else NULL
1418
+ self._buffered = buffered
1419
+ self._target = _create_output_buffer(
1420
+ outfile, self._c_encoding, compresslevel, &self._c_out, close)
1421
+ self._method = method
1422
+
1423
+ def __dealloc__(self):
1424
+ if self._c_out is not NULL:
1425
+ tree.xmlOutputBufferClose(self._c_out)
1426
+
1427
+ def write_declaration(self, version=None, standalone=None, doctype=None):
1428
+ """write_declaration(self, version=None, standalone=None, doctype=None)
1429
+
1430
+ Write an XML declaration and (optionally) a doctype into the file.
1431
+ """
1432
+ assert self._c_out is not NULL
1433
+ cdef const_xmlChar* c_version
1434
+ cdef int c_standalone
1435
+ if self._method != OUTPUT_METHOD_XML:
1436
+ raise LxmlSyntaxError("only XML documents have declarations")
1437
+ if self._status >= WRITER_DECL_WRITTEN:
1438
+ raise LxmlSyntaxError("XML declaration already written")
1439
+ version = _utf8orNone(version)
1440
+ c_version = _xcstr(version) if version is not None else NULL
1441
+ doctype = _utf8orNone(doctype)
1442
+ if standalone is None:
1443
+ c_standalone = -1
1444
+ else:
1445
+ c_standalone = 1 if standalone else 0
1446
+ _writeDeclarationToBuffer(self._c_out, c_version, self._c_encoding, c_standalone)
1447
+ if doctype is not None:
1448
+ _writeDoctype(self._c_out, _xcstr(doctype))
1449
+ self._status = WRITER_DTD_WRITTEN
1450
+ else:
1451
+ self._status = WRITER_DECL_WRITTEN
1452
+ if not self._buffered:
1453
+ tree.xmlOutputBufferFlush(self._c_out)
1454
+ self._handle_error(self._c_out.error)
1455
+
1456
+ def write_doctype(self, doctype):
1457
+ """write_doctype(self, doctype)
1458
+
1459
+ Writes the given doctype declaration verbatimly into the file.
1460
+ """
1461
+ assert self._c_out is not NULL
1462
+ if doctype is None:
1463
+ return
1464
+ if self._status >= WRITER_DTD_WRITTEN:
1465
+ raise LxmlSyntaxError("DOCTYPE already written or cannot write it here")
1466
+ doctype = _utf8(doctype)
1467
+ _writeDoctype(self._c_out, _xcstr(doctype))
1468
+ self._status = WRITER_DTD_WRITTEN
1469
+ if not self._buffered:
1470
+ tree.xmlOutputBufferFlush(self._c_out)
1471
+ self._handle_error(self._c_out.error)
1472
+
1473
+ def method(self, method):
1474
+ """method(self, method)
1475
+
1476
+ Returns a context manager that overrides and restores the output method.
1477
+ method is one of (None, 'xml', 'html') where None means 'xml'.
1478
+ """
1479
+ assert self._c_out is not NULL
1480
+ c_method = self._method if method is None else _findOutputMethod(method)
1481
+ return _MethodChanger(self, c_method)
1482
+
1483
+ def element(self, tag, attrib=None, nsmap=None, method=None, **_extra):
1484
+ """element(self, tag, attrib=None, nsmap=None, method, **_extra)
1485
+
1486
+ Returns a context manager that writes an opening and closing tag.
1487
+ method is one of (None, 'xml', 'html') where None means 'xml'.
1488
+ """
1489
+ assert self._c_out is not NULL
1490
+ attributes = []
1491
+ if attrib is not None:
1492
+ for name, value in _iter_attrib(attrib):
1493
+ if name not in _extra:
1494
+ ns, name = _getNsTag(name)
1495
+ attributes.append((ns, name, _utf8(value)))
1496
+ if _extra:
1497
+ for name, value in _extra.iteritems():
1498
+ ns, name = _getNsTag(name)
1499
+ attributes.append((ns, name, _utf8(value)))
1500
+ reversed_nsmap = {}
1501
+ if nsmap:
1502
+ for prefix, ns in nsmap.items():
1503
+ if prefix is not None:
1504
+ prefix = _utf8(prefix)
1505
+ _prefixValidOrRaise(prefix)
1506
+ reversed_nsmap[_utf8(ns)] = prefix
1507
+ ns, name = _getNsTag(tag)
1508
+
1509
+ c_method = self._method if method is None else _findOutputMethod(method)
1510
+
1511
+ return _FileWriterElement(self, (ns, name, attributes, reversed_nsmap), c_method)
1512
+
1513
+ cdef _write_qname(self, bytes name, bytes prefix):
1514
+ if prefix: # empty bytes for no prefix (not None to allow sorting)
1515
+ tree.xmlOutputBufferWrite(self._c_out, len(prefix), _cstr(prefix))
1516
+ tree.xmlOutputBufferWrite(self._c_out, 1, ':')
1517
+ tree.xmlOutputBufferWrite(self._c_out, len(name), _cstr(name))
1518
+
1519
+ cdef _write_start_element(self, element_config):
1520
+ if self._status > WRITER_IN_ELEMENT:
1521
+ raise LxmlSyntaxError("cannot append trailing element to complete XML document")
1522
+ ns, name, attributes, nsmap = element_config
1523
+ flat_namespace_map, new_namespaces = self._collect_namespaces(nsmap)
1524
+ prefix = self._find_prefix(ns, flat_namespace_map, new_namespaces)
1525
+ tree.xmlOutputBufferWrite(self._c_out, 1, '<')
1526
+ self._write_qname(name, prefix)
1527
+
1528
+ self._write_attributes_and_namespaces(
1529
+ attributes, flat_namespace_map, new_namespaces)
1530
+
1531
+ tree.xmlOutputBufferWrite(self._c_out, 1, '>')
1532
+ if not self._buffered:
1533
+ tree.xmlOutputBufferFlush(self._c_out)
1534
+ self._handle_error(self._c_out.error)
1535
+
1536
+ self._element_stack.append((ns, name, prefix, flat_namespace_map))
1537
+ self._status = WRITER_IN_ELEMENT
1538
+
1539
+ cdef _write_attributes_and_namespaces(self, list attributes,
1540
+ dict flat_namespace_map,
1541
+ list new_namespaces):
1542
+ if attributes:
1543
+ # _find_prefix() may append to new_namespaces => build them first
1544
+ attributes = [
1545
+ (self._find_prefix(ns, flat_namespace_map, new_namespaces), name, value)
1546
+ for ns, name, value in attributes ]
1547
+ if new_namespaces:
1548
+ new_namespaces.sort()
1549
+ self._write_attributes_list(new_namespaces)
1550
+ if attributes:
1551
+ self._write_attributes_list(attributes)
1552
+
1553
+ cdef _write_attributes_list(self, list attributes):
1554
+ for prefix, name, value in attributes:
1555
+ tree.xmlOutputBufferWrite(self._c_out, 1, ' ')
1556
+ self._write_qname(name, prefix)
1557
+ tree.xmlOutputBufferWrite(self._c_out, 2, '="')
1558
+ _write_attr_string(self._c_out, _cstr(value))
1559
+
1560
+ tree.xmlOutputBufferWrite(self._c_out, 1, '"')
1561
+
1562
+ cdef _write_end_element(self, element_config):
1563
+ if self._status != WRITER_IN_ELEMENT:
1564
+ raise LxmlSyntaxError("not in an element")
1565
+ if not self._element_stack or self._element_stack[-1][:2] != element_config[:2]:
1566
+ raise LxmlSyntaxError("inconsistent exit action in context manager")
1567
+
1568
+ # If previous write operations failed, the context manager exit might still call us.
1569
+ # That is ok, but we stop writing closing tags and handling errors in that case.
1570
+ # For all non-I/O errors, we continue writing closing tags if we can.
1571
+ ok_to_write = self._c_out.error == xmlerror.XML_ERR_OK
1572
+
1573
+ name, prefix = self._element_stack.pop()[1:3]
1574
+ if ok_to_write:
1575
+ tree.xmlOutputBufferWrite(self._c_out, 2, '</')
1576
+ self._write_qname(name, prefix)
1577
+ tree.xmlOutputBufferWrite(self._c_out, 1, '>')
1578
+
1579
+ if not self._element_stack:
1580
+ self._status = WRITER_FINISHED
1581
+ if ok_to_write:
1582
+ if not self._buffered:
1583
+ tree.xmlOutputBufferFlush(self._c_out)
1584
+ self._handle_error(self._c_out.error)
1585
+
1586
+ cdef _find_prefix(self, bytes href, dict flat_namespaces_map, list new_namespaces):
1587
+ if href is None:
1588
+ return None
1589
+ if href in flat_namespaces_map:
1590
+ return flat_namespaces_map[href]
1591
+ # need to create a new prefix
1592
+ prefixes = flat_namespaces_map.values()
1593
+ i = 0
1594
+ while True:
1595
+ prefix = _utf8('ns%d' % i)
1596
+ if prefix not in prefixes:
1597
+ new_namespaces.append((b'xmlns', prefix, href))
1598
+ flat_namespaces_map[href] = prefix
1599
+ return prefix
1600
+ i += 1
1601
+
1602
+ cdef _collect_namespaces(self, dict nsmap):
1603
+ new_namespaces = []
1604
+ flat_namespaces_map = {}
1605
+ for ns, prefix in nsmap.iteritems():
1606
+ flat_namespaces_map[ns] = prefix
1607
+ if prefix is None:
1608
+ # use empty bytes rather than None to allow sorting
1609
+ new_namespaces.append((b'', b'xmlns', ns))
1610
+ else:
1611
+ new_namespaces.append((b'xmlns', prefix, ns))
1612
+ # merge in flat namespace map of parent
1613
+ if self._element_stack:
1614
+ for ns, prefix in (<dict>self._element_stack[-1][-1]).iteritems():
1615
+ if flat_namespaces_map.get(ns) is None:
1616
+ # unknown or empty prefix => prefer a 'real' prefix
1617
+ flat_namespaces_map[ns] = prefix
1618
+ return flat_namespaces_map, new_namespaces
1619
+
1620
+ def write(self, *args, bint with_tail=True, bint pretty_print=False, method=None):
1621
+ """write(self, *args, with_tail=True, pretty_print=False, method=None)
1622
+
1623
+ Write subtrees or strings into the file.
1624
+
1625
+ If method is not None, it should be one of ('html', 'xml', 'text')
1626
+ to temporarily override the output method.
1627
+ """
1628
+ assert self._c_out is not NULL
1629
+ c_method = self._method if method is None else _findOutputMethod(method)
1630
+
1631
+ for content in args:
1632
+ if _isString(content):
1633
+ if self._status != WRITER_IN_ELEMENT:
1634
+ if self._status > WRITER_IN_ELEMENT or content.strip():
1635
+ raise LxmlSyntaxError("not in an element")
1636
+ bstring = _utf8(content)
1637
+ if not bstring:
1638
+ continue
1639
+
1640
+ ns, name, _, _ = self._element_stack[-1]
1641
+ if (c_method == OUTPUT_METHOD_HTML and
1642
+ ns in (None, b'http://www.w3.org/1999/xhtml') and
1643
+ name in (b'script', b'style')):
1644
+ tree.xmlOutputBufferWrite(self._c_out, len(bstring), _cstr(bstring))
1645
+
1646
+ else:
1647
+ tree.xmlOutputBufferWriteEscape(self._c_out, _xcstr(bstring), NULL)
1648
+
1649
+ elif iselement(content):
1650
+ if self._status > WRITER_IN_ELEMENT:
1651
+ raise LxmlSyntaxError("cannot append trailing element to complete XML document")
1652
+ _writeNodeToBuffer(self._c_out, (<_Element>content)._c_node,
1653
+ self._c_encoding, NULL, c_method,
1654
+ False, False, pretty_print, with_tail, False)
1655
+ if (<_Element>content)._c_node.type == tree.XML_ELEMENT_NODE:
1656
+ if not self._element_stack:
1657
+ self._status = WRITER_FINISHED
1658
+
1659
+ elif content is not None:
1660
+ raise TypeError(
1661
+ f"got invalid input value of type {type(content)}, expected string or Element")
1662
+ self._handle_error(self._c_out.error)
1663
+ if not self._buffered:
1664
+ tree.xmlOutputBufferFlush(self._c_out)
1665
+ self._handle_error(self._c_out.error)
1666
+
1667
+ def flush(self):
1668
+ """flush(self)
1669
+
1670
+ Write any pending content of the current output buffer to the stream.
1671
+ """
1672
+ assert self._c_out is not NULL
1673
+ tree.xmlOutputBufferFlush(self._c_out)
1674
+ self._handle_error(self._c_out.error)
1675
+
1676
+ cdef _close(self, bint raise_on_error):
1677
+ if raise_on_error:
1678
+ if self._status < WRITER_IN_ELEMENT:
1679
+ raise LxmlSyntaxError("no content written")
1680
+ if self._element_stack:
1681
+ raise LxmlSyntaxError("pending open tags on close")
1682
+ error_result = self._c_out.error
1683
+ if error_result == xmlerror.XML_ERR_OK:
1684
+ error_result = tree.xmlOutputBufferClose(self._c_out)
1685
+ if error_result != -1:
1686
+ error_result = xmlerror.XML_ERR_OK
1687
+ else:
1688
+ tree.xmlOutputBufferClose(self._c_out)
1689
+ self._status = WRITER_FINISHED
1690
+ self._c_out = NULL
1691
+ del self._element_stack[:]
1692
+ if raise_on_error:
1693
+ self._handle_error(error_result)
1694
+
1695
+ cdef _handle_error(self, int error_result):
1696
+ if error_result != xmlerror.XML_ERR_OK:
1697
+ if self._target is not None:
1698
+ self._target._exc_context._raise_if_stored()
1699
+ _raiseSerialisationError(error_result)
1700
+
1701
+
1702
+ @cython.final
1703
+ @cython.internal
1704
+ cdef class _AsyncDataWriter:
1705
+ cdef list _data
1706
+ def __cinit__(self):
1707
+ self._data = []
1708
+
1709
+ cdef bytes collect(self):
1710
+ data = b''.join(self._data)
1711
+ del self._data[:]
1712
+ return data
1713
+
1714
+ def write(self, data):
1715
+ self._data.append(data)
1716
+
1717
+ def close(self):
1718
+ pass
1719
+
1720
+
1721
+ @cython.final
1722
+ @cython.internal
1723
+ cdef class _AsyncIncrementalFileWriter:
1724
+ cdef _IncrementalFileWriter _writer
1725
+ cdef _AsyncDataWriter _buffer
1726
+ cdef object _async_outfile
1727
+ cdef int _flush_after_writes
1728
+ cdef bint _should_close
1729
+ cdef bint _buffered
1730
+
1731
+ def __cinit__(self, async_outfile, bytes encoding, int compresslevel, bint close,
1732
+ bint buffered, int method):
1733
+ self._flush_after_writes = 20
1734
+ self._async_outfile = async_outfile
1735
+ self._should_close = close
1736
+ self._buffered = buffered
1737
+ self._buffer = _AsyncDataWriter()
1738
+ self._writer = _IncrementalFileWriter(
1739
+ self._buffer, encoding, compresslevel, close=True, buffered=False, method=method)
1740
+
1741
+ cdef bytes _flush(self):
1742
+ if not self._buffered or len(self._buffer._data) > self._flush_after_writes:
1743
+ return self._buffer.collect()
1744
+ return None
1745
+
1746
+ async def flush(self):
1747
+ self._writer.flush()
1748
+ data = self._buffer.collect()
1749
+ if data:
1750
+ await self._async_outfile.write(data)
1751
+
1752
+ async def write_declaration(self, version=None, standalone=None, doctype=None):
1753
+ self._writer.write_declaration(version, standalone, doctype)
1754
+ data = self._flush()
1755
+ if data:
1756
+ await self._async_outfile.write(data)
1757
+
1758
+ async def write_doctype(self, doctype):
1759
+ self._writer.write_doctype(doctype)
1760
+ data = self._flush()
1761
+ if data:
1762
+ await self._async_outfile.write(data)
1763
+
1764
+ async def write(self, *args, with_tail=True, pretty_print=False, method=None):
1765
+ self._writer.write(*args, with_tail=with_tail, pretty_print=pretty_print, method=method)
1766
+ data = self._flush()
1767
+ if data:
1768
+ await self._async_outfile.write(data)
1769
+
1770
+ def method(self, method):
1771
+ return self._writer.method(method)
1772
+
1773
+ def element(self, tag, attrib=None, nsmap=None, method=None, **_extra):
1774
+ element_writer = self._writer.element(tag, attrib, nsmap, method, **_extra)
1775
+ return _AsyncFileWriterElement(element_writer, self)
1776
+
1777
+ async def _close(self, bint raise_on_error):
1778
+ self._writer._close(raise_on_error)
1779
+ data = self._buffer.collect()
1780
+ if data:
1781
+ await self._async_outfile.write(data)
1782
+ if self._should_close:
1783
+ await self._async_outfile.close()
1784
+
1785
+
1786
+ @cython.final
1787
+ @cython.internal
1788
+ cdef class _AsyncFileWriterElement:
1789
+ cdef _FileWriterElement _element_writer
1790
+ cdef _AsyncIncrementalFileWriter _writer
1791
+
1792
+ def __cinit__(self, _FileWriterElement element_writer not None,
1793
+ _AsyncIncrementalFileWriter writer not None):
1794
+ self._element_writer = element_writer
1795
+ self._writer = writer
1796
+
1797
+ async def __aenter__(self):
1798
+ self._element_writer.__enter__()
1799
+ data = self._writer._flush()
1800
+ if data:
1801
+ await self._writer._async_outfile.write(data)
1802
+
1803
+ async def __aexit__(self, *args):
1804
+ self._element_writer.__exit__(*args)
1805
+ data = self._writer._flush()
1806
+ if data:
1807
+ await self._writer._async_outfile.write(data)
1808
+
1809
+
1810
+ @cython.final
1811
+ @cython.internal
1812
+ @cython.freelist(8)
1813
+ cdef class _FileWriterElement:
1814
+ cdef _IncrementalFileWriter _writer
1815
+ cdef object _element
1816
+ cdef int _new_method
1817
+ cdef int _old_method
1818
+
1819
+ def __cinit__(self, _IncrementalFileWriter writer not None, element_config, int method):
1820
+ self._writer = writer
1821
+ self._element = element_config
1822
+ self._new_method = method
1823
+ self._old_method = writer._method
1824
+
1825
+ def __enter__(self):
1826
+ self._writer._method = self._new_method
1827
+ self._writer._write_start_element(self._element)
1828
+
1829
+ def __exit__(self, exc_type, exc_val, exc_tb):
1830
+ self._writer._write_end_element(self._element)
1831
+ self._writer._method = self._old_method
1832
+
1833
+
1834
+ @cython.final
1835
+ @cython.internal
1836
+ @cython.freelist(8)
1837
+ cdef class _MethodChanger:
1838
+ cdef _IncrementalFileWriter _writer
1839
+ cdef int _new_method
1840
+ cdef int _old_method
1841
+ cdef bint _entered
1842
+ cdef bint _exited
1843
+
1844
+ def __cinit__(self, _IncrementalFileWriter writer not None, int method):
1845
+ self._writer = writer
1846
+ self._new_method = method
1847
+ self._old_method = writer._method
1848
+ self._entered = False
1849
+ self._exited = False
1850
+
1851
+ def __enter__(self):
1852
+ if self._entered:
1853
+ raise LxmlSyntaxError("Inconsistent enter action in context manager")
1854
+ self._writer._method = self._new_method
1855
+ self._entered = True
1856
+
1857
+ def __exit__(self, exc_type, exc_val, exc_tb):
1858
+ if self._exited:
1859
+ raise LxmlSyntaxError("Inconsistent exit action in context manager")
1860
+ if self._writer._method != self._new_method:
1861
+ raise LxmlSyntaxError("Method changed outside of context manager")
1862
+ self._writer._method = self._old_method
1863
+ self._exited = True
1864
+
1865
+ async def __aenter__(self):
1866
+ # for your async convenience
1867
+ return self.__enter__()
1868
+
1869
+ async def __aexit__(self, *args):
1870
+ # for your async convenience
1871
+ return self.__exit__(*args)
llmeval-env/lib/python3.10/site-packages/lxml/usedoctest.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Doctest module for XML comparison.
2
+
3
+ Usage::
4
+
5
+ >>> import lxml.usedoctest
6
+ >>> # now do your XML doctests ...
7
+
8
+ See `lxml.doctestcompare`
9
+ """
10
+
11
+ from lxml import doctestcompare
12
+
13
+ doctestcompare.temp_install(del_module=__name__)
llmeval-env/lib/python3.10/site-packages/lxml/xmlid.pxi ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
cdef object _find_id_attributes

def XMLID(text, parser=None, *, base_url=None):
    """XMLID(text, parser=None, base_url=None)

    Parse the text and return a tuple (root node, ID dictionary). The root
    node is the same as returned by the XML() function. The dictionary
    contains string-element pairs. The dictionary keys are the values of 'id'
    attributes. The elements referenced by the ID are stored as dictionary
    values.
    """
    cdef dict id_map
    global _find_id_attributes
    if _find_id_attributes is None:
        # Compile the lookup expression once and cache it at module level.
        _find_id_attributes = XPath('//*[string(@id)]')

    # ElementTree compatible implementation: parse the document, then map
    # each non-empty 'id' attribute value to its element (later duplicates
    # overwrite earlier ones, as in the sequential-assignment original).
    root = XML(text, parser, base_url=base_url)
    id_map = {element.get('id'): element
              for element in _find_id_attributes(root)}
    return root, id_map
23
+
24
def XMLDTDID(text, parser=None, *, base_url=None):
    """XMLDTDID(text, parser=None, base_url=None)

    Parse the text and return a tuple (root node, ID dictionary). The root
    node is the same as returned by the XML() function. The dictionary
    contains string-element pairs. The dictionary keys are the values of ID
    attributes as defined by the DTD. The elements referenced by the ID are
    stored as dictionary values.

    Note that you must not modify the XML tree if you use the ID dictionary.
    The results are undefined.
    """
    cdef _Element root
    root = XML(text, parser, base_url=base_url)
    # xml:id spec compatible implementation: use DTD ID attributes from libxml2
    if root._doc._c_doc.ids is NULL:
        # No DTD-declared ID attributes were collected during parsing.
        return root, {}
    else:
        return root, _IDDict(root)
43
+
44
def parseid(source, parser=None, *, base_url=None):
    """parseid(source, parser=None)

    Parses the source into a tuple containing an ElementTree object and an
    ID dictionary. If no parser is provided as second argument, the default
    parser is used.

    Note that you must not modify the XML tree if you use the ID dictionary.
    The results are undefined.
    """
    cdef _Document doc
    doc = _parseDocument(source, parser, base_url)
    # Wrap the parsed document in an ElementTree and expose the libxml2 ID
    # hash table through the lazy _IDDict proxy.
    return _elementTreeFactory(doc, None), _IDDict(doc)
57
+
58
cdef class _IDDict:
    """IDDict(self, etree)
    A dictionary-like proxy class that maps ID attributes to elements.

    The dictionary must be instantiated with the root element of a parsed XML
    document, otherwise the behaviour is undefined.  Elements and XML trees
    that were created or modified 'by hand' are not supported.
    """
    cdef _Document _doc
    cdef object _keys    # lazily built list of ID names (see _build_keys)
    cdef object _items   # lazily built list of (name, element) pairs
    def __cinit__(self, etree):
        cdef _Document doc
        doc = _documentOrRaise(etree)
        if doc._c_doc.ids is NULL:
            # libxml2 only fills the 'ids' hash for DTD-declared ID attributes
            raise ValueError, "No ID dictionary available."
        self._doc = doc
        self._keys  = None
        self._items = None

    def copy(self):
        return _IDDict(self._doc)

    def __getitem__(self, id_name):
        cdef tree.xmlHashTable* c_ids
        cdef tree.xmlID* c_id
        cdef xmlAttr* c_attr
        c_ids = self._doc._c_doc.ids
        id_utf = _utf8(id_name)
        c_id = <tree.xmlID*>tree.xmlHashLookup(c_ids, _xcstr(id_utf))
        if c_id is NULL:
            raise KeyError, "key not found."
        c_attr = c_id.attr
        if c_attr is NULL or c_attr.parent is NULL:
            # entry exists but its attribute node is gone/detached
            raise KeyError, "ID attribute not found."
        # Wrap the attribute's carrying element as a proxy.
        return _elementFactory(self._doc, c_attr.parent)

    def get(self, id_name):
        # NOTE: unlike dict.get(), this raises KeyError for missing keys.
        return self[id_name]

    def __contains__(self, id_name):
        cdef tree.xmlID* c_id
        id_utf = _utf8(id_name)
        c_id = <tree.xmlID*>tree.xmlHashLookup(
            self._doc._c_doc.ids, _xcstr(id_utf))
        return c_id is not NULL

    def has_key(self, id_name):
        return id_name in self

    def __repr__(self):
        return repr(dict(self))

    def keys(self):
        if self._keys is None:
            self._keys = self._build_keys()
        # return a copy so callers cannot mutate the cache
        return self._keys[:]

    def __iter__(self):
        if self._keys is None:
            self._keys = self._build_keys()
        return iter(self._keys)

    def iterkeys(self):
        return self

    def __len__(self):
        if self._keys is None:
            self._keys = self._build_keys()
        return len(self._keys)

    def items(self):
        if self._items is None:
            self._items = self._build_items()
        # return a copy so callers cannot mutate the cache
        return self._items[:]

    def iteritems(self):
        if self._items is None:
            self._items = self._build_items()
        return iter(self._items)

    def values(self):
        cdef list values = []
        if self._items is None:
            self._items = self._build_items()
        for item in self._items:
            # PyTuple_GET_ITEM returns a borrowed reference; INCREF before
            # storing it in the result list.
            value = python.PyTuple_GET_ITEM(item, 1)
            python.Py_INCREF(value)
            values.append(value)
        return values

    def itervalues(self):
        return iter(self.values())

    cdef object _build_keys(self):
        # Collect all ID names by scanning libxml2's ID hash table.
        keys = []
        tree.xmlHashScan(<tree.xmlHashTable*>self._doc._c_doc.ids,
                         <tree.xmlHashScanner>_collectIdHashKeys, <python.PyObject*>keys)
        return keys

    cdef object _build_items(self):
        # Collect (name, element) pairs; the callback needs the _Document to
        # build element proxies, so pass it along in the context tuple.
        items = []
        context = (items, self._doc)
        tree.xmlHashScan(<tree.xmlHashTable*>self._doc._c_doc.ids,
                         <tree.xmlHashScanner>_collectIdHashItemList, <python.PyObject*>context)
        return items
164
+
165
cdef void _collectIdHashItemList(void* payload, void* context, xmlChar* name) noexcept:
    # collect elements from ID attribute hash table
    # xmlHashScan callback: 'payload' is the xmlID entry, 'context' is the
    # (result_list, _Document) tuple built in _IDDict._build_items().
    cdef list lst
    c_id = <tree.xmlID*>payload
    if c_id is NULL or c_id.attr is NULL or c_id.attr.parent is NULL:
        # skip stale entries whose attribute/element is gone
        return
    lst, doc = <tuple>context
    element = _elementFactory(doc, c_id.attr.parent)
    lst.append( (funicode(name), element) )
174
+
175
cdef void _collectIdHashKeys(void* payload, void* collect_list, xmlChar* name) noexcept:
    # xmlHashScan callback: append the ID name of each live hash entry to
    # the Python list passed as 'collect_list' (see _IDDict._build_keys()).
    c_id = <tree.xmlID*>payload
    if c_id is NULL or c_id.attr is NULL or c_id.attr.parent is NULL:
        # skip stale entries whose attribute/element is gone
        return
    (<list>collect_list).append(funicode(name))
llmeval-env/lib/python3.10/site-packages/lxml/xmlschema.pxi ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# support for XMLSchema validation
from lxml.includes cimport xmlschema


cdef class XMLSchemaError(LxmlError):
    """Base class of all XML Schema errors
    """

cdef class XMLSchemaParseError(XMLSchemaError):
    """Error while parsing an XML document as XML Schema.
    """

cdef class XMLSchemaValidateError(XMLSchemaError):
    """Error while validating an XML document with an XML Schema.
    """


################################################################################
# XMLSchema

# Precompiled XPath test: does the schema document declare any attribute
# with a default or fixed value?  Used to decide whether attribute-default
# injection can ever be needed for this schema.
cdef XPath _check_for_default_attributes = XPath(
    "boolean(//xs:attribute[@default or @fixed][1])",
    namespaces={'xs': 'http://www.w3.org/2001/XMLSchema'})
24
+
25
+
26
cdef class XMLSchema(_Validator):
    """XMLSchema(self, etree=None, file=None)
    Turn a document into an XML Schema validator.

    Either pass a schema as Element or ElementTree, or pass a file or
    filename through the ``file`` keyword argument.

    Passing the ``attribute_defaults`` boolean option will make the
    schema insert default/fixed attributes into validated documents.
    """
    cdef xmlschema.xmlSchema* _c_schema
    cdef _Document _doc
    cdef bint _has_default_attributes   # schema declares default/fixed attributes
    cdef bint _add_attribute_defaults   # inject those defaults during validation

    def __cinit__(self):
        self._has_default_attributes = True # play it safe
        self._add_attribute_defaults = False

    def __init__(self, etree=None, *, file=None, bint attribute_defaults=False):
        cdef xmlschema.xmlSchemaParserCtxt* parser_ctxt
        cdef xmlDoc* c_doc

        self._add_attribute_defaults = attribute_defaults
        _Validator.__init__(self)
        c_doc = NULL
        if etree is not None:
            # Copy the schema (sub)tree so the validator owns its own document.
            doc = _documentOrRaise(etree)
            root_node = _rootNodeOrRaise(etree)
            c_doc = _copyDocRoot(doc._c_doc, root_node._c_node)
            self._doc = _documentFactory(c_doc, doc._parser)
            parser_ctxt = xmlschema.xmlSchemaNewDocParserCtxt(c_doc)
        elif file is not None:
            file = _getFSPathOrObject(file)
            if _isString(file):
                # let libxml2 handle filesystem access itself
                filename = _encodeFilename(file)
                parser_ctxt = xmlschema.xmlSchemaNewParserCtxt(_cstr(filename))
            else:
                # file-like object: parse it ourselves first
                self._doc = _parseDocument(file, None, None)
                parser_ctxt = xmlschema.xmlSchemaNewDocParserCtxt(self._doc._c_doc)
        else:
            raise XMLSchemaParseError, "No tree or file given"

        if parser_ctxt is NULL:
            raise MemoryError()

        # Need a cast here because older libxml2 releases do not use 'const' in the functype.
        xmlschema.xmlSchemaSetParserStructuredErrors(
            parser_ctxt, <xmlerror.xmlStructuredErrorFunc> _receiveError, <void*>self._error_log)
        if self._doc is not None:
            # calling xmlSchemaParse on a schema with imports or
            # includes will cause libxml2 to create an internal
            # context for parsing, so push an implied context to route
            # resolve requests to the document's parser
            __GLOBAL_PARSER_CONTEXT.pushImpliedContextFromParser(self._doc._parser)
        with nogil:
            orig_loader = _register_document_loader()
            self._c_schema = xmlschema.xmlSchemaParse(parser_ctxt)
            _reset_document_loader(orig_loader)
        if self._doc is not None:
            __GLOBAL_PARSER_CONTEXT.popImpliedContext()
        xmlschema.xmlSchemaFreeParserCtxt(parser_ctxt)

        if self._c_schema is NULL:
            raise XMLSchemaParseError(
                self._error_log._buildExceptionMessage(
                    "Document is not valid XML Schema"),
                self._error_log)

        if self._doc is not None:
            # Only enable default-attribute injection if the schema actually
            # declares default/fixed attributes.
            self._has_default_attributes = _check_for_default_attributes(self._doc)
        self._add_attribute_defaults = attribute_defaults and self._has_default_attributes

    def __dealloc__(self):
        xmlschema.xmlSchemaFree(self._c_schema)

    def __call__(self, etree):
        """__call__(self, etree)

        Validate doc using XML Schema.

        Returns true if document is valid, false if not.
        """
        cdef xmlschema.xmlSchemaValidCtxt* valid_ctxt
        cdef _Document doc
        cdef _Element root_node
        cdef xmlDoc* c_doc
        cdef int ret

        assert self._c_schema is not NULL, "Schema instance not initialised"
        doc = _documentOrRaise(etree)
        root_node = _rootNodeOrRaise(etree)

        valid_ctxt = xmlschema.xmlSchemaNewValidCtxt(self._c_schema)
        if valid_ctxt is NULL:
            raise MemoryError()

        try:
            if self._add_attribute_defaults:
                xmlschema.xmlSchemaSetValidOptions(
                    valid_ctxt, xmlschema.XML_SCHEMA_VAL_VC_I_CREATE)

            self._error_log.clear()
            # Need a cast here because older libxml2 releases do not use 'const' in the functype.
            xmlschema.xmlSchemaSetValidStructuredErrors(
                valid_ctxt, <xmlerror.xmlStructuredErrorFunc> _receiveError, <void*>self._error_log)

            # Validate the given subtree as if it were the document root.
            c_doc = _fakeRootDoc(doc._c_doc, root_node._c_node)
            with nogil:
                ret = xmlschema.xmlSchemaValidateDoc(valid_ctxt, c_doc)
            _destroyFakeDoc(doc._c_doc, c_doc)
        finally:
            xmlschema.xmlSchemaFreeValidCtxt(valid_ctxt)

        if ret == -1:
            raise XMLSchemaValidateError(
                "Internal error in XML Schema validation.",
                self._error_log)
        # libxml2 returns 0 for a valid document, an error count otherwise.
        if ret == 0:
            return True
        else:
            return False

    cdef _ParserSchemaValidationContext _newSaxValidator(
            self, bint add_default_attributes):
        # Build a SAX-level validation context bound to this schema, for
        # validating documents while they are being parsed.
        cdef _ParserSchemaValidationContext context
        context = _ParserSchemaValidationContext.__new__(_ParserSchemaValidationContext)
        context._schema = self
        context._add_default_attributes = (self._has_default_attributes and (
            add_default_attributes or self._add_attribute_defaults))
        return context
157
+
158
@cython.final
@cython.internal
cdef class _ParserSchemaValidationContext:
    # Plugs XML Schema validation into a libxml2 SAX parser context so that
    # documents are validated while being parsed (see XMLSchema._newSaxValidator).
    cdef XMLSchema _schema
    cdef xmlschema.xmlSchemaValidCtxt* _valid_ctxt
    cdef xmlschema.xmlSchemaSAXPlugStruct* _sax_plug
    cdef bint _add_default_attributes
    def __cinit__(self):
        self._valid_ctxt = NULL
        self._sax_plug = NULL
        self._add_default_attributes = False

    def __dealloc__(self):
        self.disconnect()
        if self._valid_ctxt:
            xmlschema.xmlSchemaFreeValidCtxt(self._valid_ctxt)

    cdef _ParserSchemaValidationContext copy(self):
        assert self._schema is not None, "_ParserSchemaValidationContext not initialised"
        return self._schema._newSaxValidator(
            self._add_default_attributes)

    cdef void inject_default_attributes(self, xmlDoc* c_doc) noexcept:
        # we currently need to insert default attributes manually
        # after parsing, as libxml2 does not support this at parse
        # time
        if self._add_default_attributes:
            with nogil:
                xmlschema.xmlSchemaValidateDoc(self._valid_ctxt, c_doc)

    cdef int connect(self, xmlparser.xmlParserCtxt* c_ctxt, _BaseErrorLog error_log) except -1:
        # Lazily create the validation context, then hook it into the
        # parser's SAX handler chain.
        if self._valid_ctxt is NULL:
            self._valid_ctxt = xmlschema.xmlSchemaNewValidCtxt(
                self._schema._c_schema)
            if self._valid_ctxt is NULL:
                raise MemoryError()
            if self._add_default_attributes:
                xmlschema.xmlSchemaSetValidOptions(
                    self._valid_ctxt, xmlschema.XML_SCHEMA_VAL_VC_I_CREATE)
        if error_log is not None:
            # Need a cast here because older libxml2 releases do not use 'const' in the functype.
            xmlschema.xmlSchemaSetValidStructuredErrors(
                self._valid_ctxt, <xmlerror.xmlStructuredErrorFunc> _receiveError, <void*>error_log)
        self._sax_plug = xmlschema.xmlSchemaSAXPlug(
            self._valid_ctxt, &c_ctxt.sax, &c_ctxt.userData)

    cdef void disconnect(self) noexcept:
        # Unhook from the SAX chain and drop the error-log callback; keeps
        # the validation context itself alive for reuse (freed in __dealloc__).
        if self._sax_plug is not NULL:
            xmlschema.xmlSchemaSAXUnplug(self._sax_plug)
            self._sax_plug = NULL
        if self._valid_ctxt is not NULL:
            xmlschema.xmlSchemaSetValidStructuredErrors(
                self._valid_ctxt, NULL, NULL)

    cdef bint isvalid(self) noexcept:
        if self._valid_ctxt is NULL:
            return 1 # valid
        return xmlschema.xmlSchemaIsValid(self._valid_ctxt)
llmeval-env/lib/python3.10/site-packages/lxml/xpath.pxi ADDED
@@ -0,0 +1,487 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# XPath evaluation

class XPathSyntaxError(LxmlSyntaxError, XPathError):
    pass

################################################################################
# XPath

# libxml2 error codes that indicate a broken XPath expression (reported as
# XPathSyntaxError), as opposed to errors during evaluation.
cdef object _XPATH_SYNTAX_ERRORS = (
    xmlerror.XML_XPATH_NUMBER_ERROR,
    xmlerror.XML_XPATH_UNFINISHED_LITERAL_ERROR,
    xmlerror.XML_XPATH_VARIABLE_REF_ERROR,
    xmlerror.XML_XPATH_INVALID_PREDICATE_ERROR,
    xmlerror.XML_XPATH_UNCLOSED_ERROR,
    xmlerror.XML_XPATH_INVALID_CHAR_ERROR
)

# libxml2 error codes raised while evaluating a (syntactically valid)
# expression (reported as XPathEvalError).
cdef object _XPATH_EVAL_ERRORS = (
    xmlerror.XML_XPATH_UNDEF_VARIABLE_ERROR,
    xmlerror.XML_XPATH_UNDEF_PREFIX_ERROR,
    xmlerror.XML_XPATH_UNKNOWN_FUNC_ERROR,
    xmlerror.XML_XPATH_INVALID_OPERAND,
    xmlerror.XML_XPATH_INVALID_TYPE,
    xmlerror.XML_XPATH_INVALID_ARITY,
    xmlerror.XML_XPATH_INVALID_CTXT_SIZE,
    xmlerror.XML_XPATH_INVALID_CTXT_POSITION
)

cdef int _register_xpath_function(void* ctxt, name_utf, ns_utf) noexcept:
    # Register the generic _xpath_function_call trampoline under the given
    # (optionally namespaced) function name in an xmlXPathContext.
    if ns_utf is None:
        return xpath.xmlXPathRegisterFunc(
            <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf),
            _xpath_function_call)
    else:
        return xpath.xmlXPathRegisterFuncNS(
            <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf), _xcstr(ns_utf),
            _xpath_function_call)

cdef int _unregister_xpath_function(void* ctxt, name_utf, ns_utf) noexcept:
    # Registering NULL removes the function from the context again.
    if ns_utf is None:
        return xpath.xmlXPathRegisterFunc(
            <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf), NULL)
    else:
        return xpath.xmlXPathRegisterFuncNS(
            <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf), _xcstr(ns_utf), NULL)
46
+
47
+
48
@cython.final
@cython.internal
cdef class _XPathContext(_BaseContext):
    # Evaluation context for XPath: extends _BaseContext with XPath variable
    # registration and EXSLT function setup.
    cdef object _variables
    def __init__(self, namespaces, extensions, error_log, enable_regexp, variables,
                 build_smart_strings):
        self._variables = variables
        _BaseContext.__init__(self, namespaces, extensions, error_log, enable_regexp,
                              build_smart_strings)

    cdef set_context(self, xpath.xmlXPathContext* xpathCtxt):
        self._set_xpath_context(xpathCtxt)
        # This would be a good place to set up the XPath parser dict, but
        # we cannot use the current thread dict as we do not know which
        # thread will execute the XPath evaluator - so, no dict for now.
        self.registerLocalNamespaces()
        self.registerLocalFunctions(xpathCtxt, _register_xpath_function)

    cdef register_context(self, _Document doc):
        # Per-evaluation setup: bind the document, register globals and
        # any variables that were passed at construction time.
        self._register_context(doc)
        self.registerGlobalNamespaces()
        self.registerGlobalFunctions(self._xpathCtxt, _register_xpath_function)
        self.registerExsltFunctions()
        if self._variables is not None:
            self.registerVariables(self._variables)

    cdef unregister_context(self):
        # Per-evaluation teardown, mirroring register_context().
        self.unregisterGlobalFunctions(
            self._xpathCtxt, _unregister_xpath_function)
        self.unregisterGlobalNamespaces()
        xpath.xmlXPathRegisteredVariablesCleanup(self._xpathCtxt)
        self._cleanup_context()

    cdef void registerExsltFunctions(self) noexcept:
        if xslt.LIBXSLT_VERSION < 10125:
            # we'd only execute dummy functions anyway
            return
        # Walk the registered namespaces and hook up the matching EXSLT
        # function sets for each prefix.
        tree.xmlHashScan(
            self._xpathCtxt.nsHash, _registerExsltFunctionsForNamespaces,
            self._xpathCtxt)

    cdef registerVariables(self, variable_dict):
        for name, value in variable_dict.items():
            name_utf = self._to_utf(name)
            xpath.xmlXPathRegisterVariable(
                self._xpathCtxt, _xcstr(name_utf), _wrapXPathObject(value, None, None))

    cdef registerVariable(self, name, value):
        name_utf = self._to_utf(name)
        xpath.xmlXPathRegisterVariable(
            self._xpathCtxt, _xcstr(name_utf), _wrapXPathObject(value, None, None))
99
+
100
+
101
cdef void _registerExsltFunctionsForNamespaces(
        void* _c_href, void* _ctxt, const_xmlChar* c_prefix) noexcept:
    # xmlHashScan callback over the context's namespace hash: if the href is
    # one of the known EXSLT namespaces, register that EXSLT function set
    # under the user's prefix.
    c_href = <const_xmlChar*> _c_href
    ctxt = <xpath.xmlXPathContext*> _ctxt

    if tree.xmlStrcmp(c_href, xslt.EXSLT_DATE_NAMESPACE) == 0:
        xslt.exsltDateXpathCtxtRegister(ctxt, c_prefix)
    elif tree.xmlStrcmp(c_href, xslt.EXSLT_SETS_NAMESPACE) == 0:
        xslt.exsltSetsXpathCtxtRegister(ctxt, c_prefix)
    elif tree.xmlStrcmp(c_href, xslt.EXSLT_MATH_NAMESPACE) == 0:
        xslt.exsltMathXpathCtxtRegister(ctxt, c_prefix)
    elif tree.xmlStrcmp(c_href, xslt.EXSLT_STRINGS_NAMESPACE) == 0:
        xslt.exsltStrXpathCtxtRegister(ctxt, c_prefix)
114
+
115
+
116
cdef class _XPathEvaluatorBase:
    # Shared machinery for XPath evaluators: holds the libxml2 XPath context,
    # an optional evaluation lock and the error log, and converts libxml2
    # errors/results into Python exceptions/values.
    cdef xpath.xmlXPathContext* _xpathCtxt
    cdef _XPathContext _context
    cdef python.PyThread_type_lock _eval_lock
    cdef _ErrorLog _error_log
    def __cinit__(self):
        self._xpathCtxt = NULL
        if config.ENABLE_THREADING:
            # serialise evaluations: the xmlXPathContext is not thread-safe
            self._eval_lock = python.PyThread_allocate_lock()
            if self._eval_lock is NULL:
                raise MemoryError()
        self._error_log = _ErrorLog()

    def __init__(self, namespaces, extensions, enable_regexp,
                 smart_strings):
        self._context = _XPathContext(namespaces, extensions, self._error_log,
                                      enable_regexp, None, smart_strings)

    @property
    def error_log(self):
        assert self._error_log is not None, "XPath evaluator not initialised"
        return self._error_log.copy()

    def __dealloc__(self):
        if self._xpathCtxt is not NULL:
            xpath.xmlXPathFreeContext(self._xpathCtxt)
        if config.ENABLE_THREADING:
            if self._eval_lock is not NULL:
                python.PyThread_free_lock(self._eval_lock)

    cdef set_context(self, xpath.xmlXPathContext* xpathCtxt):
        # Take ownership of the context (freed in __dealloc__).
        self._xpathCtxt = xpathCtxt
        self._context.set_context(xpathCtxt)

    cdef bint _checkAbsolutePath(self, char* path) noexcept:
        # True if the first non-blank character of 'path' is '/'.
        cdef char c
        if path is NULL:
            return 0
        c = path[0]
        while c == c' ' or c == c'\t':
            path = path + 1
            c = path[0]
        return c == c'/'

    @cython.final
    cdef int _lock(self) except -1:
        cdef int result
        if config.ENABLE_THREADING and self._eval_lock != NULL:
            with nogil:
                result = python.PyThread_acquire_lock(
                    self._eval_lock, python.WAIT_LOCK)
            if result == 0:
                raise XPathError, "XPath evaluator locking failed"
        return 0

    @cython.final
    cdef void _unlock(self) noexcept:
        if config.ENABLE_THREADING and self._eval_lock != NULL:
            python.PyThread_release_lock(self._eval_lock)

    cdef _build_parse_error(self):
        # Build an XPathSyntaxError from the collected error-log entries,
        # preferring the specific syntax-error messages.
        cdef _BaseErrorLog entries
        entries = self._error_log.filter_types(_XPATH_SYNTAX_ERRORS)
        if entries:
            message = entries._buildExceptionMessage(None)
            if message is not None:
                return XPathSyntaxError(message, self._error_log)
        return XPathSyntaxError(
            self._error_log._buildExceptionMessage("Error in xpath expression"),
            self._error_log)

    cdef _build_eval_error(self):
        # Build an XPathEvalError; falls back to syntax-error entries when
        # no dedicated evaluation error was recorded.
        cdef _BaseErrorLog entries
        entries = self._error_log.filter_types(_XPATH_EVAL_ERRORS)
        if not entries:
            entries = self._error_log.filter_types(_XPATH_SYNTAX_ERRORS)
        if entries:
            message = entries._buildExceptionMessage(None)
            if message is not None:
                return XPathEvalError(message, self._error_log)
        return XPathEvalError(
            self._error_log._buildExceptionMessage("Error in xpath expression"),
            self._error_log)

    cdef object _handle_result(self, xpath.xmlXPathObject* xpathObj, _Document doc):
        # Convert the raw libxml2 result into a Python value, propagating
        # any exception raised by a Python extension function first.
        if self._context._exc._has_raised():
            if xpathObj is not NULL:
                _freeXPathObject(xpathObj)
                xpathObj = NULL
            self._context._release_temp_refs()
            self._context._exc._raise_if_stored()

        if xpathObj is NULL:
            self._context._release_temp_refs()
            raise self._build_eval_error()

        try:
            result = _unwrapXPathObject(xpathObj, doc, self._context)
        finally:
            _freeXPathObject(xpathObj)
            self._context._release_temp_refs()

        return result
219
+
220
+
221
cdef class XPathElementEvaluator(_XPathEvaluatorBase):
    """XPathElementEvaluator(self, element, namespaces=None, extensions=None, regexp=True, smart_strings=True)
    Create an XPath evaluator for an element.

    Absolute XPath expressions (starting with '/') will be evaluated against
    the ElementTree as returned by getroottree().

    Additional namespace declarations can be passed with the
    'namespace' keyword argument.  EXSLT regular expression support
    can be disabled with the 'regexp' boolean keyword (defaults to
    True).  Smart strings will be returned for string results unless
    you pass ``smart_strings=False``.
    """
    cdef _Element _element
    def __init__(self, _Element element not None, *, namespaces=None,
                 extensions=None, regexp=True, smart_strings=True):
        cdef xpath.xmlXPathContext* xpathCtxt
        cdef int ns_register_status
        cdef _Document doc
        _assertValidNode(element)
        _assertValidDoc(element._doc)
        self._element = element
        doc = element._doc
        _XPathEvaluatorBase.__init__(self, namespaces, extensions,
                                     regexp, smart_strings)
        # Create a document-bound XPath context; ownership passes to the base
        # class via set_context().
        xpathCtxt = xpath.xmlXPathNewContext(doc._c_doc)
        if xpathCtxt is NULL:
            raise MemoryError()
        self.set_context(xpathCtxt)

    def register_namespace(self, prefix, uri):
        """Register a namespace with the XPath context.
        """
        assert self._xpathCtxt is not NULL, "XPath context not initialised"
        self._context.addNamespace(prefix, uri)

    def register_namespaces(self, namespaces):
        """Register a prefix -> uri dict.
        """
        assert self._xpathCtxt is not NULL, "XPath context not initialised"
        for prefix, uri in namespaces.items():
            self._context.addNamespace(prefix, uri)

    def __call__(self, _path, **_variables):
        """__call__(self, _path, **_variables)

        Evaluate an XPath expression on the document.

        Variables may be provided as keyword arguments.  Note that namespaces
        are currently not supported for variables.

        Absolute XPath expressions (starting with '/') will be evaluated
        against the ElementTree as returned by getroottree().
        """
        cdef xpath.xmlXPathObject* xpathObj
        cdef _Document doc
        assert self._xpathCtxt is not NULL, "XPath context not initialised"
        path = _utf8(_path)
        doc = self._element._doc

        # Serialise access to the shared xmlXPathContext, then anchor the
        # evaluation at this element.
        self._lock()
        self._xpathCtxt.node = self._element._c_node
        try:
            self._context.register_context(doc)
            self._context.registerVariables(_variables)
            c_path = _xcstr(path)
            with nogil:
                xpathObj = xpath.xmlXPathEvalExpression(
                    c_path, self._xpathCtxt)
            result = self._handle_result(xpathObj, doc)
        finally:
            self._context.unregister_context()
            self._unlock()

        return result
296
+
297
+
298
cdef class XPathDocumentEvaluator(XPathElementEvaluator):
    """XPathDocumentEvaluator(self, etree, namespaces=None, extensions=None, regexp=True, smart_strings=True)
    Create an XPath evaluator for an ElementTree.

    Additional namespace declarations can be passed with the
    'namespace' keyword argument.  EXSLT regular expression support
    can be disabled with the 'regexp' boolean keyword (defaults to
    True).  Smart strings will be returned for string results unless
    you pass ``smart_strings=False``.
    """
    def __init__(self, _ElementTree etree not None, *, namespaces=None,
                 extensions=None, regexp=True, smart_strings=True):
        XPathElementEvaluator.__init__(
            self, etree._context_node, namespaces=namespaces,
            extensions=extensions, regexp=regexp,
            smart_strings=smart_strings)

    def __call__(self, _path, **_variables):
        """__call__(self, _path, **_variables)

        Evaluate an XPath expression on the document.

        Variables may be provided as keyword arguments.  Note that namespaces
        are currently not supported for variables.
        """
        cdef xpath.xmlXPathObject* xpathObj
        cdef xmlDoc* c_doc
        cdef _Document doc
        assert self._xpathCtxt is not NULL, "XPath context not initialised"
        path = _utf8(_path)
        doc = self._element._doc

        self._lock()
        try:
            self._context.register_context(doc)
            # Evaluate against a temporary document whose root is the
            # context element; torn down again in the inner finally.
            c_doc = _fakeRootDoc(doc._c_doc, self._element._c_node)
            try:
                self._context.registerVariables(_variables)
                c_path = _xcstr(path)
                with nogil:
                    self._xpathCtxt.doc  = c_doc
                    self._xpathCtxt.node = tree.xmlDocGetRootElement(c_doc)
                    xpathObj = xpath.xmlXPathEvalExpression(
                        c_path, self._xpathCtxt)
                result = self._handle_result(xpathObj, doc)
            finally:
                _destroyFakeDoc(doc._c_doc, c_doc)
                self._context.unregister_context()
        finally:
            self._unlock()

        return result
350
+
351
+
352
def XPathEvaluator(etree_or_element, *, namespaces=None, extensions=None,
                   regexp=True, smart_strings=True):
    """XPathEvaluator(etree_or_element, namespaces=None, extensions=None, regexp=True, smart_strings=True)

    Creates an XPath evaluator for an ElementTree or an Element.

    The resulting object can be called with an XPath expression as argument
    and XPath variables provided as keyword arguments.

    Additional namespace declarations can be passed with the
    'namespace' keyword argument.  EXSLT regular expression support
    can be disabled with the 'regexp' boolean keyword (defaults to
    True).  Smart strings will be returned for string results unless
    you pass ``smart_strings=False``.
    """
    # Dispatch on the input type: trees get a document evaluator, anything
    # else is handled as an element.
    if isinstance(etree_or_element, _ElementTree):
        evaluator_class = XPathDocumentEvaluator
    else:
        evaluator_class = XPathElementEvaluator
    return evaluator_class(
        etree_or_element, namespaces=namespaces,
        extensions=extensions, regexp=regexp, smart_strings=smart_strings)
375
+
376
+
377
cdef class XPath(_XPathEvaluatorBase):
    """XPath(self, path, namespaces=None, extensions=None, regexp=True, smart_strings=True)
    A compiled XPath expression that can be called on Elements and ElementTrees.

    Besides the XPath expression, you can pass prefix-namespace
    mappings and extension functions to the constructor through the
    keyword arguments ``namespaces`` and ``extensions``.  EXSLT
    regular expression support can be disabled with the 'regexp'
    boolean keyword (defaults to True).  Smart strings will be
    returned for string results unless you pass
    ``smart_strings=False``.
    """
    cdef xpath.xmlXPathCompExpr* _xpath   # compiled expression, freed in __dealloc__
    cdef bytes _path                      # original expression, UTF-8 encoded
    def __cinit__(self):
        self._xpath = NULL

    def __init__(self, path, *, namespaces=None, extensions=None,
                 regexp=True, smart_strings=True):
        cdef xpath.xmlXPathContext* xpathCtxt
        _XPathEvaluatorBase.__init__(self, namespaces, extensions,
                                     regexp, smart_strings)
        self._path = _utf8(path)
        # Compile once against a document-less context; the document is
        # bound per call in __call__().
        xpathCtxt = xpath.xmlXPathNewContext(NULL)
        if xpathCtxt is NULL:
            raise MemoryError()
        self.set_context(xpathCtxt)
        self._xpath = xpath.xmlXPathCtxtCompile(xpathCtxt, _xcstr(self._path))
        if self._xpath is NULL:
            raise self._build_parse_error()

    def __call__(self, _etree_or_element, **_variables):
        "__call__(self, _etree_or_element, **_variables)"
        cdef xpath.xmlXPathObject*  xpathObj
        cdef _Document document
        cdef _Element element

        assert self._xpathCtxt is not NULL, "XPath context not initialised"
        document = _documentOrRaise(_etree_or_element)
        element  = _rootNodeOrRaise(_etree_or_element)

        # Serialise access to the shared context, then bind document and
        # context node for this evaluation.
        self._lock()
        self._xpathCtxt.doc  = document._c_doc
        self._xpathCtxt.node = element._c_node

        try:
            self._context.register_context(document)
            self._context.registerVariables(_variables)
            with nogil:
                xpathObj = xpath.xmlXPathCompiledEval(
                    self._xpath, self._xpathCtxt)
            result = self._handle_result(xpathObj, document)
        finally:
            self._context.unregister_context()
            self._unlock()
        return result

    @property
    def path(self):
        """The literal XPath expression.
        """
        return self._path.decode('UTF-8')

    def __dealloc__(self):
        if self._xpath is not NULL:
            xpath.xmlXPathFreeCompExpr(self._xpath)

    def __repr__(self):
        return self.path
448
# Helpers for ETXPath: strip quoted string literals, then find all
# '{uri}' namespace markers in the remaining expression text.
cdef object _replace_strings = re.compile(b'("[^"]*")|(\'[^\']*\')').sub
cdef object _find_namespaces = re.compile(b'({[^}]+})').findall

cdef class ETXPath(XPath):
    """ETXPath(self, path, extensions=None, regexp=True, smart_strings=True)
    Special XPath class that supports the ElementTree {uri} notation for namespaces.

    Note that this class does not accept the ``namespace`` keyword
    argument. All namespaces must be passed as part of the path
    string.  Smart strings will be returned for string results unless
    you pass ``smart_strings=False``.
    """
    def __init__(self, path, *, extensions=None, regexp=True,
                 smart_strings=True):
        path, namespaces = self._nsextract_path(path)
        XPath.__init__(self, path, namespaces=namespaces,
                       extensions=extensions, regexp=regexp,
                       smart_strings=smart_strings)

    cdef _nsextract_path(self, path):
        # replace {namespaces} by new prefixes
        cdef dict namespaces = {}
        cdef list namespace_defs = []
        cdef int i
        path_utf = _utf8(path)
        stripped_path = _replace_strings(b'', path_utf) # remove string literals
        i = 1
        for namespace_def in _find_namespaces(stripped_path):
            if namespace_def not in namespace_defs:
                # generate prefixes '__xpp01', '__xpp02', ... per unique URI
                prefix = python.PyBytes_FromFormat("__xpp%02d", i)
                i += 1
                namespace_defs.append(namespace_def)
                namespace = namespace_def[1:-1]  # remove '{}'
                namespace = (<bytes>namespace).decode('utf8')
                namespaces[prefix.decode('utf8')] = namespace
                prefix_str = prefix + b':'
                # FIXME: this also replaces {namespaces} within strings!
                path_utf = path_utf.replace(namespace_def, prefix_str)
        path = path_utf.decode('utf8')
        return path, namespaces
llmeval-env/lib/python3.10/site-packages/lxml/xslt.pxi ADDED
@@ -0,0 +1,950 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # XSLT
2
+ from lxml.includes cimport xslt
3
+
4
+
5
+ cdef class XSLTError(LxmlError):
6
+ """Base class of all XSLT errors.
7
+ """
8
+
9
+ cdef class XSLTParseError(XSLTError):
10
+ """Error parsing a stylesheet document.
11
+ """
12
+
13
+ cdef class XSLTApplyError(XSLTError):
14
+ """Error running an XSL transformation.
15
+ """
16
+
17
+ class XSLTSaveError(XSLTError, SerialisationError):
18
+ """Error serialising an XSLT result.
19
+ """
20
+
21
+ cdef class XSLTExtensionError(XSLTError):
22
+ """Error registering an XSLT extension.
23
+ """
24
+
25
+
26
+ # version information
27
+ LIBXSLT_COMPILED_VERSION = __unpackIntVersion(xslt.LIBXSLT_VERSION)
28
+ LIBXSLT_VERSION = __unpackIntVersion(xslt.xsltLibxsltVersion)
29
+
30
+
31
+ ################################################################################
32
+ # Where do we store what?
33
+ #
34
+ # xsltStylesheet->doc->_private
35
+ # == _XSLTResolverContext for XSL stylesheet
36
+ #
37
+ # xsltTransformContext->_private
38
+ # == _XSLTResolverContext for transformed document
39
+ #
40
+ ################################################################################
41
+
42
+
43
+ ################################################################################
44
+ # XSLT document loaders
45
+
46
+ @cython.final
47
+ @cython.internal
48
+ cdef class _XSLTResolverContext(_ResolverContext):
49
+ cdef xmlDoc* _c_style_doc
50
+ cdef _BaseParser _parser
51
+
52
+ cdef _XSLTResolverContext _copy(self):
53
+ cdef _XSLTResolverContext context
54
+ context = _XSLTResolverContext()
55
+ _initXSLTResolverContext(context, self._parser)
56
+ context._c_style_doc = self._c_style_doc
57
+ return context
58
+
59
+ cdef _initXSLTResolverContext(_XSLTResolverContext context,
60
+ _BaseParser parser):
61
+ _initResolverContext(context, parser.resolvers)
62
+ context._parser = parser
63
+ context._c_style_doc = NULL
64
+
65
+ cdef xmlDoc* _xslt_resolve_from_python(const_xmlChar* c_uri, void* c_context,
66
+ int parse_options, int* error) with gil:
67
+ # call the Python document loaders
68
+ cdef _XSLTResolverContext context
69
+ cdef _ResolverRegistry resolvers
70
+ cdef _InputDocument doc_ref
71
+ cdef xmlDoc* c_doc
72
+ cdef xmlDoc* c_return_doc = NULL
73
+
74
+ error[0] = 0
75
+ context = <_XSLTResolverContext>c_context
76
+
77
+ # shortcut if we resolve the stylesheet itself
78
+ c_doc = context._c_style_doc
79
+ try:
80
+ if c_doc is not NULL and c_doc.URL is not NULL:
81
+ if tree.xmlStrcmp(c_uri, c_doc.URL) == 0:
82
+ c_return_doc = _copyDoc(c_doc, 1)
83
+ return c_return_doc # 'goto', see 'finally' below
84
+
85
+ # delegate to the Python resolvers
86
+ resolvers = context._resolvers
87
+ if tree.xmlStrncmp(<unsigned char*>'string://__STRING__XSLT__/', c_uri, 26) == 0:
88
+ c_uri += 26
89
+ uri = _decodeFilename(c_uri)
90
+ doc_ref = resolvers.resolve(uri, None, context)
91
+
92
+ if doc_ref is not None:
93
+ if doc_ref._type == PARSER_DATA_STRING:
94
+ c_return_doc = _parseDoc(
95
+ doc_ref._data_bytes, doc_ref._filename, context._parser)
96
+ elif doc_ref._type == PARSER_DATA_FILENAME:
97
+ c_return_doc = _parseDocFromFile(
98
+ doc_ref._filename, context._parser)
99
+ elif doc_ref._type == PARSER_DATA_FILE:
100
+ c_return_doc = _parseDocFromFilelike(
101
+ doc_ref._file, doc_ref._filename, context._parser)
102
+ elif doc_ref._type == PARSER_DATA_EMPTY:
103
+ c_return_doc = _newXMLDoc()
104
+ if c_return_doc is not NULL and c_return_doc.URL is NULL:
105
+ c_return_doc.URL = tree.xmlStrdup(c_uri)
106
+ except:
107
+ error[0] = 1
108
+ context._store_raised()
109
+ finally:
110
+ return c_return_doc # and swallow any further exceptions
111
+
112
+
113
+ cdef void _xslt_store_resolver_exception(const_xmlChar* c_uri, void* context,
114
+ xslt.xsltLoadType c_type) noexcept with gil:
115
+ try:
116
+ message = f"Cannot resolve URI {_decodeFilename(c_uri)}"
117
+ if c_type == xslt.XSLT_LOAD_DOCUMENT:
118
+ exception = XSLTApplyError(message)
119
+ else:
120
+ exception = XSLTParseError(message)
121
+ (<_XSLTResolverContext>context)._store_exception(exception)
122
+ except BaseException as e:
123
+ (<_XSLTResolverContext>context)._store_exception(e)
124
+ finally:
125
+ return # and swallow any further exceptions
126
+
127
+
128
+ cdef xmlDoc* _xslt_doc_loader(const_xmlChar* c_uri, tree.xmlDict* c_dict,
129
+ int parse_options, void* c_ctxt,
130
+ xslt.xsltLoadType c_type) noexcept nogil:
131
+ # nogil => no Python objects here, may be called without thread context !
132
+ cdef xmlDoc* c_doc
133
+ cdef xmlDoc* result
134
+ cdef void* c_pcontext
135
+ cdef int error = 0
136
+ # find resolver contexts of stylesheet and transformed doc
137
+ if c_type == xslt.XSLT_LOAD_DOCUMENT:
138
+ # transformation time
139
+ c_pcontext = (<xslt.xsltTransformContext*>c_ctxt)._private
140
+ elif c_type == xslt.XSLT_LOAD_STYLESHEET:
141
+ # include/import resolution while parsing
142
+ c_pcontext = (<xslt.xsltStylesheet*>c_ctxt).doc._private
143
+ else:
144
+ c_pcontext = NULL
145
+
146
+ if c_pcontext is NULL:
147
+ # can't call Python without context, fall back to default loader
148
+ return XSLT_DOC_DEFAULT_LOADER(
149
+ c_uri, c_dict, parse_options, c_ctxt, c_type)
150
+
151
+ c_doc = _xslt_resolve_from_python(c_uri, c_pcontext, parse_options, &error)
152
+ if c_doc is NULL and not error:
153
+ c_doc = XSLT_DOC_DEFAULT_LOADER(
154
+ c_uri, c_dict, parse_options, c_ctxt, c_type)
155
+ if c_doc is NULL:
156
+ _xslt_store_resolver_exception(c_uri, c_pcontext, c_type)
157
+
158
+ if c_doc is not NULL and c_type == xslt.XSLT_LOAD_STYLESHEET:
159
+ c_doc._private = c_pcontext
160
+ return c_doc
161
+
162
+ cdef xslt.xsltDocLoaderFunc XSLT_DOC_DEFAULT_LOADER = xslt.xsltDocDefaultLoader
163
+ xslt.xsltSetLoaderFunc(<xslt.xsltDocLoaderFunc>_xslt_doc_loader)
164
+
165
+ ################################################################################
166
+ # XSLT file/network access control
167
+
168
+ cdef class XSLTAccessControl:
169
+ """XSLTAccessControl(self, read_file=True, write_file=True, create_dir=True, read_network=True, write_network=True)
170
+
171
+ Access control for XSLT: reading/writing files, directories and
172
+ network I/O. Access to a type of resource is granted or denied by
173
+ passing any of the following boolean keyword arguments. All of
174
+ them default to True to allow access.
175
+
176
+ - read_file
177
+ - write_file
178
+ - create_dir
179
+ - read_network
180
+ - write_network
181
+
182
+ For convenience, there is also a class member `DENY_ALL` that
183
+ provides an XSLTAccessControl instance that is readily configured
184
+ to deny everything, and a `DENY_WRITE` member that denies all
185
+ write access but allows read access.
186
+
187
+ See `XSLT`.
188
+ """
189
+ cdef xslt.xsltSecurityPrefs* _prefs
190
+ def __cinit__(self):
191
+ self._prefs = xslt.xsltNewSecurityPrefs()
192
+ if self._prefs is NULL:
193
+ raise MemoryError()
194
+
195
+ def __init__(self, *, bint read_file=True, bint write_file=True, bint create_dir=True,
196
+ bint read_network=True, bint write_network=True):
197
+ self._setAccess(xslt.XSLT_SECPREF_READ_FILE, read_file)
198
+ self._setAccess(xslt.XSLT_SECPREF_WRITE_FILE, write_file)
199
+ self._setAccess(xslt.XSLT_SECPREF_CREATE_DIRECTORY, create_dir)
200
+ self._setAccess(xslt.XSLT_SECPREF_READ_NETWORK, read_network)
201
+ self._setAccess(xslt.XSLT_SECPREF_WRITE_NETWORK, write_network)
202
+
203
+ DENY_ALL = XSLTAccessControl(
204
+ read_file=False, write_file=False, create_dir=False,
205
+ read_network=False, write_network=False)
206
+
207
+ DENY_WRITE = XSLTAccessControl(
208
+ read_file=True, write_file=False, create_dir=False,
209
+ read_network=True, write_network=False)
210
+
211
+ def __dealloc__(self):
212
+ if self._prefs is not NULL:
213
+ xslt.xsltFreeSecurityPrefs(self._prefs)
214
+
215
+ @cython.final
216
+ cdef _setAccess(self, xslt.xsltSecurityOption option, bint allow):
217
+ cdef xslt.xsltSecurityCheck function
218
+ if allow:
219
+ function = xslt.xsltSecurityAllow
220
+ else:
221
+ function = xslt.xsltSecurityForbid
222
+ xslt.xsltSetSecurityPrefs(self._prefs, option, function)
223
+
224
+ @cython.final
225
+ cdef void _register_in_context(self, xslt.xsltTransformContext* ctxt) noexcept:
226
+ xslt.xsltSetCtxtSecurityPrefs(self._prefs, ctxt)
227
+
228
+ @property
229
+ def options(self):
230
+ """The access control configuration as a map of options."""
231
+ return {
232
+ 'read_file': self._optval(xslt.XSLT_SECPREF_READ_FILE),
233
+ 'write_file': self._optval(xslt.XSLT_SECPREF_WRITE_FILE),
234
+ 'create_dir': self._optval(xslt.XSLT_SECPREF_CREATE_DIRECTORY),
235
+ 'read_network': self._optval(xslt.XSLT_SECPREF_READ_NETWORK),
236
+ 'write_network': self._optval(xslt.XSLT_SECPREF_WRITE_NETWORK),
237
+ }
238
+
239
+ @cython.final
240
+ cdef _optval(self, xslt.xsltSecurityOption option):
241
+ cdef xslt.xsltSecurityCheck function
242
+ function = xslt.xsltGetSecurityPrefs(self._prefs, option)
243
+ if function is <xslt.xsltSecurityCheck>xslt.xsltSecurityAllow:
244
+ return True
245
+ elif function is <xslt.xsltSecurityCheck>xslt.xsltSecurityForbid:
246
+ return False
247
+ else:
248
+ return None
249
+
250
+ def __repr__(self):
251
+ items = sorted(self.options.items())
252
+ return "%s(%s)" % (
253
+ python._fqtypename(self).decode('UTF-8').split('.')[-1],
254
+ ', '.join(["%s=%r" % item for item in items]))
255
+
256
+ ################################################################################
257
+ # XSLT
258
+
259
+ cdef int _register_xslt_function(void* ctxt, name_utf, ns_utf) noexcept:
260
+ if ns_utf is None:
261
+ return 0
262
+ # libxml2 internalises the strings if ctxt has a dict
263
+ return xslt.xsltRegisterExtFunction(
264
+ <xslt.xsltTransformContext*>ctxt, _xcstr(name_utf), _xcstr(ns_utf),
265
+ <xslt.xmlXPathFunction>_xpath_function_call)
266
+
267
+ cdef dict EMPTY_DICT = {}
268
+
269
+ @cython.final
270
+ @cython.internal
271
+ cdef class _XSLTContext(_BaseContext):
272
+ cdef xslt.xsltTransformContext* _xsltCtxt
273
+ cdef _ReadOnlyElementProxy _extension_element_proxy
274
+ cdef dict _extension_elements
275
+ def __cinit__(self):
276
+ self._xsltCtxt = NULL
277
+ self._extension_elements = EMPTY_DICT
278
+
279
+ def __init__(self, namespaces, extensions, error_log, enable_regexp,
280
+ build_smart_strings):
281
+ if extensions is not None and extensions:
282
+ for ns_name_tuple, extension in extensions.items():
283
+ if ns_name_tuple[0] is None:
284
+ raise XSLTExtensionError, \
285
+ "extensions must not have empty namespaces"
286
+ if isinstance(extension, XSLTExtension):
287
+ if self._extension_elements is EMPTY_DICT:
288
+ self._extension_elements = {}
289
+ extensions = extensions.copy()
290
+ ns_utf = _utf8(ns_name_tuple[0])
291
+ name_utf = _utf8(ns_name_tuple[1])
292
+ self._extension_elements[(ns_utf, name_utf)] = extension
293
+ del extensions[ns_name_tuple]
294
+ _BaseContext.__init__(self, namespaces, extensions, error_log, enable_regexp,
295
+ build_smart_strings)
296
+
297
+ cdef _BaseContext _copy(self):
298
+ cdef _XSLTContext context
299
+ context = <_XSLTContext>_BaseContext._copy(self)
300
+ context._extension_elements = self._extension_elements
301
+ return context
302
+
303
+ cdef register_context(self, xslt.xsltTransformContext* xsltCtxt,
304
+ _Document doc):
305
+ self._xsltCtxt = xsltCtxt
306
+ self._set_xpath_context(xsltCtxt.xpathCtxt)
307
+ self._register_context(doc)
308
+ self.registerLocalFunctions(xsltCtxt, _register_xslt_function)
309
+ self.registerGlobalFunctions(xsltCtxt, _register_xslt_function)
310
+ _registerXSLTExtensions(xsltCtxt, self._extension_elements)
311
+
312
+ cdef free_context(self):
313
+ self._cleanup_context()
314
+ self._release_context()
315
+ if self._xsltCtxt is not NULL:
316
+ xslt.xsltFreeTransformContext(self._xsltCtxt)
317
+ self._xsltCtxt = NULL
318
+ self._release_temp_refs()
319
+
320
+
321
+ @cython.final
322
+ @cython.internal
323
+ @cython.freelist(8)
324
+ cdef class _XSLTQuotedStringParam:
325
+ """A wrapper class for literal XSLT string parameters that require
326
+ quote escaping.
327
+ """
328
+ cdef bytes strval
329
+ def __cinit__(self, strval):
330
+ self.strval = _utf8(strval)
331
+
332
+
333
+ @cython.no_gc_clear
334
+ cdef class XSLT:
335
+ """XSLT(self, xslt_input, extensions=None, regexp=True, access_control=None)
336
+
337
+ Turn an XSL document into an XSLT object.
338
+
339
+ Calling this object on a tree or Element will execute the XSLT::
340
+
341
+ transform = etree.XSLT(xsl_tree)
342
+ result = transform(xml_tree)
343
+
344
+ Keyword arguments of the constructor:
345
+
346
+ - extensions: a dict mapping ``(namespace, name)`` pairs to
347
+ extension functions or extension elements
348
+ - regexp: enable exslt regular expression support in XPath
349
+ (default: True)
350
+ - access_control: access restrictions for network or file
351
+ system (see `XSLTAccessControl`)
352
+
353
+ Keyword arguments of the XSLT call:
354
+
355
+ - profile_run: enable XSLT profiling and make the profile available
356
+ as XML document in ``result.xslt_profile`` (default: False)
357
+
358
+ Other keyword arguments of the call are passed to the stylesheet
359
+ as parameters.
360
+ """
361
+ cdef _XSLTContext _context
362
+ cdef xslt.xsltStylesheet* _c_style
363
+ cdef _XSLTResolverContext _xslt_resolver_context
364
+ cdef XSLTAccessControl _access_control
365
+ cdef _ErrorLog _error_log
366
+
367
+ def __cinit__(self):
368
+ self._c_style = NULL
369
+
370
+ def __init__(self, xslt_input, *, extensions=None, regexp=True,
371
+ access_control=None):
372
+ cdef xslt.xsltStylesheet* c_style = NULL
373
+ cdef xmlDoc* c_doc
374
+ cdef _Document doc
375
+ cdef _Element root_node
376
+
377
+ doc = _documentOrRaise(xslt_input)
378
+ root_node = _rootNodeOrRaise(xslt_input)
379
+
380
+ # set access control or raise TypeError
381
+ self._access_control = access_control
382
+
383
+ # make a copy of the document as stylesheet parsing modifies it
384
+ c_doc = _copyDocRoot(doc._c_doc, root_node._c_node)
385
+
386
+ # make sure we always have a stylesheet URL
387
+ if c_doc.URL is NULL:
388
+ doc_url_utf = python.PyUnicode_AsASCIIString(
389
+ f"string://__STRING__XSLT__/{id(self)}.xslt")
390
+ c_doc.URL = tree.xmlStrdup(_xcstr(doc_url_utf))
391
+
392
+ self._error_log = _ErrorLog()
393
+ self._xslt_resolver_context = _XSLTResolverContext()
394
+ _initXSLTResolverContext(self._xslt_resolver_context, doc._parser)
395
+ # keep a copy in case we need to access the stylesheet via 'document()'
396
+ self._xslt_resolver_context._c_style_doc = _copyDoc(c_doc, 1)
397
+ c_doc._private = <python.PyObject*>self._xslt_resolver_context
398
+
399
+ with self._error_log:
400
+ orig_loader = _register_document_loader()
401
+ c_style = xslt.xsltParseStylesheetDoc(c_doc)
402
+ _reset_document_loader(orig_loader)
403
+
404
+ if c_style is NULL or c_style.errors:
405
+ tree.xmlFreeDoc(c_doc)
406
+ if c_style is not NULL:
407
+ xslt.xsltFreeStylesheet(c_style)
408
+ self._xslt_resolver_context._raise_if_stored()
409
+ # last error seems to be the most accurate here
410
+ if self._error_log.last_error is not None and \
411
+ self._error_log.last_error.message:
412
+ raise XSLTParseError(self._error_log.last_error.message,
413
+ self._error_log)
414
+ else:
415
+ raise XSLTParseError(
416
+ self._error_log._buildExceptionMessage(
417
+ "Cannot parse stylesheet"),
418
+ self._error_log)
419
+
420
+ c_doc._private = NULL # no longer used!
421
+ self._c_style = c_style
422
+ self._context = _XSLTContext(None, extensions, self._error_log, regexp, True)
423
+
424
+ def __dealloc__(self):
425
+ if self._xslt_resolver_context is not None and \
426
+ self._xslt_resolver_context._c_style_doc is not NULL:
427
+ tree.xmlFreeDoc(self._xslt_resolver_context._c_style_doc)
428
+ # this cleans up the doc copy as well
429
+ if self._c_style is not NULL:
430
+ xslt.xsltFreeStylesheet(self._c_style)
431
+
432
+ @property
433
+ def error_log(self):
434
+ """The log of errors and warnings of an XSLT execution."""
435
+ return self._error_log.copy()
436
+
437
+ @staticmethod
438
+ def strparam(strval):
439
+ """strparam(strval)
440
+
441
+ Mark an XSLT string parameter that requires quote escaping
442
+ before passing it into the transformation. Use it like this::
443
+
444
+ result = transform(doc, some_strval = XSLT.strparam(
445
+ '''it's \"Monty Python's\" ...'''))
446
+
447
+ Escaped string parameters can be reused without restriction.
448
+ """
449
+ return _XSLTQuotedStringParam(strval)
450
+
451
+ @staticmethod
452
+ def set_global_max_depth(int max_depth):
453
+ """set_global_max_depth(max_depth)
454
+
455
+ The maximum traversal depth that the stylesheet engine will allow.
456
+ This does not only count the template recursion depth but also takes
457
+ the number of variables/parameters into account. The required setting
458
+ for a run depends on both the stylesheet and the input data.
459
+
460
+ Example::
461
+
462
+ XSLT.set_global_max_depth(5000)
463
+
464
+ Note that this is currently a global, module-wide setting because
465
+ libxslt does not support it at a per-stylesheet level.
466
+ """
467
+ if max_depth < 0:
468
+ raise ValueError("cannot set a maximum stylesheet traversal depth < 0")
469
+ xslt.xsltMaxDepth = max_depth
470
+
471
+ def tostring(self, _ElementTree result_tree):
472
+ """tostring(self, result_tree)
473
+
474
+ Save result doc to string based on stylesheet output method.
475
+
476
+ :deprecated: use str(result_tree) instead.
477
+ """
478
+ return str(result_tree)
479
+
480
+ def __deepcopy__(self, memo):
481
+ return self.__copy__()
482
+
483
+ def __copy__(self):
484
+ return _copyXSLT(self)
485
+
486
+ def __call__(self, _input, *, profile_run=False, **kw):
487
+ """__call__(self, _input, profile_run=False, **kw)
488
+
489
+ Execute the XSL transformation on a tree or Element.
490
+
491
+ Pass the ``profile_run`` option to get profile information
492
+ about the XSLT. The result of the XSLT will have a property
493
+ xslt_profile that holds an XML tree with profiling data.
494
+ """
495
+ cdef _XSLTContext context = None
496
+ cdef _XSLTResolverContext resolver_context
497
+ cdef _Document input_doc
498
+ cdef _Element root_node
499
+ cdef _Document result_doc
500
+ cdef _Document profile_doc = None
501
+ cdef xmlDoc* c_profile_doc
502
+ cdef xslt.xsltTransformContext* transform_ctxt
503
+ cdef xmlDoc* c_result = NULL
504
+ cdef xmlDoc* c_doc
505
+ cdef tree.xmlDict* c_dict
506
+ cdef const_char** params = NULL
507
+
508
+ assert self._c_style is not NULL, "XSLT stylesheet not initialised"
509
+ input_doc = _documentOrRaise(_input)
510
+ root_node = _rootNodeOrRaise(_input)
511
+
512
+ c_doc = _fakeRootDoc(input_doc._c_doc, root_node._c_node)
513
+
514
+ transform_ctxt = xslt.xsltNewTransformContext(self._c_style, c_doc)
515
+ if transform_ctxt is NULL:
516
+ _destroyFakeDoc(input_doc._c_doc, c_doc)
517
+ raise MemoryError()
518
+
519
+ # using the stylesheet dict is safer than using a possibly
520
+ # unrelated dict from the current thread. Almost all
521
+ # non-input tag/attr names will come from the stylesheet
522
+ # anyway.
523
+ if transform_ctxt.dict is not NULL:
524
+ xmlparser.xmlDictFree(transform_ctxt.dict)
525
+ if kw:
526
+ # parameter values are stored in the dict
527
+ # => avoid unnecessarily cluttering the global dict
528
+ transform_ctxt.dict = xmlparser.xmlDictCreateSub(self._c_style.doc.dict)
529
+ if transform_ctxt.dict is NULL:
530
+ xslt.xsltFreeTransformContext(transform_ctxt)
531
+ raise MemoryError()
532
+ else:
533
+ transform_ctxt.dict = self._c_style.doc.dict
534
+ xmlparser.xmlDictReference(transform_ctxt.dict)
535
+
536
+ xslt.xsltSetCtxtParseOptions(
537
+ transform_ctxt, input_doc._parser._parse_options)
538
+
539
+ if profile_run:
540
+ transform_ctxt.profile = 1
541
+
542
+ try:
543
+ context = self._context._copy()
544
+ context.register_context(transform_ctxt, input_doc)
545
+
546
+ resolver_context = self._xslt_resolver_context._copy()
547
+ transform_ctxt._private = <python.PyObject*>resolver_context
548
+
549
+ _convert_xslt_parameters(transform_ctxt, kw, &params)
550
+ c_result = self._run_transform(
551
+ c_doc, params, context, transform_ctxt)
552
+ if params is not NULL:
553
+ # deallocate space for parameters
554
+ python.lxml_free(params)
555
+
556
+ if transform_ctxt.state != xslt.XSLT_STATE_OK:
557
+ if c_result is not NULL:
558
+ tree.xmlFreeDoc(c_result)
559
+ c_result = NULL
560
+
561
+ if transform_ctxt.profile:
562
+ c_profile_doc = xslt.xsltGetProfileInformation(transform_ctxt)
563
+ if c_profile_doc is not NULL:
564
+ profile_doc = _documentFactory(
565
+ c_profile_doc, input_doc._parser)
566
+ finally:
567
+ if context is not None:
568
+ context.free_context()
569
+ _destroyFakeDoc(input_doc._c_doc, c_doc)
570
+
571
+ try:
572
+ if resolver_context is not None and resolver_context._has_raised():
573
+ if c_result is not NULL:
574
+ tree.xmlFreeDoc(c_result)
575
+ c_result = NULL
576
+ resolver_context._raise_if_stored()
577
+
578
+ if context._exc._has_raised():
579
+ if c_result is not NULL:
580
+ tree.xmlFreeDoc(c_result)
581
+ c_result = NULL
582
+ context._exc._raise_if_stored()
583
+
584
+ if c_result is NULL:
585
+ # last error seems to be the most accurate here
586
+ error = self._error_log.last_error
587
+ if error is not None and error.message:
588
+ if error.line > 0:
589
+ message = f"{error.message}, line {error.line}"
590
+ else:
591
+ message = error.message
592
+ elif error is not None and error.line > 0:
593
+ message = f"Error applying stylesheet, line {error.line}"
594
+ else:
595
+ message = "Error applying stylesheet"
596
+ raise XSLTApplyError(message, self._error_log)
597
+ finally:
598
+ if resolver_context is not None:
599
+ resolver_context.clear()
600
+
601
+ result_doc = _documentFactory(c_result, input_doc._parser)
602
+
603
+ c_dict = c_result.dict
604
+ xmlparser.xmlDictReference(c_dict)
605
+ __GLOBAL_PARSER_CONTEXT.initThreadDictRef(&c_result.dict)
606
+ if c_dict is not c_result.dict or \
607
+ self._c_style.doc.dict is not c_result.dict or \
608
+ input_doc._c_doc.dict is not c_result.dict:
609
+ with nogil:
610
+ if c_dict is not c_result.dict:
611
+ fixThreadDictNames(<xmlNode*>c_result,
612
+ c_dict, c_result.dict)
613
+ if self._c_style.doc.dict is not c_result.dict:
614
+ fixThreadDictNames(<xmlNode*>c_result,
615
+ self._c_style.doc.dict, c_result.dict)
616
+ if input_doc._c_doc.dict is not c_result.dict:
617
+ fixThreadDictNames(<xmlNode*>c_result,
618
+ input_doc._c_doc.dict, c_result.dict)
619
+ xmlparser.xmlDictFree(c_dict)
620
+
621
+ return _xsltResultTreeFactory(result_doc, self, profile_doc)
622
+
623
+ cdef xmlDoc* _run_transform(self, xmlDoc* c_input_doc,
624
+ const_char** params, _XSLTContext context,
625
+ xslt.xsltTransformContext* transform_ctxt):
626
+ cdef xmlDoc* c_result
627
+ xslt.xsltSetTransformErrorFunc(transform_ctxt, <void*>self._error_log,
628
+ <xmlerror.xmlGenericErrorFunc>_receiveXSLTError)
629
+ if self._access_control is not None:
630
+ self._access_control._register_in_context(transform_ctxt)
631
+ with self._error_log, nogil:
632
+ orig_loader = _register_document_loader()
633
+ c_result = xslt.xsltApplyStylesheetUser(
634
+ self._c_style, c_input_doc, params, NULL, NULL, transform_ctxt)
635
+ _reset_document_loader(orig_loader)
636
+ return c_result
637
+
638
+
639
+ cdef _convert_xslt_parameters(xslt.xsltTransformContext* transform_ctxt,
640
+ dict parameters, const_char*** params_ptr):
641
+ cdef Py_ssize_t i, parameter_count
642
+ cdef const_char** params
643
+ cdef tree.xmlDict* c_dict = transform_ctxt.dict
644
+ params_ptr[0] = NULL
645
+ parameter_count = len(parameters)
646
+ if parameter_count == 0:
647
+ return
648
+ # allocate space for parameters
649
+ # * 2 as we want an entry for both key and value,
650
+ # and + 1 as array is NULL terminated
651
+ params = <const_char**>python.lxml_malloc(parameter_count * 2 + 1, sizeof(const_char*))
652
+ if not params:
653
+ raise MemoryError()
654
+ try:
655
+ i = 0
656
+ for key, value in parameters.iteritems():
657
+ k = _utf8(key)
658
+ if isinstance(value, _XSLTQuotedStringParam):
659
+ v = (<_XSLTQuotedStringParam>value).strval
660
+ xslt.xsltQuoteOneUserParam(
661
+ transform_ctxt, _xcstr(k), _xcstr(v))
662
+ else:
663
+ if isinstance(value, XPath):
664
+ v = (<XPath>value)._path
665
+ else:
666
+ v = _utf8(value)
667
+ params[i] = <const_char*>tree.xmlDictLookup(c_dict, _xcstr(k), len(k))
668
+ i += 1
669
+ params[i] = <const_char*>tree.xmlDictLookup(c_dict, _xcstr(v), len(v))
670
+ i += 1
671
+ except:
672
+ python.lxml_free(params)
673
+ raise
674
+ params[i] = NULL
675
+ params_ptr[0] = params
676
+
677
+ cdef XSLT _copyXSLT(XSLT stylesheet):
678
+ cdef XSLT new_xslt
679
+ cdef xmlDoc* c_doc
680
+ assert stylesheet._c_style is not NULL, "XSLT stylesheet not initialised"
681
+ new_xslt = XSLT.__new__(XSLT)
682
+ new_xslt._access_control = stylesheet._access_control
683
+ new_xslt._error_log = _ErrorLog()
684
+ new_xslt._context = stylesheet._context._copy()
685
+
686
+ new_xslt._xslt_resolver_context = stylesheet._xslt_resolver_context._copy()
687
+ new_xslt._xslt_resolver_context._c_style_doc = _copyDoc(
688
+ stylesheet._xslt_resolver_context._c_style_doc, 1)
689
+
690
+ c_doc = _copyDoc(stylesheet._c_style.doc, 1)
691
+ new_xslt._c_style = xslt.xsltParseStylesheetDoc(c_doc)
692
+ if new_xslt._c_style is NULL:
693
+ tree.xmlFreeDoc(c_doc)
694
+ raise MemoryError()
695
+
696
+ return new_xslt
697
+
698
+ @cython.final
699
+ cdef class _XSLTResultTree(_ElementTree):
700
+ """The result of an XSLT evaluation.
701
+
702
+ Use ``str()`` or ``bytes()`` (or ``unicode()`` in Python 2.x) to serialise to a string,
703
+ and the ``.write_output()`` method to write serialise to a file.
704
+ """
705
+ cdef XSLT _xslt
706
+ cdef _Document _profile
707
+ cdef xmlChar* _buffer
708
+ cdef Py_ssize_t _buffer_len
709
+ cdef Py_ssize_t _buffer_refcnt
710
+
711
+ def write_output(self, file, *, compression=0):
712
+ """write_output(self, file, *, compression=0)
713
+
714
+ Serialise the XSLT output to a file or file-like object.
715
+
716
+ As opposed to the generic ``.write()`` method, ``.write_output()`` serialises
717
+ the result as defined by the ``<xsl:output>`` tag.
718
+ """
719
+ cdef _FilelikeWriter writer = None
720
+ cdef _Document doc
721
+ cdef int r, rclose, c_compression
722
+ cdef const_xmlChar* c_encoding = NULL
723
+ cdef tree.xmlOutputBuffer* c_buffer
724
+
725
+ if self._context_node is not None:
726
+ doc = self._context_node._doc
727
+ else:
728
+ doc = None
729
+ if doc is None:
730
+ doc = self._doc
731
+ if doc is None:
732
+ raise XSLTSaveError("No document to serialise")
733
+ c_compression = compression or 0
734
+ xslt.LXML_GET_XSLT_ENCODING(c_encoding, self._xslt._c_style)
735
+ writer = _create_output_buffer(file, <const_char*>c_encoding, compression, &c_buffer, close=False)
736
+ if writer is None:
737
+ with nogil:
738
+ r = xslt.xsltSaveResultTo(c_buffer, doc._c_doc, self._xslt._c_style)
739
+ rclose = tree.xmlOutputBufferClose(c_buffer)
740
+ else:
741
+ r = xslt.xsltSaveResultTo(c_buffer, doc._c_doc, self._xslt._c_style)
742
+ rclose = tree.xmlOutputBufferClose(c_buffer)
743
+ if writer is not None:
744
+ writer._exc_context._raise_if_stored()
745
+ if r < 0 or rclose == -1:
746
+ python.PyErr_SetFromErrno(IOError) # raises IOError
747
+
748
+ cdef _saveToStringAndSize(self, xmlChar** s, int* l):
749
+ cdef _Document doc
750
+ cdef int r
751
+ if self._context_node is not None:
752
+ doc = self._context_node._doc
753
+ else:
754
+ doc = None
755
+ if doc is None:
756
+ doc = self._doc
757
+ if doc is None:
758
+ s[0] = NULL
759
+ return
760
+ with nogil:
761
+ r = xslt.xsltSaveResultToString(s, l, doc._c_doc,
762
+ self._xslt._c_style)
763
+ if r == -1:
764
+ raise MemoryError()
765
+
766
+ def __str__(self):
767
+ cdef xmlChar* encoding
768
+ cdef xmlChar* s = NULL
769
+ cdef int l = 0
770
+ self._saveToStringAndSize(&s, &l)
771
+ if s is NULL:
772
+ return ''
773
+ encoding = self._xslt._c_style.encoding
774
+ try:
775
+ if encoding is NULL:
776
+ result = s[:l].decode('UTF-8')
777
+ else:
778
+ result = s[:l].decode(encoding)
779
+ finally:
780
+ tree.xmlFree(s)
781
+ return _stripEncodingDeclaration(result)
782
+
783
+ def __getbuffer__(self, Py_buffer* buffer, int flags):
784
+ cdef int l = 0
785
+ if buffer is NULL:
786
+ return
787
+ if self._buffer is NULL or flags & python.PyBUF_WRITABLE:
788
+ self._saveToStringAndSize(<xmlChar**>&buffer.buf, &l)
789
+ buffer.len = l
790
+ if self._buffer is NULL and not flags & python.PyBUF_WRITABLE:
791
+ self._buffer = <xmlChar*>buffer.buf
792
+ self._buffer_len = l
793
+ self._buffer_refcnt = 1
794
+ else:
795
+ buffer.buf = self._buffer
796
+ buffer.len = self._buffer_len
797
+ self._buffer_refcnt += 1
798
+ if flags & python.PyBUF_WRITABLE:
799
+ buffer.readonly = 0
800
+ else:
801
+ buffer.readonly = 1
802
+ if flags & python.PyBUF_FORMAT:
803
+ buffer.format = "B"
804
+ else:
805
+ buffer.format = NULL
806
+ buffer.ndim = 0
807
+ buffer.shape = NULL
808
+ buffer.strides = NULL
809
+ buffer.suboffsets = NULL
810
+ buffer.itemsize = 1
811
+ buffer.internal = NULL
812
+ if buffer.obj is not self: # set by Cython?
813
+ buffer.obj = self
814
+
815
+ def __releasebuffer__(self, Py_buffer* buffer):
816
+ if buffer is NULL:
817
+ return
818
+ if <xmlChar*>buffer.buf is self._buffer:
819
+ self._buffer_refcnt -= 1
820
+ if self._buffer_refcnt == 0:
821
+ tree.xmlFree(<char*>self._buffer)
822
+ self._buffer = NULL
823
+ else:
824
+ tree.xmlFree(<char*>buffer.buf)
825
+ buffer.buf = NULL
826
+
827
+ property xslt_profile:
828
+ """Return an ElementTree with profiling data for the stylesheet run.
829
+ """
830
+ def __get__(self):
831
+ cdef object root
832
+ if self._profile is None:
833
+ return None
834
+ root = self._profile.getroot()
835
+ if root is None:
836
+ return None
837
+ return ElementTree(root)
838
+
839
+ def __del__(self):
840
+ self._profile = None
841
+
842
cdef _xsltResultTreeFactory(_Document doc, XSLT xslt, _Document profile):
    # Build an _XSLTResultTree wrapper for ``doc``, keeping the XSLT object
    # (and the optional profiling document) alive for the tree's lifetime.
    cdef _XSLTResultTree result
    result = <_XSLTResultTree>_newElementTree(doc, None, _XSLTResultTree)
    result._xslt = xslt
    result._profile = profile
    return result
848
+
849
# Module-import-time libxslt setup.
# functions like "output" and "write" are a potential security risk, but we
# rely on the user to configure XSLTAccessControl as needed
xslt.xsltRegisterAllExtras()

# enable EXSLT support for XSLT
xslt.exsltRegisterAll()
855
+
856
+
857
+ ################################################################################
858
+ # XSLT PI support
859
+
860
# Matches the single- or double-quoted ``href`` pseudo-attribute inside the
# text of an <?xml-stylesheet?> processing instruction; one capture group
# per quoting style.
cdef object _RE_PI_HREF = re.compile(r'\s+href\s*=\s*(?:\'([^\']*)\'|"([^"]*)")')
cdef object _FIND_PI_HREF = _RE_PI_HREF.findall
cdef object _REPLACE_PI_HREF = _RE_PI_HREF.sub
# Lazily created XPath object, cached by _findStylesheetByID().
cdef XPath __findStylesheetByID = None
864
+
865
cdef _findStylesheetByID(_Document doc, id):
    # Find embedded <xsl:stylesheet> elements in ``doc`` whose xml:id
    # attribute equals ``id``.  The XPath evaluator is created once on
    # first use and cached in the module-level global.
    global __findStylesheetByID
    if __findStylesheetByID is None:
        __findStylesheetByID = XPath(
            "//xsl:stylesheet[@xml:id = $id]",
            namespaces={"xsl" : "http://www.w3.org/1999/XSL/Transform"})
    return __findStylesheetByID(doc, id=id)
872
+
873
cdef class _XSLTProcessingInstruction(PIBase):
    # Specialised PI node for <?xml-stylesheet?> processing instructions.
    def parseXSL(self, parser=None):
        """parseXSL(self, parser=None)

        Try to parse the stylesheet referenced by this PI and return
        an ElementTree for it. If the stylesheet is embedded in the
        same document (referenced via xml:id), find and return an
        ElementTree for the stylesheet Element.

        The optional ``parser`` keyword argument can be passed to specify the
        parser used to read from external stylesheet URLs.
        """
        cdef _Document result_doc
        cdef _Element result_node
        cdef bytes href_utf
        cdef const_xmlChar* c_href
        cdef xmlAttr* c_attr
        _assertValidNode(self)
        if self._c_node.content is NULL:
            raise ValueError, "PI lacks content"
        # Leading space lets _RE_PI_HREF's \s+ prefix match at position 0.
        hrefs = _FIND_PI_HREF(' ' + (<unsigned char*>self._c_node.content).decode('UTF-8'))
        if len(hrefs) != 1:
            raise ValueError, "malformed PI attributes"
        hrefs = hrefs[0]
        # One of the two capture groups (single- vs double-quoted) is empty.
        href_utf = utf8(hrefs[0] or hrefs[1])
        c_href = _xcstr(href_utf)

        if c_href[0] != c'#':
            # normal URL, try to parse from it
            c_href = tree.xmlBuildURI(
                c_href,
                tree.xmlNodeGetBase(self._c_node.doc, self._c_node))
            if c_href is not NULL:
                try:
                    # Copy into a Python bytes object before freeing.
                    href_utf = <unsigned char*>c_href
                finally:
                    tree.xmlFree(<char*>c_href)
            result_doc = _parseDocumentFromURL(href_utf, parser)
            return _elementTreeFactory(result_doc, None)

        # ID reference to embedded stylesheet
        # try XML:ID lookup
        _assertValidDoc(self._doc)
        c_href += 1  # skip leading '#'
        c_attr = tree.xmlGetID(self._c_node.doc, c_href)
        if c_attr is not NULL and c_attr.doc is self._c_node.doc:
            result_node = _elementFactory(self._doc, c_attr.parent)
            return _elementTreeFactory(result_node._doc, result_node)

        # try XPath search
        root = _findStylesheetByID(self._doc, funicode(c_href))
        if not root:
            raise ValueError, "reference to non-existing embedded stylesheet"
        elif len(root) > 1:
            raise ValueError, "ambiguous reference to embedded stylesheet"
        result_node = root[0]
        return _elementTreeFactory(result_node._doc, result_node)

    def set(self, key, value):
        """set(self, key, value)

        Supports setting the 'href' pseudo-attribute in the text of
        the processing instruction.
        """
        if key != "href":
            raise AttributeError, \
                "only setting the 'href' attribute is supported on XSLT-PIs"
        if value is None:
            attrib = ""
        elif '"' in value or '>' in value:
            # These characters would break the PI text / pseudo-attribute.
            raise ValueError, "Invalid URL, must not contain '\"' or '>'"
        else:
            attrib = f' href="{value}"'
        # Leading space makes the regex prefix match, as in parseXSL().
        text = ' ' + self.text
        if _FIND_PI_HREF(text):
            # Replace the existing href pseudo-attribute in place.
            self.text = _REPLACE_PI_HREF(attrib, text)
        else:
            self.text = text + attrib
llmeval-env/lib/python3.10/site-packages/lxml/xsltext.pxi ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# XSLT extension elements

cdef class XSLTExtension:
    """Base class of an XSLT extension element.
    """
    def execute(self, context, self_node, input_node, output_parent):
        """execute(self, context, self_node, input_node, output_parent)
        Execute this extension element.

        Subclasses must override this method. They may append
        elements to the `output_parent` element here, or set its text
        content. To this end, the `input_node` provides read-only
        access to the current node in the input document, and the
        `self_node` points to the extension element in the stylesheet.

        Note that the `output_parent` parameter may be `None` if there
        is no parent element in the current context (e.g. no content
        was added to the output tree yet).
        """
        pass

    def apply_templates(self, _XSLTContext context not None, node, output_parent=None,
                        *, elements_only=False, remove_blank_text=False):
        """apply_templates(self, context, node, output_parent=None, elements_only=False, remove_blank_text=False)

        Call this method to retrieve the result of applying templates
        to an element.

        The return value is a list of elements or text strings that
        were generated by the XSLT processor. If you pass
        ``elements_only=True``, strings will be discarded from the result
        list. The option ``remove_blank_text=True`` will only discard
        strings that consist entirely of whitespace (e.g. formatting).
        These options do not apply to Elements, only to bare string results.

        If you pass an Element as `output_parent` parameter, the result
        will instead be appended to the element (including attributes
        etc.) and the return value will be `None`. This is a safe way
        to generate content into the output document directly, without
        having to take care of special values like text or attributes.
        Note that the string discarding options will be ignored in this
        case.
        """
        cdef xmlNode* c_parent
        cdef xmlNode* c_node
        cdef xmlNode* c_context_node
        assert context._xsltCtxt is not NULL, "XSLT context not initialised"
        c_context_node = _roNodeOf(node)
        #assert c_context_node.doc is context._xsltContext.node.doc, \
        #    "switching input documents during transformation is not currently supported"

        if output_parent is not None:
            c_parent = _nonRoNodeOf(output_parent)
        else:
            # Throw-away collector element; its children are harvested by
            # _collectXSLTResultContent() and it is freed afterwards.
            c_parent = tree.xmlNewDocNode(
                context._xsltCtxt.output, NULL, <unsigned char*>"fake-parent", NULL)

        # Temporarily redirect the transform's insertion point, run the
        # templates for the context node, then restore it.
        c_node = context._xsltCtxt.insert
        context._xsltCtxt.insert = c_parent
        xslt.xsltProcessOneNode(
            context._xsltCtxt, c_context_node, NULL)
        context._xsltCtxt.insert = c_node

        if output_parent is not None:
            return None

        try:
            return self._collectXSLTResultContent(
                context, c_parent, elements_only, remove_blank_text)
        finally:
            # free all intermediate nodes that will not be freed by proxies
            tree.xmlFreeNode(c_parent)

    def process_children(self, _XSLTContext context not None, output_parent=None,
                         *, elements_only=False, remove_blank_text=False):
        """process_children(self, context, output_parent=None, elements_only=False, remove_blank_text=False)

        Call this method to process the XSLT content of the extension
        element itself.

        The return value is a list of elements or text strings that
        were generated by the XSLT processor. If you pass
        ``elements_only=True``, strings will be discarded from the result
        list. The option ``remove_blank_text=True`` will only discard
        strings that consist entirely of whitespace (e.g. formatting).
        These options do not apply to Elements, only to bare string results.

        If you pass an Element as `output_parent` parameter, the result
        will instead be appended to the element (including attributes
        etc.) and the return value will be `None`. This is a safe way
        to generate content into the output document directly, without
        having to take care of special values like text or attributes.
        Note that the string discarding options will be ignored in this
        case.
        """
        cdef xmlNode* c_parent
        cdef xslt.xsltTransformContext* c_ctxt = context._xsltCtxt
        cdef xmlNode* c_old_output_parent = c_ctxt.insert
        assert context._xsltCtxt is not NULL, "XSLT context not initialised"

        # output_parent node is used for adding results instead of
        # elements list used in apply_templates, that's easier and allows to
        # use attributes added to extension element with <xsl:attribute>.

        if output_parent is not None:
            c_parent = _nonRoNodeOf(output_parent)
        else:
            # Throw-away collector element, as in apply_templates().
            c_parent = tree.xmlNewDocNode(
                context._xsltCtxt.output, NULL, <unsigned char*>"fake-parent", NULL)

        # Redirect the insertion point, apply the extension element's own
        # template content, then restore the previous insertion point.
        c_ctxt.insert = c_parent
        xslt.xsltApplyOneTemplate(c_ctxt,
            c_ctxt.node, c_ctxt.inst.children, NULL, NULL)
        c_ctxt.insert = c_old_output_parent

        if output_parent is not None:
            return None

        try:
            return self._collectXSLTResultContent(
                context, c_parent, elements_only, remove_blank_text)
        finally:
            # free all intermediate nodes that will not be freed by proxies
            tree.xmlFreeNode(c_parent)

    cdef _collectXSLTResultContent(self, _XSLTContext context, xmlNode* c_parent,
                                   bint elements_only, bint remove_blank_text):
        # Harvest the children of the collector node ``c_parent`` into a
        # Python list of strings and read-only element proxies, applying the
        # string-filtering options from apply_templates()/process_children().
        cdef xmlNode* c_node
        cdef xmlNode* c_next
        cdef _ReadOnlyProxy proxy
        cdef list results = [] # or maybe _collectAttributes(c_parent, 2) ?
        c_node = c_parent.children
        while c_node is not NULL:
            c_next = c_node.next
            if c_node.type == tree.XML_TEXT_NODE:
                if not elements_only:
                    s = funicode(c_node.content)
                    if not remove_blank_text or s.strip():
                        results.append(s)
                    s = None
            elif c_node.type == tree.XML_ELEMENT_NODE:
                proxy = _newReadOnlyProxy(
                    context._extension_element_proxy, c_node)
                results.append(proxy)
                # unlink node and make sure it will be freed later on
                tree.xmlUnlinkNode(c_node)
                proxy.free_after_use()
            else:
                raise TypeError, \
                    f"unsupported XSLT result type: {c_node.type}"
            c_node = c_next
        return results
153
+
154
+
155
cdef _registerXSLTExtensions(xslt.xsltTransformContext* c_ctxt,
                             extension_dict):
    # Register each (namespace, name) pair from ``extension_dict`` with
    # libxslt, all dispatching to the shared _callExtensionElement callback.
    for ns_utf, name_utf in extension_dict:
        xslt.xsltRegisterExtElement(
            c_ctxt, _xcstr(name_utf), _xcstr(ns_utf),
            <xslt.xsltTransformFunction>_callExtensionElement)
161
+
162
cdef void _callExtensionElement(xslt.xsltTransformContext* c_ctxt,
                                xmlNode* c_context_node,
                                xmlNode* c_inst_node,
                                void* dummy) noexcept with gil:
    # C callback invoked by libxslt for every registered extension element.
    # Looks up the Python XSLTExtension in the context, builds read-only
    # proxies for the involved nodes, runs execute(), and converts any
    # Python exception into an XSLT transform error (it must never
    # propagate out of this noexcept callback).
    cdef _XSLTContext context
    cdef XSLTExtension extension
    cdef python.PyObject* dict_result
    cdef xmlNode* c_node
    cdef _ReadOnlyProxy context_node = None, self_node = None
    cdef object output_parent # not restricted to ro-nodes
    c_uri = _getNs(c_inst_node)
    if c_uri is NULL:
        # not allowed, and should never happen
        return
    if c_ctxt.xpathCtxt.userData is NULL:
        # just for safety, should never happen
        return
    context = <_XSLTContext>c_ctxt.xpathCtxt.userData
    try:
        try:
            dict_result = python.PyDict_GetItem(
                context._extension_elements, (c_uri, c_inst_node.name))
            if dict_result is NULL:
                raise KeyError, f"extension element {funicode(c_inst_node.name)} not found"
            extension = <object>dict_result

            try:
                # build the context proxy nodes
                self_node = _newReadOnlyProxy(None, c_inst_node)
                if _isElement(c_ctxt.insert):
                    output_parent = _newAppendOnlyProxy(self_node, c_ctxt.insert)
                else:
                    # may be the document node or other stuff
                    output_parent = _newOpaqueAppendOnlyNodeWrapper(c_ctxt.insert)
                if c_context_node.type in (tree.XML_DOCUMENT_NODE,
                                           tree.XML_HTML_DOCUMENT_NODE):
                    # Use the document's root element as the context node.
                    c_node = tree.xmlDocGetRootElement(<xmlDoc*>c_context_node)
                    if c_node is not NULL:
                        context_node = _newReadOnlyProxy(self_node, c_node)
                    else:
                        context_node = None
                elif c_context_node.type in (tree.XML_ATTRIBUTE_NODE,
                                             tree.XML_TEXT_NODE,
                                             tree.XML_CDATA_SECTION_NODE):
                    # this isn't easy to support using read-only
                    # nodes, as the smart-string factory must
                    # instantiate the parent proxy somehow...
                    raise TypeError(f"Unsupported element type: {c_context_node.type}")
                else:
                    context_node = _newReadOnlyProxy(self_node, c_context_node)

                # run the XSLT extension
                context._extension_element_proxy = self_node
                extension.execute(context, self_node, context_node, output_parent)
            finally:
                # Invalidate all proxies so user code cannot keep references
                # to nodes that libxslt may free after this callback.
                context._extension_element_proxy = None
                if self_node is not None:
                    _freeReadOnlyProxies(self_node)
        except Exception as e:
            try:
                e = unicode(e).encode("UTF-8")
            except:
                e = repr(e).encode("UTF-8")
            message = python.PyBytes_FromFormat(
                "Error executing extension element '%s': %s",
                c_inst_node.name, _cstr(e))
            xslt.xsltTransformError(c_ctxt, NULL, c_inst_node, "%s", message)
            context._exc._store_raised()
        except:
            # just in case
            message = python.PyBytes_FromFormat(
                "Error executing extension element '%s'", c_inst_node.name)
            xslt.xsltTransformError(c_ctxt, NULL, c_inst_node, "%s", message)
            context._exc._store_raised()
    except:
        # no Python functions here - everything can fail...
        xslt.xsltTransformError(c_ctxt, NULL, c_inst_node,
                                "Error during XSLT extension element evaluation")
        context._exc._store_raised()
    finally:
        return  # swallow any further exceptions
llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/METADATA ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: scikit-learn
3
+ Version: 1.4.2
4
+ Summary: A set of python modules for machine learning and data mining
5
+ Home-page: https://scikit-learn.org
6
+ Download-URL: https://pypi.org/project/scikit-learn/#files
7
+ Maintainer: Andreas Mueller
8
+ Maintainer-email: [email protected]
9
+ License: new BSD
10
+ Project-URL: Bug Tracker, https://github.com/scikit-learn/scikit-learn/issues
11
+ Project-URL: Documentation, https://scikit-learn.org/stable/documentation.html
12
+ Project-URL: Source Code, https://github.com/scikit-learn/scikit-learn
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: BSD License
16
+ Classifier: Programming Language :: C
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Topic :: Software Development
19
+ Classifier: Topic :: Scientific/Engineering
20
+ Classifier: Development Status :: 5 - Production/Stable
21
+ Classifier: Operating System :: Microsoft :: Windows
22
+ Classifier: Operating System :: POSIX
23
+ Classifier: Operating System :: Unix
24
+ Classifier: Operating System :: MacOS
25
+ Classifier: Programming Language :: Python :: 3
26
+ Classifier: Programming Language :: Python :: 3.9
27
+ Classifier: Programming Language :: Python :: 3.10
28
+ Classifier: Programming Language :: Python :: 3.11
29
+ Classifier: Programming Language :: Python :: 3.12
30
+ Classifier: Programming Language :: Python :: Implementation :: CPython
31
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
32
+ Requires-Python: >=3.9
33
+ License-File: COPYING
34
+ Requires-Dist: numpy >=1.19.5
35
+ Requires-Dist: scipy >=1.6.0
36
+ Requires-Dist: joblib >=1.2.0
37
+ Requires-Dist: threadpoolctl >=2.0.0
38
+ Provides-Extra: benchmark
39
+ Requires-Dist: matplotlib >=3.3.4 ; extra == 'benchmark'
40
+ Requires-Dist: pandas >=1.1.5 ; extra == 'benchmark'
41
+ Requires-Dist: memory-profiler >=0.57.0 ; extra == 'benchmark'
42
+ Provides-Extra: docs
43
+ Requires-Dist: matplotlib >=3.3.4 ; extra == 'docs'
44
+ Requires-Dist: scikit-image >=0.17.2 ; extra == 'docs'
45
+ Requires-Dist: pandas >=1.1.5 ; extra == 'docs'
46
+ Requires-Dist: seaborn >=0.9.0 ; extra == 'docs'
47
+ Requires-Dist: memory-profiler >=0.57.0 ; extra == 'docs'
48
+ Requires-Dist: sphinx >=6.0.0 ; extra == 'docs'
49
+ Requires-Dist: sphinx-copybutton >=0.5.2 ; extra == 'docs'
50
+ Requires-Dist: sphinx-gallery >=0.15.0 ; extra == 'docs'
51
+ Requires-Dist: numpydoc >=1.2.0 ; extra == 'docs'
52
+ Requires-Dist: Pillow >=7.1.2 ; extra == 'docs'
53
+ Requires-Dist: pooch >=1.6.0 ; extra == 'docs'
54
+ Requires-Dist: sphinx-prompt >=1.3.0 ; extra == 'docs'
55
+ Requires-Dist: sphinxext-opengraph >=0.4.2 ; extra == 'docs'
56
+ Requires-Dist: plotly >=5.14.0 ; extra == 'docs'
57
+ Provides-Extra: examples
58
+ Requires-Dist: matplotlib >=3.3.4 ; extra == 'examples'
59
+ Requires-Dist: scikit-image >=0.17.2 ; extra == 'examples'
60
+ Requires-Dist: pandas >=1.1.5 ; extra == 'examples'
61
+ Requires-Dist: seaborn >=0.9.0 ; extra == 'examples'
62
+ Requires-Dist: pooch >=1.6.0 ; extra == 'examples'
63
+ Requires-Dist: plotly >=5.14.0 ; extra == 'examples'
64
+ Provides-Extra: tests
65
+ Requires-Dist: matplotlib >=3.3.4 ; extra == 'tests'
66
+ Requires-Dist: scikit-image >=0.17.2 ; extra == 'tests'
67
+ Requires-Dist: pandas >=1.1.5 ; extra == 'tests'
68
+ Requires-Dist: pytest >=7.1.2 ; extra == 'tests'
69
+ Requires-Dist: pytest-cov >=2.9.0 ; extra == 'tests'
70
+ Requires-Dist: ruff >=0.0.272 ; extra == 'tests'
71
+ Requires-Dist: black >=23.3.0 ; extra == 'tests'
72
+ Requires-Dist: mypy >=1.3 ; extra == 'tests'
73
+ Requires-Dist: pyamg >=4.0.0 ; extra == 'tests'
74
+ Requires-Dist: polars >=0.19.12 ; extra == 'tests'
75
+ Requires-Dist: pyarrow >=12.0.0 ; extra == 'tests'
76
+ Requires-Dist: numpydoc >=1.2.0 ; extra == 'tests'
77
+ Requires-Dist: pooch >=1.6.0 ; extra == 'tests'
78
+
79
+ .. -*- mode: rst -*-
80
+
81
+ |Azure|_ |CirrusCI|_ |Codecov|_ |CircleCI|_ |Nightly wheels|_ |Black|_ |PythonVersion|_ |PyPi|_ |DOI|_ |Benchmark|_
82
+
83
+ .. |Azure| image:: https://dev.azure.com/scikit-learn/scikit-learn/_apis/build/status/scikit-learn.scikit-learn?branchName=main
84
+ .. _Azure: https://dev.azure.com/scikit-learn/scikit-learn/_build/latest?definitionId=1&branchName=main
85
+
86
+ .. |CircleCI| image:: https://circleci.com/gh/scikit-learn/scikit-learn/tree/main.svg?style=shield
87
+ .. _CircleCI: https://circleci.com/gh/scikit-learn/scikit-learn
88
+
89
+ .. |CirrusCI| image:: https://img.shields.io/cirrus/github/scikit-learn/scikit-learn/main?label=Cirrus%20CI
90
+ .. _CirrusCI: https://cirrus-ci.com/github/scikit-learn/scikit-learn/main
91
+
92
+ .. |Codecov| image:: https://codecov.io/gh/scikit-learn/scikit-learn/branch/main/graph/badge.svg?token=Pk8G9gg3y9
93
+ .. _Codecov: https://codecov.io/gh/scikit-learn/scikit-learn
94
+
95
+ .. |Nightly wheels| image:: https://github.com/scikit-learn/scikit-learn/workflows/Wheel%20builder/badge.svg?event=schedule
96
+ .. _`Nightly wheels`: https://github.com/scikit-learn/scikit-learn/actions?query=workflow%3A%22Wheel+builder%22+event%3Aschedule
97
+
98
+ .. |PythonVersion| image:: https://img.shields.io/pypi/pyversions/scikit-learn.svg
99
+ .. _PythonVersion: https://pypi.org/project/scikit-learn/
100
+
101
+ .. |PyPi| image:: https://img.shields.io/pypi/v/scikit-learn
102
+ .. _PyPi: https://pypi.org/project/scikit-learn
103
+
104
+ .. |Black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
105
+ .. _Black: https://github.com/psf/black
106
+
107
+ .. |DOI| image:: https://zenodo.org/badge/21369/scikit-learn/scikit-learn.svg
108
+ .. _DOI: https://zenodo.org/badge/latestdoi/21369/scikit-learn/scikit-learn
109
+
110
+ .. |Benchmark| image:: https://img.shields.io/badge/Benchmarked%20by-asv-blue
111
+ .. _`Benchmark`: https://scikit-learn.org/scikit-learn-benchmarks/
112
+
113
+ .. |PythonMinVersion| replace:: 3.9
114
+ .. |NumPyMinVersion| replace:: 1.19.5
115
+ .. |SciPyMinVersion| replace:: 1.6.0
116
+ .. |JoblibMinVersion| replace:: 1.2.0
117
+ .. |ThreadpoolctlMinVersion| replace:: 2.0.0
118
+ .. |MatplotlibMinVersion| replace:: 3.3.4
119
+ .. |Scikit-ImageMinVersion| replace:: 0.17.2
120
+ .. |PandasMinVersion| replace:: 1.1.5
121
+ .. |SeabornMinVersion| replace:: 0.9.0
122
+ .. |PytestMinVersion| replace:: 7.1.2
123
+ .. |PlotlyMinVersion| replace:: 5.14.0
124
+
125
+ .. image:: https://raw.githubusercontent.com/scikit-learn/scikit-learn/main/doc/logos/scikit-learn-logo.png
126
+ :target: https://scikit-learn.org/
127
+
128
+ **scikit-learn** is a Python module for machine learning built on top of
129
+ SciPy and is distributed under the 3-Clause BSD license.
130
+
131
+ The project was started in 2007 by David Cournapeau as a Google Summer
132
+ of Code project, and since then many volunteers have contributed. See
133
+ the `About us <https://scikit-learn.org/dev/about.html#authors>`__ page
134
+ for a list of core contributors.
135
+
136
+ It is currently maintained by a team of volunteers.
137
+
138
+ Website: https://scikit-learn.org
139
+
140
+ Installation
141
+ ------------
142
+
143
+ Dependencies
144
+ ~~~~~~~~~~~~
145
+
146
+ scikit-learn requires:
147
+
148
+ - Python (>= |PythonMinVersion|)
149
+ - NumPy (>= |NumPyMinVersion|)
150
+ - SciPy (>= |SciPyMinVersion|)
151
+ - joblib (>= |JoblibMinVersion|)
152
+ - threadpoolctl (>= |ThreadpoolctlMinVersion|)
153
+
154
+
155
+
156
+ **Scikit-learn 0.20 was the last version to support Python 2.7 and Python 3.4.**
157
+ scikit-learn 1.0 and later require Python 3.7 or newer.
158
+ scikit-learn 1.1 and later require Python 3.8 or newer.
159
+
160
+ Scikit-learn plotting capabilities (i.e., functions start with ``plot_`` and
161
+ classes end with ``Display``) require Matplotlib (>= |MatplotlibMinVersion|).
162
+ For running the examples Matplotlib >= |MatplotlibMinVersion| is required.
163
+ A few examples require scikit-image >= |Scikit-ImageMinVersion|, a few examples
164
+ require pandas >= |PandasMinVersion|, some examples require seaborn >=
165
+ |SeabornMinVersion| and plotly >= |PlotlyMinVersion|.
166
+
167
+ User installation
168
+ ~~~~~~~~~~~~~~~~~
169
+
170
+ If you already have a working installation of NumPy and SciPy,
171
+ the easiest way to install scikit-learn is using ``pip``::
172
+
173
+ pip install -U scikit-learn
174
+
175
+ or ``conda``::
176
+
177
+ conda install -c conda-forge scikit-learn
178
+
179
+ The documentation includes more detailed `installation instructions <https://scikit-learn.org/stable/install.html>`_.
180
+
181
+
182
+ Changelog
183
+ ---------
184
+
185
+ See the `changelog <https://scikit-learn.org/dev/whats_new.html>`__
186
+ for a history of notable changes to scikit-learn.
187
+
188
+ Development
189
+ -----------
190
+
191
+ We welcome new contributors of all experience levels. The scikit-learn
192
+ community goals are to be helpful, welcoming, and effective. The
193
+ `Development Guide <https://scikit-learn.org/stable/developers/index.html>`_
194
+ has detailed information about contributing code, documentation, tests, and
195
+ more. We've included some basic information in this README.
196
+
197
+ Important links
198
+ ~~~~~~~~~~~~~~~
199
+
200
+ - Official source code repo: https://github.com/scikit-learn/scikit-learn
201
+ - Download releases: https://pypi.org/project/scikit-learn/
202
+ - Issue tracker: https://github.com/scikit-learn/scikit-learn/issues
203
+
204
+ Source code
205
+ ~~~~~~~~~~~
206
+
207
+ You can check the latest sources with the command::
208
+
209
+ git clone https://github.com/scikit-learn/scikit-learn.git
210
+
211
+ Contributing
212
+ ~~~~~~~~~~~~
213
+
214
+ To learn more about making a contribution to scikit-learn, please see our
215
+ `Contributing guide
216
+ <https://scikit-learn.org/dev/developers/contributing.html>`_.
217
+
218
+ Testing
219
+ ~~~~~~~
220
+
221
+ After installation, you can launch the test suite from outside the source
222
+ directory (you will need to have ``pytest`` >= |PytestMinVersion| installed)::
223
+
224
+ pytest sklearn
225
+
226
+ See the web page https://scikit-learn.org/dev/developers/contributing.html#testing-and-improving-test-coverage
227
+ for more information.
228
+
229
+ Random number generation can be controlled during testing by setting
230
+ the ``SKLEARN_SEED`` environment variable.
231
+
232
+ Submitting a Pull Request
233
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
234
+
235
+ Before opening a Pull Request, have a look at the
236
+ full Contributing page to make sure your code complies
237
+ with our guidelines: https://scikit-learn.org/stable/developers/index.html
238
+
239
+ Project History
240
+ ---------------
241
+
242
+ The project was started in 2007 by David Cournapeau as a Google Summer
243
+ of Code project, and since then many volunteers have contributed. See
244
+ the `About us <https://scikit-learn.org/dev/about.html#authors>`__ page
245
+ for a list of core contributors.
246
+
247
+ The project is currently maintained by a team of volunteers.
248
+
249
+ **Note**: `scikit-learn` was previously referred to as `scikits.learn`.
250
+
251
+ Help and Support
252
+ ----------------
253
+
254
+ Documentation
255
+ ~~~~~~~~~~~~~
256
+
257
+ - HTML documentation (stable release): https://scikit-learn.org
258
+ - HTML documentation (development version): https://scikit-learn.org/dev/
259
+ - FAQ: https://scikit-learn.org/stable/faq.html
260
+
261
+ Communication
262
+ ~~~~~~~~~~~~~
263
+
264
+ - Mailing list: https://mail.python.org/mailman/listinfo/scikit-learn
265
+ - Gitter: https://gitter.im/scikit-learn/scikit-learn
266
+ - Logos & Branding: https://github.com/scikit-learn/scikit-learn/tree/main/doc/logos
267
+ - Blog: https://blog.scikit-learn.org
268
+ - Calendar: https://blog.scikit-learn.org/calendar/
269
+ - Twitter: https://twitter.com/scikit_learn
270
+ - Stack Overflow: https://stackoverflow.com/questions/tagged/scikit-learn
271
+ - Github Discussions: https://github.com/scikit-learn/scikit-learn/discussions
272
+ - Website: https://scikit-learn.org
273
+ - LinkedIn: https://www.linkedin.com/company/scikit-learn
274
+ - YouTube: https://www.youtube.com/channel/UCJosFjYm0ZYVUARxuOZqnnw/playlists
275
+ - Facebook: https://www.facebook.com/scikitlearnofficial/
276
+ - Instagram: https://www.instagram.com/scikitlearnofficial/
277
+ - TikTok: https://www.tiktok.com/@scikit.learn
278
+
279
+ Citation
280
+ ~~~~~~~~
281
+
282
+ If you use scikit-learn in a scientific publication, we would appreciate citations: https://scikit-learn.org/stable/about.html#citing-scikit-learn
llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/RECORD ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.43.0)
3
+ Root-Is-Purelib: false
4
+ Tag: cp310-cp310-manylinux_2_17_x86_64
5
+ Tag: cp310-cp310-manylinux2014_x86_64
6
+
llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ sklearn
llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ from typing import List, Tuple, Union
3
+
4
+
5
+ Offsets = Tuple[int, int]
6
+
7
+ TextInputSequence = str
8
+ """A :obj:`str` that represents an input sequence """
9
+
10
+ PreTokenizedInputSequence = Union[List[str], Tuple[str]]
11
+ """A pre-tokenized input sequence. Can be one of:
12
+
13
+ - A :obj:`List` of :obj:`str`
14
+ - A :obj:`Tuple` of :obj:`str`
15
+ """
16
+
17
+ TextEncodeInput = Union[
18
+ TextInputSequence,
19
+ Tuple[TextInputSequence, TextInputSequence],
20
+ List[TextInputSequence],
21
+ ]
22
+ """Represents a textual input for encoding. Can be either:
23
+
24
+ - A single sequence: :data:`~tokenizers.TextInputSequence`
25
+ - A pair of sequences:
26
+
27
+ - A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
28
+ - Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
29
+ """
30
+
31
+ PreTokenizedEncodeInput = Union[
32
+ PreTokenizedInputSequence,
33
+ Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
34
+ List[PreTokenizedInputSequence],
35
+ ]
36
+ """Represents a pre-tokenized input for encoding. Can be either:
37
+
38
+ - A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
39
+ - A pair of sequences:
40
+
41
+ - A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
42
+ - Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
43
+ """
44
+
45
+ InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
46
+ """Represents all the possible types of input sequences for encoding. Can be:
47
+
48
+ - When ``is_pretokenized=False``: :data:`~TextInputSequence`
49
+ - When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
50
+ """
51
+
52
+ EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
53
+ """Represents all the possible types of input for encoding. Can be:
54
+
55
+ - When ``is_pretokenized=False``: :data:`~TextEncodeInput`
56
+ - When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
57
+ """
58
+
59
+
60
class OffsetReferential(Enum):
    """Referential in which offsets are expressed: the original input
    text or the normalized text."""
    ORIGINAL = "original"
    NORMALIZED = "normalized"


class OffsetType(Enum):
    """Unit in which offsets are counted."""
    BYTE = "byte"
    CHAR = "char"


class SplitDelimiterBehavior(Enum):
    """How a split operation treats the matched delimiter."""
    REMOVED = "removed"
    ISOLATED = "isolated"
    MERGED_WITH_PREVIOUS = "merged_with_previous"
    MERGED_WITH_NEXT = "merged_with_next"
    CONTIGUOUS = "contiguous"
77
+
78
# Re-exports from the compiled core extension module.
from .tokenizers import (
    AddedToken,
    Encoding,
    NormalizedString,
    PreTokenizedString,
    Regex,
    Token,
    Tokenizer,
    decoders,
    models,
    normalizers,
    pre_tokenizers,
    processors,
    trainers,
    __version__,
)
# High-level, pre-configured tokenizer implementations.
from .implementations import (
    BertWordPieceTokenizer,
    ByteLevelBPETokenizer,
    CharBPETokenizer,
    SentencePieceBPETokenizer,
    SentencePieceUnigramTokenizer,
)
llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.pyi ADDED
@@ -0,0 +1,1200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Generated content DO NOT EDIT
2
+ class AddedToken:
3
+ """
4
+ Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`.
5
+ It can have special options that defines the way it should behave.
6
+
7
+ Args:
8
+ content (:obj:`str`): The content of the token
9
+
10
+ single_word (:obj:`bool`, defaults to :obj:`False`):
11
+ Defines whether this token should only match single words. If :obj:`True`, this
12
+ token will never match inside of a word. For example the token ``ing`` would match
13
+ on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
14
+ The notion of "`inside of a word`" is defined by the word boundaries pattern in
15
+ regular expressions (ie. the token should start and end with word boundaries).
16
+
17
+ lstrip (:obj:`bool`, defaults to :obj:`False`):
18
+ Defines whether this token should strip all potential whitespaces on its left side.
19
+ If :obj:`True`, this token will greedily match any whitespace on its left. For
20
+ example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
21
+ ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
22
+
23
+ rstrip (:obj:`bool`, defaults to :obj:`False`):
24
+ Defines whether this token should strip all potential whitespaces on its right
25
+ side. If :obj:`True`, this token will greedily match any whitespace on its right.
26
+ It works just like :obj:`lstrip` but on the right.
27
+
28
+ normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
29
+ Defines whether this token should match against the normalized version of the input
30
+ text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
31
+ lowercasing the text, the token could be extract from the input ``"I saw a lion
32
+ Yesterday"``.
33
+ special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
34
+ Defines whether this token should be skipped when decoding.
35
+
36
+ """
37
+ def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
38
+ pass
39
+
40
+ @property
41
+ def content(self):
42
+ """
43
+ Get the content of this :obj:`AddedToken`
44
+ """
45
+ pass
46
+
47
+ @property
48
+ def lstrip(self):
49
+ """
50
+ Get the value of the :obj:`lstrip` option
51
+ """
52
+ pass
53
+
54
+ @property
55
+ def normalized(self):
56
+ """
57
+ Get the value of the :obj:`normalized` option
58
+ """
59
+ pass
60
+
61
+ @property
62
+ def rstrip(self):
63
+ """
64
+ Get the value of the :obj:`rstrip` option
65
+ """
66
+ pass
67
+
68
+ @property
69
+ def single_word(self):
70
+ """
71
+ Get the value of the :obj:`single_word` option
72
+ """
73
+ pass
74
+
75
+ @property
76
+ def special(self):
77
+ """
78
+ Get the value of the :obj:`special` option
79
+ """
80
+ pass
81
+
82
+ class Encoding:
83
+ """
84
+ The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
85
+ """
86
+ @property
87
+ def attention_mask(self):
88
+ """
89
+ The attention mask
90
+
91
+ This indicates to the LM which tokens should be attended to, and which should not.
92
+ This is especially important when batching sequences, where we need to applying
93
+ padding.
94
+
95
+ Returns:
96
+ :obj:`List[int]`: The attention mask
97
+ """
98
+ pass
99
+
100
+ def char_to_token(self, char_pos, sequence_index=0):
101
+ """
102
+ Get the token that contains the char at the given position in the input sequence.
103
+
104
+ Args:
105
+ char_pos (:obj:`int`):
106
+ The position of a char in the input string
107
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
108
+ The index of the sequence that contains the target char
109
+
110
+ Returns:
111
+ :obj:`int`: The index of the token that contains this char in the encoded sequence
112
+ """
113
+ pass
114
+
115
+ def char_to_word(self, char_pos, sequence_index=0):
116
+ """
117
+ Get the word that contains the char at the given position in the input sequence.
118
+
119
+ Args:
120
+ char_pos (:obj:`int`):
121
+ The position of a char in the input string
122
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
123
+ The index of the sequence that contains the target char
124
+
125
+ Returns:
126
+ :obj:`int`: The index of the word that contains this char in the input sequence
127
+ """
128
+ pass
129
+
130
+ @property
131
+ def ids(self):
132
+ """
133
+ The generated IDs
134
+
135
+ The IDs are the main input to a Language Model. They are the token indices,
136
+ the numerical representations that a LM understands.
137
+
138
+ Returns:
139
+ :obj:`List[int]`: The list of IDs
140
+ """
141
+ pass
142
+
143
+ @staticmethod
144
+ def merge(encodings, growing_offsets=True):
145
+ """
146
+ Merge the list of encodings into one final :class:`~tokenizers.Encoding`
147
+
148
+ Args:
149
+ encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
150
+ The list of encodings that should be merged in one
151
+
152
+ growing_offsets (:obj:`bool`, defaults to :obj:`True`):
153
+ Whether the offsets should accumulate while merging
154
+
155
+ Returns:
156
+ :class:`~tokenizers.Encoding`: The resulting Encoding
157
+ """
158
+ pass
159
+
160
+ @property
161
+ def n_sequences(self):
162
+ """
163
+ The number of sequences represented
164
+
165
+ Returns:
166
+ :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
167
+ """
168
+ pass
169
+
170
+ @property
171
+ def offsets(self):
172
+ """
173
+ The offsets associated to each token
174
+
175
+ These offsets let's you slice the input string, and thus retrieve the original
176
+ part that led to producing the corresponding token.
177
+
178
+ Returns:
179
+ A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
180
+ """
181
+ pass
182
+
183
+ @property
184
+ def overflowing(self):
185
+ """
186
+ A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
187
+
188
+ When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
189
+ the output into as many pieces as required to match the specified maximum length.
190
+ This field lets you retrieve all the subsequent pieces.
191
+
192
+ When you use pairs of sequences, the overflowing pieces will contain enough
193
+ variations to cover all the possible combinations, while respecting the provided
194
+ maximum length.
195
+ """
196
+ pass
197
+
198
+ def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
199
+ """
200
+ Pad the :class:`~tokenizers.Encoding` at the given length
201
+
202
+ Args:
203
+ length (:obj:`int`):
204
+ The desired length
205
+
206
+ direction: (:obj:`str`, defaults to :obj:`right`):
207
+ The expected padding direction. Can be either :obj:`right` or :obj:`left`
208
+
209
+ pad_id (:obj:`int`, defaults to :obj:`0`):
210
+ The ID corresponding to the padding token
211
+
212
+ pad_type_id (:obj:`int`, defaults to :obj:`0`):
213
+ The type ID corresponding to the padding token
214
+
215
+ pad_token (:obj:`str`, defaults to `[PAD]`):
216
+ The pad token to use
217
+ """
218
+ pass
219
+
220
+ @property
221
+ def sequence_ids(self):
222
+ """
223
+ The generated sequence indices.
224
+
225
+ They represent the index of the input sequence associated to each token.
226
+ The sequence id can be None if the token is not related to any input sequence,
227
+ like for example with special tokens.
228
+
229
+ Returns:
230
+ A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
231
+ """
232
+ pass
233
+
234
+ def set_sequence_id(self, sequence_id):
235
+ """
236
+ Set the given sequence index
237
+
238
+ Set the given sequence index for the whole range of tokens contained in this
239
+ :class:`~tokenizers.Encoding`.
240
+ """
241
+ pass
242
+
243
+ @property
244
+ def special_tokens_mask(self):
245
+ """
246
+ The special token mask
247
+
248
+ This indicates which tokens are special tokens, and which are not.
249
+
250
+ Returns:
251
+ :obj:`List[int]`: The special tokens mask
252
+ """
253
+ pass
254
+
255
+ def token_to_chars(self, token_index):
256
+ """
257
+ Get the offsets of the token at the given index.
258
+
259
+ The returned offsets are related to the input sequence that contains the
260
+ token. In order to determine in which input sequence it belongs, you
261
+ must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
262
+
263
+ Args:
264
+ token_index (:obj:`int`):
265
+ The index of a token in the encoded sequence.
266
+
267
+ Returns:
268
+ :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
269
+ """
270
+ pass
271
+
272
+ def token_to_sequence(self, token_index):
273
+ """
274
+ Get the index of the sequence represented by the given token.
275
+
276
+ In the general use case, this method returns :obj:`0` for a single sequence or
277
+ the first sequence of a pair, and :obj:`1` for the second sequence of a pair
278
+
279
+ Args:
280
+ token_index (:obj:`int`):
281
+ The index of a token in the encoded sequence.
282
+
283
+ Returns:
284
+ :obj:`int`: The sequence id of the given token
285
+ """
286
+ pass
287
+
288
+ def token_to_word(self, token_index):
289
+ """
290
+ Get the index of the word that contains the token in one of the input sequences.
291
+
292
+ The returned word index is related to the input sequence that contains
293
+ the token. In order to determine in which input sequence it belongs, you
294
+ must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
295
+
296
+ Args:
297
+ token_index (:obj:`int`):
298
+ The index of a token in the encoded sequence.
299
+
300
+ Returns:
301
+ :obj:`int`: The index of the word in the relevant input sequence.
302
+ """
303
+ pass
304
+
305
+ @property
306
+ def tokens(self):
307
+ """
308
+ The generated tokens
309
+
310
+ They are the string representation of the IDs.
311
+
312
+ Returns:
313
+ :obj:`List[str]`: The list of tokens
314
+ """
315
+ pass
316
+
317
+ def truncate(self, max_length, stride=0, direction="right"):
318
+ """
319
+ Truncate the :class:`~tokenizers.Encoding` at the given length
320
+
321
+ If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
322
+ this information is lost. It will be considered as representing a single sequence.
323
+
324
+ Args:
325
+ max_length (:obj:`int`):
326
+ The desired length
327
+
328
+ stride (:obj:`int`, defaults to :obj:`0`):
329
+ The length of previous content to be included in each overflowing piece
330
+
331
+ direction (:obj:`str`, defaults to :obj:`right`):
332
+ Truncate direction
333
+ """
334
+ pass
335
+
336
+ @property
337
+ def type_ids(self):
338
+ """
339
+ The generated type IDs
340
+
341
+ Generally used for tasks like sequence classification or question answering,
342
+ these tokens let the LM know which input sequence corresponds to each tokens.
343
+
344
+ Returns:
345
+ :obj:`List[int]`: The list of type ids
346
+ """
347
+ pass
348
+
349
+ @property
350
+ def word_ids(self):
351
+ """
352
+ The generated word indices.
353
+
354
+ They represent the index of the word associated to each token.
355
+ When the input is pre-tokenized, they correspond to the ID of the given input label,
356
+ otherwise they correspond to the words indices as defined by the
357
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
358
+
359
+ For special tokens and such (any token that was generated from something that was
360
+ not part of the input), the output is :obj:`None`
361
+
362
+ Returns:
363
+ A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
364
+ """
365
+ pass
366
+
367
+ def word_to_chars(self, word_index, sequence_index=0):
368
+ """
369
+ Get the offsets of the word at the given index in one of the input sequences.
370
+
371
+ Args:
372
+ word_index (:obj:`int`):
373
+ The index of a word in one of the input sequences.
374
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
375
+ The index of the sequence that contains the target word
376
+
377
+ Returns:
378
+ :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
379
+ """
380
+ pass
381
+
382
+ def word_to_tokens(self, word_index, sequence_index=0):
383
+ """
384
+ Get the encoded tokens corresponding to the word at the given index
385
+ in one of the input sequences.
386
+
387
+ Args:
388
+ word_index (:obj:`int`):
389
+ The index of a word in one of the input sequences.
390
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
391
+ The index of the sequence that contains the target word
392
+
393
+ Returns:
394
+ :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
395
+ """
396
+ pass
397
+
398
+ @property
399
+ def words(self):
400
+ """
401
+ The generated word indices.
402
+
403
+ .. warning::
404
+ This is deprecated and will be removed in a future version.
405
+ Please use :obj:`~tokenizers.Encoding.word_ids` instead.
406
+
407
+ They represent the index of the word associated to each token.
408
+ When the input is pre-tokenized, they correspond to the ID of the given input label,
409
+ otherwise they correspond to the words indices as defined by the
410
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
411
+
412
+ For special tokens and such (any token that was generated from something that was
413
+ not part of the input), the output is :obj:`None`
414
+
415
+ Returns:
416
+ A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
417
+ """
418
+ pass
419
+
420
+ class NormalizedString:
421
+ """
422
+ NormalizedString
423
+
424
+ A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
425
+ While making all the requested modifications, it keeps track of the alignment information
426
+ between the two versions of the string.
427
+
428
+ Args:
429
+ sequence: str:
430
+ The string sequence used to initialize this NormalizedString
431
+ """
432
+ def append(self, s):
433
+ """
434
+ Append the given sequence to the string
435
+ """
436
+ pass
437
+
438
+ def clear(self):
439
+ """
440
+ Clears the string
441
+ """
442
+ pass
443
+
444
+ def filter(self, func):
445
+ """
446
+ Filter each character of the string using the given func
447
+ """
448
+ pass
449
+
450
+ def for_each(self, func):
451
+ """
452
+ Calls the given function for each character of the string
453
+ """
454
+ pass
455
+
456
+ def lowercase(self):
457
+ """
458
+ Lowercase the string
459
+ """
460
+ pass
461
+
462
+ def lstrip(self):
463
+ """
464
+ Strip the left of the string
465
+ """
466
+ pass
467
+
468
+ def map(self, func):
469
+ """
470
+ Calls the given function for each character of the string
471
+
472
+ Replaces each character of the string using the returned value. Each
473
+ returned value **must** be a str of length 1 (ie a character).
474
+ """
475
+ pass
476
+
477
+ def nfc(self):
478
+ """
479
+ Runs the NFC normalization
480
+ """
481
+ pass
482
+
483
+ def nfd(self):
484
+ """
485
+ Runs the NFD normalization
486
+ """
487
+ pass
488
+
489
+ def nfkc(self):
490
+ """
491
+ Runs the NFKC normalization
492
+ """
493
+ pass
494
+
495
+ def nfkd(self):
496
+ """
497
+ Runs the NFKD normalization
498
+ """
499
+ pass
500
+
501
+ @property
502
+ def normalized(self):
503
+ """
504
+ The normalized part of the string
505
+ """
506
+ pass
507
+
508
+ def prepend(self, s):
509
+ """
510
+ Prepend the given sequence to the string
511
+ """
512
+ pass
513
+
514
+ def replace(self, pattern, content):
515
+ """
516
+ Replace the content of the given pattern with the provided content
517
+
518
+ Args:
519
+ pattern: Pattern:
520
+ A pattern used to match the string. Usually a string or a Regex
521
+
522
+ content: str:
523
+ The content to be used as replacement
524
+ """
525
+ pass
526
+
527
+ def rstrip(self):
528
+ """
529
+ Strip the right of the string
530
+ """
531
+ pass
532
+
533
+ def slice(self, range):
534
+ """
535
+ Slice the string using the given range
536
+ """
537
+ pass
538
+
539
+ def split(self, pattern, behavior):
540
+ """
541
+ Split the NormalizedString using the given pattern and the specified behavior
542
+
543
+ Args:
544
+ pattern: Pattern:
545
+ A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
546
+
547
+ behavior: SplitDelimiterBehavior:
548
+ The behavior to use when splitting.
549
+ Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
550
+ "contiguous"
551
+
552
+ Returns:
553
+ A list of NormalizedString, representing each split
554
+ """
555
+ pass
556
+
557
+ def strip(self):
558
+ """
559
+ Strip both ends of the string
560
+ """
561
+ pass
562
+
563
+ def uppercase(self):
564
+ """
565
+ Uppercase the string
566
+ """
567
+ pass
568
+
569
+ class PreTokenizedString:
570
+ """
571
+ PreTokenizedString
572
+
573
+ Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
574
+ underlying string, while keeping track of the alignment information (offsets).
575
+
576
+ The PreTokenizedString manages what we call `splits`. Each split represents a substring
577
+ which is a subpart of the original string, with the relevant offsets and tokens.
578
+
579
+ When calling one of the methods used to modify the PreTokenizedString (namely one of
580
+ `split`, `normalize` or `tokenize), only the `splits` that don't have any associated
581
+ tokens will get modified.
582
+
583
+ Args:
584
+ sequence: str:
585
+ The string sequence used to initialize this PreTokenizedString
586
+ """
587
+ def __init__(self, sequence):
588
+ pass
589
+
590
+ def get_splits(self, offset_referential="original", offset_type="char"):
591
+ """
592
+ Get the splits currently managed by the PreTokenizedString
593
+
594
+ Args:
595
+ offset_referential: :obj:`str`
596
+ Whether the returned splits should have offsets expressed relative
597
+ to the original string, or the normalized one. choices: "original", "normalized".
598
+
599
+ offset_type: :obj:`str`
600
+ Whether the returned splits should have offsets expressed in bytes or chars.
601
+ When slicing an str, we usually want to use chars, which is the default value.
602
+ Now in some cases it might be interesting to get these offsets expressed in bytes,
603
+ so it is possible to change this here.
604
+ choices: "char", "bytes"
605
+
606
+ Returns
607
+ A list of splits
608
+ """
609
+ pass
610
+
611
+ def normalize(self, func):
612
+ """
613
+ Normalize each split of the `PreTokenizedString` using the given `func`
614
+
615
+ Args:
616
+ func: Callable[[NormalizedString], None]:
617
+ The function used to normalize each underlying split. This function
618
+ does not need to return anything, just calling the methods on the provided
619
+ NormalizedString allow its modification.
620
+ """
621
+ pass
622
+
623
+ def split(self, func):
624
+ """
625
+ Split the PreTokenizedString using the given `func`
626
+
627
+ Args:
628
+ func: Callable[[index, NormalizedString], List[NormalizedString]]:
629
+ The function used to split each underlying split.
630
+ It is expected to return a list of `NormalizedString`, that represent the new
631
+ splits. If the given `NormalizedString` does not need any splitting, we can
632
+ just return it directly.
633
+ In order for the offsets to be tracked accurately, any returned `NormalizedString`
634
+ should come from calling either `.split` or `.slice` on the received one.
635
+ """
636
+ pass
637
+
638
+ def to_encoding(self, type_id=0, word_idx=None):
639
+ """
640
+ Return an Encoding generated from this PreTokenizedString
641
+
642
+ Args:
643
+ type_id: int = 0:
644
+ The type_id to be used on the generated Encoding.
645
+
646
+ word_idx: Optional[int] = None:
647
+ An optional word index to be used for each token of this Encoding. If provided,
648
+ all the word indices in the generated Encoding will use this value, instead
649
+ of the one automatically tracked during pre-tokenization.
650
+
651
+ Returns:
652
+ An Encoding
653
+ """
654
+ pass
655
+
656
+ def tokenize(self, func):
657
+ """
658
+ Tokenize each split of the `PreTokenizedString` using the given `func`
659
+
660
+ Args:
661
+ func: Callable[[str], List[Token]]:
662
+ The function used to tokenize each underlying split. This function must return
663
+ a list of Token generated from the input str.
664
+ """
665
+ pass
666
+
667
+ class Regex:
668
+ """
669
+ Instantiate a new Regex with the given pattern
670
+ """
671
+ def __init__(self, pattern):
672
+ pass
673
+
674
+ class Token:
675
+ pass
676
+
677
+ class Tokenizer:
678
+ """
679
+ A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
680
+ and outputs an :class:`~tokenizers.Encoding`.
681
+
682
+ Args:
683
+ model (:class:`~tokenizers.models.Model`):
684
+ The core algorithm that this :obj:`Tokenizer` should be using.
685
+
686
+ """
687
+ def __init__(self, model):
688
+ pass
689
+
690
+ def add_special_tokens(self, tokens):
691
+ """
692
+ Add the given special tokens to the Tokenizer.
693
+
694
+ If these tokens are already part of the vocabulary, it just let the Tokenizer know about
695
+ them. If they don't exist, the Tokenizer creates them, giving them a new id.
696
+
697
+ These special tokens will never be processed by the model (ie won't be split into
698
+ multiple tokens), and they can be removed from the output when decoding.
699
+
700
+ Args:
701
+ tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
702
+ The list of special tokens we want to add to the vocabulary. Each token can either
703
+ be a string or an instance of :class:`~tokenizers.AddedToken` for more
704
+ customization.
705
+
706
+ Returns:
707
+ :obj:`int`: The number of tokens that were created in the vocabulary
708
+ """
709
+ pass
710
+
711
+ def add_tokens(self, tokens):
712
+ """
713
+ Add the given tokens to the vocabulary
714
+
715
+ The given tokens are added only if they don't already exist in the vocabulary.
716
+ Each token then gets a new attributed id.
717
+
718
+ Args:
719
+ tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
720
+ The list of tokens we want to add to the vocabulary. Each token can be either a
721
+ string or an instance of :class:`~tokenizers.AddedToken` for more customization.
722
+
723
+ Returns:
724
+ :obj:`int`: The number of tokens that were created in the vocabulary
725
+ """
726
+ pass
727
+
728
+ def decode(self, ids, skip_special_tokens=True):
729
+ """
730
+ Decode the given list of ids back to a string
731
+
732
+ This is used to decode anything coming back from a Language Model
733
+
734
+ Args:
735
+ ids (A :obj:`List/Tuple` of :obj:`int`):
736
+ The list of ids that we want to decode
737
+
738
+ skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
739
+ Whether the special tokens should be removed from the decoded string
740
+
741
+ Returns:
742
+ :obj:`str`: The decoded string
743
+ """
744
+ pass
745
+
746
+ def decode_batch(self, sequences, skip_special_tokens=True):
747
+ """
748
+ Decode a batch of ids back to their corresponding string
749
+
750
+ Args:
751
+ sequences (:obj:`List` of :obj:`List[int]`):
752
+ The batch of sequences we want to decode
753
+
754
+ skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
755
+ Whether the special tokens should be removed from the decoded strings
756
+
757
+ Returns:
758
+ :obj:`List[str]`: A list of decoded strings
759
+ """
760
+ pass
761
+
762
+ @property
763
+ def decoder(self):
764
+ """
765
+ The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
766
+ """
767
+ pass
768
+
769
+ def enable_padding(
770
+ self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
771
+ ):
772
+ """
773
+ Enable the padding
774
+
775
+ Args:
776
+ direction (:obj:`str`, `optional`, defaults to :obj:`right`):
777
+ The direction in which to pad. Can be either ``right`` or ``left``
778
+
779
+ pad_to_multiple_of (:obj:`int`, `optional`):
780
+ If specified, the padding length should always snap to the next multiple of the
781
+ given value. For example if we were going to pad witha length of 250 but
782
+ ``pad_to_multiple_of=8`` then we will pad to 256.
783
+
784
+ pad_id (:obj:`int`, defaults to 0):
785
+ The id to be used when padding
786
+
787
+ pad_type_id (:obj:`int`, defaults to 0):
788
+ The type id to be used when padding
789
+
790
+ pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
791
+ The pad token to be used when padding
792
+
793
+ length (:obj:`int`, `optional`):
794
+ If specified, the length at which to pad. If not specified we pad using the size of
795
+ the longest sequence in a batch.
796
+ """
797
+ pass
798
+
799
+ def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
800
+ """
801
+ Enable truncation
802
+
803
+ Args:
804
+ max_length (:obj:`int`):
805
+ The max length at which to truncate
806
+
807
+ stride (:obj:`int`, `optional`):
808
+ The length of the previous first sequence to be included in the overflowing
809
+ sequence
810
+
811
+ strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
812
+ The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or
813
+ ``only_second``.
814
+
815
+ direction (:obj:`str`, defaults to :obj:`right`):
816
+ Truncate direction
817
+ """
818
+ pass
819
+
820
+ def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
821
+ """
822
+ Encode the given sequence and pair. This method can process raw text sequences
823
+ as well as already pre-tokenized sequences.
824
+
825
+ Example:
826
+ Here are some examples of the inputs that are accepted::
827
+
828
+ encode("A single sequence")`
829
+ encode("A sequence", "And its pair")`
830
+ encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
831
+ encode(
832
+ [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
833
+ is_pretokenized=True
834
+ )
835
+
836
+ Args:
837
+ sequence (:obj:`~tokenizers.InputSequence`):
838
+ The main input sequence we want to encode. This sequence can be either raw
839
+ text or pre-tokenized, according to the ``is_pretokenized`` argument:
840
+
841
+ - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
842
+ - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
843
+
844
+ pair (:obj:`~tokenizers.InputSequence`, `optional`):
845
+ An optional input sequence. The expected format is the same that for ``sequence``.
846
+
847
+ is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
848
+ Whether the input is already pre-tokenized
849
+
850
+ add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
851
+ Whether to add the special tokens
852
+
853
+ Returns:
854
+ :class:`~tokenizers.Encoding`: The encoded result
855
+
856
+ """
857
+ pass
858
+
859
+ def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
860
+ """
861
+ Encode the given batch of inputs. This method accept both raw text sequences
862
+ as well as already pre-tokenized sequences.
863
+
864
+ Example:
865
+ Here are some examples of the inputs that are accepted::
866
+
867
+ encode_batch([
868
+ "A single sequence",
869
+ ("A tuple with a sequence", "And its pair"),
870
+ [ "A", "pre", "tokenized", "sequence" ],
871
+ ([ "A", "pre", "tokenized", "sequence" ], "And its pair")
872
+ ])
873
+
874
+ Args:
875
+ input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
876
+ A list of single sequences or pair sequences to encode. Each sequence
877
+ can be either raw text or pre-tokenized, according to the ``is_pretokenized``
878
+ argument:
879
+
880
+ - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
881
+ - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
882
+
883
+ is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
884
+ Whether the input is already pre-tokenized
885
+
886
+ add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
887
+ Whether to add the special tokens
888
+
889
+ Returns:
890
+ A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
891
+
892
+ """
893
+ pass
894
+
895
+ @property
896
+ def encode_special_tokens(self):
897
+ """
898
+ Modifies the tokenizer in order to use or not the special tokens
899
+ during encoding.
900
+
901
+ Args:
902
+ value (:obj:`bool`):
903
+ Whether to use the special tokens or not
904
+
905
+ """
906
+ pass
907
+
908
+ @staticmethod
909
+ def from_buffer(buffer):
910
+ """
911
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
912
+
913
+ Args:
914
+ buffer (:obj:`bytes`):
915
+ A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
916
+
917
+ Returns:
918
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
919
+ """
920
+ pass
921
+
922
+ @staticmethod
923
+ def from_file(path):
924
+ """
925
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
926
+
927
+ Args:
928
+ path (:obj:`str`):
929
+ A path to a local JSON file representing a previously serialized
930
+ :class:`~tokenizers.Tokenizer`
931
+
932
+ Returns:
933
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
934
+ """
935
+ pass
936
+
937
+ @staticmethod
938
+ def from_pretrained(identifier, revision="main", auth_token=None):
939
+ """
940
+ Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
941
+ Hugging Face Hub.
942
+
943
+ Args:
944
+ identifier (:obj:`str`):
945
+ The identifier of a Model on the Hugging Face Hub, that contains
946
+ a tokenizer.json file
947
+ revision (:obj:`str`, defaults to `main`):
948
+ A branch or commit id
949
+ auth_token (:obj:`str`, `optional`, defaults to `None`):
950
+ An optional auth token used to access private repositories on the
951
+ Hugging Face Hub
952
+
953
+ Returns:
954
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
955
+ """
956
+ pass
957
+
958
+ @staticmethod
959
+ def from_str(json):
960
+ """
961
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
962
+
963
+ Args:
964
+ json (:obj:`str`):
965
+ A valid JSON string representing a previously serialized
966
+ :class:`~tokenizers.Tokenizer`
967
+
968
+ Returns:
969
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
970
+ """
971
+ pass
972
+
973
+ def get_added_tokens_decoder(self):
974
+ """
975
+ Get the underlying vocabulary
976
+
977
+ Returns:
978
+ :obj:`Dict[int, AddedToken]`: The vocabulary
979
+ """
980
+ pass
981
+
982
+ def get_vocab(self, with_added_tokens=True):
983
+ """
984
+ Get the underlying vocabulary
985
+
986
+ Args:
987
+ with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
988
+ Whether to include the added tokens
989
+
990
+ Returns:
991
+ :obj:`Dict[str, int]`: The vocabulary
992
+ """
993
+ pass
994
+
995
+ def get_vocab_size(self, with_added_tokens=True):
996
+ """
997
+ Get the size of the underlying vocabulary
998
+
999
+ Args:
1000
+ with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
1001
+ Whether to include the added tokens
1002
+
1003
+ Returns:
1004
+ :obj:`int`: The size of the vocabulary
1005
+ """
1006
+ pass
1007
+
1008
+ def id_to_token(self, id):
1009
+ """
1010
+ Convert the given id to its corresponding token if it exists
1011
+
1012
+ Args:
1013
+ id (:obj:`int`):
1014
+ The id to convert
1015
+
1016
+ Returns:
1017
+ :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
1018
+ """
1019
+ pass
1020
+
1021
+ @property
1022
+ def model(self):
1023
+ """
1024
+ The :class:`~tokenizers.models.Model` in use by the Tokenizer
1025
+ """
1026
+ pass
1027
+
1028
+ def no_padding(self):
1029
+ """
1030
+ Disable padding
1031
+ """
1032
+ pass
1033
+
1034
+ def no_truncation(self):
1035
+ """
1036
+ Disable truncation
1037
+ """
1038
+ pass
1039
+
1040
+ @property
1041
+ def normalizer(self):
1042
+ """
1043
+ The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
1044
+ """
1045
+ pass
1046
+
1047
+ def num_special_tokens_to_add(self, is_pair):
1048
+ """
1049
+ Return the number of special tokens that would be added for single/pair sentences.
1050
+ :param is_pair: Boolean indicating if the input would be a single sentence or a pair
1051
+ :return:
1052
+ """
1053
+ pass
1054
+
1055
+ @property
1056
+ def padding(self):
1057
+ """
1058
+ Get the current padding parameters
1059
+
1060
+ `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
1061
+
1062
+ Returns:
1063
+ (:obj:`dict`, `optional`):
1064
+ A dict with the current padding parameters if padding is enabled
1065
+ """
1066
+ pass
1067
+
1068
+ def post_process(self, encoding, pair=None, add_special_tokens=True):
1069
+ """
1070
+ Apply all the post-processing steps to the given encodings.
1071
+
1072
+ The various steps are:
1073
+
1074
+ 1. Truncate according to the set truncation params (provided with
1075
+ :meth:`~tokenizers.Tokenizer.enable_truncation`)
1076
+ 2. Apply the :class:`~tokenizers.processors.PostProcessor`
1077
+ 3. Pad according to the set padding params (provided with
1078
+ :meth:`~tokenizers.Tokenizer.enable_padding`)
1079
+
1080
+ Args:
1081
+ encoding (:class:`~tokenizers.Encoding`):
1082
+ The :class:`~tokenizers.Encoding` corresponding to the main sequence.
1083
+
1084
+ pair (:class:`~tokenizers.Encoding`, `optional`):
1085
+ An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
1086
+
1087
+ add_special_tokens (:obj:`bool`):
1088
+ Whether to add the special tokens
1089
+
1090
+ Returns:
1091
+ :class:`~tokenizers.Encoding`: The final post-processed encoding
1092
+ """
1093
+ pass
1094
+
1095
+ @property
1096
+ def post_processor(self):
1097
+ """
1098
+ The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
1099
+ """
1100
+ pass
1101
+
1102
+ @property
1103
+ def pre_tokenizer(self):
1104
+ """
1105
+ The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
1106
+ """
1107
+ pass
1108
+
1109
+ def save(self, path, pretty=True):
1110
+ """
1111
+ Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
1112
+
1113
+ Args:
1114
+ path (:obj:`str`):
1115
+ A path to a file in which to save the serialized tokenizer.
1116
+
1117
+ pretty (:obj:`bool`, defaults to :obj:`True`):
1118
+ Whether the JSON file should be pretty formatted.
1119
+ """
1120
+ pass
1121
+
1122
+ def to_str(self, pretty=False):
1123
+ """
1124
+ Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
1125
+
1126
+ Args:
1127
+ pretty (:obj:`bool`, defaults to :obj:`False`):
1128
+ Whether the JSON string should be pretty formatted.
1129
+
1130
+ Returns:
1131
+ :obj:`str`: A string representing the serialized Tokenizer
1132
+ """
1133
+ pass
1134
+
1135
+ def token_to_id(self, token):
1136
+ """
1137
+ Convert the given token to its corresponding id if it exists
1138
+
1139
+ Args:
1140
+ token (:obj:`str`):
1141
+ The token to convert
1142
+
1143
+ Returns:
1144
+ :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
1145
+ """
1146
+ pass
1147
+
1148
+ def train(self, files, trainer=None):
1149
+ """
1150
+ Train the Tokenizer using the given files.
1151
+
1152
+ Reads the files line by line, while keeping all the whitespace, even new lines.
1153
+ If you want to train from data store in-memory, you can check
1154
+ :meth:`~tokenizers.Tokenizer.train_from_iterator`
1155
+
1156
+ Args:
1157
+ files (:obj:`List[str]`):
1158
+ A list of path to the files that we should use for training
1159
+
1160
+ trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
1161
+ An optional trainer that should be used to train our Model
1162
+ """
1163
+ pass
1164
+
1165
+ def train_from_iterator(self, iterator, trainer=None, length=None):
1166
+ """
1167
+ Train the Tokenizer using the provided iterator.
1168
+
1169
+ You can provide anything that is a Python Iterator
1170
+
1171
+ * A list of sequences :obj:`List[str]`
1172
+ * A generator that yields :obj:`str` or :obj:`List[str]`
1173
+ * A Numpy array of strings
1174
+ * ...
1175
+
1176
+ Args:
1177
+ iterator (:obj:`Iterator`):
1178
+ Any iterator over strings or list of strings
1179
+
1180
+ trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
1181
+ An optional trainer that should be used to train our Model
1182
+
1183
+ length (:obj:`int`, `optional`):
1184
+ The total number of sequences in the iterator. This is used to
1185
+ provide meaningful progress tracking
1186
+ """
1187
+ pass
1188
+
1189
+ @property
1190
+ def truncation(self):
1191
+ """
1192
+ Get the currently set truncation parameters
1193
+
1194
+ `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
1195
+
1196
+ Returns:
1197
+ (:obj:`dict`, `optional`):
1198
+ A dict with the current truncation parameters if truncation is enabled
1199
+ """
1200
+ pass
llmeval-env/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .. import decoders
2
+
3
+
4
+ Decoder = decoders.Decoder
5
+ ByteLevel = decoders.ByteLevel
6
+ Replace = decoders.Replace
7
+ WordPiece = decoders.WordPiece
8
+ ByteFallback = decoders.ByteFallback
9
+ Fuse = decoders.Fuse
10
+ Strip = decoders.Strip
11
+ Metaspace = decoders.Metaspace
12
+ BPEDecoder = decoders.BPEDecoder
13
+ CTC = decoders.CTC
14
+ Sequence = decoders.Sequence
llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Generated content DO NOT EDIT
2
+ class Decoder:
3
+ """
4
+ Base class for all decoders
5
+
6
+ This class is not supposed to be instantiated directly. Instead, any implementation of
7
+ a Decoder will return an instance of this class when instantiated.
8
+ """
9
+ def decode(self, tokens):
10
+ """
11
+ Decode the given list of tokens to a final string
12
+
13
+ Args:
14
+ tokens (:obj:`List[str]`):
15
+ The list of tokens to decode
16
+
17
+ Returns:
18
+ :obj:`str`: The decoded string
19
+ """
20
+ pass
21
+
22
+ class BPEDecoder(Decoder):
23
+ """
24
+ BPEDecoder Decoder
25
+
26
+ Args:
27
+ suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
28
+ The suffix that was used to caracterize an end-of-word. This suffix will
29
+ be replaced by whitespaces during the decoding
30
+ """
31
+ def __init__(self, suffix="</w>"):
32
+ pass
33
+
34
+ def decode(self, tokens):
35
+ """
36
+ Decode the given list of tokens to a final string
37
+
38
+ Args:
39
+ tokens (:obj:`List[str]`):
40
+ The list of tokens to decode
41
+
42
+ Returns:
43
+ :obj:`str`: The decoded string
44
+ """
45
+ pass
46
+
47
+ class ByteFallback(Decoder):
48
+ """
49
+ ByteFallback Decoder
50
+ ByteFallback is a simple trick which converts tokens looking like `<0x61>`
51
+ to pure bytes, and attempts to make them into a string. If the tokens
52
+ cannot be decoded you will get � instead for each inconvertable byte token
53
+
54
+ """
55
+ def __init__(self):
56
+ pass
57
+
58
+ def decode(self, tokens):
59
+ """
60
+ Decode the given list of tokens to a final string
61
+
62
+ Args:
63
+ tokens (:obj:`List[str]`):
64
+ The list of tokens to decode
65
+
66
+ Returns:
67
+ :obj:`str`: The decoded string
68
+ """
69
+ pass
70
+
71
+ class ByteLevel(Decoder):
72
+ """
73
+ ByteLevel Decoder
74
+
75
+ This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
76
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
77
+ """
78
+ def __init__(self):
79
+ pass
80
+
81
+ def decode(self, tokens):
82
+ """
83
+ Decode the given list of tokens to a final string
84
+
85
+ Args:
86
+ tokens (:obj:`List[str]`):
87
+ The list of tokens to decode
88
+
89
+ Returns:
90
+ :obj:`str`: The decoded string
91
+ """
92
+ pass
93
+
94
+ class CTC(Decoder):
95
+ """
96
+ CTC Decoder
97
+
98
+ Args:
99
+ pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
100
+ The pad token used by CTC to delimit a new token.
101
+ word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
102
+ The word delimiter token. It will be replaced by a <space>
103
+ cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
104
+ Whether to cleanup some tokenization artifacts.
105
+ Mainly spaces before punctuation, and some abbreviated english forms.
106
+ """
107
+ def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
108
+ pass
109
+
110
+ def decode(self, tokens):
111
+ """
112
+ Decode the given list of tokens to a final string
113
+
114
+ Args:
115
+ tokens (:obj:`List[str]`):
116
+ The list of tokens to decode
117
+
118
+ Returns:
119
+ :obj:`str`: The decoded string
120
+ """
121
+ pass
122
+
123
+ class Fuse(Decoder):
124
+ """
125
+ Fuse Decoder
126
+ Fuse simply fuses every token into a single string.
127
+ This is the last step of decoding, this decoder exists only if
128
+ there is need to add other decoders *after* the fusion
129
+ """
130
+ def __init__(self):
131
+ pass
132
+
133
+ def decode(self, tokens):
134
+ """
135
+ Decode the given list of tokens to a final string
136
+
137
+ Args:
138
+ tokens (:obj:`List[str]`):
139
+ The list of tokens to decode
140
+
141
+ Returns:
142
+ :obj:`str`: The decoded string
143
+ """
144
+ pass
145
+
146
+ class Metaspace(Decoder):
147
+ """
148
+ Metaspace Decoder
149
+
150
+ Args:
151
+ replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
152
+ The replacement character. Must be exactly one character. By default we
153
+ use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
154
+
155
+ prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
156
+ Whether to add a space to the first word if there isn't already one. This
157
+ lets us treat `hello` exactly like `say hello`.
158
+ Choices: "always", "never", "first". First means the space is only added on the first
159
+ token (relevant when special tokens are used or other pre_tokenizer are used).
160
+ """
161
+ def __init__(self, replacement="▁", prepend_scheme="always", split=True):
162
+ pass
163
+
164
+ def decode(self, tokens):
165
+ """
166
+ Decode the given list of tokens to a final string
167
+
168
+ Args:
169
+ tokens (:obj:`List[str]`):
170
+ The list of tokens to decode
171
+
172
+ Returns:
173
+ :obj:`str`: The decoded string
174
+ """
175
+ pass
176
+
177
+ class Replace(Decoder):
178
+ """
179
+ Replace Decoder
180
+
181
+ This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
182
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
183
+ """
184
+ def __init__(self, pattern, content):
185
+ pass
186
+
187
+ def decode(self, tokens):
188
+ """
189
+ Decode the given list of tokens to a final string
190
+
191
+ Args:
192
+ tokens (:obj:`List[str]`):
193
+ The list of tokens to decode
194
+
195
+ Returns:
196
+ :obj:`str`: The decoded string
197
+ """
198
+ pass
199
+
200
+ class Sequence(Decoder):
201
+ """
202
+ Sequence Decoder
203
+
204
+ Args:
205
+ decoders (:obj:`List[Decoder]`)
206
+ The decoders that need to be chained
207
+ """
208
+ def __init__(self, decoders):
209
+ pass
210
+
211
+ def decode(self, tokens):
212
+ """
213
+ Decode the given list of tokens to a final string
214
+
215
+ Args:
216
+ tokens (:obj:`List[str]`):
217
+ The list of tokens to decode
218
+
219
+ Returns:
220
+ :obj:`str`: The decoded string
221
+ """
222
+ pass
223
+
224
+ class Strip(Decoder):
225
+ """
226
+ Strip normalizer
227
+ Strips n left characters of each token, or n right characters of each token
228
+ """
229
+ def __init__(self, content, left=0, right=0):
230
+ pass
231
+
232
+ def decode(self, tokens):
233
+ """
234
+ Decode the given list of tokens to a final string
235
+
236
+ Args:
237
+ tokens (:obj:`List[str]`):
238
+ The list of tokens to decode
239
+
240
+ Returns:
241
+ :obj:`str`: The decoded string
242
+ """
243
+ pass
244
+
245
+ class WordPiece(Decoder):
246
+ """
247
+ WordPiece Decoder
248
+
249
+ Args:
250
+ prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
251
+ The prefix to use for subwords that are not a beginning-of-word
252
+
253
+ cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
254
+ Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
255
+ and some abbreviated english forms.
256
+ """
257
+ def __init__(self, prefix="##", cleanup=True):
258
+ pass
259
+
260
+ def decode(self, tokens):
261
+ """
262
+ Decode the given list of tokens to a final string
263
+
264
+ Args:
265
+ tokens (:obj:`List[str]`):
266
+ The list of tokens to decode
267
+
268
+ Returns:
269
+ :obj:`str`: The decoded string
270
+ """
271
+ pass
llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (418 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from .base_tokenizer import BaseTokenizer
2
+ from .bert_wordpiece import BertWordPieceTokenizer
3
+ from .byte_level_bpe import ByteLevelBPETokenizer
4
+ from .char_level_bpe import CharBPETokenizer
5
+ from .sentencepiece_bpe import SentencePieceBPETokenizer
6
+ from .sentencepiece_unigram import SentencePieceUnigramTokenizer
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (569 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc ADDED
Binary file (3.88 kB). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc ADDED
Binary file (3.41 kB). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc ADDED
Binary file (4.27 kB). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc ADDED
Binary file (3.27 kB). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc ADDED
Binary file (6.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/base_tokenizer.py ADDED
@@ -0,0 +1,418 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, Optional, Tuple, Union
2
+
3
+ from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
4
+ from tokenizers.decoders import Decoder
5
+ from tokenizers.models import Model
6
+ from tokenizers.normalizers import Normalizer
7
+ from tokenizers.pre_tokenizers import PreTokenizer
8
+ from tokenizers.processors import PostProcessor
9
+
10
+
11
+ Offsets = Tuple[int, int]
12
+
13
+
14
+ class BaseTokenizer:
15
+ def __init__(self, tokenizer: Tokenizer, parameters=None):
16
+ self._tokenizer = tokenizer
17
+ self._parameters = parameters if parameters is not None else {}
18
+
19
+ def __repr__(self):
20
+ return "Tokenizer(vocabulary_size={}, {})".format(
21
+ self._tokenizer.get_vocab_size(),
22
+ ", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
23
+ )
24
+
25
+ def num_special_tokens_to_add(self, is_pair: bool) -> int:
26
+ """
27
+ Return the number of special tokens that would be added for single/pair sentences.
28
+ :param is_pair: Boolean indicating if the input would be a single sentence or a pair
29
+ :return:
30
+ """
31
+ return self._tokenizer.num_special_tokens_to_add(is_pair)
32
+
33
+ def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
34
+ """Returns the vocabulary
35
+
36
+ Args:
37
+ with_added_tokens: boolean:
38
+ Whether to include the added tokens in the vocabulary
39
+
40
+ Returns:
41
+ The vocabulary
42
+ """
43
+ return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
44
+
45
+ def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
46
+ """Returns the added reverse vocabulary
47
+
48
+ Returns:
49
+ The added vocabulary mapping ints to AddedTokens
50
+ """
51
+ return self._tokenizer.get_added_tokens_decoder()
52
+
53
+ def get_vocab_size(self, with_added_tokens: bool = True) -> int:
54
+ """Return the size of vocabulary, with or without added tokens.
55
+
56
+ Args:
57
+ with_added_tokens: (`optional`) bool:
58
+ Whether to count in added special tokens or not
59
+
60
+ Returns:
61
+ Size of vocabulary
62
+ """
63
+ return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
64
+
65
+ def enable_padding(
66
+ self,
67
+ direction: Optional[str] = "right",
68
+ pad_to_multiple_of: Optional[int] = None,
69
+ pad_id: Optional[int] = 0,
70
+ pad_type_id: Optional[int] = 0,
71
+ pad_token: Optional[str] = "[PAD]",
72
+ length: Optional[int] = None,
73
+ ):
74
+ """Change the padding strategy
75
+
76
+ Args:
77
+ direction: (`optional`) str:
78
+ Can be one of: `right` or `left`
79
+
80
+ pad_to_multiple_of: (`optional`) unsigned int:
81
+ If specified, the padding length should always snap to the next multiple of
82
+ the given value. For example if we were going to pad with a length of 250 but
83
+ `pad_to_multiple_of=8` then we will pad to 256.
84
+
85
+ pad_id: (`optional`) unsigned int:
86
+ The indice to be used when padding
87
+
88
+ pad_type_id: (`optional`) unsigned int:
89
+ The type indice to be used when padding
90
+
91
+ pad_token: (`optional`) str:
92
+ The pad token to be used when padding
93
+
94
+ length: (`optional`) unsigned int:
95
+ If specified, the length at which to pad. If not specified
96
+ we pad using the size of the longest sequence in a batch
97
+ """
98
+ return self._tokenizer.enable_padding(
99
+ direction=direction,
100
+ pad_to_multiple_of=pad_to_multiple_of,
101
+ pad_id=pad_id,
102
+ pad_type_id=pad_type_id,
103
+ pad_token=pad_token,
104
+ length=length,
105
+ )
106
+
107
+ def no_padding(self):
108
+ """Disable padding"""
109
+ return self._tokenizer.no_padding()
110
+
111
+ @property
112
+ def padding(self) -> Optional[dict]:
113
+ """Get the current padding parameters
114
+
115
+ Returns:
116
+ None if padding is disabled, a dict with the currently set parameters
117
+ if the padding is enabled.
118
+ """
119
+ return self._tokenizer.padding
120
+
121
+ def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
122
+ """Change the truncation options
123
+
124
+ Args:
125
+ max_length: unsigned int:
126
+ The maximum length at which to truncate
127
+
128
+ stride: (`optional`) unsigned int:
129
+ The length of the previous first sequence to be included
130
+ in the overflowing sequence
131
+
132
+ strategy: (`optional`) str:
133
+ Can be one of `longest_first`, `only_first` or `only_second`
134
+ """
135
+ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
136
+
137
+ def no_truncation(self):
138
+ """Disable truncation"""
139
+ return self._tokenizer.no_truncation()
140
+
141
+ @property
142
+ def truncation(self) -> Optional[dict]:
143
+ """Get the current truncation parameters
144
+
145
+ Returns:
146
+ None if truncation is disabled, a dict with the current truncation parameters if
147
+ truncation is enabled
148
+ """
149
+ return self._tokenizer.truncation
150
+
151
+ def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
152
+ """Add the given tokens to the vocabulary
153
+
154
+ Args:
155
+ tokens: List[Union[str, AddedToken]]:
156
+ A list of tokens to add to the vocabulary. Each token can either be
157
+ a string, or an instance of AddedToken
158
+
159
+ Returns:
160
+ The number of tokens that were added to the vocabulary
161
+ """
162
+ return self._tokenizer.add_tokens(tokens)
163
+
164
+ def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
165
+ """Add the given special tokens to the vocabulary, and treat them as special tokens.
166
+
167
+ The special tokens will never be processed by the model, and will be
168
+ removed while decoding.
169
+
170
+ Args:
171
+ tokens: List[Union[str, AddedToken]]:
172
+ A list of special tokens to add to the vocabulary. Each token can either be
173
+ a string, or an instance of AddedToken
174
+
175
+ Returns:
176
+ The number of tokens that were added to the vocabulary
177
+ """
178
+ return self._tokenizer.add_special_tokens(special_tokens)
179
+
180
+ def normalize(self, sequence: str) -> str:
181
+ """Normalize the given sequence
182
+
183
+ Args:
184
+ sequence: str:
185
+ The sequence to normalize
186
+
187
+ Returns:
188
+ The normalized string
189
+ """
190
+ return self._tokenizer.normalize(sequence)
191
+
192
+ def encode(
193
+ self,
194
+ sequence: InputSequence,
195
+ pair: Optional[InputSequence] = None,
196
+ is_pretokenized: bool = False,
197
+ add_special_tokens: bool = True,
198
+ ) -> Encoding:
199
+ """Encode the given sequence and pair. This method can process raw text sequences as well
200
+ as already pre-tokenized sequences.
201
+
202
+ Args:
203
+ sequence: InputSequence:
204
+ The sequence we want to encode. This sequence can be either raw text or
205
+ pre-tokenized, according to the `is_pretokenized` argument:
206
+
207
+ - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
208
+ - If `is_pretokenized=True`: `InputSequence` is expected to be
209
+ `Union[List[str], Tuple[str]]`
210
+
211
+ is_pretokenized: bool:
212
+ Whether the input is already pre-tokenized.
213
+
214
+ add_special_tokens: bool:
215
+ Whether to add the special tokens while encoding.
216
+
217
+ Returns:
218
+ An Encoding
219
+ """
220
+ if sequence is None:
221
+ raise ValueError("encode: `sequence` can't be `None`")
222
+
223
+ return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
224
+
225
+ def encode_batch(
226
+ self,
227
+ inputs: List[EncodeInput],
228
+ is_pretokenized: bool = False,
229
+ add_special_tokens: bool = True,
230
+ ) -> List[Encoding]:
231
+ """Encode the given inputs. This method accept both raw text sequences as well as already
232
+ pre-tokenized sequences.
233
+
234
+ Args:
235
+ inputs: List[EncodeInput]:
236
+ A list of single sequences or pair sequences to encode. Each `EncodeInput` is
237
+ expected to be of the following form:
238
+ `Union[InputSequence, Tuple[InputSequence, InputSequence]]`
239
+
240
+ Each `InputSequence` can either be raw text or pre-tokenized,
241
+ according to the `is_pretokenized` argument:
242
+
243
+ - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
244
+ - If `is_pretokenized=True`: `InputSequence` is expected to be
245
+ `Union[List[str], Tuple[str]]`
246
+
247
+ is_pretokenized: bool:
248
+ Whether the input is already pre-tokenized.
249
+
250
+ add_special_tokens: bool:
251
+ Whether to add the special tokens while encoding.
252
+
253
+ Returns:
254
+ A list of Encoding
255
+ """
256
+
257
+ if inputs is None:
258
+ raise ValueError("encode_batch: `inputs` can't be `None`")
259
+
260
+ return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
261
+
262
+ def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
263
+ """Decode the given list of ids to a string sequence
264
+
265
+ Args:
266
+ ids: List[unsigned int]:
267
+ A list of ids to be decoded
268
+
269
+ skip_special_tokens: (`optional`) boolean:
270
+ Whether to remove all the special tokens from the output string
271
+
272
+ Returns:
273
+ The decoded string
274
+ """
275
+ if ids is None:
276
+ raise ValueError("None input is not valid. Should be a list of integers.")
277
+
278
+ return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
279
+
280
+ def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> str:
281
+ """Decode the list of sequences to a list of string sequences
282
+
283
+ Args:
284
+ sequences: List[List[unsigned int]]:
285
+ A list of sequence of ids to be decoded
286
+
287
+ skip_special_tokens: (`optional`) boolean:
288
+ Whether to remove all the special tokens from the output strings
289
+
290
+ Returns:
291
+ A list of decoded strings
292
+ """
293
+ if sequences is None:
294
+ raise ValueError("None input is not valid. Should be list of list of integers.")
295
+
296
+ return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
297
+
298
+ def token_to_id(self, token: str) -> Optional[int]:
299
+ """Convert the given token to its corresponding id
300
+
301
+ Args:
302
+ token: str:
303
+ The token to convert
304
+
305
+ Returns:
306
+ The corresponding id if it exists, None otherwise
307
+ """
308
+ return self._tokenizer.token_to_id(token)
309
+
310
+ def id_to_token(self, id: int) -> Optional[str]:
311
+ """Convert the given token id to its corresponding string
312
+
313
+ Args:
314
+ token: id:
315
+ The token id to convert
316
+
317
+ Returns:
318
+ The corresponding string if it exists, None otherwise
319
+ """
320
+ return self._tokenizer.id_to_token(id)
321
+
322
+ def save_model(self, directory: str, prefix: Optional[str] = None):
323
+ """Save the current model to the given directory
324
+
325
+ Args:
326
+ directory: str:
327
+ A path to the destination directory
328
+
329
+ prefix: (Optional) str:
330
+ An optional prefix, used to prefix each file name
331
+ """
332
+ return self._tokenizer.model.save(directory, prefix=prefix)
333
+
334
+ def save(self, path: str, pretty: bool = True):
335
+ """Save the current Tokenizer at the given path
336
+
337
+ Args:
338
+ path: str:
339
+ A path to the destination Tokenizer file
340
+ """
341
+ return self._tokenizer.save(path, pretty)
342
+
343
+ def to_str(self, pretty: bool = False):
344
+ """Get a serialized JSON version of the Tokenizer as a str
345
+
346
+ Args:
347
+ pretty: bool:
348
+ Whether the JSON string should be prettified
349
+
350
+ Returns:
351
+ str
352
+ """
353
+ return self._tokenizer.to_str(pretty)
354
+
355
+ def post_process(
356
+ self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
357
+ ) -> Encoding:
358
+ """Apply all the post-processing steps to the given encodings.
359
+
360
+ The various steps are:
361
+ 1. Truncate according to global params (provided to `enable_truncation`)
362
+ 2. Apply the PostProcessor
363
+ 3. Pad according to global params. (provided to `enable_padding`)
364
+
365
+ Args:
366
+ encoding: Encoding:
367
+ The main Encoding to post process
368
+
369
+ pair: Optional[Encoding]:
370
+ An optional pair Encoding
371
+
372
+ add_special_tokens: bool:
373
+ Whether to add special tokens
374
+
375
+ Returns:
376
+ The resulting Encoding
377
+ """
378
+ return self._tokenizer.post_process(encoding, pair, add_special_tokens)
379
+
380
+ @property
381
+ def model(self) -> Model:
382
+ return self._tokenizer.model
383
+
384
+ @model.setter
385
+ def model(self, model: Model):
386
+ self._tokenizer.model = model
387
+
388
+ @property
389
+ def normalizer(self) -> Normalizer:
390
+ return self._tokenizer.normalizer
391
+
392
+ @normalizer.setter
393
+ def normalizer(self, normalizer: Normalizer):
394
+ self._tokenizer.normalizer = normalizer
395
+
396
+ @property
397
+ def pre_tokenizer(self) -> PreTokenizer:
398
+ return self._tokenizer.pre_tokenizer
399
+
400
+ @pre_tokenizer.setter
401
+ def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
402
+ self._tokenizer.pre_tokenizer = pre_tokenizer
403
+
404
+ @property
405
+ def post_processor(self) -> PostProcessor:
406
+ return self._tokenizer.post_processor
407
+
408
+ @post_processor.setter
409
+ def post_processor(self, post_processor: PostProcessor):
410
+ self._tokenizer.post_processor = post_processor
411
+
412
+ @property
413
+ def decoder(self) -> Decoder:
414
+ return self._tokenizer.decoder
415
+
416
+ @decoder.setter
417
+ def decoder(self, decoder: Decoder):
418
+ self._tokenizer.decoder = decoder
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/bert_wordpiece.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Iterator, List, Optional, Union
2
+
3
+ from tokenizers import AddedToken, Tokenizer, decoders, trainers
4
+ from tokenizers.models import WordPiece
5
+ from tokenizers.normalizers import BertNormalizer
6
+ from tokenizers.pre_tokenizers import BertPreTokenizer
7
+ from tokenizers.processors import BertProcessing
8
+
9
+ from .base_tokenizer import BaseTokenizer
10
+
11
+
12
class BertWordPieceTokenizer(BaseTokenizer):
    """Bert WordPiece Tokenizer.

    A WordPiece model combined with the BERT normalizer (text cleaning,
    optional lowercasing / accent stripping / Chinese-character handling),
    the BERT pre-tokenizer, and — when a vocabulary is provided — the
    ``[CLS] ... [SEP]`` post-processor.
    """

    # Default special tokens used when training, matching the original BERT setup.
    _DEFAULT_SPECIAL_TOKENS = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        unk_token: Union[str, AddedToken] = "[UNK]",
        sep_token: Union[str, AddedToken] = "[SEP]",
        cls_token: Union[str, AddedToken] = "[CLS]",
        pad_token: Union[str, AddedToken] = "[PAD]",
        mask_token: Union[str, AddedToken] = "[MASK]",
        clean_text: bool = True,
        handle_chinese_chars: bool = True,
        strip_accents: Optional[bool] = None,
        lowercase: bool = True,
        wordpieces_prefix: str = "##",
    ):
        if vocab is not None:
            tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
        else:
            tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))

        # Let the tokenizer know about special tokens if they are part of the vocab
        for token in (unk_token, sep_token, cls_token, pad_token, mask_token):
            if tokenizer.token_to_id(str(token)) is not None:
                tokenizer.add_special_tokens([str(token)])

        tokenizer.normalizer = BertNormalizer(
            clean_text=clean_text,
            handle_chinese_chars=handle_chinese_chars,
            strip_accents=strip_accents,
            lowercase=lowercase,
        )
        tokenizer.pre_tokenizer = BertPreTokenizer()

        if vocab is not None:
            # The post-processor needs the ids of [SEP] and [CLS]; without a
            # vocabulary those ids do not exist yet, so it is only attached here.
            sep_token_id = tokenizer.token_to_id(str(sep_token))
            if sep_token_id is None:
                raise TypeError("sep_token not found in the vocabulary")
            cls_token_id = tokenizer.token_to_id(str(cls_token))
            if cls_token_id is None:
                raise TypeError("cls_token not found in the vocabulary")

            tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
        tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)

        parameters = {
            "model": "BertWordPiece",
            "unk_token": unk_token,
            "sep_token": sep_token,
            "cls_token": cls_token,
            "pad_token": pad_token,
            "mask_token": mask_token,
            "clean_text": clean_text,
            "handle_chinese_chars": handle_chinese_chars,
            "strip_accents": strip_accents,
            "lowercase": lowercase,
            "wordpieces_prefix": wordpieces_prefix,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab: str, **kwargs):
        """Instantiate from a vocabulary file on disk."""
        vocab = WordPiece.read_file(vocab)
        return BertWordPieceTokenizer(vocab, **kwargs)

    @classmethod
    def _build_trainer(
        cls,
        vocab_size: int,
        min_frequency: int,
        limit_alphabet: int,
        initial_alphabet: Optional[List[str]],
        special_tokens: Optional[List[Union[str, AddedToken]]],
        show_progress: bool,
        wordpieces_prefix: str,
    ):
        """Build a WordPieceTrainer, resolving None defaults (avoids mutable default args)."""
        if initial_alphabet is None:
            initial_alphabet = []
        if special_tokens is None:
            special_tokens = list(cls._DEFAULT_SPECIAL_TOKENS)
        return trainers.WordPieceTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            special_tokens=special_tokens,
            show_progress=show_progress,
            continuing_subword_prefix=wordpieces_prefix,
        )

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        show_progress: bool = True,
        wordpieces_prefix: str = "##",
    ):
        """Train the model using the given files.

        `initial_alphabet`/`special_tokens` default to `[]` and the BERT
        special tokens respectively when left as None.
        """
        trainer = self._build_trainer(
            vocab_size,
            min_frequency,
            limit_alphabet,
            initial_alphabet,
            special_tokens,
            show_progress,
            wordpieces_prefix,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        show_progress: bool = True,
        wordpieces_prefix: str = "##",
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator.

        `length`, when given, is the total number of sequences in the iterator
        (used for progress reporting only).
        """
        trainer = self._build_trainer(
            vocab_size,
            min_frequency,
            limit_alphabet,
            initial_alphabet,
            special_tokens,
            show_progress,
            wordpieces_prefix,
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
2
+
3
+ from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
4
+ from tokenizers.models import BPE
5
+ from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
6
+
7
+ from .base_tokenizer import BaseTokenizer
8
+
9
+
10
class ByteLevelBPETokenizer(BaseTokenizer):
    """ByteLevelBPETokenizer

    Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
        add_prefix_space: bool = False,
        lowercase: bool = False,
        dropout: Optional[float] = None,
        unicode_normalizer: Optional[str] = None,
        continuing_subword_prefix: Optional[str] = None,
        end_of_word_suffix: Optional[str] = None,
        trim_offsets: bool = False,
    ):
        # A trained model needs both the vocab and the merges; otherwise start empty.
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(
                BPE(
                    vocab,
                    merges,
                    dropout=dropout,
                    continuing_subword_prefix=continuing_subword_prefix or "",
                    end_of_word_suffix=end_of_word_suffix or "",
                )
            )
        else:
            tokenizer = Tokenizer(BPE())

        # Check for Unicode normalization first (before everything else)
        normalizers = []

        if unicode_normalizer:
            normalizers += [unicode_normalizer_from_str(unicode_normalizer)]

        if lowercase:
            normalizers += [Lowercase()]

        # Create the normalizer structure
        if len(normalizers) > 0:
            if len(normalizers) > 1:
                tokenizer.normalizer = Sequence(normalizers)
            else:
                tokenizer.normalizer = normalizers[0]

        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
        tokenizer.decoder = decoders.ByteLevel()
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)

        parameters = {
            "model": "ByteLevelBPE",
            "add_prefix_space": add_prefix_space,
            "lowercase": lowercase,
            "dropout": dropout,
            "unicode_normalizer": unicode_normalizer,
            "continuing_subword_prefix": continuing_subword_prefix,
            "end_of_word_suffix": end_of_word_suffix,
            "trim_offsets": trim_offsets,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        """Instantiate from vocab/merges files on disk."""
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return ByteLevelBPETokenizer(vocab, merges, **kwargs)

    @staticmethod
    def _build_trainer(
        vocab_size: int,
        min_frequency: int,
        show_progress: bool,
        special_tokens: Optional[List[Union[str, AddedToken]]],
    ):
        """Build a BpeTrainer seeded with the full byte-level alphabet (avoids mutable default args)."""
        return trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            show_progress=show_progress,
            special_tokens=special_tokens if special_tokens is not None else [],
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        )

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
    ):
        """Train the model using the given files."""
        trainer = self._build_trainer(vocab_size, min_frequency, show_progress, special_tokens)
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator.

        `length`, when given, is the total number of sequences in the iterator
        (used for progress reporting only).
        """
        trainer = self._build_trainer(vocab_size, min_frequency, show_progress, special_tokens)
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
2
+
3
+ from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
4
+ from ..models import BPE
5
+ from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
6
+ from .base_tokenizer import BaseTokenizer
7
+
8
+
9
class CharBPETokenizer(BaseTokenizer):
    """Original BPE Tokenizer

    Represents the BPE algorithm, as introduced by Rico Sennrich
    (https://arxiv.org/abs/1508.07909)

    The defaults settings corresponds to OpenAI GPT BPE tokenizers and differs from the original
    Sennrich subword-nmt implementation by the following options that you can deactivate:
    - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
    * removing any control characters and replacing all whitespaces by the classic one.
    * handle chinese chars by putting spaces around them.
    * strip all accents.
    - spitting on punctuation in addition to whitespaces (deactivate it with
    `split_on_whitespace_only=True`)
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
        unk_token: Union[str, AddedToken] = "<unk>",
        suffix: str = "</w>",
        dropout: Optional[float] = None,
        lowercase: bool = False,
        unicode_normalizer: Optional[str] = None,
        bert_normalizer: bool = True,
        split_on_whitespace_only: bool = False,
    ):
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(
                BPE(
                    vocab,
                    merges,
                    dropout=dropout,
                    unk_token=str(unk_token),
                    end_of_word_suffix=suffix,
                )
            )
        else:
            tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))

        # Register the unk token as special only if it exists in the vocab.
        if tokenizer.token_to_id(str(unk_token)) is not None:
            tokenizer.add_special_tokens([str(unk_token)])

        # Check for Unicode normalization first (before everything else)
        normalizers = []

        if unicode_normalizer:
            normalizers += [unicode_normalizer_from_str(unicode_normalizer)]

        if bert_normalizer:
            normalizers += [BertNormalizer(lowercase=False)]

        if lowercase:
            normalizers += [Lowercase()]

        # Create the normalizer structure
        if len(normalizers) > 0:
            if len(normalizers) > 1:
                tokenizer.normalizer = Sequence(normalizers)
            else:
                tokenizer.normalizer = normalizers[0]

        if split_on_whitespace_only:
            tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
        else:
            tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()

        tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)

        parameters = {
            "model": "BPE",
            "unk_token": unk_token,
            "suffix": suffix,
            "dropout": dropout,
            "lowercase": lowercase,
            "unicode_normalizer": unicode_normalizer,
            "bert_normalizer": bert_normalizer,
            "split_on_whitespace_only": split_on_whitespace_only,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        """Instantiate from vocab/merges files on disk."""
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return CharBPETokenizer(vocab, merges, **kwargs)

    @staticmethod
    def _build_trainer(
        vocab_size: int,
        min_frequency: int,
        special_tokens: Optional[List[Union[str, AddedToken]]],
        limit_alphabet: int,
        initial_alphabet: Optional[List[str]],
        suffix: Optional[str],
        show_progress: bool,
    ):
        """Build a BpeTrainer, resolving None defaults (avoids mutable default args)."""
        return trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens if special_tokens is not None else ["<unk>"],
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet if initial_alphabet is not None else [],
            end_of_word_suffix=suffix,
            show_progress=show_progress,
        )

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        suffix: Optional[str] = "</w>",
        show_progress: bool = True,
    ):
        """Train the model using the given files.

        `special_tokens` defaults to `["<unk>"]` and `initial_alphabet` to `[]`
        when left as None.
        """
        trainer = self._build_trainer(
            vocab_size, min_frequency, special_tokens, limit_alphabet, initial_alphabet, suffix, show_progress
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        suffix: Optional[str] = "</w>",
        show_progress: bool = True,
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator.

        `length`, when given, is the total number of sequences in the iterator
        (used for progress reporting only).
        """
        trainer = self._build_trainer(
            vocab_size, min_frequency, special_tokens, limit_alphabet, initial_alphabet, suffix, show_progress
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_bpe.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
2
+
3
+ from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
4
+ from tokenizers.models import BPE
5
+ from tokenizers.normalizers import NFKC
6
+
7
+ from .base_tokenizer import BaseTokenizer
8
+
9
+
10
class SentencePieceBPETokenizer(BaseTokenizer):
    """SentencePiece BPE Tokenizer

    Represents the BPE algorithm, with the pretokenization used by SentencePiece
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
        unk_token: Union[str, AddedToken] = "<unk>",
        replacement: str = "▁",
        add_prefix_space: bool = True,
        dropout: Optional[float] = None,
        fuse_unk: Optional[bool] = False,
    ):
        # `unk_token` may be an AddedToken; the BPE model expects a plain string,
        # so normalize it with str(...) (consistent with the sibling tokenizers).
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=str(unk_token), fuse_unk=fuse_unk))
        else:
            tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=str(unk_token), fuse_unk=fuse_unk))

        if tokenizer.token_to_id(str(unk_token)) is not None:
            tokenizer.add_special_tokens([str(unk_token)])

        tokenizer.normalizer = NFKC()
        prepend_scheme = "always" if add_prefix_space else "never"
        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)

        parameters = {
            "model": "SentencePieceBPE",
            "unk_token": unk_token,
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
            "dropout": dropout,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        """Instantiate from vocab/merges files on disk."""
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return SentencePieceBPETokenizer(vocab, merges, **kwargs)

    @staticmethod
    def _build_trainer(
        vocab_size: int,
        min_frequency: int,
        special_tokens: Optional[List[Union[str, AddedToken]]],
        limit_alphabet: int,
        initial_alphabet: Optional[List[str]],
        show_progress: bool,
    ):
        """Build a BpeTrainer, resolving None defaults (avoids mutable default args)."""
        return trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens if special_tokens is not None else ["<unk>"],
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet if initial_alphabet is not None else [],
            show_progress=show_progress,
        )

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        show_progress: bool = True,
    ):
        """Train the model using the given files.

        `special_tokens` defaults to `["<unk>"]` and `initial_alphabet` to `[]`
        when left as None.
        """
        trainer = self._build_trainer(
            vocab_size, min_frequency, special_tokens, limit_alphabet, initial_alphabet, show_progress
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        show_progress: bool = True,
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator.

        `length`, when given, is the total number of sequences in the iterator
        (used for progress reporting only).
        """
        trainer = self._build_trainer(
            vocab_size, min_frequency, special_tokens, limit_alphabet, initial_alphabet, show_progress
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/sentencepiece_unigram.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from typing import Iterator, List, Optional, Union, Tuple
4
+
5
+ from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
6
+ from tokenizers.models import Unigram
7
+
8
+ from .base_tokenizer import BaseTokenizer
9
+
10
+
11
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece Unigram Tokenizer

    Represents the Unigram algorithm, with the pretokenization used by SentencePiece
    """

    def __init__(
        self,
        vocab: Optional[List[Tuple[str, float]]] = None,
        replacement: str = "▁",
        add_prefix_space: bool = True,
    ):
        if vocab is not None:
            # Let Unigram(..) fail if only one of them is None
            tokenizer = Tokenizer(Unigram(vocab))
        else:
            tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
        )
        prepend_scheme = "always" if add_prefix_space else "never"
        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def _build_trainer(
        vocab_size: int,
        special_tokens: Optional[List[Union[str, AddedToken]]],
        show_progress: bool,
        initial_alphabet: Optional[List[str]],
        unk_token: Optional[str],
    ):
        """Build a UnigramTrainer, resolving None defaults to empty lists."""
        return trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens if special_tokens is not None else [],
            show_progress=show_progress,
            initial_alphabet=initial_alphabet if initial_alphabet is not None else [],
            unk_token=unk_token,
        )

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        initial_alphabet: Optional[List[str]] = None,
        unk_token: Optional[str] = None,
    ):
        """
        Train the model using the given files

        Args:
            files (:obj:`List[str]`):
                A list of path to the files that we should use for training
            vocab_size (:obj:`int`):
                The size of the final vocabulary, including all tokens and alphabet.
            show_progress (:obj:`bool`):
                Whether to show progress bars while training.
            special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
                A list of special tokens the model should know of.
            initial_alphabet (:obj:`List[str]`, `optional`):
                A list of characters to include in the initial alphabet, even
                if not seen in the training dataset.
                If the strings contain more than one character, only the first one
                is kept.
            unk_token (:obj:`str`, `optional`):
                The unknown token to be used by the model.
        """
        trainer = self._build_trainer(vocab_size, special_tokens, show_progress, initial_alphabet, unk_token)

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        initial_alphabet: Optional[List[str]] = None,
        unk_token: Optional[str] = None,
        length: Optional[int] = None,
    ):
        """
        Train the model using the given iterator

        Args:
            iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
                Any iterator over strings or list of strings
            vocab_size (:obj:`int`):
                The size of the final vocabulary, including all tokens and alphabet.
            show_progress (:obj:`bool`):
                Whether to show progress bars while training.
            special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
                A list of special tokens the model should know of.
            initial_alphabet (:obj:`List[str]`, `optional`):
                A list of characters to include in the initial alphabet, even
                if not seen in the training dataset.
                If the strings contain more than one character, only the first one
                is kept.
            unk_token (:obj:`str`, `optional`):
                The unknown token to be used by the model.
            length (:obj:`int`, `optional`):
                The total number of sequences in the iterator. This is used to
                provide meaningful progress tracking
        """
        trainer = self._build_trainer(vocab_size, special_tokens, show_progress, initial_alphabet, unk_token)

        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )

    @staticmethod
    def from_spm(filename: str):
        """Instantiate from a trained SentencePiece ``.model`` file (Unigram models only)."""
        try:
            import sys

            sys.path.append(".")

            import sentencepiece_model_pb2 as model
        except Exception:
            raise Exception(
                "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
            )

        m = model.ModelProto()
        # Close the file deterministically instead of leaking the handle.
        with open(filename, "rb") as spm_file:
            m.ParseFromString(spm_file.read())

        precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
        vocab = [(piece.piece, piece.score) for piece in m.pieces]
        unk_id = m.trainer_spec.unk_id
        model_type = m.trainer_spec.model_type
        byte_fallback = m.trainer_spec.byte_fallback
        # model_type 1 == Unigram in the SentencePiece proto.
        if model_type != 1:
            raise Exception(
                "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
            )

        replacement = "▁"
        add_prefix_space = True

        tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))

        if precompiled_charsmap:
            tokenizer.normalizer = normalizers.Sequence(
                [
                    normalizers.Precompiled(precompiled_charsmap),
                    normalizers.Replace(Regex(" {2,}"), " "),
                ]
            )
        else:
            tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
        prepend_scheme = "always" if add_prefix_space else "never"
        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)

        parameters = {
            "model": "SentencePieceUnigram",
        }

        # Bypass __init__ (which would build a fresh tokenizer) and wire up the
        # instance with the tokenizer reconstructed from the spm file.
        obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
        BaseTokenizer.__init__(obj, tokenizer, parameters)
        return obj