diff --git a/llmeval-env/lib/python3.10/site-packages/_multiprocess/__init__.py b/llmeval-env/lib/python3.10/site-packages/_multiprocess/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..085958182e2246471e6d6f655ff00b6e37e3e99a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/_multiprocess/__init__.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2022-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
+
+from _multiprocessing import *
diff --git a/llmeval-env/lib/python3.10/site-packages/_multiprocess/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/_multiprocess/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bfde875e17f00c4d5852a819f6bc3f2af6650fa
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/_multiprocess/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b67203995fc65fd74b7a63e09e726cb04d2e07c3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.py b/llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.py
new file mode 100644
index 0000000000000000000000000000000000000000..6233a63502600e0d7165ee51b7e3164792089a16
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/_elementpath.py
@@ -0,0 +1,366 @@
+# cython: language_level=2
+
+#
+# ElementTree
+# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $
+#
+# limited xpath support for element trees
+#
+# history:
+# 2003-05-23 fl created
+# 2003-05-28 fl added support for // etc
+# 2003-08-27 fl fixed parsing of periods in element names
+# 2007-09-10 fl new selection engine
+# 2007-09-12 fl fixed parent selector
+# 2007-09-13 fl added iterfind; changed findall to return a list
+# 2007-11-30 fl added namespaces support
+# 2009-10-30 fl added child element value filter
+#
+# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved.
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2009 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+##
+# Implementation module for XPath support. There's usually no reason
+# to import this module directly; the ElementTree module does
+# this for you, if needed.
+##
+
+
+import re
+
+xpath_tokenizer_re = re.compile(
+ "("
+ "'[^']*'|\"[^\"]*\"|"
+ "::|"
+ "//?|"
+ r"\.\.|"
+ r"\(\)|"
+ r"[/.*:\[\]\(\)@=])|"
+ r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
+ r"\s+"
+ )
+
+def xpath_tokenizer(pattern, namespaces=None, with_prefixes=True):
+ # ElementTree uses '', lxml used None originally.
+ default_namespace = (namespaces.get(None) or namespaces.get('')) if namespaces else None
+ parsing_attribute = False
+ for token in xpath_tokenizer_re.findall(pattern):
+ ttype, tag = token
+ if tag and tag[0] != "{":
+ if ":" in tag and with_prefixes:
+ prefix, uri = tag.split(":", 1)
+ try:
+ if not namespaces:
+ raise KeyError
+ yield ttype, "{%s}%s" % (namespaces[prefix], uri)
+ except KeyError:
+ raise SyntaxError("prefix %r not found in prefix map" % prefix)
+ elif default_namespace and not parsing_attribute:
+ yield ttype, "{%s}%s" % (default_namespace, tag)
+ else:
+ yield token
+ parsing_attribute = False
+ else:
+ yield token
+ parsing_attribute = ttype == '@'
+
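+# A small illustration of the tokenizer output (added for clarity; not part
+# of the original module). Given a prefix mapping, prefixed names are
+# expanded into universal '{uri}name' form:
+#
+#     >>> list(xpath_tokenizer('ns:a//b', {'ns': 'http://example.com/ns'}))
+#     [('', '{http://example.com/ns}a'), ('//', ''), ('', 'b')]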
+
+def prepare_child(next, token):
+ tag = token[1]
+ def select(result):
+ for elem in result:
+ yield from elem.iterchildren(tag)
+ return select
+
+def prepare_star(next, token):
+ def select(result):
+ for elem in result:
+ yield from elem.iterchildren('*')
+ return select
+
+def prepare_self(next, token):
+ def select(result):
+ return result
+ return select
+
+def prepare_descendant(next, token):
+ token = next()
+ if token[0] == "*":
+ tag = "*"
+ elif not token[0]:
+ tag = token[1]
+ else:
+ raise SyntaxError("invalid descendant")
+ def select(result):
+ for elem in result:
+ yield from elem.iterdescendants(tag)
+ return select
+
+def prepare_parent(next, token):
+ def select(result):
+ for elem in result:
+ parent = elem.getparent()
+ if parent is not None:
+ yield parent
+ return select
+
+def prepare_predicate(next, token):
+ # FIXME: replace with real parser!!! refs:
+ # http://effbot.org/zone/simple-iterator-parser.htm
+ # http://javascript.crockford.com/tdop/tdop.html
+ signature = ''
+ predicate = []
+ while 1:
+ token = next()
+ if token[0] == "]":
+ break
+ if token == ('', ''):
+ # ignore whitespace
+ continue
+ if token[0] and token[0][:1] in "'\"":
+ token = "'", token[0][1:-1]
+ signature += token[0] or "-"
+ predicate.append(token[1])
+
+ # use signature to determine predicate type
+ if signature == "@-":
+ # [@attribute] predicate
+ key = predicate[1]
+ def select(result):
+ for elem in result:
+ if elem.get(key) is not None:
+ yield elem
+ return select
+ if signature == "@-='":
+ # [@attribute='value']
+ key = predicate[1]
+ value = predicate[-1]
+ def select(result):
+ for elem in result:
+ if elem.get(key) == value:
+ yield elem
+ return select
+ if signature == "-" and not re.match(r"-?\d+$", predicate[0]):
+ # [tag]
+ tag = predicate[0]
+ def select(result):
+ for elem in result:
+ for _ in elem.iterchildren(tag):
+ yield elem
+ break
+ return select
+ if signature == ".='" or (signature == "-='" and not re.match(r"-?\d+$", predicate[0])):
+ # [.='value'] or [tag='value']
+ tag = predicate[0]
+ value = predicate[-1]
+ if tag:
+ def select(result):
+ for elem in result:
+ for e in elem.iterchildren(tag):
+ if "".join(e.itertext()) == value:
+ yield elem
+ break
+ else:
+ def select(result):
+ for elem in result:
+ if "".join(elem.itertext()) == value:
+ yield elem
+ return select
+ if signature == "-" or signature == "-()" or signature == "-()-":
+ # [index] or [last()] or [last()-index]
+ if signature == "-":
+ # [index]
+ index = int(predicate[0]) - 1
+ if index < 0:
+ if index == -1:
+ raise SyntaxError(
+ "indices in path predicates are 1-based, not 0-based")
+ else:
+ raise SyntaxError("path index >= 1 expected")
+ else:
+ if predicate[0] != "last":
+ raise SyntaxError("unsupported function")
+ if signature == "-()-":
+ try:
+ index = int(predicate[2]) - 1
+ except ValueError:
+ raise SyntaxError("unsupported expression")
+ else:
+ index = -1
+ def select(result):
+ for elem in result:
+ parent = elem.getparent()
+ if parent is None:
+ continue
+ try:
+ # FIXME: what if the selector is "*" ?
+ elems = list(parent.iterchildren(elem.tag))
+ if elems[index] is elem:
+ yield elem
+ except IndexError:
+ pass
+ return select
+ raise SyntaxError("invalid predicate")
+
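+# Worked example of the signature mechanism above (added for clarity; not
+# part of the original module): for the predicate [@class='x'] the tokens
+# inside the brackets are ('@', ''), ('', 'class'), ('=', '') and ("'", 'x'),
+# giving signature == "@-='" and predicate == ['', 'class', '', 'x'], which
+# selects the [@attribute='value'] branch with key 'class' and value 'x'.
+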
+ops = {
+ "": prepare_child,
+ "*": prepare_star,
+ ".": prepare_self,
+ "..": prepare_parent,
+ "//": prepare_descendant,
+ "[": prepare_predicate,
+}
+
+
+# --------------------------------------------------------------------
+
+_cache = {}
+
+
+def _build_path_iterator(path, namespaces, with_prefixes=True):
+ """compile selector pattern"""
+ if path[-1:] == "/":
+ path += "*" # implicit all (FIXME: keep this?)
+
+ cache_key = (path,)
+ if namespaces:
+ # lxml originally used None for the default namespace but ElementTree uses the
+ # more convenient (all-strings-dict) empty string, so we support both here,
+        # preferring '' as long as the two aren't ambiguous.
+ if None in namespaces:
+ if '' in namespaces and namespaces[None] != namespaces['']:
+ raise ValueError("Ambiguous default namespace provided: %r versus %r" % (
+ namespaces[None], namespaces['']))
+ cache_key += (namespaces[None],) + tuple(sorted(
+ item for item in namespaces.items() if item[0] is not None))
+ else:
+ cache_key += tuple(sorted(namespaces.items()))
+
+ try:
+ return _cache[cache_key]
+ except KeyError:
+ pass
+ if len(_cache) > 100:
+ _cache.clear()
+
+ if path[:1] == "/":
+ raise SyntaxError("cannot use absolute path on element")
+ stream = iter(xpath_tokenizer(path, namespaces, with_prefixes=with_prefixes))
+ try:
+ _next = stream.next
+ except AttributeError:
+ # Python 3
+ _next = stream.__next__
+ try:
+ token = _next()
+ except StopIteration:
+ raise SyntaxError("empty path expression")
+ selector = []
+ while 1:
+ try:
+ selector.append(ops[token[0]](_next, token))
+ except StopIteration:
+ raise SyntaxError("invalid path")
+ try:
+ token = _next()
+ if token[0] == "/":
+ token = _next()
+ except StopIteration:
+ break
+ _cache[cache_key] = selector
+ return selector
+
+
+##
+# Iterate over the matching nodes
+
+def iterfind(elem, path, namespaces=None, with_prefixes=True):
+ selector = _build_path_iterator(path, namespaces, with_prefixes=with_prefixes)
+ result = iter((elem,))
+ for select in selector:
+ result = select(result)
+ return result
+
+
+##
+# Find first matching object.
+
+def find(elem, path, namespaces=None, with_prefixes=True):
+ it = iterfind(elem, path, namespaces, with_prefixes=with_prefixes)
+ try:
+ return next(it)
+ except StopIteration:
+ return None
+
+
+##
+# Find all matching objects.
+
+def findall(elem, path, namespaces=None, with_prefixes=True):
+    return list(iterfind(elem, path, namespaces, with_prefixes=with_prefixes))
+
+
+##
+# Find text for first matching object.
+
+def findtext(elem, path, default=None, namespaces=None, with_prefixes=True):
+ el = find(elem, path, namespaces, with_prefixes=with_prefixes)
+ if el is None:
+ return default
+ else:
+ return el.text or ''
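+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (added for illustration; not part of the original
+    # module). The selectors above rely on lxml's element API
+    # (iterchildren/iterdescendants/getparent), so plain xml.etree elements
+    # will not work here.
+    from lxml import etree
+
+    root = etree.XML('<a><b>one</b><b>two</b></a>')
+    print(findall(root, 'b'))        # two <b> elements
+    print(findtext(root, 'b'))       # 'one'
+    print(find(root, './/missing'))  # None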
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/apihelpers.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/apihelpers.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..fb60af7d23766c9b5bd8a10f14a02ad099a8d249
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/apihelpers.pxi
@@ -0,0 +1,1828 @@
+# Private/public helper functions for API functions
+
+from lxml.includes cimport uri
+
+
+cdef void displayNode(xmlNode* c_node, indent) noexcept:
+ # to help with debugging
+ cdef xmlNode* c_child
+ try:
+        print(indent * ' ', <long>c_node)
+ c_child = c_node.children
+ while c_child is not NULL:
+ displayNode(c_child, indent + 1)
+ c_child = c_child.next
+ finally:
+ return # swallow any exceptions
+
+cdef inline bint _isHtmlDocument(_Element element) except -1:
+ cdef xmlNode* c_node = element._c_node
+ return (
+ c_node is not NULL and c_node.doc is not NULL and
+ c_node.doc.properties & tree.XML_DOC_HTML != 0
+ )
+
+cdef inline int _assertValidNode(_Element element) except -1:
+ assert element._c_node is not NULL, "invalid Element proxy at %s" % id(element)
+
+cdef inline int _assertValidDoc(_Document doc) except -1:
+ assert doc._c_doc is not NULL, "invalid Document proxy at %s" % id(doc)
+
+cdef _Document _documentOrRaise(object input):
+ """Call this to get the document of a _Document, _ElementTree or _Element
+ object, or to raise an exception if it can't be determined.
+
+ Should be used in all API functions for consistency.
+ """
+ cdef _Document doc
+ if isinstance(input, _ElementTree):
+ if (<_ElementTree>input)._context_node is not None:
+ doc = (<_ElementTree>input)._context_node._doc
+ else:
+ doc = None
+ elif isinstance(input, _Element):
+ doc = (<_Element>input)._doc
+ elif isinstance(input, _Document):
+ doc = <_Document>input
+ else:
+ raise TypeError, f"Invalid input object: {python._fqtypename(input).decode('utf8')}"
+ if doc is None:
+ raise ValueError, f"Input object has no document: {python._fqtypename(input).decode('utf8')}"
+ _assertValidDoc(doc)
+ return doc
+
+cdef _Element _rootNodeOrRaise(object input):
+ """Call this to get the root node of a _Document, _ElementTree or
+ _Element object, or to raise an exception if it can't be determined.
+
+ Should be used in all API functions for consistency.
+ """
+ cdef _Element node
+ if isinstance(input, _ElementTree):
+ node = (<_ElementTree>input)._context_node
+ elif isinstance(input, _Element):
+ node = <_Element>input
+ elif isinstance(input, _Document):
+ node = (<_Document>input).getroot()
+ else:
+ raise TypeError, f"Invalid input object: {python._fqtypename(input).decode('utf8')}"
+ if (node is None or not node._c_node or
+ node._c_node.type != tree.XML_ELEMENT_NODE):
+ raise ValueError, f"Input object is not an XML element: {python._fqtypename(input).decode('utf8')}"
+ _assertValidNode(node)
+ return node
+
+cdef bint _isAncestorOrSame(xmlNode* c_ancestor, xmlNode* c_node) noexcept:
+ while c_node:
+ if c_node is c_ancestor:
+ return True
+ c_node = c_node.parent
+ return False
+
+cdef _Element _makeElement(tag, xmlDoc* c_doc, _Document doc,
+ _BaseParser parser, text, tail, attrib, nsmap,
+ dict extra_attrs):
+ """Create a new element and initialize text content, namespaces and
+ attributes.
+
+ This helper function will reuse as much of the existing document as
+ possible:
+
+ If 'parser' is None, the parser will be inherited from 'doc' or the
+ default parser will be used.
+
+ If 'doc' is None, 'c_doc' is used to create a new _Document and the new
+ element is made its root node.
+
+ If 'c_doc' is also NULL, a new xmlDoc will be created.
+ """
+ cdef xmlNode* c_node
+ if doc is not None:
+ c_doc = doc._c_doc
+ ns_utf, name_utf = _getNsTag(tag)
+ if parser is not None and parser._for_html:
+ _htmlTagValidOrRaise(name_utf)
+ if c_doc is NULL:
+ c_doc = _newHTMLDoc()
+ else:
+ _tagValidOrRaise(name_utf)
+ if c_doc is NULL:
+ c_doc = _newXMLDoc()
+ c_node = _createElement(c_doc, name_utf)
+ if c_node is NULL:
+ if doc is None and c_doc is not NULL:
+ tree.xmlFreeDoc(c_doc)
+ raise MemoryError()
+ try:
+ if doc is None:
+ tree.xmlDocSetRootElement(c_doc, c_node)
+ doc = _documentFactory(c_doc, parser)
+ if text is not None:
+ _setNodeText(c_node, text)
+ if tail is not None:
+ _setTailText(c_node, tail)
+ # add namespaces to node if necessary
+ _setNodeNamespaces(c_node, doc, ns_utf, nsmap)
+ _initNodeAttributes(c_node, doc, attrib, extra_attrs)
+ return _elementFactory(doc, c_node)
+ except:
+ # free allocated c_node/c_doc unless Python does it for us
+ if c_node.doc is not c_doc:
+ # node not yet in document => will not be freed by document
+ if tail is not None:
+ _removeText(c_node.next) # tail
+ tree.xmlFreeNode(c_node)
+ if doc is None:
+ # c_doc will not be freed by doc
+ tree.xmlFreeDoc(c_doc)
+ raise
+
+cdef int _initNewElement(_Element element, bint is_html, name_utf, ns_utf,
+ _BaseParser parser, attrib, nsmap, dict extra_attrs) except -1:
+ """Initialise a new Element object.
+
+ This is used when users instantiate a Python Element subclass
+ directly, without it being mapped to an existing XML node.
+ """
+ cdef xmlDoc* c_doc
+ cdef xmlNode* c_node
+ cdef _Document doc
+ if is_html:
+ _htmlTagValidOrRaise(name_utf)
+ c_doc = _newHTMLDoc()
+ else:
+ _tagValidOrRaise(name_utf)
+ c_doc = _newXMLDoc()
+ c_node = _createElement(c_doc, name_utf)
+ if c_node is NULL:
+ if c_doc is not NULL:
+ tree.xmlFreeDoc(c_doc)
+ raise MemoryError()
+ tree.xmlDocSetRootElement(c_doc, c_node)
+ doc = _documentFactory(c_doc, parser)
+ # add namespaces to node if necessary
+ _setNodeNamespaces(c_node, doc, ns_utf, nsmap)
+ _initNodeAttributes(c_node, doc, attrib, extra_attrs)
+ _registerProxy(element, doc, c_node)
+ element._init()
+ return 0
+
+cdef _Element _makeSubElement(_Element parent, tag, text, tail,
+ attrib, nsmap, dict extra_attrs):
+ """Create a new child element and initialize text content, namespaces and
+ attributes.
+ """
+ cdef xmlNode* c_node
+ cdef xmlDoc* c_doc
+ if parent is None or parent._doc is None:
+ return None
+ _assertValidNode(parent)
+ ns_utf, name_utf = _getNsTag(tag)
+ c_doc = parent._doc._c_doc
+
+ if parent._doc._parser is not None and parent._doc._parser._for_html:
+ _htmlTagValidOrRaise(name_utf)
+ else:
+ _tagValidOrRaise(name_utf)
+
+ c_node = _createElement(c_doc, name_utf)
+ if c_node is NULL:
+ raise MemoryError()
+ tree.xmlAddChild(parent._c_node, c_node)
+
+ try:
+ if text is not None:
+ _setNodeText(c_node, text)
+ if tail is not None:
+ _setTailText(c_node, tail)
+
+ # add namespaces to node if necessary
+ _setNodeNamespaces(c_node, parent._doc, ns_utf, nsmap)
+ _initNodeAttributes(c_node, parent._doc, attrib, extra_attrs)
+ return _elementFactory(parent._doc, c_node)
+ except:
+ # make sure we clean up in case of an error
+ _removeNode(parent._doc, c_node)
+ raise
+
+
+cdef int _setNodeNamespaces(xmlNode* c_node, _Document doc,
+ object node_ns_utf, object nsmap) except -1:
+ """Lookup current namespace prefixes, then set namespace structure for
+ node (if 'node_ns_utf' was provided) and register new ns-prefix mappings.
+
+ 'node_ns_utf' should only be passed for a newly created node.
+ """
+ cdef xmlNs* c_ns
+ cdef list nsdefs
+
+ if nsmap:
+ for prefix, href in _iter_nsmap(nsmap):
+ href_utf = _utf8(href)
+ _uriValidOrRaise(href_utf)
+ c_href = _xcstr(href_utf)
+ if prefix is not None:
+ prefix_utf = _utf8(prefix)
+ _prefixValidOrRaise(prefix_utf)
+ c_prefix = _xcstr(prefix_utf)
+ else:
+ c_prefix = NULL
+ # add namespace with prefix if it is not already known
+ c_ns = tree.xmlSearchNs(doc._c_doc, c_node, c_prefix)
+ if c_ns is NULL or \
+ c_ns.href is NULL or \
+ tree.xmlStrcmp(c_ns.href, c_href) != 0:
+ c_ns = tree.xmlNewNs(c_node, c_href, c_prefix)
+ if href_utf == node_ns_utf:
+ tree.xmlSetNs(c_node, c_ns)
+ node_ns_utf = None
+
+ if node_ns_utf is not None:
+ _uriValidOrRaise(node_ns_utf)
+ doc._setNodeNs(c_node, _xcstr(node_ns_utf))
+ return 0
+
+
+cdef dict _build_nsmap(xmlNode* c_node):
+ """
+ Namespace prefix->URI mapping known in the context of this Element.
+ This includes all namespace declarations of the parents.
+ """
+ cdef xmlNs* c_ns
+ nsmap = {}
+ while c_node is not NULL and c_node.type == tree.XML_ELEMENT_NODE:
+ c_ns = c_node.nsDef
+ while c_ns is not NULL:
+ if c_ns.prefix or c_ns.href:
+ prefix = funicodeOrNone(c_ns.prefix)
+ if prefix not in nsmap:
+ nsmap[prefix] = funicodeOrNone(c_ns.href)
+ c_ns = c_ns.next
+ c_node = c_node.parent
+ return nsmap
+
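+# Illustrative note (added; not in the original source): for a tree parsed
+# from '<root xmlns="urn:d" xmlns:p="urn:p"><child/></root>', calling
+# _build_nsmap() on the c_node of <child> walks up through <root> and
+# returns {None: 'urn:d', 'p': 'urn:p'}; declarations closest to the node
+# win over redefinitions further up.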
+
+cdef _iter_nsmap(nsmap):
+ """
+ Create a reproducibly ordered iterable from an nsmap mapping.
+ Tries to preserve an existing order and sorts if it assumes no order.
+
+    The difference from _iter_attrib() is that None doesn't sort with strings
+ in Py3.x.
+ """
+ if isinstance(nsmap, dict):
+ # dicts are insertion-ordered in Py3.6+ => keep the user provided order.
+ return nsmap.items()
+ if len(nsmap) <= 1:
+ return nsmap.items()
+ # nsmap will usually be a plain unordered dict => avoid type checking overhead
+ if type(nsmap) is not dict and isinstance(nsmap, OrderedDict):
+ return nsmap.items() # keep existing order
+ if None not in nsmap:
+ return sorted(nsmap.items())
+
+ # Move the default namespace to the end. This makes sure libxml2
+ # prefers a prefix if the ns is defined redundantly on the same
+ # element. That way, users can work around a problem themselves
+ # where default namespace attributes on non-default namespaced
+ # elements serialise without prefix (i.e. into the non-default
+ # namespace).
+ default_ns = nsmap[None]
+ nsdefs = [(k, v) for k, v in nsmap.items() if k is not None]
+ nsdefs.sort()
+ nsdefs.append((None, default_ns))
+ return nsdefs
+
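+# Example of the ordering above (added; not in the original source): a plain
+# dict keeps the user's insertion order. A non-dict mapping equivalent to
+# {'b': 'urn:b', None: 'urn:d', 'a': 'urn:a'} is returned as
+# [('a', 'urn:a'), ('b', 'urn:b'), (None, 'urn:d')]: prefixes sorted, with
+# the default namespace moved to the end.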
+
+cdef _iter_attrib(attrib):
+ """
+ Create a reproducibly ordered iterable from an attrib mapping.
+ Tries to preserve an existing order and sorts if it assumes no order.
+ """
+ # dicts are insertion-ordered in Py3.6+ => keep the user provided order.
+ if isinstance(attrib, (dict, _Attrib, OrderedDict)):
+ return attrib.items()
+ # assume it's an unordered mapping of some kind
+ return sorted(attrib.items())
+
+
+cdef _initNodeAttributes(xmlNode* c_node, _Document doc, attrib, dict extra):
+ """Initialise the attributes of an element node.
+ """
+ cdef bint is_html
+ cdef xmlNs* c_ns
+ if attrib is not None and not hasattr(attrib, 'items'):
+ raise TypeError, f"Invalid attribute dictionary: {python._fqtypename(attrib).decode('utf8')}"
+ if not attrib and not extra:
+ return # nothing to do
+ is_html = doc._parser._for_html
+ seen = set()
+ if extra:
+ for name, value in extra.items():
+ _addAttributeToNode(c_node, doc, is_html, name, value, seen)
+ if attrib:
+ for name, value in _iter_attrib(attrib):
+ _addAttributeToNode(c_node, doc, is_html, name, value, seen)
+
+
+cdef int _addAttributeToNode(xmlNode* c_node, _Document doc, bint is_html,
+ name, value, set seen_tags) except -1:
+ ns_utf, name_utf = tag = _getNsTag(name)
+ if tag in seen_tags:
+ return 0
+ seen_tags.add(tag)
+ if not is_html:
+ _attributeValidOrRaise(name_utf)
+ value_utf = _utf8(value)
+ if ns_utf is None:
+ tree.xmlNewProp(c_node, _xcstr(name_utf), _xcstr(value_utf))
+ else:
+ _uriValidOrRaise(ns_utf)
+ c_ns = doc._findOrBuildNodeNs(c_node, _xcstr(ns_utf), NULL, 1)
+ tree.xmlNewNsProp(c_node, c_ns,
+ _xcstr(name_utf), _xcstr(value_utf))
+ return 0
+
+
+ctypedef struct _ns_node_ref:
+ xmlNs* ns
+ xmlNode* node
+
+
+cdef int _collectNsDefs(xmlNode* c_element, _ns_node_ref **_c_ns_list,
+ size_t *_c_ns_list_len, size_t *_c_ns_list_size) except -1:
+ c_ns_list = _c_ns_list[0]
+ cdef size_t c_ns_list_len = _c_ns_list_len[0]
+ cdef size_t c_ns_list_size = _c_ns_list_size[0]
+
+ c_nsdef = c_element.nsDef
+ while c_nsdef is not NULL:
+ if c_ns_list_len >= c_ns_list_size:
+ if c_ns_list is NULL:
+ c_ns_list_size = 20
+ else:
+ c_ns_list_size *= 2
+ c_nsref_ptr = <_ns_node_ref*> python.lxml_realloc(
+ c_ns_list, c_ns_list_size, sizeof(_ns_node_ref))
+ if c_nsref_ptr is NULL:
+ if c_ns_list is not NULL:
+ python.lxml_free(c_ns_list)
+ _c_ns_list[0] = NULL
+ raise MemoryError()
+ c_ns_list = c_nsref_ptr
+
+ c_ns_list[c_ns_list_len] = _ns_node_ref(c_nsdef, c_element)
+ c_ns_list_len += 1
+ c_nsdef = c_nsdef.next
+
+ _c_ns_list_size[0] = c_ns_list_size
+ _c_ns_list_len[0] = c_ns_list_len
+ _c_ns_list[0] = c_ns_list
+
+
+cdef int _removeUnusedNamespaceDeclarations(xmlNode* c_element, set prefixes_to_keep) except -1:
+ """Remove any namespace declarations from a subtree that are not used by
+ any of its elements (or attributes).
+
+    If 'prefixes_to_keep' is provided, it must be a set of prefixes.
+ Any corresponding namespace mappings will not be removed as part of the cleanup.
+ """
+ cdef xmlNode* c_node
+ cdef _ns_node_ref* c_ns_list = NULL
+ cdef size_t c_ns_list_size = 0
+ cdef size_t c_ns_list_len = 0
+ cdef size_t i
+
+ if c_element.parent and c_element.parent.type == tree.XML_DOCUMENT_NODE:
+ # include declarations on the document node
+ _collectNsDefs(c_element.parent, &c_ns_list, &c_ns_list_len, &c_ns_list_size)
+
+ tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_element, c_element, 1)
+ # collect all new namespace declarations into the ns list
+ if c_element.nsDef:
+ _collectNsDefs(c_element, &c_ns_list, &c_ns_list_len, &c_ns_list_size)
+
+ # remove all namespace declarations from the list that are referenced
+ if c_ns_list_len and c_element.type == tree.XML_ELEMENT_NODE:
+ c_node = c_element
+ while c_node and c_ns_list_len:
+ if c_node.ns:
+ for i in range(c_ns_list_len):
+ if c_node.ns is c_ns_list[i].ns:
+ c_ns_list_len -= 1
+ c_ns_list[i] = c_ns_list[c_ns_list_len]
+ #c_ns_list[c_ns_list_len] = _ns_node_ref(NULL, NULL)
+ break
+ if c_node is c_element:
+ # continue with attributes
+ c_node = c_element.properties
+ else:
+ c_node = c_node.next
+ tree.END_FOR_EACH_ELEMENT_FROM(c_element)
+
+ if c_ns_list is NULL:
+ return 0
+
+ # free all namespace declarations that remained in the list,
+ # except for those we should keep explicitly
+ cdef xmlNs* c_nsdef
+ for i in range(c_ns_list_len):
+ if prefixes_to_keep is not None:
+ if c_ns_list[i].ns.prefix and c_ns_list[i].ns.prefix in prefixes_to_keep:
+ continue
+ c_node = c_ns_list[i].node
+ c_nsdef = c_node.nsDef
+ if c_nsdef is c_ns_list[i].ns:
+ c_node.nsDef = c_node.nsDef.next
+ else:
+ while c_nsdef.next is not c_ns_list[i].ns:
+ c_nsdef = c_nsdef.next
+ c_nsdef.next = c_nsdef.next.next
+ tree.xmlFreeNs(c_ns_list[i].ns)
+
+ if c_ns_list is not NULL:
+ python.lxml_free(c_ns_list)
+ return 0
+
+cdef xmlNs* _searchNsByHref(xmlNode* c_node, const_xmlChar* c_href, bint is_attribute) noexcept:
+ """Search a namespace declaration that covers a node (element or
+ attribute).
+
+ For attributes, try to find a prefixed namespace declaration
+ instead of the default namespaces. This helps in supporting
+ round-trips for attributes on elements with a different namespace.
+ """
+ cdef xmlNs* c_ns
+ cdef xmlNs* c_default_ns = NULL
+ cdef xmlNode* c_element
+ if c_href is NULL or c_node is NULL or c_node.type == tree.XML_ENTITY_REF_NODE:
+ return NULL
+ if tree.xmlStrcmp(c_href, tree.XML_XML_NAMESPACE) == 0:
+ # no special cases here, let libxml2 handle this
+ return tree.xmlSearchNsByHref(c_node.doc, c_node, c_href)
+ if c_node.type == tree.XML_ATTRIBUTE_NODE:
+ is_attribute = 1
+ while c_node is not NULL and c_node.type != tree.XML_ELEMENT_NODE:
+ c_node = c_node.parent
+ c_element = c_node
+ while c_node is not NULL:
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ c_ns = c_node.nsDef
+ while c_ns is not NULL:
+ if c_ns.href is not NULL and tree.xmlStrcmp(c_href, c_ns.href) == 0:
+ if c_ns.prefix is NULL and is_attribute:
+ # for attributes, continue searching a named
+ # prefix, but keep the first default namespace
+ # declaration that we found
+ if c_default_ns is NULL:
+ c_default_ns = c_ns
+ elif tree.xmlSearchNs(
+ c_element.doc, c_element, c_ns.prefix) is c_ns:
+ # start node is in namespace scope => found!
+ return c_ns
+ c_ns = c_ns.next
+ if c_node is not c_element and c_node.ns is not NULL:
+ # optimise: the node may have the namespace itself
+ c_ns = c_node.ns
+ if c_ns.href is not NULL and tree.xmlStrcmp(c_href, c_ns.href) == 0:
+ if c_ns.prefix is NULL and is_attribute:
+ # for attributes, continue searching a named
+ # prefix, but keep the first default namespace
+ # declaration that we found
+ if c_default_ns is NULL:
+ c_default_ns = c_ns
+ elif tree.xmlSearchNs(
+ c_element.doc, c_element, c_ns.prefix) is c_ns:
+ # start node is in namespace scope => found!
+ return c_ns
+ c_node = c_node.parent
+ # nothing found => use a matching default namespace or fail
+ if c_default_ns is not NULL:
+ if tree.xmlSearchNs(c_element.doc, c_element, NULL) is c_default_ns:
+ return c_default_ns
+ return NULL
+
+cdef int _replaceNodeByChildren(_Document doc, xmlNode* c_node) except -1:
+    # NOTE: this does not deallocate the node, it just unlinks it!
+ cdef xmlNode* c_parent
+ cdef xmlNode* c_child
+ if c_node.children is NULL:
+ tree.xmlUnlinkNode(c_node)
+ return 0
+
+ c_parent = c_node.parent
+ # fix parent links of children
+ c_child = c_node.children
+ while c_child is not NULL:
+ c_child.parent = c_parent
+ c_child = c_child.next
+
+ # fix namespace references of children if their parent's namespace
+ # declarations get lost
+ if c_node.nsDef is not NULL:
+ c_child = c_node.children
+ while c_child is not NULL:
+ moveNodeToDocument(doc, doc._c_doc, c_child)
+ c_child = c_child.next
+
+ # fix sibling links to/from child slice
+ if c_node.prev is NULL:
+ c_parent.children = c_node.children
+ else:
+ c_node.prev.next = c_node.children
+ c_node.children.prev = c_node.prev
+ if c_node.next is NULL:
+ c_parent.last = c_node.last
+ else:
+ c_node.next.prev = c_node.last
+ c_node.last.next = c_node.next
+
+ # unlink c_node
+ c_node.children = c_node.last = NULL
+ c_node.parent = c_node.next = c_node.prev = NULL
+ return 0
+
+cdef unicode _attributeValue(xmlNode* c_element, xmlAttr* c_attrib_node):
+ c_href = _getNs(c_attrib_node)
+ value = tree.xmlGetNsProp(c_element, c_attrib_node.name, c_href)
+ try:
+ result = funicode(value)
+ finally:
+ tree.xmlFree(value)
+ return result
+
+cdef unicode _attributeValueFromNsName(xmlNode* c_element,
+ const_xmlChar* c_href, const_xmlChar* c_name):
+ c_result = tree.xmlGetNsProp(c_element, c_name, c_href)
+ if c_result is NULL:
+ return None
+ try:
+ result = funicode(c_result)
+ finally:
+ tree.xmlFree(c_result)
+ return result
+
+cdef object _getNodeAttributeValue(xmlNode* c_node, key, default):
+ ns, tag = _getNsTag(key)
+ c_href = NULL if ns is None else _xcstr(ns)
+ c_result = tree.xmlGetNsProp(c_node, _xcstr(tag), c_href)
+ if c_result is NULL:
+ # XXX free namespace that is not in use..?
+ return default
+ try:
+ result = funicode(c_result)
+ finally:
+ tree.xmlFree(c_result)
+ return result
+
+cdef inline object _getAttributeValue(_Element element, key, default):
+ return _getNodeAttributeValue(element._c_node, key, default)
+
+cdef int _setAttributeValue(_Element element, key, value) except -1:
+ cdef const_xmlChar* c_value
+ cdef xmlNs* c_ns
+ ns, tag = _getNsTag(key)
+ is_html = element._doc._parser._for_html
+ if not is_html:
+ _attributeValidOrRaise(tag)
+ c_tag = _xcstr(tag)
+ if value is None and is_html:
+ c_value = NULL
+ else:
+ if isinstance(value, QName):
+ value = _resolveQNameText(element, value)
+ else:
+ value = _utf8(value)
+ c_value = _xcstr(value)
+ if ns is None:
+ c_ns = NULL
+ else:
+ c_ns = element._doc._findOrBuildNodeNs(element._c_node, _xcstr(ns), NULL, 1)
+ tree.xmlSetNsProp(element._c_node, c_ns, c_tag, c_value)
+ return 0
+
+cdef int _delAttribute(_Element element, key) except -1:
+ ns, tag = _getNsTag(key)
+ c_href = NULL if ns is None else _xcstr(ns)
+ if _delAttributeFromNsName(element._c_node, c_href, _xcstr(tag)):
+ raise KeyError, key
+ return 0
+
+cdef int _delAttributeFromNsName(xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name) noexcept:
+ c_attr = tree.xmlHasNsProp(c_node, c_name, c_href)
+ if c_attr is NULL:
+ # XXX free namespace that is not in use..?
+ return -1
+ tree.xmlRemoveProp(c_attr)
+ return 0
+
+cdef list _collectAttributes(xmlNode* c_node, int collecttype):
+ """Collect all attributes of a node in a list. Depending on collecttype,
+    it collects either the name (1), the value (2), or the name-value tuples (any other value).
+ """
+ cdef Py_ssize_t count
+ c_attr = c_node.properties
+ count = 0
+ while c_attr is not NULL:
+ if c_attr.type == tree.XML_ATTRIBUTE_NODE:
+ count += 1
+ c_attr = c_attr.next
+
+ if not count:
+ return []
+
+ attributes = [None] * count
+ c_attr = c_node.properties
+ count = 0
+ while c_attr is not NULL:
+ if c_attr.type == tree.XML_ATTRIBUTE_NODE:
+ if collecttype == 1:
+ item = _namespacedName(c_attr)
+ elif collecttype == 2:
+ item = _attributeValue(c_node, c_attr)
+ else:
+ item = (_namespacedName(c_attr),
+ _attributeValue(c_node, c_attr))
+ attributes[count] = item
+ count += 1
+ c_attr = c_attr.next
+ return attributes
+
+cdef object __RE_XML_ENCODING = re.compile(
+ r'^(<\?xml[^>]+)\s+encoding\s*=\s*["\'][^"\']*["\'](\s*\?>|)', re.U)
+
+cdef object __REPLACE_XML_ENCODING = __RE_XML_ENCODING.sub
+cdef object __HAS_XML_ENCODING = __RE_XML_ENCODING.match
+
+cdef object _stripEncodingDeclaration(object xml_string):
+ # this is a hack to remove the XML encoding declaration from unicode
+ return __REPLACE_XML_ENCODING(r'\g<1>\g<2>', xml_string)
+
+cdef bint _hasEncodingDeclaration(object xml_string) except -1:
+ # check if a (unicode) string has an XML encoding declaration
+ return __HAS_XML_ENCODING(xml_string) is not None
+
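+# Example for the regex above (added; not in the original source):
+#   _stripEncodingDeclaration('<?xml version="1.0" encoding="UTF-8"?><r/>')
+# returns '<?xml version="1.0"?><r/>'; _hasEncodingDeclaration() is True for
+# the input and False for the stripped result.
+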
+cdef inline bint _hasText(xmlNode* c_node) noexcept:
+ return c_node is not NULL and _textNodeOrSkip(c_node.children) is not NULL
+
+cdef inline bint _hasTail(xmlNode* c_node) noexcept:
+ return c_node is not NULL and _textNodeOrSkip(c_node.next) is not NULL
+
+cdef inline bint _hasNonWhitespaceTail(xmlNode* c_node) except -1:
+ return _hasNonWhitespaceText(c_node, tail=True)
+
+cdef bint _hasNonWhitespaceText(xmlNode* c_node, bint tail=False) except -1:
+ c_text_node = c_node and _textNodeOrSkip(c_node.next if tail else c_node.children)
+ if c_text_node is NULL:
+ return False
+ while c_text_node is not NULL:
+ if c_text_node.content[0] != c'\0' and not _collectText(c_text_node).isspace():
+ return True
+ c_text_node = _textNodeOrSkip(c_text_node.next)
+ return False
+
+cdef unicode _collectText(xmlNode* c_node):
+ """Collect all text nodes and return them as a unicode string.
+
+ Start collecting at c_node.
+
+ If there was no text to collect, return None
+ """
+ cdef Py_ssize_t scount
+ cdef xmlChar* c_text
+ cdef xmlNode* c_node_cur
+ # check for multiple text nodes
+ scount = 0
+ c_text = NULL
+ c_node_cur = c_node = _textNodeOrSkip(c_node)
+ while c_node_cur is not NULL:
+ if c_node_cur.content[0] != c'\0':
+ c_text = c_node_cur.content
+ scount += 1
+ c_node_cur = _textNodeOrSkip(c_node_cur.next)
+
+ # handle two most common cases first
+ if c_text is NULL:
+ return '' if scount > 0 else None
+ if scount == 1:
+ return funicode(c_text)
+
+ # the rest is not performance critical anymore
+ result = b''
+ while c_node is not NULL:
+ result += c_node.content
+ c_node = _textNodeOrSkip(c_node.next)
+ return funicode(result)
+
+cdef void _removeText(xmlNode* c_node) noexcept:
+ """Remove all text nodes.
+
+ Start removing at c_node.
+ """
+ cdef xmlNode* c_next
+ c_node = _textNodeOrSkip(c_node)
+ while c_node is not NULL:
+ c_next = _textNodeOrSkip(c_node.next)
+ tree.xmlUnlinkNode(c_node)
+ tree.xmlFreeNode(c_node)
+ c_node = c_next
+
+cdef xmlNode* _createTextNode(xmlDoc* doc, value) except NULL:
+ cdef xmlNode* c_text_node
+ if isinstance(value, CDATA):
+ c_text_node = tree.xmlNewCDataBlock(
+            doc, _xcstr((<CDATA>value)._utf8_data),
+            python.PyBytes_GET_SIZE((<CDATA>value)._utf8_data))
+ else:
+ text = _utf8(value)
+ c_text_node = tree.xmlNewDocText(doc, _xcstr(text))
+ if not c_text_node:
+ raise MemoryError()
+ return c_text_node
+
+cdef int _setNodeText(xmlNode* c_node, value) except -1:
+ # remove all text nodes at the start first
+ _removeText(c_node.children)
+ if value is None:
+ return 0
+ # now add new text node with value at start
+ c_text_node = _createTextNode(c_node.doc, value)
+ if c_node.children is NULL:
+ tree.xmlAddChild(c_node, c_text_node)
+ else:
+ tree.xmlAddPrevSibling(c_node.children, c_text_node)
+ return 0
+
+cdef int _setTailText(xmlNode* c_node, value) except -1:
+ # remove all text nodes at the start first
+ _removeText(c_node.next)
+ if value is None:
+ return 0
+ # now append new text node with value
+ c_text_node = _createTextNode(c_node.doc, value)
+ tree.xmlAddNextSibling(c_node, c_text_node)
+ return 0
+
+cdef bytes _resolveQNameText(_Element element, value):
+ cdef xmlNs* c_ns
+ ns, tag = _getNsTag(value)
+ if ns is None:
+ return tag
+ else:
+ c_ns = element._doc._findOrBuildNodeNs(
+ element._c_node, _xcstr(ns), NULL, 0)
+ return python.PyBytes_FromFormat('%s:%s', c_ns.prefix, _cstr(tag))
+
+cdef inline bint _hasChild(xmlNode* c_node) noexcept:
+ return c_node is not NULL and _findChildForwards(c_node, 0) is not NULL
+
+cdef inline Py_ssize_t _countElements(xmlNode* c_node) noexcept:
+ "Counts the elements within the following siblings and the node itself."
+ cdef Py_ssize_t count
+ count = 0
+ while c_node is not NULL:
+ if _isElement(c_node):
+ count += 1
+ c_node = c_node.next
+ return count
+
+cdef int _findChildSlice(
+ slice sliceobject, xmlNode* c_parent,
+ xmlNode** c_start_node, Py_ssize_t* c_step, Py_ssize_t* c_length) except -1:
+ """Resolve a children slice.
+
+ Returns the start node, step size and the slice length in the
+ pointer arguments.
+ """
+ cdef Py_ssize_t start = 0, stop = 0, childcount
+ childcount = _countElements(c_parent.children)
+ if childcount == 0:
+ c_start_node[0] = NULL
+ c_length[0] = 0
+ if sliceobject.step is None:
+ c_step[0] = 1
+ else:
+ python._PyEval_SliceIndex(sliceobject.step, c_step)
+ return 0
+ python.PySlice_GetIndicesEx(
+ sliceobject, childcount, &start, &stop, c_step, c_length)
+ if start > childcount // 2:
+ c_start_node[0] = _findChildBackwards(c_parent, childcount - start - 1)
+ else:
+ c_start_node[0] = _findChild(c_parent, start)
+ return 0
+
+cdef bint _isFullSlice(slice sliceobject) except -1:
+ """Conservative guess if this slice is a full slice as in ``s[:]``.
+ """
+ cdef Py_ssize_t step = 0
+ if sliceobject is None:
+ return 0
+ if sliceobject.start is None and \
+ sliceobject.stop is None:
+ if sliceobject.step is None:
+ return 1
+ python._PyEval_SliceIndex(sliceobject.step, &step)
+ if step == 1:
+ return 1
+ return 0
+ return 0
+
+cdef _collectChildren(_Element element):
+ cdef xmlNode* c_node
+ cdef list result = []
+ c_node = element._c_node.children
+ if c_node is not NULL:
+ if not _isElement(c_node):
+ c_node = _nextElement(c_node)
+ while c_node is not NULL:
+ result.append(_elementFactory(element._doc, c_node))
+ c_node = _nextElement(c_node)
+ return result
+
+cdef inline xmlNode* _findChild(xmlNode* c_node, Py_ssize_t index) noexcept:
+ if index < 0:
+ return _findChildBackwards(c_node, -index - 1)
+ else:
+ return _findChildForwards(c_node, index)
+
+cdef inline xmlNode* _findChildForwards(xmlNode* c_node, Py_ssize_t index) noexcept:
+ """Return child element of c_node with index, or return NULL if not found.
+ """
+ cdef xmlNode* c_child
+ cdef Py_ssize_t c
+ c_child = c_node.children
+ c = 0
+ while c_child is not NULL:
+ if _isElement(c_child):
+ if c == index:
+ return c_child
+ c += 1
+ c_child = c_child.next
+ return NULL
+
+cdef inline xmlNode* _findChildBackwards(xmlNode* c_node, Py_ssize_t index) noexcept:
+ """Return child element of c_node with index, or return NULL if not found.
+ Search from the end.
+ """
+ cdef xmlNode* c_child
+ cdef Py_ssize_t c
+ c_child = c_node.last
+ c = 0
+ while c_child is not NULL:
+ if _isElement(c_child):
+ if c == index:
+ return c_child
+ c += 1
+ c_child = c_child.prev
+ return NULL
+
+cdef inline xmlNode* _textNodeOrSkip(xmlNode* c_node) noexcept nogil:
+ """Return the node if it's a text node. Skip over ignorable nodes in a
+ series of text nodes. Return NULL if a non-ignorable node is found.
+
+ This is used to skip over XInclude nodes when collecting adjacent text
+ nodes.
+ """
+ while c_node is not NULL:
+ if c_node.type == tree.XML_TEXT_NODE or \
+ c_node.type == tree.XML_CDATA_SECTION_NODE:
+ return c_node
+ elif c_node.type == tree.XML_XINCLUDE_START or \
+ c_node.type == tree.XML_XINCLUDE_END:
+ c_node = c_node.next
+ else:
+ return NULL
+ return NULL
+
+cdef inline xmlNode* _nextElement(xmlNode* c_node) noexcept:
+ """Given a node, find the next sibling that is an element.
+ """
+ if c_node is NULL:
+ return NULL
+ c_node = c_node.next
+ while c_node is not NULL:
+ if _isElement(c_node):
+ return c_node
+ c_node = c_node.next
+ return NULL
+
+cdef inline xmlNode* _previousElement(xmlNode* c_node) noexcept:
+ """Given a node, find the next sibling that is an element.
+ """
+ if c_node is NULL:
+ return NULL
+ c_node = c_node.prev
+ while c_node is not NULL:
+ if _isElement(c_node):
+ return c_node
+ c_node = c_node.prev
+ return NULL
+
+cdef inline xmlNode* _parentElement(xmlNode* c_node) noexcept:
+ "Given a node, find the parent element."
+ if c_node is NULL or not _isElement(c_node):
+ return NULL
+ c_node = c_node.parent
+ if c_node is NULL or not _isElement(c_node):
+ return NULL
+ return c_node
+
+cdef inline bint _tagMatches(xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name) noexcept:
+ """Tests if the node matches namespace URI and tag name.
+
+ A node matches if it matches both c_href and c_name.
+
+ A node matches c_href if any of the following is true:
+ * c_href is NULL
+ * its namespace is NULL and c_href is the empty string
+ * its namespace string equals the c_href string
+
+ A node matches c_name if any of the following is true:
+ * c_name is NULL
+ * its name string equals the c_name string
+ """
+ if c_node is NULL:
+ return 0
+ if c_node.type != tree.XML_ELEMENT_NODE:
+ # not an element, only succeed if we match everything
+ return c_name is NULL and c_href is NULL
+ if c_name is NULL:
+ if c_href is NULL:
+ # always match
+ return 1
+ else:
+ c_node_href = _getNs(c_node)
+ if c_node_href is NULL:
+ return c_href[0] == c'\0'
+ else:
+ return tree.xmlStrcmp(c_node_href, c_href) == 0
+ elif c_href is NULL:
+ if _getNs(c_node) is not NULL:
+ return 0
+ return c_node.name == c_name or tree.xmlStrcmp(c_node.name, c_name) == 0
+ elif c_node.name == c_name or tree.xmlStrcmp(c_node.name, c_name) == 0:
+ c_node_href = _getNs(c_node)
+ if c_node_href is NULL:
+ return c_href[0] == c'\0'
+ else:
+ return tree.xmlStrcmp(c_node_href, c_href) == 0
+ else:
+ return 0
+
+cdef inline bint _tagMatchesExactly(xmlNode* c_node, qname* c_qname) noexcept:
+ """Tests if the node matches namespace URI and tag name.
+
+ This differs from _tagMatches() in that it does not consider a
+ NULL value in qname.href a wildcard, and that it expects the c_name
+ to be taken from the doc dict, i.e. it only compares the names by
+ address.
+
+ A node matches if it matches both href and c_name of the qname.
+
+ A node matches c_href if any of the following is true:
+ * its namespace is NULL and c_href is the empty string
+ * its namespace string equals the c_href string
+
+ A node matches c_name if any of the following is true:
+ * c_name is NULL
+ * its name string points to the same address (!) as c_name
+ """
+ return _nsTagMatchesExactly(_getNs(c_node), c_node.name, c_qname)
+
+cdef inline bint _nsTagMatchesExactly(const_xmlChar* c_node_href,
+ const_xmlChar* c_node_name,
+ qname* c_qname) noexcept:
+ """Tests if name and namespace URI match those of c_qname.
+
+ This differs from _tagMatches() in that it does not consider a
+ NULL value in qname.href a wildcard, and that it expects the c_name
+ to be taken from the doc dict, i.e. it only compares the names by
+ address.
+
+ A node matches if it matches both href and c_name of the qname.
+
+ A node matches c_href if any of the following is true:
+ * its namespace is NULL and c_href is the empty string
+ * its namespace string equals the c_href string
+
+ A node matches c_name if any of the following is true:
+ * c_name is NULL
+ * its name string points to the same address (!) as c_name
+ """
+ cdef char* c_href
+ if c_qname.c_name is not NULL and c_qname.c_name is not c_node_name:
+ return 0
+ if c_qname.href is NULL:
+ return 1
+ c_href = python.__cstr(c_qname.href)
+ if c_href[0] == b'\0':
+ return c_node_href is NULL or c_node_href[0] == b'\0'
+ elif c_node_href is NULL:
+ return 0
+ else:
+        return tree.xmlStrcmp(<const_xmlChar*>c_href, c_node_href) == 0
+
+cdef Py_ssize_t _mapTagsToQnameMatchArray(xmlDoc* c_doc, list ns_tags,
+ qname* c_ns_tags, bint force_into_dict) except -1:
+ """Map a sequence of (name, namespace) pairs to a qname array for efficient
+ matching with _tagMatchesExactly() above.
+
+ Note that each qname struct in the array owns its href byte string object
+ if it is not NULL.
+ """
+ cdef Py_ssize_t count = 0, i
+ cdef bytes ns, tag
+ for ns, tag in ns_tags:
+ if tag is None:
+ c_tag = NULL
+ elif force_into_dict:
+ c_tag = tree.xmlDictLookup(c_doc.dict, _xcstr(tag), len(tag))
+ if c_tag is NULL:
+ # clean up before raising the error
+ for i in xrange(count):
+ cpython.ref.Py_XDECREF(c_ns_tags[i].href)
+ raise MemoryError()
+ else:
+ c_tag = tree.xmlDictExists(c_doc.dict, _xcstr(tag), len(tag))
+ if c_tag is NULL:
+ # not in the dict => not in the document
+ continue
+ c_ns_tags[count].c_name = c_tag
+ if ns is None:
+ c_ns_tags[count].href = NULL
+ else:
+ cpython.ref.Py_INCREF(ns) # keep an owned reference!
+            c_ns_tags[count].href = <python.PyObject*>ns
+ count += 1
+ return count
+
+cdef int _removeNode(_Document doc, xmlNode* c_node) except -1:
+ """Unlink and free a node and subnodes if possible. Otherwise, make sure
+ it's self-contained.
+ """
+ cdef xmlNode* c_next
+ c_next = c_node.next
+ tree.xmlUnlinkNode(c_node)
+ _moveTail(c_next, c_node)
+ if not attemptDeallocation(c_node):
+ # make namespaces absolute
+ moveNodeToDocument(doc, c_node.doc, c_node)
+ return 0
+
+cdef int _removeSiblings(xmlNode* c_element, tree.xmlElementType node_type, bint with_tail) except -1:
+ cdef xmlNode* c_node
+ cdef xmlNode* c_next
+ c_node = c_element.next
+ while c_node is not NULL:
+ c_next = _nextElement(c_node)
+ if c_node.type == node_type:
+ if with_tail:
+ _removeText(c_node.next)
+ tree.xmlUnlinkNode(c_node)
+ attemptDeallocation(c_node)
+ c_node = c_next
+ c_node = c_element.prev
+ while c_node is not NULL:
+ c_next = _previousElement(c_node)
+ if c_node.type == node_type:
+ if with_tail:
+ _removeText(c_node.next)
+ tree.xmlUnlinkNode(c_node)
+ attemptDeallocation(c_node)
+ c_node = c_next
+ return 0
+
+cdef void _moveTail(xmlNode* c_tail, xmlNode* c_target) noexcept:
+ cdef xmlNode* c_next
+ # tail support: look for any text nodes trailing this node and
+ # move them too
+ c_tail = _textNodeOrSkip(c_tail)
+ while c_tail is not NULL:
+ c_next = _textNodeOrSkip(c_tail.next)
+ c_target = tree.xmlAddNextSibling(c_target, c_tail)
+ c_tail = c_next
+
+cdef int _copyTail(xmlNode* c_tail, xmlNode* c_target) except -1:
+ cdef xmlNode* c_new_tail
+ # tail copying support: look for any text nodes trailing this node and
+ # copy it to the target node
+ c_tail = _textNodeOrSkip(c_tail)
+ while c_tail is not NULL:
+ if c_target.doc is not c_tail.doc:
+ c_new_tail = tree.xmlDocCopyNode(c_tail, c_target.doc, 0)
+ else:
+ c_new_tail = tree.xmlCopyNode(c_tail, 0)
+ if c_new_tail is NULL:
+ raise MemoryError()
+ c_target = tree.xmlAddNextSibling(c_target, c_new_tail)
+ c_tail = _textNodeOrSkip(c_tail.next)
+ return 0
+
+cdef int _copyNonElementSiblings(xmlNode* c_node, xmlNode* c_target) except -1:
+ cdef xmlNode* c_copy
+ cdef xmlNode* c_sibling = c_node
+ while c_sibling.prev != NULL and \
+ (c_sibling.prev.type == tree.XML_PI_NODE or
+ c_sibling.prev.type == tree.XML_COMMENT_NODE or
+ c_sibling.prev.type == tree.XML_DTD_NODE):
+ c_sibling = c_sibling.prev
+ while c_sibling != c_node:
+ if c_sibling.type == tree.XML_DTD_NODE:
+ c_copy = _copyDtd(c_sibling)
+ if c_sibling == c_node.doc.intSubset:
+ c_target.doc.intSubset = c_copy
+ else: # c_sibling == c_node.doc.extSubset
+ c_target.doc.extSubset = c_copy
+ else:
+ c_copy = tree.xmlDocCopyNode(c_sibling, c_target.doc, 1)
+ if c_copy is NULL:
+ raise MemoryError()
+ tree.xmlAddPrevSibling(c_target, c_copy)
+ c_sibling = c_sibling.next
+ while c_sibling.next != NULL and \
+ (c_sibling.next.type == tree.XML_PI_NODE or
+ c_sibling.next.type == tree.XML_COMMENT_NODE):
+ c_sibling = c_sibling.next
+ c_copy = tree.xmlDocCopyNode(c_sibling, c_target.doc, 1)
+ if c_copy is NULL:
+ raise MemoryError()
+ tree.xmlAddNextSibling(c_target, c_copy)
+
+cdef int _deleteSlice(_Document doc, xmlNode* c_node,
+ Py_ssize_t count, Py_ssize_t step) except -1:
+ """Delete slice, ``count`` items starting with ``c_node`` with a step
+ width of ``step``.
+ """
+ cdef xmlNode* c_next
+ cdef Py_ssize_t c, i
+ cdef _node_to_node_function next_element
+ if c_node is NULL:
+ return 0
+ if step > 0:
+ next_element = _nextElement
+ else:
+ step = -step
+ next_element = _previousElement
+ # now start deleting nodes
+ c = 0
+ c_next = c_node
+ while c_node is not NULL and c < count:
+ for i in range(step):
+ c_next = next_element(c_next)
+ if c_next is NULL:
+ break
+ _removeNode(doc, c_node)
+ c += 1
+ c_node = c_next
+ return 0
+
+cdef int _replaceSlice(_Element parent, xmlNode* c_node,
+ Py_ssize_t slicelength, Py_ssize_t step,
+ bint left_to_right, elements) except -1:
+ """Replace the slice of ``count`` elements starting at ``c_node`` with
+ positive step width ``step`` by the Elements in ``elements``. The
+ direction is given by the boolean argument ``left_to_right``.
+
+ ``c_node`` may be NULL to indicate the end of the children list.
+ """
+ cdef xmlNode* c_orig_neighbour
+ cdef xmlNode* c_next
+ cdef xmlDoc* c_source_doc
+ cdef _Element element
+ cdef Py_ssize_t seqlength, i, c
+ cdef _node_to_node_function next_element
+ assert step > 0
+ if left_to_right:
+ next_element = _nextElement
+ else:
+ next_element = _previousElement
+
+ if not isinstance(elements, (list, tuple)):
+ elements = list(elements)
+
+ if step != 1 or not left_to_right:
+ # *replacing* children stepwise with list => check size!
+ seqlength = len(elements)
+ if seqlength != slicelength:
+ raise ValueError, f"attempt to assign sequence of size {seqlength} " \
+ f"to extended slice of size {slicelength}"
+
+ if c_node is NULL:
+ # no children yet => add all elements straight away
+ if left_to_right:
+ for element in elements:
+ assert element is not None, "Node must not be None"
+ _appendChild(parent, element)
+ else:
+ for element in elements:
+ assert element is not None, "Node must not be None"
+ _prependChild(parent, element)
+ return 0
+
+ # remove the elements first as some might be re-added
+ if left_to_right:
+ # L->R, remember left neighbour
+ c_orig_neighbour = _previousElement(c_node)
+ else:
+ # R->L, remember right neighbour
+ c_orig_neighbour = _nextElement(c_node)
+
+ # We remove the original slice elements one by one. Since we hold
+ # a Python reference to all elements that we will insert, it is
+ # safe to let _removeNode() try (and fail) to free them even if
+ # the element itself or one of its descendents will be reinserted.
+ c = 0
+ c_next = c_node
+ while c_node is not NULL and c < slicelength:
+ for i in range(step):
+ c_next = next_element(c_next)
+ if c_next is NULL:
+ break
+ _removeNode(parent._doc, c_node)
+ c += 1
+ c_node = c_next
+
+ # make sure each element is inserted only once
+ elements = iter(elements)
+
+ # find the first node right of the new insertion point
+ if left_to_right:
+ if c_orig_neighbour is not NULL:
+ c_node = next_element(c_orig_neighbour)
+ else:
+ # before the first element
+ c_node = _findChildForwards(parent._c_node, 0)
+ elif c_orig_neighbour is NULL:
+ # at the end, but reversed stepping
+ # append one element and go to the next insertion point
+ for element in elements:
+ assert element is not None, "Node must not be None"
+ _appendChild(parent, element)
+ c_node = element._c_node
+ if slicelength > 0:
+ slicelength -= 1
+ for i in range(1, step):
+ c_node = next_element(c_node)
+ if c_node is NULL:
+ break
+ break
+ else:
+ c_node = c_orig_neighbour
+
+ if left_to_right:
+ # adjust step size after removing slice as we are not stepping
+ # over the newly inserted elements
+ step -= 1
+
+ # now insert elements where we removed them
+ if c_node is not NULL:
+ for element in elements:
+ assert element is not None, "Node must not be None"
+ _assertValidNode(element)
+ # move element and tail over
+ c_source_doc = element._c_node.doc
+ c_next = element._c_node.next
+ tree.xmlAddPrevSibling(c_node, element._c_node)
+ _moveTail(c_next, element._c_node)
+
+ # integrate element into new document
+ moveNodeToDocument(parent._doc, c_source_doc, element._c_node)
+
+ # stop at the end of the slice
+ if slicelength > 0:
+ slicelength -= 1
+ for i in range(step):
+ c_node = next_element(c_node)
+ if c_node is NULL:
+ break
+ if c_node is NULL:
+ break
+ else:
+ # everything inserted
+ return 0
+
+ # append the remaining elements at the respective end
+ if left_to_right:
+ for element in elements:
+ assert element is not None, "Node must not be None"
+ _assertValidNode(element)
+ _appendChild(parent, element)
+ else:
+ for element in elements:
+ assert element is not None, "Node must not be None"
+ _assertValidNode(element)
+ _prependChild(parent, element)
+
+ return 0
+
+
+cdef int _linkChild(xmlNode* c_parent, xmlNode* c_node) except -1:
+ """Adaptation of 'xmlAddChild()' that deep-fix the document links iteratively.
+ """
+ assert _isElement(c_node)
+ c_node.parent = c_parent
+ if c_parent.children is NULL:
+ c_parent.children = c_parent.last = c_node
+ else:
+ c_node.prev = c_parent.last
+ c_parent.last.next = c_node
+ c_parent.last = c_node
+
+ _setTreeDoc(c_node, c_parent.doc)
+ return 0
+
+
+cdef int _appendChild(_Element parent, _Element child) except -1:
+ """Append a new child to a parent element.
+ """
+ c_node = child._c_node
+ c_source_doc = c_node.doc
+ # prevent cycles
+ if _isAncestorOrSame(c_node, parent._c_node):
+ raise ValueError("cannot append parent to itself")
+ # store possible text node
+ c_next = c_node.next
+ # move node itself
+ tree.xmlUnlinkNode(c_node)
+ # do not call xmlAddChild() here since it would deep-traverse the tree
+ _linkChild(parent._c_node, c_node)
+ _moveTail(c_next, c_node)
+ # uh oh, elements may be pointing to different doc when
+ # parent element has moved; change them too..
+ moveNodeToDocument(parent._doc, c_source_doc, c_node)
+ return 0
+
+cdef int _prependChild(_Element parent, _Element child) except -1:
+ """Prepend a new child to a parent element.
+ """
+ c_node = child._c_node
+ c_source_doc = c_node.doc
+ # prevent cycles
+ if _isAncestorOrSame(c_node, parent._c_node):
+ raise ValueError("cannot append parent to itself")
+ # store possible text node
+ c_next = c_node.next
+ # move node itself
+ c_child = _findChildForwards(parent._c_node, 0)
+ if c_child is NULL:
+ tree.xmlUnlinkNode(c_node)
+ # do not call xmlAddChild() here since it would deep-traverse the tree
+ _linkChild(parent._c_node, c_node)
+ else:
+ tree.xmlAddPrevSibling(c_child, c_node)
+ _moveTail(c_next, c_node)
+ # uh oh, elements may be pointing to different doc when
+ # parent element has moved; change them too..
+ moveNodeToDocument(parent._doc, c_source_doc, c_node)
+ return 0
+
+cdef int _appendSibling(_Element element, _Element sibling) except -1:
+ """Add a new sibling behind an element.
+ """
+ return _addSibling(element, sibling, as_next=True)
+
+cdef int _prependSibling(_Element element, _Element sibling) except -1:
+ """Add a new sibling before an element.
+ """
+ return _addSibling(element, sibling, as_next=False)
+
+cdef int _addSibling(_Element element, _Element sibling, bint as_next) except -1:
+ c_node = sibling._c_node
+ c_source_doc = c_node.doc
+ # prevent cycles
+ if _isAncestorOrSame(c_node, element._c_node):
+ if element._c_node is c_node:
+ return 0 # nothing to do
+ raise ValueError("cannot add ancestor as sibling, please break cycle first")
+ # store possible text node
+ c_next = c_node.next
+ # move node itself
+ if as_next:
+ # must insert after any tail text
+ c_next_node = _nextElement(element._c_node)
+ if c_next_node is NULL:
+ c_next_node = element._c_node
+ while c_next_node.next:
+ c_next_node = c_next_node.next
+ tree.xmlAddNextSibling(c_next_node, c_node)
+ else:
+ tree.xmlAddPrevSibling(c_next_node, c_node)
+ else:
+ tree.xmlAddPrevSibling(element._c_node, c_node)
+ _moveTail(c_next, c_node)
+ # uh oh, elements may be pointing to different doc when
+ # parent element has moved; change them too..
+ moveNodeToDocument(element._doc, c_source_doc, c_node)
+ return 0
+
+cdef inline bint isutf8(const_xmlChar* s) noexcept:
+ cdef xmlChar c = s[0]
+ while c != c'\0':
+ if c & 0x80:
+ return True
+ s += 1
+ c = s[0]
+ return False
+
+cdef bint isutf8l(const_xmlChar* s, size_t length) noexcept:
+ """
+ Search for non-ASCII characters in the string, knowing its length in advance.
+ """
+ cdef unsigned int i
+ cdef unsigned long non_ascii_mask
+    cdef const unsigned long *lptr = <const unsigned long*> s
+
+ cdef const unsigned long *end = lptr + length // sizeof(unsigned long)
+ if length >= sizeof(non_ascii_mask):
+ # Build constant 0x80808080... mask (and let the C compiler fold it).
+ non_ascii_mask = 0
+ for i in range(sizeof(non_ascii_mask) // 2):
+ non_ascii_mask = (non_ascii_mask << 16) | 0x8080
+
+ # Advance to long-aligned character before we start reading longs.
+        while (<size_t>s) % sizeof(unsigned long) and s < <const_xmlChar*>end:
+ if s[0] & 0x80:
+ return True
+ s += 1
+
+ # Read one long at a time
+        lptr = <const unsigned long*> s
+ while lptr < end:
+ if lptr[0] & non_ascii_mask:
+ return True
+ lptr += 1
+        s = <const_xmlChar*>lptr
+
+    while s < <const_xmlChar*>end + length % sizeof(unsigned long):
+ if s[0] & 0x80:
+ return True
+ s += 1
+
+ return False
+
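+# Editorial sketch: isutf8l() only reports whether any byte has the high bit
+# set, scanning one machine word at a time once the pointer is aligned, e.g.
+#   isutf8l(<const_xmlChar*>b"abc", 3)        -> False  (pure ASCII)
+#   isutf8l(<const_xmlChar*>b"\xc3\xa4bc", 4) -> True   (non-ASCII byte)
+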
+cdef int _is_valid_xml_ascii(bytes pystring) except -1:
+ """Check if a string is XML ascii content."""
+ cdef signed char ch
+ # When ch is a *signed* char, non-ascii characters are negative integers
+ # and xmlIsChar_ch does not accept them.
+ for ch in pystring:
+ if not tree.xmlIsChar_ch(ch):
+ return 0
+ return 1
+
+cdef bint _is_valid_xml_utf8(bytes pystring) except -1:
+ """Check if a string is like valid UTF-8 XML content."""
+ cdef const_xmlChar* s = _xcstr(pystring)
+ cdef const_xmlChar* c_end = s + len(pystring)
+ cdef unsigned long next3 = 0
+ if s < c_end - 2:
+ next3 = (s[0] << 8) | (s[1])
+
+ while s < c_end - 2:
+ next3 = 0x00ffffff & ((next3 << 8) | s[2])
+ if s[0] & 0x80:
+ # 0xefbfbe and 0xefbfbf are utf-8 encodings of
+ # forbidden characters \ufffe and \uffff
+ if next3 == 0x00efbfbe or next3 == 0x00efbfbf:
+ return 0
+ # 0xeda080 and 0xedbfbf are utf-8 encodings of
+ # \ud800 and \udfff. Anything between them (inclusive)
+ # is forbidden, because they are surrogate blocks in utf-16.
+ if 0x00eda080 <= next3 <= 0x00edbfbf:
+ return 0
+ elif not tree.xmlIsChar_ch(s[0]):
+ return 0 # invalid ascii char
+ s += 1
+
+ while s < c_end:
+ if not s[0] & 0x80 and not tree.xmlIsChar_ch(s[0]):
+ return 0 # invalid ascii char
+ s += 1
+
+ return 1
+
+cdef inline unicode funicodeOrNone(const_xmlChar* s):
+ return funicode(s) if s is not NULL else None
+
+cdef inline unicode funicodeOrEmpty(const_xmlChar* s):
+ return funicode(s) if s is not NULL else ''
+
+cdef unicode funicode(const_xmlChar* s):
+ return s.decode('UTF-8')
+
+cdef bytes _utf8(object s):
+ """Test if a string is valid user input and encode it to UTF-8.
+ Reject all bytes/unicode input that contains non-XML characters.
+ Reject all bytes input that contains non-ASCII characters.
+ """
+ cdef int valid
+ cdef bytes utf8_string
+ if isinstance(s, unicode):
+        utf8_string = (<unicode>s).encode('utf8')
+ valid = _is_valid_xml_utf8(utf8_string)
+ elif isinstance(s, (bytes, bytearray)):
+ utf8_string = s if type(s) is bytes else bytes(s)
+ valid = _is_valid_xml_ascii(utf8_string)
+ else:
+ raise TypeError("Argument must be bytes or unicode, got '%.200s'" % type(s).__name__)
+ if not valid:
+ raise ValueError(
+ "All strings must be XML compatible: Unicode or ASCII, no NULL bytes or control characters")
+ return utf8_string
+
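+# Editorial behaviour sketch for _utf8():
+#   _utf8('text')      -> b'text'  (str is encoded to UTF-8)
+#   _utf8(b'text')     -> b'text'  (bytes must be plain ASCII)
+#   _utf8('te\x00xt')  -> raises ValueError (control characters are rejected)
+#   _utf8(42)          -> raises TypeError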
+
+cdef bytes _utf8orNone(object s):
+ return _utf8(s) if s is not None else None
+
+
+cdef enum:
+ NO_FILE_PATH = 0
+ ABS_UNIX_FILE_PATH = 1
+ ABS_WIN_FILE_PATH = 2
+ REL_FILE_PATH = 3
+
+
+cdef bint _isFilePath(const_xmlChar* c_path) noexcept:
+ "simple heuristic to see if a path is a filename"
+ cdef xmlChar c
+ # test if it looks like an absolute Unix path or a Windows network path
+ if c_path[0] == c'/':
+ return ABS_UNIX_FILE_PATH
+
+ # test if it looks like an absolute Windows path or URL
+ if c'a' <= c_path[0] <= c'z' or c'A' <= c_path[0] <= c'Z':
+ c_path += 1
+ if c_path[0] == c':' and c_path[1] in b'\0\\':
+ return ABS_WIN_FILE_PATH # C: or C:\...
+
+        # test if it looks like a URL with scheme://
+        while c'a' <= c_path[0] <= c'z' or c'A' <= c_path[0] <= c'Z':
+            c_path += 1
+        if c_path[0] == c':' and c_path[1] == c'/' and c_path[2] == c'/':
+            return NO_FILE_PATH
+
+ # assume it's a relative path
+ return REL_FILE_PATH
+
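+# Editorial classification sketch for the heuristic above:
+#   b'/tmp/doc.xml'          -> ABS_UNIX_FILE_PATH
+#   b'C:\\doc.xml'           -> ABS_WIN_FILE_PATH
+#   b'http://example.com/x'  -> NO_FILE_PATH  (scheme:// URL)
+#   b'data/doc.xml'          -> REL_FILE_PATH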
+
+cdef object _getFSPathOrObject(object obj):
+ """
+ Get the __fspath__ attribute of an object if it exists.
+ Otherwise, the original object is returned.
+ """
+ if _isString(obj):
+ return obj
+ try:
+ return python.PyOS_FSPath(obj)
+ except TypeError:
+ return obj
+
+
+cdef object _encodeFilename(object filename):
+ """Make sure a filename is 8-bit encoded (or None).
+ """
+ if filename is None:
+ return None
+ elif isinstance(filename, bytes):
+ return filename
+ elif isinstance(filename, unicode):
+        filename8 = (<unicode>filename).encode('utf8')
+ if _isFilePath(filename8):
+ try:
+ return python.PyUnicode_AsEncodedString(
+ filename, _C_FILENAME_ENCODING, NULL)
+ except UnicodeEncodeError:
+ pass
+ return filename8
+ else:
+ raise TypeError("Argument must be string or unicode.")
+
+cdef object _decodeFilename(const_xmlChar* c_path):
+ """Make the filename a unicode string if we are in Py3.
+ """
+ return _decodeFilenameWithLength(c_path, tree.xmlStrlen(c_path))
+
+cdef object _decodeFilenameWithLength(const_xmlChar* c_path, size_t c_len):
+ """Make the filename a unicode string if we are in Py3.
+ """
+ if _isFilePath(c_path):
+ try:
+ return python.PyUnicode_Decode(
+ c_path, c_len, _C_FILENAME_ENCODING, NULL)
+ except UnicodeDecodeError:
+ pass
+ try:
+        return (<unsigned char*>c_path)[:c_len].decode('UTF-8')
+ except UnicodeDecodeError:
+ # this is a stupid fallback, but it might still work...
+        return (<unsigned char*>c_path)[:c_len].decode('latin-1', 'replace')
+
+cdef object _encodeFilenameUTF8(object filename):
+ """Recode filename as UTF-8. Tries ASCII, local filesystem encoding and
+ UTF-8 as source encoding.
+ """
+ cdef char* c_filename
+ if filename is None:
+ return None
+ elif isinstance(filename, bytes):
+ if not isutf8l(filename, len(filename)):
+ # plain ASCII!
+ return filename
+ c_filename = _cstr(filename)
+ try:
+ # try to decode with default encoding
+ filename = python.PyUnicode_Decode(
+ c_filename, len(filename),
+ _C_FILENAME_ENCODING, NULL)
+ except UnicodeDecodeError as decode_exc:
+ try:
+ # try if it's proper UTF-8
+            (<bytes>filename).decode('utf8')
+ return filename
+ except UnicodeDecodeError:
+ raise decode_exc # otherwise re-raise original exception
+ if isinstance(filename, unicode):
+        return (<unicode>filename).encode('utf8')
+ else:
+ raise TypeError("Argument must be string or unicode.")
+
+cdef tuple _getNsTag(tag):
+ """Given a tag, find namespace URI and tag name.
+ Return None for NS uri if no namespace URI provided.
+ """
+ return __getNsTag(tag, 0)
+
+cdef tuple _getNsTagWithEmptyNs(tag):
+ """Given a tag, find namespace URI and tag name. Return None for NS uri
+ if no namespace URI provided, or the empty string if namespace
+ part is '{}'.
+ """
+ return __getNsTag(tag, 1)
+
+cdef tuple __getNsTag(tag, bint empty_ns):
+ cdef char* c_tag
+ cdef char* c_ns_end
+ cdef Py_ssize_t taglen
+ cdef Py_ssize_t nslen
+ cdef bytes ns = None
+ # _isString() is much faster than isinstance()
+ if not _isString(tag) and isinstance(tag, QName):
+        tag = (<QName>tag).text
+ tag = _utf8(tag)
+ c_tag = _cstr(tag)
+ if c_tag[0] == c'{':
+ c_tag += 1
+ c_ns_end = cstring_h.strchr(c_tag, c'}')
+ if c_ns_end is NULL:
+ raise ValueError, "Invalid tag name"
+ nslen = c_ns_end - c_tag
+ taglen = python.PyBytes_GET_SIZE(tag) - nslen - 2
+ if taglen == 0:
+ raise ValueError, "Empty tag name"
+ if nslen > 0:
+            ns = <bytes>c_tag[:nslen]
+ elif empty_ns:
+ ns = b''
+        tag = <bytes>c_ns_end[1:taglen+1]
+ elif python.PyBytes_GET_SIZE(tag) == 0:
+ raise ValueError, "Empty tag name"
+ return ns, tag
+
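+# Editorial sketch of the Clark-notation split implemented above:
+#   __getNsTag(b'{http://example.com/ns}tag', 0) -> (b'http://example.com/ns', b'tag')
+#   __getNsTag(b'tag', 0)                        -> (None, b'tag')
+#   __getNsTag(b'{}tag', 0)                      -> (None, b'tag')
+#   __getNsTag(b'{}tag', 1)                      -> (b'', b'tag')
+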
+cdef inline int _pyXmlNameIsValid(name_utf8):
+ return _xmlNameIsValid(_xcstr(name_utf8)) and b':' not in name_utf8
+
+cdef inline int _pyHtmlNameIsValid(name_utf8):
+ return _htmlNameIsValid(_xcstr(name_utf8))
+
+cdef inline int _xmlNameIsValid(const_xmlChar* c_name) noexcept:
+ return tree.xmlValidateNameValue(c_name)
+
+cdef int _htmlNameIsValid(const_xmlChar* c_name) noexcept:
+ if c_name is NULL or c_name[0] == c'\0':
+ return 0
+ while c_name[0] != c'\0':
+ if c_name[0] in b'&<>/"\'\t\n\x0B\x0C\r ':
+ return 0
+ c_name += 1
+ return 1
+
+cdef bint _characterReferenceIsValid(const_xmlChar* c_name) noexcept:
+ cdef bint is_hex
+ if c_name[0] == c'x':
+ c_name += 1
+ is_hex = 1
+ else:
+ is_hex = 0
+ if c_name[0] == c'\0':
+ return 0
+ while c_name[0] != c'\0':
+ if c_name[0] < c'0' or c_name[0] > c'9':
+ if not is_hex:
+ return 0
+ if not (c'a' <= c_name[0] <= c'f'):
+ if not (c'A' <= c_name[0] <= c'F'):
+ return 0
+ c_name += 1
+ return 1
+
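+# Editorial sketch: the argument is the reference text after the leading '#',
+# so for "&#38;" and "&#x26;":
+#   _characterReferenceIsValid("38")  -> 1  (decimal)
+#   _characterReferenceIsValid("x26") -> 1  (hexadecimal)
+#   _characterReferenceIsValid("3f")  -> 0  (hex digits without the 'x' marker)
+#   _characterReferenceIsValid("x")   -> 0  (no digits at all)
+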
+cdef int _tagValidOrRaise(tag_utf) except -1:
+ if not _pyXmlNameIsValid(tag_utf):
+ raise ValueError(f"Invalid tag name {(tag_utf).decode('utf8')!r}")
+ return 0
+
+cdef int _htmlTagValidOrRaise(tag_utf) except -1:
+ if not _pyHtmlNameIsValid(tag_utf):
+ raise ValueError(f"Invalid HTML tag name {(tag_utf).decode('utf8')!r}")
+ return 0
+
+cdef int _attributeValidOrRaise(name_utf) except -1:
+ if not _pyXmlNameIsValid(name_utf):
+ raise ValueError(f"Invalid attribute name {(name_utf).decode('utf8')!r}")
+ return 0
+
+cdef int _prefixValidOrRaise(tag_utf) except -1:
+ if not _pyXmlNameIsValid(tag_utf):
+ raise ValueError(f"Invalid namespace prefix {(tag_utf).decode('utf8')!r}")
+ return 0
+
+cdef int _uriValidOrRaise(uri_utf) except -1:
+ cdef uri.xmlURI* c_uri = uri.xmlParseURI(_cstr(uri_utf))
+ if c_uri is NULL:
+ raise ValueError(f"Invalid namespace URI {(uri_utf).decode('utf8')!r}")
+ uri.xmlFreeURI(c_uri)
+ return 0
+
+cdef inline unicode _namespacedName(xmlNode* c_node):
+ return _namespacedNameFromNsName(_getNs(c_node), c_node.name)
+
+
+cdef unicode _namespacedNameFromNsName(const_xmlChar* c_href, const_xmlChar* c_name):
+ name = funicode(c_name)
+ if c_href is NULL:
+ return name
+ href = funicode(c_href)
+ return f"{{{href}}}{name}"
+
+
+cdef _getFilenameForFile(source):
+ """Given a Python File or Gzip object, give filename back.
+
+ Returns None if not a file object.
+ """
+ # urllib2 provides a geturl() method
+ try:
+ return source.geturl()
+ except:
+ pass
+ # file instances have a name attribute
+ try:
+ filename = source.name
+ if _isString(filename):
+ return os_path_abspath(filename)
+ except:
+ pass
+ # gzip file instances have a filename attribute (before Py3k)
+ try:
+ filename = source.filename
+ if _isString(filename):
+ return os_path_abspath(filename)
+ except:
+ pass
+ # can't determine filename
+ return None
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/classlookup.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/classlookup.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..92d1d47a58657a7741d20f48cfe3525a66dbc722
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/classlookup.pxi
@@ -0,0 +1,580 @@
+# Configurable Element class lookup
+
+################################################################################
+# Custom Element classes
+
+cdef public class ElementBase(_Element) [ type LxmlElementBaseType,
+ object LxmlElementBase ]:
+ """ElementBase(*children, attrib=None, nsmap=None, **_extra)
+
+ The public Element class. All custom Element classes must inherit
+ from this one. To create an Element, use the `Element()` factory.
+
+ BIG FAT WARNING: Subclasses *must not* override __init__ or
+ __new__ as it is absolutely undefined when these objects will be
+ created or destroyed. All persistent state of Elements must be
+ stored in the underlying XML. If you really need to initialize
+ the object after creation, you can implement an ``_init(self)``
+ method that will be called directly after object creation.
+
+ Subclasses of this class can be instantiated to create a new
+ Element. By default, the tag name will be the class name and the
+ namespace will be empty. You can modify this with the following
+ class attributes:
+
+ * TAG - the tag name, possibly containing a namespace in Clark
+ notation
+
+ * NAMESPACE - the default namespace URI, unless provided as part
+ of the TAG attribute.
+
+ * HTML - flag if the class is an HTML tag, as opposed to an XML
+ tag. This only applies to un-namespaced tags and defaults to
+ false (i.e. XML).
+
+ * PARSER - the parser that provides the configuration for the
+ newly created document. Providing an HTML parser here will
+ default to creating an HTML element.
+
+ In user code, the latter three are commonly inherited in class
+ hierarchies that implement a common namespace.
+ """
+ def __init__(self, *children, attrib=None, nsmap=None, **_extra):
+ """ElementBase(*children, attrib=None, nsmap=None, **_extra)
+ """
+ cdef bint is_html = 0
+ cdef _BaseParser parser
+ cdef _Element last_child
+ # don't use normal attribute access as it might be overridden
+ _getattr = object.__getattribute__
+ try:
+ namespace = _utf8(_getattr(self, 'NAMESPACE'))
+ except AttributeError:
+ namespace = None
+ try:
+ ns, tag = _getNsTag(_getattr(self, 'TAG'))
+ if ns is not None:
+ namespace = ns
+ except AttributeError:
+ tag = _utf8(_getattr(_getattr(self, '__class__'), '__name__'))
+ if b'.' in tag:
+ tag = tag.split(b'.')[-1]
+ try:
+ parser = _getattr(self, 'PARSER')
+ except AttributeError:
+ parser = None
+ for child in children:
+ if isinstance(child, _Element):
+ parser = (<_Element>child)._doc._parser
+ break
+ if isinstance(parser, HTMLParser):
+ is_html = 1
+ if namespace is None:
+ try:
+ is_html = _getattr(self, 'HTML')
+ except AttributeError:
+ pass
+ _initNewElement(self, is_html, tag, namespace, parser,
+ attrib, nsmap, _extra)
+ last_child = None
+ for child in children:
+ if _isString(child):
+ if last_child is None:
+ _setNodeText(self._c_node,
+ (_collectText(self._c_node.children) or '') + child)
+ else:
+ _setTailText(last_child._c_node,
+ (_collectText(last_child._c_node.next) or '') + child)
+ elif isinstance(child, _Element):
+ last_child = child
+ _appendChild(self, last_child)
+ elif isinstance(child, type) and issubclass(child, ElementBase):
+ last_child = child()
+ _appendChild(self, last_child)
+ else:
+ raise TypeError, f"Invalid child type: {type(child)!r}"
+
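+# Editorial usage sketch (class name and namespace URI are made up):
+#
+#   >>> from lxml import etree
+#   >>> class honk(etree.ElementBase):
+#   ...     NAMESPACE = 'http://example.org/honk'
+#   >>> honk().tag
+#   '{http://example.org/honk}honk'
+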
+cdef class CommentBase(_Comment):
+ """All custom Comment classes must inherit from this one.
+
+ To create an XML Comment instance, use the ``Comment()`` factory.
+
+ Subclasses *must not* override __init__ or __new__ as it is
+ absolutely undefined when these objects will be created or
+ destroyed. All persistent state of Comments must be stored in the
+ underlying XML. If you really need to initialize the object after
+ creation, you can implement an ``_init(self)`` method that will be
+ called after object creation.
+ """
+ def __init__(self, text):
+ # copied from Comment() factory
+ cdef _Document doc
+ cdef xmlDoc* c_doc
+ if text is None:
+ text = b''
+ else:
+ text = _utf8(text)
+ c_doc = _newXMLDoc()
+ doc = _documentFactory(c_doc, None)
+ self._c_node = _createComment(c_doc, _xcstr(text))
+ if self._c_node is NULL:
+ raise MemoryError()
+        tree.xmlAddChild(<xmlNode*>c_doc, self._c_node)
+ _registerProxy(self, doc, self._c_node)
+ self._init()
+
+cdef class PIBase(_ProcessingInstruction):
+ """All custom Processing Instruction classes must inherit from this one.
+
+ To create an XML ProcessingInstruction instance, use the ``PI()``
+ factory.
+
+ Subclasses *must not* override __init__ or __new__ as it is
+ absolutely undefined when these objects will be created or
+ destroyed. All persistent state of PIs must be stored in the
+ underlying XML. If you really need to initialize the object after
+ creation, you can implement an ``_init(self)`` method that will be
+ called after object creation.
+ """
+ def __init__(self, target, text=None):
+ # copied from PI() factory
+ cdef _Document doc
+ cdef xmlDoc* c_doc
+ target = _utf8(target)
+ if text is None:
+ text = b''
+ else:
+ text = _utf8(text)
+ c_doc = _newXMLDoc()
+ doc = _documentFactory(c_doc, None)
+ self._c_node = _createPI(c_doc, _xcstr(target), _xcstr(text))
+ if self._c_node is NULL:
+ raise MemoryError()
+        tree.xmlAddChild(<xmlNode*>c_doc, self._c_node)
+ _registerProxy(self, doc, self._c_node)
+ self._init()
+
+cdef class EntityBase(_Entity):
+ """All custom Entity classes must inherit from this one.
+
+ To create an XML Entity instance, use the ``Entity()`` factory.
+
+ Subclasses *must not* override __init__ or __new__ as it is
+ absolutely undefined when these objects will be created or
+ destroyed. All persistent state of Entities must be stored in the
+ underlying XML. If you really need to initialize the object after
+ creation, you can implement an ``_init(self)`` method that will be
+ called after object creation.
+ """
+ def __init__(self, name):
+ cdef _Document doc
+ cdef xmlDoc* c_doc
+ name_utf = _utf8(name)
+ c_name = _xcstr(name_utf)
+ if c_name[0] == c'#':
+ if not _characterReferenceIsValid(c_name + 1):
+ raise ValueError, f"Invalid character reference: '{name}'"
+ elif not _xmlNameIsValid(c_name):
+ raise ValueError, f"Invalid entity reference: '{name}'"
+ c_doc = _newXMLDoc()
+ doc = _documentFactory(c_doc, None)
+ self._c_node = _createEntity(c_doc, c_name)
+ if self._c_node is NULL:
+ raise MemoryError()
+        tree.xmlAddChild(<xmlNode*>c_doc, self._c_node)
+ _registerProxy(self, doc, self._c_node)
+ self._init()
+
+
+cdef int _validateNodeClass(xmlNode* c_node, cls) except -1:
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ expected = ElementBase
+ elif c_node.type == tree.XML_COMMENT_NODE:
+ expected = CommentBase
+ elif c_node.type == tree.XML_ENTITY_REF_NODE:
+ expected = EntityBase
+ elif c_node.type == tree.XML_PI_NODE:
+ expected = PIBase
+ else:
+ assert False, f"Unknown node type: {c_node.type}"
+
+ if not (isinstance(cls, type) and issubclass(cls, expected)):
+ raise TypeError(
+ f"result of class lookup must be subclass of {type(expected)}, got {type(cls)}")
+ return 0
+
+
+################################################################################
+# Element class lookup
+
+ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*)
+
+# class to store element class lookup functions
+cdef public class ElementClassLookup [ type LxmlElementClassLookupType,
+ object LxmlElementClassLookup ]:
+ """ElementClassLookup(self)
+ Superclass of Element class lookups.
+ """
+ cdef _element_class_lookup_function _lookup_function
+
+
+cdef public class FallbackElementClassLookup(ElementClassLookup) \
+ [ type LxmlFallbackElementClassLookupType,
+ object LxmlFallbackElementClassLookup ]:
+ """FallbackElementClassLookup(self, fallback=None)
+
+ Superclass of Element class lookups with additional fallback.
+ """
+ cdef readonly ElementClassLookup fallback
+ cdef _element_class_lookup_function _fallback_function
+ def __cinit__(self):
+ # fall back to default lookup
+ self._fallback_function = _lookupDefaultElementClass
+
+ def __init__(self, ElementClassLookup fallback=None):
+ if fallback is not None:
+ self._setFallback(fallback)
+ else:
+ self._fallback_function = _lookupDefaultElementClass
+
+ cdef void _setFallback(self, ElementClassLookup lookup):
+ """Sets the fallback scheme for this lookup method.
+ """
+ self.fallback = lookup
+ self._fallback_function = lookup._lookup_function
+ if self._fallback_function is NULL:
+ self._fallback_function = _lookupDefaultElementClass
+
+ def set_fallback(self, ElementClassLookup lookup not None):
+ """set_fallback(self, lookup)
+
+ Sets the fallback scheme for this lookup method.
+ """
+ self._setFallback(lookup)
+
+cdef inline object _callLookupFallback(FallbackElementClassLookup lookup,
+ _Document doc, xmlNode* c_node):
+ return lookup._fallback_function(lookup.fallback, doc, c_node)
+
+
+################################################################################
+# default lookup scheme
+
+cdef class ElementDefaultClassLookup(ElementClassLookup):
+ """ElementDefaultClassLookup(self, element=None, comment=None, pi=None, entity=None)
+ Element class lookup scheme that always returns the default Element
+ class.
+
+ The keyword arguments ``element``, ``comment``, ``pi`` and ``entity``
+ accept the respective Element classes.
+ """
+ cdef readonly object element_class
+ cdef readonly object comment_class
+ cdef readonly object pi_class
+ cdef readonly object entity_class
+ def __cinit__(self):
+ self._lookup_function = _lookupDefaultElementClass
+
+ def __init__(self, element=None, comment=None, pi=None, entity=None):
+ if element is None:
+ self.element_class = _Element
+ elif issubclass(element, ElementBase):
+ self.element_class = element
+ else:
+ raise TypeError, "element class must be subclass of ElementBase"
+
+ if comment is None:
+ self.comment_class = _Comment
+ elif issubclass(comment, CommentBase):
+ self.comment_class = comment
+ else:
+ raise TypeError, "comment class must be subclass of CommentBase"
+
+ if entity is None:
+ self.entity_class = _Entity
+ elif issubclass(entity, EntityBase):
+ self.entity_class = entity
+ else:
+ raise TypeError, "Entity class must be subclass of EntityBase"
+
+ if pi is None:
+ self.pi_class = None # special case, see below
+ elif issubclass(pi, PIBase):
+ self.pi_class = pi
+ else:
+ raise TypeError, "PI class must be subclass of PIBase"
+
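+# Editorial usage sketch ('MyElement' is a made-up ElementBase subclass):
+#
+#   >>> lookup = etree.ElementDefaultClassLookup(element=MyElement)
+#   >>> parser = etree.XMLParser()
+#   >>> parser.set_element_class_lookup(lookup)
+#   >>> root = etree.fromstring('<root/>', parser)  # root is now a MyElement
+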
+cdef object _lookupDefaultElementClass(state, _Document _doc, xmlNode* c_node):
+ "Trivial class lookup function that always returns the default class."
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ if state is not None:
+            return (<ElementDefaultClassLookup>state).element_class
+ else:
+ return _Element
+ elif c_node.type == tree.XML_COMMENT_NODE:
+ if state is not None:
+            return (<ElementDefaultClassLookup>state).comment_class
+ else:
+ return _Comment
+ elif c_node.type == tree.XML_ENTITY_REF_NODE:
+ if state is not None:
+            return (<ElementDefaultClassLookup>state).entity_class
+ else:
+ return _Entity
+ elif c_node.type == tree.XML_PI_NODE:
+        if state is None or (<ElementDefaultClassLookup>state).pi_class is None:
+            # special case XSLT-PI
+            if c_node.name is not NULL and c_node.content is not NULL:
+                if tree.xmlStrcmp(c_node.name, <unsigned char*>"xml-stylesheet") == 0:
+                    if tree.xmlStrstr(c_node.content, <unsigned char*>"text/xsl") is not NULL or \
+                            tree.xmlStrstr(c_node.content, <unsigned char*>"text/xml") is not NULL:
+                        return _XSLTProcessingInstruction
+            return _ProcessingInstruction
+        else:
+            return (<ElementDefaultClassLookup>state).pi_class
+ else:
+ assert False, f"Unknown node type: {c_node.type}"
+
+
+################################################################################
+# attribute based lookup scheme
+
+cdef class AttributeBasedElementClassLookup(FallbackElementClassLookup):
+ """AttributeBasedElementClassLookup(self, attribute_name, class_mapping, fallback=None)
+ Checks an attribute of an Element and looks up the value in a
+ class dictionary.
+
+ Arguments:
+ - attribute name - '{ns}name' style string
+ - class mapping - Python dict mapping attribute values to Element classes
+ - fallback - optional fallback lookup mechanism
+
+ A None key in the class mapping will be checked if the attribute is
+ missing.
+ """
+ cdef object _class_mapping
+ cdef tuple _pytag
+ cdef const_xmlChar* _c_ns
+ cdef const_xmlChar* _c_name
+ def __cinit__(self):
+ self._lookup_function = _attribute_class_lookup
+
+ def __init__(self, attribute_name, class_mapping,
+ ElementClassLookup fallback=None):
+ self._pytag = _getNsTag(attribute_name)
+ ns, name = self._pytag
+ if ns is None:
+ self._c_ns = NULL
+ else:
+ self._c_ns = _xcstr(ns)
+ self._c_name = _xcstr(name)
+ self._class_mapping = dict(class_mapping)
+
+ FallbackElementClassLookup.__init__(self, fallback)
+
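+# Editorial usage sketch (attribute and class names are made up):
+#
+#   >>> lookup = etree.AttributeBasedElementClassLookup(
+#   ...     'kind', {'plain': PlainElement, None: DefaultElement})
+#   >>> parser = etree.XMLParser()
+#   >>> parser.set_element_class_lookup(lookup)
+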
+cdef object _attribute_class_lookup(state, _Document doc, xmlNode* c_node):
+ cdef AttributeBasedElementClassLookup lookup
+ cdef python.PyObject* dict_result
+
+    lookup = <AttributeBasedElementClassLookup>state
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ value = _attributeValueFromNsName(
+ c_node, lookup._c_ns, lookup._c_name)
+ dict_result = python.PyDict_GetItem(lookup._class_mapping, value)
+ if dict_result is not NULL:
+            cls = <object>dict_result
+ _validateNodeClass(c_node, cls)
+ return cls
+ return _callLookupFallback(lookup, doc, c_node)
+
+
+################################################################################
+# per-parser lookup scheme
+
+cdef class ParserBasedElementClassLookup(FallbackElementClassLookup):
+ """ParserBasedElementClassLookup(self, fallback=None)
+ Element class lookup based on the XML parser.
+ """
+ def __cinit__(self):
+ self._lookup_function = _parser_class_lookup
+
+cdef object _parser_class_lookup(state, _Document doc, xmlNode* c_node):
+ if doc._parser._class_lookup is not None:
+ return doc._parser._class_lookup._lookup_function(
+ doc._parser._class_lookup, doc, c_node)
+ return _callLookupFallback(state, doc, c_node)
+
+
+################################################################################
+# custom class lookup based on node type, namespace, name
+
+cdef class CustomElementClassLookup(FallbackElementClassLookup):
+ """CustomElementClassLookup(self, fallback=None)
+ Element class lookup based on a subclass method.
+
+ You can inherit from this class and override the method::
+
+ lookup(self, type, doc, namespace, name)
+
+ to lookup the element class for a node. Arguments of the method:
+ * type: one of 'element', 'comment', 'PI', 'entity'
+ * doc: document that the node is in
+ * namespace: namespace URI of the node (or None for comments/PIs/entities)
+ * name: name of the element/entity, None for comments, target for PIs
+
+ If you return None from this method, the fallback will be called.
+ """
+ def __cinit__(self):
+ self._lookup_function = _custom_class_lookup
+
+ def lookup(self, type, doc, namespace, name):
+ "lookup(self, type, doc, namespace, name)"
+ return None
+
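+# Editorial subclass sketch ('MyElement' is a made-up ElementBase subclass):
+#
+#   >>> class MyLookup(etree.CustomElementClassLookup):
+#   ...     def lookup(self, node_type, document, namespace, name):
+#   ...         if node_type == 'element' and name == 'honk':
+#   ...             return MyElement
+#   ...         return None  # delegate to the fallback
+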
+cdef object _custom_class_lookup(state, _Document doc, xmlNode* c_node):
+ cdef CustomElementClassLookup lookup
+
+    lookup = <CustomElementClassLookup>state
+
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ element_type = "element"
+ elif c_node.type == tree.XML_COMMENT_NODE:
+ element_type = "comment"
+ elif c_node.type == tree.XML_PI_NODE:
+ element_type = "PI"
+ elif c_node.type == tree.XML_ENTITY_REF_NODE:
+ element_type = "entity"
+ else:
+ element_type = "element"
+ if c_node.name is NULL:
+ name = None
+ else:
+ name = funicode(c_node.name)
+ c_str = tree._getNs(c_node)
+ ns = funicode(c_str) if c_str is not NULL else None
+
+ cls = lookup.lookup(element_type, doc, ns, name)
+ if cls is not None:
+ _validateNodeClass(c_node, cls)
+ return cls
+ return _callLookupFallback(lookup, doc, c_node)
+
+
+################################################################################
+# read-only tree based class lookup
+
+cdef class PythonElementClassLookup(FallbackElementClassLookup):
+ """PythonElementClassLookup(self, fallback=None)
+ Element class lookup based on a subclass method.
+
+ This class lookup scheme allows access to the entire XML tree in
+ read-only mode. To use it, re-implement the ``lookup(self, doc,
+ root)`` method in a subclass::
+
+ from lxml import etree, pyclasslookup
+
+ class MyElementClass(etree.ElementBase):
+ honkey = True
+
+ class MyLookup(pyclasslookup.PythonElementClassLookup):
+ def lookup(self, doc, root):
+ if root.tag == "sometag":
+ return MyElementClass
+ else:
+ for child in root:
+ if child.tag == "someothertag":
+ return MyElementClass
+ # delegate to default
+ return None
+
+ If you return None from this method, the fallback will be called.
+
+ The first argument is the opaque document instance that contains
+ the Element. The second argument is a lightweight Element proxy
+ implementation that is only valid during the lookup. Do not try
+ to keep a reference to it. Once the lookup is done, the proxy
+ will be invalid.
+
+ Also, you cannot wrap such a read-only Element in an ElementTree,
+ and you must take care not to keep a reference to them outside of
+ the `lookup()` method.
+
+ Note that the API of the Element objects is not complete. It is
+ purely read-only and does not support all features of the normal
+ `lxml.etree` API (such as XPath, extended slicing or some
+ iteration methods).
+
+ See https://lxml.de/element_classes.html
+ """
+ def __cinit__(self):
+ self._lookup_function = _python_class_lookup
+
+ def lookup(self, doc, element):
+ """lookup(self, doc, element)
+
+ Override this method to implement your own lookup scheme.
+ """
+ return None
+
+cdef object _python_class_lookup(state, _Document doc, tree.xmlNode* c_node):
+ cdef PythonElementClassLookup lookup
+ cdef _ReadOnlyProxy proxy
+    lookup = <PythonElementClassLookup>state
+
+ proxy = _newReadOnlyProxy(None, c_node)
+ cls = lookup.lookup(doc, proxy)
+ _freeReadOnlyProxies(proxy)
+
+ if cls is not None:
+ _validateNodeClass(c_node, cls)
+ return cls
+ return _callLookupFallback(lookup, doc, c_node)
+
+################################################################################
+# Global setup
+
+cdef _element_class_lookup_function LOOKUP_ELEMENT_CLASS
+cdef object ELEMENT_CLASS_LOOKUP_STATE
+
+cdef void _setElementClassLookupFunction(
+ _element_class_lookup_function function, object state):
+ global LOOKUP_ELEMENT_CLASS, ELEMENT_CLASS_LOOKUP_STATE
+ if function is NULL:
+ state = DEFAULT_ELEMENT_CLASS_LOOKUP
+ function = DEFAULT_ELEMENT_CLASS_LOOKUP._lookup_function
+
+ ELEMENT_CLASS_LOOKUP_STATE = state
+ LOOKUP_ELEMENT_CLASS = function
+
+def set_element_class_lookup(ElementClassLookup lookup = None):
+ """set_element_class_lookup(lookup = None)
+
+ Set the global element class lookup method.
+
+ This defines the main entry point for looking up element implementations.
+ The standard implementation uses the :class:`ParserBasedElementClassLookup`
+ to delegate to different lookup schemes for each parser.
+
+ .. warning::
+
+ This should only be changed by applications, not by library packages.
+ In most cases, parser specific lookups should be preferred,
+ which can be configured via
+ :meth:`~lxml.etree.XMLParser.set_element_class_lookup`
+ (and the same for HTML parsers).
+
+ Globally replacing the element class lookup by something other than a
+ :class:`ParserBasedElementClassLookup` will prevent parser specific lookup
+ schemes from working. Several tools rely on parser specific lookups,
+ including :mod:`lxml.html` and :mod:`lxml.objectify`.
+ """
+ if lookup is None or lookup._lookup_function is NULL:
+ _setElementClassLookupFunction(NULL, None)
+ else:
+ _setElementClassLookupFunction(lookup._lookup_function, lookup)
+
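+# Editorial usage sketch: as the warning above suggests, prefer the
+# parser-local variant ('MyElement' is a made-up ElementBase subclass):
+#
+#   >>> parser = etree.XMLParser()
+#   >>> parser.set_element_class_lookup(
+#   ...     etree.ElementDefaultClassLookup(element=MyElement))
+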
+# default setup: parser delegation
+cdef ParserBasedElementClassLookup DEFAULT_ELEMENT_CLASS_LOOKUP
+DEFAULT_ELEMENT_CLASS_LOOKUP = ParserBasedElementClassLookup()
+
+set_element_class_lookup(DEFAULT_ELEMENT_CLASS_LOOKUP)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/cssselect.py b/llmeval-env/lib/python3.10/site-packages/lxml/cssselect.py
new file mode 100644
index 0000000000000000000000000000000000000000..54cd75ac9bfecdec7ea81e91b0840c6edd401515
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/cssselect.py
@@ -0,0 +1,101 @@
+"""CSS Selectors based on XPath.
+
+This module supports selecting XML/HTML tags based on CSS selectors.
+See the `CSSSelector` class for details.
+
+This is a thin wrapper around cssselect 0.7 or later.
+"""
+
+
+from . import etree
+try:
+ import cssselect as external_cssselect
+except ImportError:
+ raise ImportError(
+ 'cssselect does not seem to be installed. '
+ 'See https://pypi.org/project/cssselect/')
+
+
+SelectorSyntaxError = external_cssselect.SelectorSyntaxError
+ExpressionError = external_cssselect.ExpressionError
+SelectorError = external_cssselect.SelectorError
+
+
+__all__ = ['SelectorSyntaxError', 'ExpressionError', 'SelectorError',
+ 'CSSSelector']
+
+
+class LxmlTranslator(external_cssselect.GenericTranslator):
+ """
+ A custom CSS selector to XPath translator with lxml-specific extensions.
+ """
+ def xpath_contains_function(self, xpath, function):
+ # Defined there, removed in later drafts:
+ # http://www.w3.org/TR/2001/CR-css3-selectors-20011113/#content-selectors
+ if function.argument_types() not in (['STRING'], ['IDENT']):
+ raise ExpressionError(
+ "Expected a single string or ident for :contains(), got %r"
+ % function.arguments)
+ value = function.arguments[0].value
+ return xpath.add_condition(
+ 'contains(__lxml_internal_css:lower-case(string(.)), %s)'
+ % self.xpath_literal(value.lower()))
+
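+# Editorial sketch: with this extension, CSSSelector('p:contains("Text")')
+# compiles to an XPath expression along the lines of
+#   descendant-or-self::p[contains(__lxml_internal_css:lower-case(string(.)), 'text')]
+# so the match is case-insensitive: both the element text and the search
+# string are lower-cased.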
+
+class LxmlHTMLTranslator(LxmlTranslator, external_cssselect.HTMLTranslator):
+ """
+ lxml extensions + HTML support.
+ """
+
+
+def _make_lower_case(context, s):
+ return s.lower()
+
+ns = etree.FunctionNamespace('http://codespeak.net/lxml/css/')
+ns.prefix = '__lxml_internal_css'
+ns['lower-case'] = _make_lower_case
+
+
+class CSSSelector(etree.XPath):
+ """A CSS selector.
+
+ Usage::
+
+ >>> from lxml import etree, cssselect
+ >>> select = cssselect.CSSSelector("a tag > child")
+
+ >>> root = etree.XML("TEXT ")
+ >>> [ el.tag for el in select(root) ]
+ ['child']
+
+ To use CSS namespaces, you need to pass a prefix-to-namespace
+ mapping as ``namespaces`` keyword argument::
+
+ >>> rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
+ >>> select_ns = cssselect.CSSSelector('root > rdf|Description',
+ ... namespaces={'rdf': rdfns})
+
+ >>> rdf = etree.XML((
+    ...     '<root xmlns:rdf="%s">'
+    ...       '<rdf:Description>blah</rdf:Description>'
+    ...     '</root>') % rdfns)
+ >>> [(el.tag, el.text) for el in select_ns(rdf)]
+ [('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description', 'blah')]
+
+ """
+ def __init__(self, css, namespaces=None, translator='xml'):
+ if translator == 'xml':
+ translator = LxmlTranslator()
+ elif translator == 'html':
+ translator = LxmlHTMLTranslator()
+ elif translator == 'xhtml':
+ translator = LxmlHTMLTranslator(xhtml=True)
+ path = translator.css_to_xpath(css)
+ super().__init__(path, namespaces=namespaces)
+ self.css = css
+
+ def __repr__(self):
+ return '<%s %x for %r>' % (
+ self.__class__.__name__,
+ abs(id(self)),
+ self.css)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/etree.h b/llmeval-env/lib/python3.10/site-packages/lxml/etree.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ffc7ba32670f056a6415ab60ffb8240fb6d4a28
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/etree.h
@@ -0,0 +1,248 @@
+/* Generated by Cython 3.0.10 */
+
+#ifndef __PYX_HAVE__lxml__etree
+#define __PYX_HAVE__lxml__etree
+
+#include "Python.h"
+struct LxmlDocument;
+struct LxmlElement;
+struct LxmlElementTree;
+struct LxmlElementTagMatcher;
+struct LxmlElementIterator;
+struct LxmlElementBase;
+struct LxmlElementClassLookup;
+struct LxmlFallbackElementClassLookup;
+
+/* "lxml/etree.pyx":333
+ *
+ * # type of a function that steps from node to node
+ * ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+typedef xmlNode *(*_node_to_node_function)(xmlNode *);
+
+/* "lxml/etree.pyx":349
+ * @cython.final
+ * @cython.freelist(8)
+ * cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]: # <<<<<<<<<<<<<<
+ * """Internal base class to reference a libxml document.
+ *
+ */
+struct LxmlDocument {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4lxml_5etree__Document *__pyx_vtab;
+ int _ns_counter;
+ PyObject *_prefix_tail;
+ xmlDoc *_c_doc;
+ struct __pyx_obj_4lxml_5etree__BaseParser *_parser;
+};
+
+/* "lxml/etree.pyx":698
+ *
+ * @cython.no_gc_clear
+ * cdef public class _Element [ type LxmlElementType, object LxmlElement ]: # <<<<<<<<<<<<<<
+ * """Element class.
+ *
+ */
+struct LxmlElement {
+ PyObject_HEAD
+ struct LxmlDocument *_doc;
+ xmlNode *_c_node;
+ PyObject *_tag;
+};
+
+/* "lxml/etree.pyx":1872
+ *
+ *
+ * cdef public class _ElementTree [ type LxmlElementTreeType, # <<<<<<<<<<<<<<
+ * object LxmlElementTree ]:
+ * cdef _Document _doc
+ */
+struct LxmlElementTree {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTree *__pyx_vtab;
+ struct LxmlDocument *_doc;
+ struct LxmlElement *_context_node;
+};
+
+/* "lxml/etree.pyx":2646
+ *
+ *
+ * cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher, # <<<<<<<<<<<<<<
+ * type LxmlElementTagMatcherType ]:
+ * """
+ */
+struct LxmlElementTagMatcher {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTagMatcher *__pyx_vtab;
+ PyObject *_pystrings;
+ int _node_type;
+ char *_href;
+ char *_name;
+};
+
+/* "lxml/etree.pyx":2677
+ * self._name = NULL
+ *
+ * cdef public class _ElementIterator(_ElementTagMatcher) [ # <<<<<<<<<<<<<<
+ * object LxmlElementIterator, type LxmlElementIteratorType ]:
+ * """
+ */
+struct LxmlElementIterator {
+ struct LxmlElementTagMatcher __pyx_base;
+ struct LxmlElement *_node;
+ _node_to_node_function _next_element;
+};
+
+/* "src/lxml/classlookup.pxi":6
+ * # Custom Element classes
+ *
+ * cdef public class ElementBase(_Element) [ type LxmlElementBaseType, # <<<<<<<<<<<<<<
+ * object LxmlElementBase ]:
+ * """ElementBase(*children, attrib=None, nsmap=None, **_extra)
+ */
+struct LxmlElementBase {
+ struct LxmlElement __pyx_base;
+};
+
+/* "src/lxml/classlookup.pxi":210
+ * # Element class lookup
+ *
+ * ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*) # <<<<<<<<<<<<<<
+ *
+ * # class to store element class lookup functions
+ */
+typedef PyObject *(*_element_class_lookup_function)(PyObject *, struct LxmlDocument *, xmlNode *);
+
+/* "src/lxml/classlookup.pxi":213
+ *
+ * # class to store element class lookup functions
+ * cdef public class ElementClassLookup [ type LxmlElementClassLookupType, # <<<<<<<<<<<<<<
+ * object LxmlElementClassLookup ]:
+ * """ElementClassLookup(self)
+ */
+struct LxmlElementClassLookup {
+ PyObject_HEAD
+ _element_class_lookup_function _lookup_function;
+};
+
+/* "src/lxml/classlookup.pxi":221
+ *
+ *
+ * cdef public class FallbackElementClassLookup(ElementClassLookup) \ # <<<<<<<<<<<<<<
+ * [ type LxmlFallbackElementClassLookupType,
+ * object LxmlFallbackElementClassLookup ]:
+ */
+struct LxmlFallbackElementClassLookup {
+ struct LxmlElementClassLookup __pyx_base;
+ struct __pyx_vtabstruct_4lxml_5etree_FallbackElementClassLookup *__pyx_vtab;
+ struct LxmlElementClassLookup *fallback;
+ _element_class_lookup_function _fallback_function;
+};
+
+#ifndef __PYX_HAVE_API__lxml__etree
+
+#ifdef CYTHON_EXTERN_C
+ #undef __PYX_EXTERN_C
+ #define __PYX_EXTERN_C CYTHON_EXTERN_C
+#elif defined(__PYX_EXTERN_C)
+ #ifdef _MSC_VER
+ #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
+ #else
+ #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
+ #endif
+#else
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(_T) _T
+#endif
+
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlDocumentType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTreeType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTagMatcherType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementIteratorType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementBaseType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementClassLookupType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlFallbackElementClassLookupType;
+
+__PYX_EXTERN_C struct LxmlElement *deepcopyNodeToDocument(struct LxmlDocument *, xmlNode *);
+__PYX_EXTERN_C struct LxmlElementTree *elementTreeFactory(struct LxmlElement *);
+__PYX_EXTERN_C struct LxmlElementTree *newElementTree(struct LxmlElement *, PyObject *);
+__PYX_EXTERN_C struct LxmlElementTree *adoptExternalDocument(xmlDoc *, PyObject *, int);
+__PYX_EXTERN_C struct LxmlElement *elementFactory(struct LxmlDocument *, xmlNode *);
+__PYX_EXTERN_C struct LxmlElement *makeElement(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
+__PYX_EXTERN_C struct LxmlElement *makeSubElement(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
+__PYX_EXTERN_C void setElementClassLookupFunction(_element_class_lookup_function, PyObject *);
+__PYX_EXTERN_C PyObject *lookupDefaultElementClass(PyObject *, PyObject *, xmlNode *);
+__PYX_EXTERN_C PyObject *lookupNamespaceElementClass(PyObject *, PyObject *, xmlNode *);
+__PYX_EXTERN_C PyObject *callLookupFallback(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *);
+__PYX_EXTERN_C int tagMatches(xmlNode *, const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C struct LxmlDocument *documentOrRaise(PyObject *);
+__PYX_EXTERN_C struct LxmlElement *rootNodeOrRaise(PyObject *);
+__PYX_EXTERN_C int hasText(xmlNode *);
+__PYX_EXTERN_C int hasTail(xmlNode *);
+__PYX_EXTERN_C PyObject *textOf(xmlNode *);
+__PYX_EXTERN_C PyObject *tailOf(xmlNode *);
+__PYX_EXTERN_C int setNodeText(xmlNode *, PyObject *);
+__PYX_EXTERN_C int setTailText(xmlNode *, PyObject *);
+__PYX_EXTERN_C PyObject *attributeValue(xmlNode *, xmlAttr *);
+__PYX_EXTERN_C PyObject *attributeValueFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C PyObject *getAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
+__PYX_EXTERN_C PyObject *iterattributes(struct LxmlElement *, int);
+__PYX_EXTERN_C PyObject *collectAttributes(xmlNode *, int);
+__PYX_EXTERN_C int setAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
+__PYX_EXTERN_C int delAttribute(struct LxmlElement *, PyObject *);
+__PYX_EXTERN_C int delAttributeFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C int hasChild(xmlNode *);
+__PYX_EXTERN_C xmlNode *findChild(xmlNode *, Py_ssize_t);
+__PYX_EXTERN_C xmlNode *findChildForwards(xmlNode *, Py_ssize_t);
+__PYX_EXTERN_C xmlNode *findChildBackwards(xmlNode *, Py_ssize_t);
+__PYX_EXTERN_C xmlNode *nextElement(xmlNode *);
+__PYX_EXTERN_C xmlNode *previousElement(xmlNode *);
+__PYX_EXTERN_C void appendChild(struct LxmlElement *, struct LxmlElement *);
+__PYX_EXTERN_C int appendChildToElement(struct LxmlElement *, struct LxmlElement *);
+__PYX_EXTERN_C PyObject *pyunicode(const xmlChar *);
+__PYX_EXTERN_C PyObject *utf8(PyObject *);
+__PYX_EXTERN_C PyObject *getNsTag(PyObject *);
+__PYX_EXTERN_C PyObject *getNsTagWithEmptyNs(PyObject *);
+__PYX_EXTERN_C PyObject *namespacedName(xmlNode *);
+__PYX_EXTERN_C PyObject *namespacedNameFromNsName(const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C void iteratorStoreNext(struct LxmlElementIterator *, struct LxmlElement *);
+__PYX_EXTERN_C void initTagMatch(struct LxmlElementTagMatcher *, PyObject *);
+__PYX_EXTERN_C xmlNs *findOrBuildNodeNsPrefix(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *);
+
+#endif /* !__PYX_HAVE_API__lxml__etree */
+
+/* WARNING: the interface of the module init function changed in CPython 3.5. */
+/* It now returns a PyModuleDef instance instead of a PyModule instance. */
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC initetree(void);
+#else
+/* WARNING: Use PyImport_AppendInittab("etree", PyInit_etree) instead of calling PyInit_etree directly from Python 3.5 */
+PyMODINIT_FUNC PyInit_etree(void);
+
+#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L))
+#if defined(__cplusplus) && __cplusplus >= 201402L
+[[deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")]] inline
+#elif defined(__GNUC__) || defined(__clang__)
+__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly."), __unused__)) __inline__
+#elif defined(_MSC_VER)
+__declspec(deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")) __inline
+#endif
+static PyObject* __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyObject* res) {
+ return res;
+}
+#define PyInit_etree() __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyInit_etree())
+#endif
+#endif
+
+#endif /* !__PYX_HAVE__lxml__etree */
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/etree.pyx b/llmeval-env/lib/python3.10/site-packages/lxml/etree.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..31b2c52da145b249d4f2804ee125d5355eb1055a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/etree.pyx
@@ -0,0 +1,3712 @@
+# cython: binding=True
+# cython: auto_pickle=False
+# cython: language_level=3
+
+"""
+The ``lxml.etree`` module implements the extended ElementTree API for XML.
+"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = [
+ 'AttributeBasedElementClassLookup', 'C14NError', 'C14NWriterTarget', 'CDATA',
+ 'Comment', 'CommentBase', 'CustomElementClassLookup', 'DEBUG',
+ 'DTD', 'DTDError', 'DTDParseError', 'DTDValidateError',
+ 'DocumentInvalid', 'ETCompatXMLParser', 'ETXPath', 'Element',
+ 'ElementBase', 'ElementClassLookup', 'ElementDefaultClassLookup',
+ 'ElementNamespaceClassLookup', 'ElementTree', 'Entity', 'EntityBase',
+ 'Error', 'ErrorDomains', 'ErrorLevels', 'ErrorTypes', 'Extension',
+ 'FallbackElementClassLookup', 'FunctionNamespace', 'HTML',
+ 'HTMLParser', 'LIBXML_COMPILED_VERSION', 'LIBXML_VERSION',
+ 'LIBXSLT_COMPILED_VERSION', 'LIBXSLT_VERSION', 'LXML_VERSION',
+ 'LxmlError', 'LxmlRegistryError', 'LxmlSyntaxError',
+ 'NamespaceRegistryError', 'PI', 'PIBase', 'ParseError',
+ 'ParserBasedElementClassLookup', 'ParserError', 'ProcessingInstruction',
+ 'PyErrorLog', 'PythonElementClassLookup', 'QName', 'RelaxNG',
+ 'RelaxNGError', 'RelaxNGErrorTypes', 'RelaxNGParseError',
+ 'RelaxNGValidateError', 'Resolver', 'Schematron', 'SchematronError',
+ 'SchematronParseError', 'SchematronValidateError', 'SerialisationError',
+ 'SubElement', 'TreeBuilder', 'XInclude', 'XIncludeError', 'XML',
+ 'XMLDTDID', 'XMLID', 'XMLParser', 'XMLSchema', 'XMLSchemaError',
+ 'XMLSchemaParseError', 'XMLSchemaValidateError', 'XMLSyntaxError',
+ 'XMLTreeBuilder', 'XPath', 'XPathDocumentEvaluator', 'XPathError',
+ 'XPathEvalError', 'XPathEvaluator', 'XPathFunctionError', 'XPathResultError',
+ 'XPathSyntaxError', 'XSLT', 'XSLTAccessControl', 'XSLTApplyError',
+ 'XSLTError', 'XSLTExtension', 'XSLTExtensionError', 'XSLTParseError',
+ 'XSLTSaveError', 'canonicalize',
+ 'cleanup_namespaces', 'clear_error_log', 'dump',
+ 'fromstring', 'fromstringlist', 'get_default_parser', 'iselement',
+ 'iterparse', 'iterwalk', 'parse', 'parseid', 'register_namespace',
+ 'set_default_parser', 'set_element_class_lookup', 'strip_attributes',
+ 'strip_elements', 'strip_tags', 'tostring', 'tostringlist', 'tounicode',
+ 'use_global_python_log'
+ ]
+
+cimport cython
+
+from lxml cimport python
+from lxml.includes cimport tree, config
+from lxml.includes.tree cimport xmlDoc, xmlNode, xmlAttr, xmlNs, _isElement, _getNs
+from lxml.includes.tree cimport const_xmlChar, xmlChar, _xcstr
+from lxml.python cimport _cstr, _isString
+from lxml.includes cimport xpath
+from lxml.includes cimport c14n
+
+# Cython's standard declarations
+cimport cpython.mem
+cimport cpython.ref
+from libc cimport limits, stdio, stdlib
+from libc cimport string as cstring_h # not to be confused with stdlib 'string'
+from libc.string cimport const_char
+
+cdef object os_path_abspath
+from os.path import abspath as os_path_abspath
+
+cdef object BytesIO, StringIO
+from io import BytesIO, StringIO
+
+cdef object OrderedDict
+from collections import OrderedDict
+
+cdef object _elementpath
+from lxml import _elementpath
+
+cdef object sys
+import sys
+
+cdef object re
+import re
+
+cdef object partial
+from functools import partial
+
+cdef object islice
+from itertools import islice
+
+cdef object ITER_EMPTY = iter(())
+
+cdef object MutableMapping
+from collections.abc import MutableMapping
+
+class _ImmutableMapping(MutableMapping):
+ def __getitem__(self, key):
+ raise KeyError, key
+
+ def __setitem__(self, key, value):
+ raise KeyError, key
+
+ def __delitem__(self, key):
+ raise KeyError, key
+
+ def __contains__(self, key):
+ return False
+
+ def __len__(self):
+ return 0
+
+ def __iter__(self):
+ return ITER_EMPTY
+ iterkeys = itervalues = iteritems = __iter__
+
+cdef object IMMUTABLE_EMPTY_MAPPING = _ImmutableMapping()
+del _ImmutableMapping
+
+
+# the rules
+# ---------
+# any libxml C argument/variable is prefixed with c_
+# any non-public function/class is prefixed with an underscore
+# instance creation is always through factories
+
+# what to do with libxml2/libxslt error messages?
+# 0 : drop
+# 1 : use log
+DEF __DEBUG = 1
+
+# maximum number of lines in the libxml2/xslt log if __DEBUG == 1
+DEF __MAX_LOG_SIZE = 100
+
+# make the compiled-in debug state publicly available
+DEBUG = __DEBUG
+
+# A struct to store a cached qualified tag name+href pair.
+# While we can borrow the c_name from the document dict,
+# PyPy requires us to store a Python reference for the
+# namespace in order to keep the byte buffer alive.
+cdef struct qname:
+ const_xmlChar* c_name
+ python.PyObject* href
+
+# initialize parser (and threading)
+xmlparser.xmlInitParser()
+
+# global per-thread setup
+tree.xmlThrDefIndentTreeOutput(1)
+tree.xmlThrDefLineNumbersDefaultValue(1)
+
+_initThreadLogging()
+
+# filename encoding
+cdef bytes _FILENAME_ENCODING = (sys.getfilesystemencoding() or sys.getdefaultencoding() or 'ascii').encode("UTF-8")
+cdef char* _C_FILENAME_ENCODING = _cstr(_FILENAME_ENCODING)
+
+# set up some default namespace prefixes
+cdef dict _DEFAULT_NAMESPACE_PREFIXES = {
+ b"http://www.w3.org/XML/1998/namespace": b'xml',
+ b"http://www.w3.org/1999/xhtml": b"html",
+ b"http://www.w3.org/1999/XSL/Transform": b"xsl",
+ b"http://www.w3.org/1999/02/22-rdf-syntax-ns#": b"rdf",
+ b"http://schemas.xmlsoap.org/wsdl/": b"wsdl",
+ # xml schema
+ b"http://www.w3.org/2001/XMLSchema": b"xs",
+ b"http://www.w3.org/2001/XMLSchema-instance": b"xsi",
+ # dublin core
+ b"http://purl.org/dc/elements/1.1/": b"dc",
+ # objectify
+ b"http://codespeak.net/lxml/objectify/pytype" : b"py",
+}
+
+# To avoid runtime encoding overhead, we keep a Unicode copy
+# of the uri-prefix mapping as (str, str) items view.
+cdef object _DEFAULT_NAMESPACE_PREFIXES_ITEMS = []
+
+cdef _update_default_namespace_prefixes_items():
+ cdef bytes ns, prefix
+ global _DEFAULT_NAMESPACE_PREFIXES_ITEMS
+ _DEFAULT_NAMESPACE_PREFIXES_ITEMS = {
+ ns.decode('utf-8') : prefix.decode('utf-8')
+ for ns, prefix in _DEFAULT_NAMESPACE_PREFIXES.items()
+ }.items()
+
+_update_default_namespace_prefixes_items()
+
+cdef object _check_internal_prefix = re.compile(br"ns\d+$").match
+
+def register_namespace(prefix, uri):
+ """Registers a namespace prefix that newly created Elements in that
+ namespace will use. The registry is global, and any existing
+ mapping for either the given prefix or the namespace URI will be
+ removed.
+ """
+ prefix_utf, uri_utf = _utf8(prefix), _utf8(uri)
+ if _check_internal_prefix(prefix_utf):
+ raise ValueError("Prefix format reserved for internal use")
+ _tagValidOrRaise(prefix_utf)
+ _uriValidOrRaise(uri_utf)
+ if (uri_utf == b"http://www.w3.org/XML/1998/namespace" and prefix_utf != b'xml'
+ or prefix_utf == b'xml' and uri_utf != b"http://www.w3.org/XML/1998/namespace"):
+ raise ValueError("Cannot change the 'xml' prefix of the XML namespace")
+ for k, v in list(_DEFAULT_NAMESPACE_PREFIXES.items()):
+ if k == uri_utf or v == prefix_utf:
+ del _DEFAULT_NAMESPACE_PREFIXES[k]
+ _DEFAULT_NAMESPACE_PREFIXES[uri_utf] = prefix_utf
+ _update_default_namespace_prefixes_items()
+
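+# Editorial usage sketch (prefix and URI are made up):
+#
+#   >>> etree.register_namespace('ex', 'http://example.org/ns')
+#   >>> el = etree.Element('{http://example.org/ns}tag')
+#   >>> etree.tostring(el)
+#   b'<ex:tag xmlns:ex="http://example.org/ns"/>'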
+
+# Error superclass for ElementTree compatibility
+cdef class Error(Exception):
+ pass
+
+# module level superclass for all exceptions
+cdef class LxmlError(Error):
+ """Main exception base class for lxml. All other exceptions inherit from
+ this one.
+ """
+ def __init__(self, message, error_log=None):
+ super(_Error, self).__init__(message)
+ if error_log is None:
+ self.error_log = __copyGlobalErrorLog()
+ else:
+ self.error_log = error_log.copy()
+
+cdef object _Error = Error
+
+
+# superclass for all syntax errors
+class LxmlSyntaxError(LxmlError, SyntaxError):
+ """Base class for all syntax errors.
+ """
+
+cdef class C14NError(LxmlError):
+ """Error during C14N serialisation.
+ """
+
+# version information
+cdef __unpackDottedVersion(version):
+ version_list = []
+ l = (version.decode("ascii").replace('-', '.').split('.') + [0]*4)[:4]
+ for item in l:
+ try:
+ item = int(item)
+ except ValueError:
+ if item.startswith('dev'):
+ count = item[3:]
+ item = -300
+ elif item.startswith('alpha'):
+ count = item[5:]
+ item = -200
+ elif item.startswith('beta'):
+ count = item[4:]
+ item = -100
+ else:
+ count = 0
+ if count:
+ item += int(count)
+ version_list.append(item)
+ return tuple(version_list)
+
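+# Editorial parsing sketch: the result always has four entries, and dev/alpha/
+# beta releases map to negative numbers so they sort below final releases:
+#   __unpackDottedVersion(b'4.9.4')     -> (4, 9, 4, 0)
+#   __unpackDottedVersion(b'2.0.beta1') -> (2, 0, -99, 0)
+#   __unpackDottedVersion(b'3.0-dev')   -> (3, 0, -300, 0)
+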
+cdef __unpackIntVersion(int c_version):
+ return (
+ ((c_version // (100*100)) % 100),
+ ((c_version // 100) % 100),
+ (c_version % 100)
+ )
+
+cdef int _LIBXML_VERSION_INT
+try:
+ _LIBXML_VERSION_INT = int(
+        re.match('[0-9]+', (<unsigned char*>tree.xmlParserVersion).decode("ascii")).group(0))
+except Exception:
+ print("Unknown libxml2 version: " + (tree.xmlParserVersion).decode("latin1"))
+ _LIBXML_VERSION_INT = 0
+
+LIBXML_VERSION = __unpackIntVersion(_LIBXML_VERSION_INT)
+LIBXML_COMPILED_VERSION = __unpackIntVersion(tree.LIBXML_VERSION)
+LXML_VERSION = __unpackDottedVersion(tree.LXML_VERSION_STRING)
+
+__version__ = tree.LXML_VERSION_STRING.decode("ascii")
+
+
+# class for temporary storage of Python references,
+# used e.g. for XPath results
+@cython.final
+@cython.internal
+cdef class _TempStore:
+ cdef list _storage
+ def __init__(self):
+ self._storage = []
+
+ cdef int add(self, obj) except -1:
+ self._storage.append(obj)
+ return 0
+
+ cdef int clear(self) except -1:
+ del self._storage[:]
+ return 0
+
+
+# class for temporarily storing exceptions raised in extensions
+@cython.internal
+cdef class _ExceptionContext:
+ cdef object _exc_info
+ cdef int clear(self) except -1:
+ self._exc_info = None
+ return 0
+
+ cdef void _store_raised(self) noexcept:
+ try:
+ self._exc_info = sys.exc_info()
+ except BaseException as e:
+ self._store_exception(e)
+ finally:
+ return # and swallow any further exceptions
+
+ cdef int _store_exception(self, exception) except -1:
+ self._exc_info = (exception, None, None)
+ return 0
+
+ cdef bint _has_raised(self) except -1:
+ return self._exc_info is not None
+
+ cdef int _raise_if_stored(self) except -1:
+ if self._exc_info is None:
+ return 0
+ type, value, traceback = self._exc_info
+ self._exc_info = None
+ if value is None and traceback is None:
+ raise type
+ else:
+ raise type, value, traceback
+
+
+# type of a function that steps from node to node
+ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*)
+
+
+################################################################################
+# Include submodules
+
+include "proxy.pxi" # Proxy handling (element backpointers/memory/etc.)
+include "apihelpers.pxi" # Private helper functions
+include "xmlerror.pxi" # Error and log handling
+
+
+################################################################################
+# Public Python API
+
+@cython.final
+@cython.freelist(8)
+cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]:
+ """Internal base class to reference a libxml document.
+
+ When instances of this class are garbage collected, the libxml
+ document is cleaned up.
+ """
+ cdef int _ns_counter
+ cdef bytes _prefix_tail
+ cdef xmlDoc* _c_doc
+ cdef _BaseParser _parser
+
+ def __dealloc__(self):
+ # if there are no more references to the document, it is safe
+ # to clean the whole thing up, as all nodes have a reference to
+ # the document
+ tree.xmlFreeDoc(self._c_doc)
+
+ @cython.final
+ cdef getroot(self):
+ # return an element proxy for the document root
+ cdef xmlNode* c_node
+ c_node = tree.xmlDocGetRootElement(self._c_doc)
+ if c_node is NULL:
+ return None
+ return _elementFactory(self, c_node)
+
+ @cython.final
+ cdef bint hasdoctype(self) noexcept:
+ # DOCTYPE gets parsed into internal subset (xmlDTD*)
+ return self._c_doc is not NULL and self._c_doc.intSubset is not NULL
+
+ @cython.final
+ cdef getdoctype(self):
+ # get doctype info: root tag, public/system ID (or None if not known)
+ cdef tree.xmlDtd* c_dtd
+ cdef xmlNode* c_root_node
+ public_id = None
+ sys_url = None
+ c_dtd = self._c_doc.intSubset
+ if c_dtd is not NULL:
+ if c_dtd.ExternalID is not NULL:
+ public_id = funicode(c_dtd.ExternalID)
+ if c_dtd.SystemID is not NULL:
+ sys_url = funicode(c_dtd.SystemID)
+ c_dtd = self._c_doc.extSubset
+ if c_dtd is not NULL:
+ if not public_id and c_dtd.ExternalID is not NULL:
+ public_id = funicode(c_dtd.ExternalID)
+ if not sys_url and c_dtd.SystemID is not NULL:
+ sys_url = funicode(c_dtd.SystemID)
+ c_root_node = tree.xmlDocGetRootElement(self._c_doc)
+ if c_root_node is NULL:
+ root_name = None
+ else:
+ root_name = funicode(c_root_node.name)
+ return root_name, public_id, sys_url
+
+ @cython.final
+ cdef getxmlinfo(self):
+ # return XML version and encoding (or None if not known)
+ cdef xmlDoc* c_doc = self._c_doc
+ if c_doc.version is NULL:
+ version = None
+ else:
+ version = funicode(c_doc.version)
+ if c_doc.encoding is NULL:
+ encoding = None
+ else:
+ encoding = funicode(c_doc.encoding)
+ return version, encoding
+
+ @cython.final
+ cdef isstandalone(self):
+ # returns True for "standalone=true",
+ # False for "standalone=false", None if not provided
+ if self._c_doc.standalone == -1:
+ return None
+ else:
+ return (self._c_doc.standalone == 1)
+
+ @cython.final
+ cdef bytes buildNewPrefix(self):
+ # get a new unique prefix ("nsX") for this document
+ cdef bytes ns
+ if self._ns_counter < len(_PREFIX_CACHE):
+ ns = _PREFIX_CACHE[self._ns_counter]
+ else:
+ ns = python.PyBytes_FromFormat("ns%d", self._ns_counter)
+ if self._prefix_tail is not None:
+ ns += self._prefix_tail
+ self._ns_counter += 1
+ if self._ns_counter < 0:
+ # overflow!
+ self._ns_counter = 0
+ if self._prefix_tail is None:
+ self._prefix_tail = b"A"
+ else:
+ self._prefix_tail += b"A"
+ return ns
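+
+    # For illustration, the generated prefix sequence is roughly:
+    #   b'ns0' ... b'ns29'     (served from _PREFIX_CACHE)
+    #   b'ns30', b'ns31', ...  (formatted on demand)
+    # and, should the counter ever wrap around, the b'A' tail restarts it
+    # as b'ns0A', b'ns1A', ..., keeping prefixes unique within the document.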
+
+ @cython.final
+ cdef xmlNs* _findOrBuildNodeNs(self, xmlNode* c_node,
+ const_xmlChar* c_href, const_xmlChar* c_prefix,
+ bint is_attribute) except NULL:
+ """Get or create namespace structure for a node. Reuses the prefix if
+ possible.
+ """
+ cdef xmlNs* c_ns
+ cdef xmlNs* c_doc_ns
+ cdef python.PyObject* dict_result
+ if c_node.type != tree.XML_ELEMENT_NODE:
+ assert c_node.type == tree.XML_ELEMENT_NODE, \
+ "invalid node type %d, expected %d" % (
+ c_node.type, tree.XML_ELEMENT_NODE)
+ # look for existing ns declaration
+ c_ns = _searchNsByHref(c_node, c_href, is_attribute)
+ if c_ns is not NULL:
+ if is_attribute and c_ns.prefix is NULL:
+ # do not put namespaced attributes into the default
+ # namespace as this would break serialisation
+ pass
+ else:
+ return c_ns
+
+ # none found => determine a suitable new prefix
+ if c_prefix is NULL:
+ dict_result = python.PyDict_GetItem(
+ _DEFAULT_NAMESPACE_PREFIXES, c_href)
+ if dict_result is not NULL:
+                prefix = <object>dict_result
+ else:
+ prefix = self.buildNewPrefix()
+ c_prefix = _xcstr(prefix)
+
+ # make sure the prefix is not in use already
+ while tree.xmlSearchNs(self._c_doc, c_node, c_prefix) is not NULL:
+ prefix = self.buildNewPrefix()
+ c_prefix = _xcstr(prefix)
+
+ # declare the namespace and return it
+ c_ns = tree.xmlNewNs(c_node, c_href, c_prefix)
+ if c_ns is NULL:
+ raise MemoryError()
+ return c_ns
+
+ @cython.final
+ cdef int _setNodeNs(self, xmlNode* c_node, const_xmlChar* c_href) except -1:
+ "Lookup namespace structure and set it for the node."
+ c_ns = self._findOrBuildNodeNs(c_node, c_href, NULL, 0)
+ tree.xmlSetNs(c_node, c_ns)
+
+cdef tuple __initPrefixCache():
+ cdef int i
+ return tuple([ python.PyBytes_FromFormat("ns%d", i)
+ for i in range(30) ])
+
+cdef tuple _PREFIX_CACHE = __initPrefixCache()
+
+cdef _Document _documentFactory(xmlDoc* c_doc, _BaseParser parser):
+ cdef _Document result
+ result = _Document.__new__(_Document)
+ result._c_doc = c_doc
+ result._ns_counter = 0
+ result._prefix_tail = None
+ if parser is None:
+ parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser()
+ result._parser = parser
+ return result
+
+
+cdef object _find_invalid_public_id_characters = re.compile(
+ ur"[^\x20\x0D\x0Aa-zA-Z0-9'()+,./:=?;!*#@$_%-]+").search
+
+
+cdef class DocInfo:
+ "Document information provided by parser and DTD."
+ cdef _Document _doc
+ def __cinit__(self, tree):
+ "Create a DocInfo object for an ElementTree object or root Element."
+ self._doc = _documentOrRaise(tree)
+ root_name, public_id, system_url = self._doc.getdoctype()
+ if not root_name and (public_id or system_url):
+ raise ValueError, "Could not find root node"
+
+ @property
+ def root_name(self):
+ """Returns the name of the root node as defined by the DOCTYPE."""
+ root_name, public_id, system_url = self._doc.getdoctype()
+ return root_name
+
+ @cython.final
+ cdef tree.xmlDtd* _get_c_dtd(self):
+ """"Return the DTD. Create it if it does not yet exist."""
+ cdef xmlDoc* c_doc = self._doc._c_doc
+ cdef xmlNode* c_root_node
+ cdef const_xmlChar* c_name
+
+ if c_doc.intSubset:
+ return c_doc.intSubset
+
+ c_root_node = tree.xmlDocGetRootElement(c_doc)
+ c_name = c_root_node.name if c_root_node else NULL
+ return tree.xmlCreateIntSubset(c_doc, c_name, NULL, NULL)
+
+ def clear(self):
+ """Removes DOCTYPE and internal subset from the document."""
+ cdef xmlDoc* c_doc = self._doc._c_doc
+        cdef tree.xmlNode* c_dtd = <tree.xmlNode*>c_doc.intSubset
+ if c_dtd is NULL:
+ return
+ tree.xmlUnlinkNode(c_dtd)
+ tree.xmlFreeNode(c_dtd)
+
+ property public_id:
+ """Public ID of the DOCTYPE.
+
+ Mutable. May be set to a valid string or None. If a DTD does not
+ exist, setting this variable (even to None) will create one.
+ """
+ def __get__(self):
+ root_name, public_id, system_url = self._doc.getdoctype()
+ return public_id
+
+ def __set__(self, value):
+ cdef xmlChar* c_value = NULL
+ if value is not None:
+ match = _find_invalid_public_id_characters(value)
+ if match:
+ raise ValueError, f'Invalid character(s) {match.group(0)!r} in public_id.'
+ value = _utf8(value)
+ c_value = tree.xmlStrdup(_xcstr(value))
+ if not c_value:
+ raise MemoryError()
+
+ c_dtd = self._get_c_dtd()
+ if not c_dtd:
+ tree.xmlFree(c_value)
+ raise MemoryError()
+            if c_dtd.ExternalID:
+                tree.xmlFree(<void*>c_dtd.ExternalID)
+            c_dtd.ExternalID = c_value
+
+ property system_url:
+ """System ID of the DOCTYPE.
+
+ Mutable. May be set to a valid string or None. If a DTD does not
+ exist, setting this variable (even to None) will create one.
+ """
+ def __get__(self):
+ root_name, public_id, system_url = self._doc.getdoctype()
+ return system_url
+
+ def __set__(self, value):
+ cdef xmlChar* c_value = NULL
+ if value is not None:
+ bvalue = _utf8(value)
+                # sys_url may be any valid unicode string that can be
+                # enclosed in single or double quotes.
+                if b"'" in bvalue and b'"' in bvalue:
+                    raise ValueError(
+                        'System URL may not contain both single (\') and double quotes (").')
+                c_value = tree.xmlStrdup(_xcstr(bvalue))
+                if not c_value:
+                    raise MemoryError()
+
+ c_dtd = self._get_c_dtd()
+ if not c_dtd:
+ tree.xmlFree(c_value)
+ raise MemoryError()
+            if c_dtd.SystemID:
+                tree.xmlFree(<void*>c_dtd.SystemID)
+            c_dtd.SystemID = c_value
+
+ @property
+ def xml_version(self):
+ """Returns the XML version as declared by the document."""
+ xml_version, encoding = self._doc.getxmlinfo()
+ return xml_version
+
+ @property
+ def encoding(self):
+ """Returns the encoding name as declared by the document."""
+ xml_version, encoding = self._doc.getxmlinfo()
+ return encoding
+
+ @property
+ def standalone(self):
+ """Returns the standalone flag as declared by the document. The possible
+ values are True (``standalone='yes'``), False
+ (``standalone='no'`` or flag not provided in the declaration),
+ and None (unknown or no declaration found). Note that a
+ normal truth test on this value will always tell if the
+ ``standalone`` flag was set to ``'yes'`` or not.
+ """
+ return self._doc.isstandalone()
+
+ property URL:
+ "The source URL of the document (or None if unknown)."
+ def __get__(self):
+ if self._doc._c_doc.URL is NULL:
+ return None
+ return _decodeFilename(self._doc._c_doc.URL)
+ def __set__(self, url):
+ url = _encodeFilename(url)
+ c_oldurl = self._doc._c_doc.URL
+ if url is None:
+ self._doc._c_doc.URL = NULL
+ else:
+ self._doc._c_doc.URL = tree.xmlStrdup(_xcstr(url))
+ if c_oldurl is not NULL:
+ tree.xmlFree(c_oldurl)
+
+ @property
+ def doctype(self):
+ """Returns a DOCTYPE declaration string for the document."""
+ root_name, public_id, system_url = self._doc.getdoctype()
+ if system_url:
+ # If '"' in system_url, we must escape it with single
+ # quotes, otherwise escape with double quotes. If url
+ # contains both a single quote and a double quote, XML
+ # standard is being violated.
+ if '"' in system_url:
+ quoted_system_url = f"'{system_url}'"
+ else:
+ quoted_system_url = f'"{system_url}"'
+        if public_id:
+            if system_url:
+                return f'<!DOCTYPE {root_name} PUBLIC "{public_id}" {quoted_system_url}>'
+            else:
+                return f'<!DOCTYPE {root_name} PUBLIC "{public_id}">'
+        elif system_url:
+            return f'<!DOCTYPE {root_name} SYSTEM {quoted_system_url}>'
+        elif self._doc.hasdoctype():
+            return f'<!DOCTYPE {root_name}>'
+        else:
+            return ''
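+
+    # For illustration, the declarations built above look like:
+    #   <!DOCTYPE root PUBLIC "-//TEST//EN" "http://test/a.dtd">
+    #   <!DOCTYPE root SYSTEM 'http://test/a"b.dtd'>  (single-quoted: URL contains '"')
+    #   <!DOCTYPE root>  (internal subset present, but no public/system ID)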
+
+ @property
+ def internalDTD(self):
+ """Returns a DTD validator based on the internal subset of the document."""
+ return _dtdFactory(self._doc._c_doc.intSubset)
+
+ @property
+ def externalDTD(self):
+ """Returns a DTD validator based on the external subset of the document."""
+ return _dtdFactory(self._doc._c_doc.extSubset)
+
+
+@cython.no_gc_clear
+cdef public class _Element [ type LxmlElementType, object LxmlElement ]:
+ """Element class.
+
+ References a document object and a libxml node.
+
+ By pointing to a Document instance, a reference is kept to
+ _Document as long as there is some pointer to a node in it.
+ """
+ cdef _Document _doc
+ cdef xmlNode* _c_node
+ cdef object _tag
+
+ def _init(self):
+ """_init(self)
+
+ Called after object initialisation. Custom subclasses may override
+ this if they recursively call _init() in the superclasses.
+ """
+
+ @cython.linetrace(False)
+ @cython.profile(False)
+ def __dealloc__(self):
+ #print("trying to free node:", self._c_node)
+ #displayNode(self._c_node, 0)
+ if self._c_node is not NULL:
+ _unregisterProxy(self)
+ attemptDeallocation(self._c_node)
+
+ # MANIPULATORS
+
+ def __setitem__(self, x, value):
+ """__setitem__(self, x, value)
+
+ Replaces the given subelement index or slice.
+ """
+ cdef xmlNode* c_node = NULL
+ cdef xmlNode* c_next
+ cdef xmlDoc* c_source_doc
+ cdef _Element element
+ cdef bint left_to_right
+ cdef Py_ssize_t slicelength = 0, step = 0
+ _assertValidNode(self)
+ if value is None:
+ raise ValueError, "cannot assign None"
+ if isinstance(x, slice):
+ # slice assignment
+            _findChildSlice(<slice>x, self._c_node, &c_node, &step, &slicelength)
+ if step > 0:
+ left_to_right = 1
+ else:
+ left_to_right = 0
+ step = -step
+ _replaceSlice(self, c_node, slicelength, step, left_to_right, value)
+ return
+ else:
+ # otherwise: normal item assignment
+ element = value
+ _assertValidNode(element)
+ c_node = _findChild(self._c_node, x)
+ if c_node is NULL:
+ raise IndexError, "list index out of range"
+ c_source_doc = element._c_node.doc
+ c_next = element._c_node.next
+ _removeText(c_node.next)
+ tree.xmlReplaceNode(c_node, element._c_node)
+ _moveTail(c_next, element._c_node)
+ moveNodeToDocument(self._doc, c_source_doc, element._c_node)
+ if not attemptDeallocation(c_node):
+ moveNodeToDocument(self._doc, c_node.doc, c_node)
+
+ def __delitem__(self, x):
+ """__delitem__(self, x)
+
+ Deletes the given subelement or a slice.
+ """
+ cdef xmlNode* c_node = NULL
+ cdef xmlNode* c_next
+ cdef Py_ssize_t step = 0, slicelength = 0
+ _assertValidNode(self)
+ if isinstance(x, slice):
+ # slice deletion
+            if _isFullSlice(<slice>x):
+ c_node = self._c_node.children
+ if c_node is not NULL:
+ if not _isElement(c_node):
+ c_node = _nextElement(c_node)
+ while c_node is not NULL:
+ c_next = _nextElement(c_node)
+ _removeNode(self._doc, c_node)
+ c_node = c_next
+ else:
+                _findChildSlice(<slice>x, self._c_node, &c_node, &step, &slicelength)
+ _deleteSlice(self._doc, c_node, slicelength, step)
+ else:
+ # item deletion
+ c_node = _findChild(self._c_node, x)
+ if c_node is NULL:
+ raise IndexError, f"index out of range: {x}"
+ _removeNode(self._doc, c_node)
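+
+    # A minimal usage sketch (assuming ``from lxml import etree``):
+    #   root = etree.fromstring('<a><b/><c/><d/></a>')
+    #   root[0] = etree.Element('x')   # replaces <b/> in place
+    #   del root[1:]                   # removes <c/> and <d/>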
+
+ def __deepcopy__(self, memo):
+ "__deepcopy__(self, memo)"
+ return self.__copy__()
+
+ def __copy__(self):
+ "__copy__(self)"
+ cdef xmlDoc* c_doc
+ cdef xmlNode* c_node
+ cdef _Document new_doc
+ _assertValidNode(self)
+ c_doc = _copyDocRoot(self._doc._c_doc, self._c_node) # recursive
+ new_doc = _documentFactory(c_doc, self._doc._parser)
+ root = new_doc.getroot()
+ if root is not None:
+ return root
+ # Comment/PI
+ c_node = c_doc.children
+ while c_node is not NULL and c_node.type != self._c_node.type:
+ c_node = c_node.next
+ if c_node is NULL:
+ return None
+ return _elementFactory(new_doc, c_node)
+
+ def set(self, key, value):
+ """set(self, key, value)
+
+ Sets an element attribute.
+ In HTML documents (not XML or XHTML), the value None is allowed and creates
+ an attribute without value (just the attribute name).
+ """
+ _assertValidNode(self)
+ _setAttributeValue(self, key, value)
+
+ def append(self, _Element element not None):
+ """append(self, element)
+
+ Adds a subelement to the end of this element.
+ """
+ _assertValidNode(self)
+ _assertValidNode(element)
+ _appendChild(self, element)
+
+ def addnext(self, _Element element not None):
+ """addnext(self, element)
+
+ Adds the element as a following sibling directly after this
+ element.
+
+ This is normally used to set a processing instruction or comment after
+ the root node of a document. Note that tail text is automatically
+ discarded when adding at the root level.
+ """
+ _assertValidNode(self)
+ _assertValidNode(element)
+ if self._c_node.parent != NULL and not _isElement(self._c_node.parent):
+ if element._c_node.type not in (tree.XML_PI_NODE, tree.XML_COMMENT_NODE):
+ raise TypeError, "Only processing instructions and comments can be siblings of the root element"
+ element.tail = None
+ _appendSibling(self, element)
+
+ def addprevious(self, _Element element not None):
+ """addprevious(self, element)
+
+ Adds the element as a preceding sibling directly before this
+ element.
+
+ This is normally used to set a processing instruction or comment
+ before the root node of a document. Note that tail text is
+ automatically discarded when adding at the root level.
+ """
+ _assertValidNode(self)
+ _assertValidNode(element)
+ if self._c_node.parent != NULL and not _isElement(self._c_node.parent):
+ if element._c_node.type != tree.XML_PI_NODE:
+ if element._c_node.type != tree.XML_COMMENT_NODE:
+ raise TypeError, "Only processing instructions and comments can be siblings of the root element"
+ element.tail = None
+ _prependSibling(self, element)
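+
+    # A minimal usage sketch (assuming ``from lxml import etree``):
+    #   root = etree.fromstring('<root/>')
+    #   root.addprevious(etree.Comment('licence'))
+    #   root.addnext(etree.ProcessingInstruction('target', 'data'))
+    #   # the comment now precedes <root/> in serialisation, the PI follows it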
+
+ def extend(self, elements):
+ """extend(self, elements)
+
+ Extends the current children by the elements in the iterable.
+ """
+ cdef _Element element
+ _assertValidNode(self)
+ for element in elements:
+ if element is None:
+ raise TypeError, "Node must not be None"
+ _assertValidNode(element)
+ _appendChild(self, element)
+
+ def clear(self, bint keep_tail=False):
+ """clear(self, keep_tail=False)
+
+ Resets an element. This function removes all subelements, clears
+ all attributes and sets the text and tail properties to None.
+
+ Pass ``keep_tail=True`` to leave the tail text untouched.
+ """
+ cdef xmlAttr* c_attr
+ cdef xmlAttr* c_attr_next
+ cdef xmlNode* c_node
+ cdef xmlNode* c_node_next
+ _assertValidNode(self)
+ c_node = self._c_node
+ # remove self.text and self.tail
+ _removeText(c_node.children)
+ if not keep_tail:
+ _removeText(c_node.next)
+ # remove all attributes
+ c_attr = c_node.properties
+ if c_attr:
+ c_node.properties = NULL
+ tree.xmlFreePropList(c_attr)
+ # remove all subelements
+ c_node = c_node.children
+ if c_node and not _isElement(c_node):
+ c_node = _nextElement(c_node)
+ while c_node is not NULL:
+ c_node_next = _nextElement(c_node)
+ _removeNode(self._doc, c_node)
+ c_node = c_node_next
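+
+    # For illustration (assuming ``from lxml import etree``):
+    #   root = etree.fromstring('<a><b x="1">text<c/></b>tail</a>')
+    #   root[0].clear(keep_tail=True)  # <b/> loses children, attributes and
+    #                                  # text, but its 'tail' string survives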
+
+ def insert(self, index: int, _Element element not None):
+ """insert(self, index, element)
+
+ Inserts a subelement at the given position in this element
+ """
+ cdef xmlNode* c_node
+ cdef xmlNode* c_next
+ cdef xmlDoc* c_source_doc
+ _assertValidNode(self)
+ _assertValidNode(element)
+ c_node = _findChild(self._c_node, index)
+ if c_node is NULL:
+ _appendChild(self, element)
+ return
+ # prevent cycles
+ if _isAncestorOrSame(element._c_node, self._c_node):
+ raise ValueError("cannot append parent to itself")
+ c_source_doc = element._c_node.doc
+ c_next = element._c_node.next
+ tree.xmlAddPrevSibling(c_node, element._c_node)
+ _moveTail(c_next, element._c_node)
+ moveNodeToDocument(self._doc, c_source_doc, element._c_node)
+
+ def remove(self, _Element element not None):
+ """remove(self, element)
+
+ Removes a matching subelement. Unlike the find methods, this
+ method compares elements based on identity, not on tag value
+ or contents.
+ """
+ cdef xmlNode* c_node
+ cdef xmlNode* c_next
+ _assertValidNode(self)
+ _assertValidNode(element)
+ c_node = element._c_node
+ if c_node.parent is not self._c_node:
+ raise ValueError, "Element is not a child of this node."
+ c_next = element._c_node.next
+ tree.xmlUnlinkNode(c_node)
+ _moveTail(c_next, c_node)
+ # fix namespace declarations
+ moveNodeToDocument(self._doc, c_node.doc, c_node)
+
+ def replace(self, _Element old_element not None,
+ _Element new_element not None):
+ """replace(self, old_element, new_element)
+
+ Replaces a subelement with the element passed as second argument.
+ """
+ cdef xmlNode* c_old_node
+ cdef xmlNode* c_old_next
+ cdef xmlNode* c_new_node
+ cdef xmlNode* c_new_next
+ cdef xmlDoc* c_source_doc
+ _assertValidNode(self)
+ _assertValidNode(old_element)
+ _assertValidNode(new_element)
+ c_old_node = old_element._c_node
+ if c_old_node.parent is not self._c_node:
+ raise ValueError, "Element is not a child of this node."
+ c_new_node = new_element._c_node
+ # prevent cycles
+ if _isAncestorOrSame(c_new_node, self._c_node):
+ raise ValueError("cannot append parent to itself")
+ # replace node
+ c_old_next = c_old_node.next
+ c_new_next = c_new_node.next
+ c_source_doc = c_new_node.doc
+ tree.xmlReplaceNode(c_old_node, c_new_node)
+ _moveTail(c_new_next, c_new_node)
+ _moveTail(c_old_next, c_old_node)
+ moveNodeToDocument(self._doc, c_source_doc, c_new_node)
+ # fix namespace declarations
+ moveNodeToDocument(self._doc, c_old_node.doc, c_old_node)
+
+ # PROPERTIES
+ property tag:
+ """Element tag
+ """
+ def __get__(self):
+ if self._tag is not None:
+ return self._tag
+ _assertValidNode(self)
+ self._tag = _namespacedName(self._c_node)
+ return self._tag
+
+ def __set__(self, value):
+ cdef _BaseParser parser
+ _assertValidNode(self)
+ ns, name = _getNsTag(value)
+ parser = self._doc._parser
+ if parser is not None and parser._for_html:
+ _htmlTagValidOrRaise(name)
+ else:
+ _tagValidOrRaise(name)
+ self._tag = value
+ tree.xmlNodeSetName(self._c_node, _xcstr(name))
+ if ns is None:
+ self._c_node.ns = NULL
+ else:
+ self._doc._setNodeNs(self._c_node, _xcstr(ns))
+
+ @property
+ def attrib(self):
+ """Element attribute dictionary. Where possible, use get(), set(),
+ keys(), values() and items() to access element attributes.
+ """
+ return _Attrib.__new__(_Attrib, self)
+
+ property text:
+ """Text before the first subelement. This is either a string or
+ the value None, if there was no text.
+ """
+ def __get__(self):
+ _assertValidNode(self)
+ return _collectText(self._c_node.children)
+
+ def __set__(self, value):
+ _assertValidNode(self)
+ if isinstance(value, QName):
+ value = _resolveQNameText(self, value).decode('utf8')
+ _setNodeText(self._c_node, value)
+
+ # using 'del el.text' is the wrong thing to do
+ #def __del__(self):
+ # _setNodeText(self._c_node, None)
+
+ property tail:
+ """Text after this element's end tag, but before the next sibling
+ element's start tag. This is either a string or the value None, if
+ there was no text.
+ """
+ def __get__(self):
+ _assertValidNode(self)
+ return _collectText(self._c_node.next)
+
+ def __set__(self, value):
+ _assertValidNode(self)
+ _setTailText(self._c_node, value)
+
+ # using 'del el.tail' is the wrong thing to do
+ #def __del__(self):
+ # _setTailText(self._c_node, None)
+
+ # not in ElementTree, read-only
+ @property
+ def prefix(self):
+ """Namespace prefix or None.
+ """
+ if self._c_node.ns is not NULL:
+ if self._c_node.ns.prefix is not NULL:
+ return funicode(self._c_node.ns.prefix)
+ return None
+
+ # not in ElementTree, read-only
+ property sourceline:
+ """Original line number as found by the parser or None if unknown.
+ """
+ def __get__(self):
+ cdef long line
+ _assertValidNode(self)
+ line = tree.xmlGetLineNo(self._c_node)
+ return line if line > 0 else None
+
+ def __set__(self, line):
+ _assertValidNode(self)
+ if line <= 0:
+ self._c_node.line = 0
+ else:
+ self._c_node.line = line
+
+ # not in ElementTree, read-only
+ @property
+ def nsmap(self):
+ """Namespace prefix->URI mapping known in the context of this
+ Element. This includes all namespace declarations of the
+ parents.
+
+ Note that changing the returned dict has no effect on the Element.
+ """
+ _assertValidNode(self)
+ return _build_nsmap(self._c_node)
+
+ # not in ElementTree, read-only
+ property base:
+ """The base URI of the Element (xml:base or HTML base URL).
+ None if the base URI is unknown.
+
+ Note that the value depends on the URL of the document that
+ holds the Element if there is no xml:base attribute on the
+ Element or its ancestors.
+
+ Setting this property will set an xml:base attribute on the
+ Element, regardless of the document type (XML or HTML).
+ """
+ def __get__(self):
+ _assertValidNode(self)
+ c_base = tree.xmlNodeGetBase(self._doc._c_doc, self._c_node)
+ if c_base is NULL:
+ if self._doc._c_doc.URL is NULL:
+ return None
+ return _decodeFilename(self._doc._c_doc.URL)
+ try:
+ base = _decodeFilename(c_base)
+ finally:
+ tree.xmlFree(c_base)
+ return base
+
+ def __set__(self, url):
+ _assertValidNode(self)
+ if url is None:
+ c_base = NULL
+ else:
+ url = _encodeFilename(url)
+ c_base = _xcstr(url)
+ tree.xmlNodeSetBase(self._c_node, c_base)
+
+ # ACCESSORS
+ def __repr__(self):
+ "__repr__(self)"
+ return "" % (self.tag, id(self))
+
+ def __getitem__(self, x):
+ """Returns the subelement at the given position or the requested
+ slice.
+ """
+ cdef xmlNode* c_node = NULL
+ cdef Py_ssize_t step = 0, slicelength = 0
+ cdef Py_ssize_t c, i
+ cdef _node_to_node_function next_element
+ cdef list result
+ _assertValidNode(self)
+ if isinstance(x, slice):
+ # slicing
+            if _isFullSlice(<slice>x):
+ return _collectChildren(self)
+            _findChildSlice(<slice>x, self._c_node, &c_node, &step, &slicelength)
+ if c_node is NULL:
+ return []
+ if step > 0:
+ next_element = _nextElement
+ else:
+ step = -step
+ next_element = _previousElement
+ result = []
+ c = 0
+ while c_node is not NULL and c < slicelength:
+ result.append(_elementFactory(self._doc, c_node))
+ c += 1
+ for i in range(step):
+ c_node = next_element(c_node)
+ if c_node is NULL:
+ break
+ return result
+ else:
+ # indexing
+ c_node = _findChild(self._c_node, x)
+ if c_node is NULL:
+ raise IndexError, "list index out of range"
+ return _elementFactory(self._doc, c_node)
+
+ def __len__(self):
+ """__len__(self)
+
+ Returns the number of subelements.
+ """
+ _assertValidNode(self)
+ return _countElements(self._c_node.children)
+
+ def __bool__(self):
+ """__bool__(self)"""
+ import warnings
+ warnings.warn(
+ "The behavior of this method will change in future versions. "
+ "Use specific 'len(elem)' or 'elem is not None' test instead.",
+ FutureWarning
+ )
+ # emulate old behaviour
+ _assertValidNode(self)
+ return _hasChild(self._c_node)
+
+ def __contains__(self, element):
+ "__contains__(self, element)"
+ cdef xmlNode* c_node
+ _assertValidNode(self)
+ if not isinstance(element, _Element):
+ return 0
+ c_node = (<_Element>element)._c_node
+ return c_node is not NULL and c_node.parent is self._c_node
+
+ def __iter__(self):
+ "__iter__(self)"
+ return ElementChildIterator(self)
+
+ def __reversed__(self):
+ "__reversed__(self)"
+ return ElementChildIterator(self, reversed=True)
+
+ def index(self, child: _Element, start: int = None, stop: int = None):
+ """index(self, child, start=None, stop=None)
+
+ Find the position of the child within the parent.
+
+ This method is not part of the original ElementTree API.
+ """
+ cdef Py_ssize_t k, l
+ cdef Py_ssize_t c_start, c_stop
+ cdef xmlNode* c_child
+ cdef xmlNode* c_start_node
+ _assertValidNode(self)
+ _assertValidNode(child)
+ c_child = child._c_node
+ if c_child.parent is not self._c_node:
+ raise ValueError, "Element is not a child of this node."
+
+ # handle the unbounded search straight away (normal case)
+ if stop is None and (start is None or start == 0):
+ k = 0
+ c_child = c_child.prev
+ while c_child is not NULL:
+ if _isElement(c_child):
+ k += 1
+ c_child = c_child.prev
+ return k
+
+ # check indices
+ if start is None:
+ c_start = 0
+ else:
+ c_start = start
+ if stop is None:
+ c_stop = 0
+ else:
+ c_stop = stop
+            if c_stop == 0 or \
+                    c_start >= c_stop and (c_stop > 0 or c_start < 0):
+                raise ValueError, "list.index(x): x not in slice"
+
+ # for negative slice indices, check slice before searching index
+ if c_start < 0 or c_stop < 0:
+ # start from right, at most up to leftmost(c_start, c_stop)
+ if c_start < c_stop:
+ k = -c_start
+ else:
+ k = -c_stop
+ c_start_node = self._c_node.last
+ l = 1
+ while c_start_node != c_child and l < k:
+ if _isElement(c_start_node):
+ l += 1
+ c_start_node = c_start_node.prev
+ if c_start_node == c_child:
+ # found! before slice end?
+ if c_stop < 0 and l <= -c_stop:
+ raise ValueError, "list.index(x): x not in slice"
+ elif c_start < 0:
+ raise ValueError, "list.index(x): x not in slice"
+
+ # now determine the index backwards from child
+ c_child = c_child.prev
+ k = 0
+ if c_stop > 0:
+ # we can optimize: stop after c_stop elements if not found
+ while c_child != NULL and k < c_stop:
+ if _isElement(c_child):
+ k += 1
+ c_child = c_child.prev
+ if k < c_stop:
+ return k
+ else:
+ # traverse all
+ while c_child != NULL:
+ if _isElement(c_child):
+ k = k + 1
+ c_child = c_child.prev
+ if c_start > 0:
+ if k >= c_start:
+ return k
+ else:
+ return k
+ if c_start != 0 or c_stop != 0:
+ raise ValueError, "list.index(x): x not in slice"
+ else:
+ raise ValueError, "list.index(x): x not in list"
+
+ def get(self, key, default=None):
+ """get(self, key, default=None)
+
+ Gets an element attribute.
+ """
+ _assertValidNode(self)
+ return _getAttributeValue(self, key, default)
+
+ def keys(self):
+ """keys(self)
+
+ Gets a list of attribute names. The names are returned in an
+ arbitrary order (just like for an ordinary Python dictionary).
+ """
+ _assertValidNode(self)
+ return _collectAttributes(self._c_node, 1)
+
+ def values(self):
+ """values(self)
+
+ Gets element attribute values as a sequence of strings. The
+ attributes are returned in an arbitrary order.
+ """
+ _assertValidNode(self)
+ return _collectAttributes(self._c_node, 2)
+
+ def items(self):
+ """items(self)
+
+ Gets element attributes, as a sequence. The attributes are returned in
+ an arbitrary order.
+ """
+ _assertValidNode(self)
+ return _collectAttributes(self._c_node, 3)
+
+ def getchildren(self):
+ """getchildren(self)
+
+ Returns all direct children. The elements are returned in document
+ order.
+
+ :deprecated: Note that this method has been deprecated as of
+ ElementTree 1.3 and lxml 2.0. New code should use
+ ``list(element)`` or simply iterate over elements.
+ """
+ _assertValidNode(self)
+ return _collectChildren(self)
+
+ def getparent(self):
+ """getparent(self)
+
+ Returns the parent of this element or None for the root element.
+ """
+ cdef xmlNode* c_node
+ #_assertValidNode(self) # not needed
+ c_node = _parentElement(self._c_node)
+ if c_node is NULL:
+ return None
+ return _elementFactory(self._doc, c_node)
+
+ def getnext(self):
+ """getnext(self)
+
+ Returns the following sibling of this element or None.
+ """
+ cdef xmlNode* c_node
+ #_assertValidNode(self) # not needed
+ c_node = _nextElement(self._c_node)
+ if c_node is NULL:
+ return None
+ return _elementFactory(self._doc, c_node)
+
+ def getprevious(self):
+ """getprevious(self)
+
+ Returns the preceding sibling of this element or None.
+ """
+ cdef xmlNode* c_node
+ #_assertValidNode(self) # not needed
+ c_node = _previousElement(self._c_node)
+ if c_node is NULL:
+ return None
+ return _elementFactory(self._doc, c_node)
+
+ def itersiblings(self, tag=None, *tags, preceding=False):
+ """itersiblings(self, tag=None, *tags, preceding=False)
+
+ Iterate over the following or preceding siblings of this element.
+
+ The direction is determined by the 'preceding' keyword which
+ defaults to False, i.e. forward iteration over the following
+ siblings. When True, the iterator yields the preceding
+ siblings in reverse document order, i.e. starting right before
+ the current element and going backwards.
+
+ Can be restricted to find only elements with specific tags,
+ see `iter`.
+ """
+ if preceding:
+ if self._c_node and not self._c_node.prev:
+ return ITER_EMPTY
+ elif self._c_node and not self._c_node.next:
+ return ITER_EMPTY
+ if tag is not None:
+ tags += (tag,)
+ return SiblingsIterator(self, tags, preceding=preceding)
+
+ def iterancestors(self, tag=None, *tags):
+ """iterancestors(self, tag=None, *tags)
+
+ Iterate over the ancestors of this element (from parent to parent).
+
+ Can be restricted to find only elements with specific tags,
+ see `iter`.
+ """
+ if self._c_node and not self._c_node.parent:
+ return ITER_EMPTY
+ if tag is not None:
+ tags += (tag,)
+ return AncestorsIterator(self, tags)
+
+ def iterdescendants(self, tag=None, *tags):
+ """iterdescendants(self, tag=None, *tags)
+
+ Iterate over the descendants of this element in document order.
+
+ As opposed to ``el.iter()``, this iterator does not yield the element
+ itself. The returned elements can be restricted to find only elements
+ with specific tags, see `iter`.
+ """
+ if self._c_node and not self._c_node.children:
+ return ITER_EMPTY
+ if tag is not None:
+ tags += (tag,)
+ return ElementDepthFirstIterator(self, tags, inclusive=False)
+
+ def iterchildren(self, tag=None, *tags, reversed=False):
+ """iterchildren(self, tag=None, *tags, reversed=False)
+
+ Iterate over the children of this element.
+
+ As opposed to using normal iteration on this element, the returned
+ elements can be reversed with the 'reversed' keyword and restricted
+ to find only elements with specific tags, see `iter`.
+ """
+ if self._c_node and not self._c_node.children:
+ return ITER_EMPTY
+ if tag is not None:
+ tags += (tag,)
+ return ElementChildIterator(self, tags, reversed=reversed)
+
+ def getroottree(self):
+ """getroottree(self)
+
+ Return an ElementTree for the root node of the document that
+ contains this element.
+
+        This is the same as following element.getparent() up the tree until it
+        returns None (for the root element) and then building an ElementTree
+        for the last parent that was returned."""
+ _assertValidDoc(self._doc)
+ return _elementTreeFactory(self._doc, None)
+
+ def getiterator(self, tag=None, *tags):
+ """getiterator(self, tag=None, *tags)
+
+ Returns a sequence or iterator of all elements in the subtree in
+ document order (depth first pre-order), starting with this
+ element.
+
+ Can be restricted to find only elements with specific tags,
+ see `iter`.
+
+ :deprecated: Note that this method is deprecated as of
+ ElementTree 1.3 and lxml 2.0. It returns an iterator in
+ lxml, which diverges from the original ElementTree
+ behaviour. If you want an efficient iterator, use the
+ ``element.iter()`` method instead. You should only use this
+ method in new code if you require backwards compatibility
+ with older versions of lxml or ElementTree.
+ """
+ if tag is not None:
+ tags += (tag,)
+ return ElementDepthFirstIterator(self, tags)
+
+ def iter(self, tag=None, *tags):
+ """iter(self, tag=None, *tags)
+
+ Iterate over all elements in the subtree in document order (depth
+ first pre-order), starting with this element.
+
+ Can be restricted to find only elements with specific tags:
+ pass ``"{ns}localname"`` as tag. Either or both of ``ns`` and
+ ``localname`` can be ``*`` for a wildcard; ``ns`` can be empty
+ for no namespace. ``"localname"`` is equivalent to ``"{}localname"``
+ (i.e. no namespace) but ``"*"`` is ``"{*}*"`` (any or no namespace),
+ not ``"{}*"``.
+
+ You can also pass the Element, Comment, ProcessingInstruction and
+ Entity factory functions to look only for the specific element type.
+
+ Passing multiple tags (or a sequence of tags) instead of a single tag
+ will let the iterator return all elements matching any of these tags,
+ in document order.
+ """
+ if tag is not None:
+ tags += (tag,)
+ return ElementDepthFirstIterator(self, tags)
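+
+    # Tag filter examples (assuming ``from lxml import etree``):
+    #   root.iter('{http://ns}row')  # 'row' elements in that namespace only
+    #   root.iter('{*}row')          # 'row' in any namespace, or none
+    #   root.iter('row')             # same as '{}row': no namespace only
+    #   root.iter(etree.Comment)     # only comment nodes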
+
+ def itertext(self, tag=None, *tags, with_tail=True):
+ """itertext(self, tag=None, *tags, with_tail=True)
+
+ Iterates over the text content of a subtree.
+
+ You can pass tag names to restrict text content to specific elements,
+ see `iter`.
+
+ You can set the ``with_tail`` keyword argument to ``False`` to skip
+ over tail text.
+ """
+ if tag is not None:
+ tags += (tag,)
+ return ElementTextIterator(self, tags, with_tail=with_tail)
+
+ def makeelement(self, _tag, attrib=None, nsmap=None, **_extra):
+ """makeelement(self, _tag, attrib=None, nsmap=None, **_extra)
+
+ Creates a new element associated with the same document.
+ """
+ _assertValidDoc(self._doc)
+ return _makeElement(_tag, NULL, self._doc, None, None, None,
+ attrib, nsmap, _extra)
+
+ def find(self, path, namespaces=None):
+ """find(self, path, namespaces=None)
+
+ Finds the first matching subelement, by tag name or path.
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ if isinstance(path, QName):
+            path = (<QName>path).text
+ return _elementpath.find(self, path, namespaces, with_prefixes=not _isHtmlDocument(self))
+
+ def findtext(self, path, default=None, namespaces=None):
+ """findtext(self, path, default=None, namespaces=None)
+
+ Finds text for the first matching subelement, by tag name or path.
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ if isinstance(path, QName):
+            path = (<QName>path).text
+ return _elementpath.findtext(self, path, default, namespaces, with_prefixes=not _isHtmlDocument(self))
+
+ def findall(self, path, namespaces=None):
+ """findall(self, path, namespaces=None)
+
+ Finds all matching subelements, by tag name or path.
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ if isinstance(path, QName):
+            path = (<QName>path).text
+ return _elementpath.findall(self, path, namespaces, with_prefixes=not _isHtmlDocument(self))
+
+ def iterfind(self, path, namespaces=None):
+ """iterfind(self, path, namespaces=None)
+
+ Iterates over all matching subelements, by tag name or path.
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ if isinstance(path, QName):
+            path = (<QName>path).text
+ return _elementpath.iterfind(self, path, namespaces, with_prefixes=not _isHtmlDocument(self))
+
+ def xpath(self, _path, *, namespaces=None, extensions=None,
+ smart_strings=True, **_variables):
+ """xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables)
+
+ Evaluate an xpath expression using the element as context node.
+ """
+ evaluator = XPathElementEvaluator(self, namespaces=namespaces,
+ extensions=extensions,
+ smart_strings=smart_strings)
+ return evaluator(_path, **_variables)
+
+ def cssselect(self, expr, *, translator='xml'):
+ """
+ Run the CSS expression on this element and its children,
+ returning a list of the results.
+
+ Equivalent to lxml.cssselect.CSSSelect(expr)(self) -- note
+ that pre-compiling the expression can provide a substantial
+ speedup.
+ """
+ # Do the import here to make the dependency optional.
+ from lxml.cssselect import CSSSelector
+ return CSSSelector(expr, translator=translator)(self)
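+
+    # As the docstring notes, pre-compiling the selector pays off when it is
+    # applied repeatedly (assuming ``from lxml.cssselect import CSSSelector``):
+    #   select = CSSSelector('div.content > p')
+    #   paragraphs = select(root)  # same result as root.cssselect('div.content > p')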
+
+
+cdef extern from "includes/etree_defs.h":
+ # macro call to 't->tp_new()' for fast instantiation
+ cdef object NEW_ELEMENT "PY_NEW" (object t)
+
+
+@cython.linetrace(False)
+cdef _Element _elementFactory(_Document doc, xmlNode* c_node):
+ cdef _Element result
+ result = getProxy(c_node)
+ if result is not None:
+ return result
+ if c_node is NULL:
+ return None
+
+ element_class = LOOKUP_ELEMENT_CLASS(
+ ELEMENT_CLASS_LOOKUP_STATE, doc, c_node)
+ if hasProxy(c_node):
+ # prevent re-entry race condition - we just called into Python
+ return getProxy(c_node)
+ result = NEW_ELEMENT(element_class)
+ if hasProxy(c_node):
+ # prevent re-entry race condition - we just called into Python
+ result._c_node = NULL
+ return getProxy(c_node)
+
+ _registerProxy(result, doc, c_node)
+ if element_class is not _Element:
+ result._init()
+ return result
+
+
+@cython.internal
+cdef class __ContentOnlyElement(_Element):
+ cdef int _raiseImmutable(self) except -1:
+ raise TypeError, "this element does not have children or attributes"
+
+ def set(self, key, value):
+ "set(self, key, value)"
+ self._raiseImmutable()
+
+ def append(self, value):
+ "append(self, value)"
+ self._raiseImmutable()
+
+ def insert(self, index, value):
+ "insert(self, index, value)"
+ self._raiseImmutable()
+
+ def __setitem__(self, index, value):
+ "__setitem__(self, index, value)"
+ self._raiseImmutable()
+
+ @property
+ def attrib(self):
+ return IMMUTABLE_EMPTY_MAPPING
+
+ property text:
+ def __get__(self):
+ _assertValidNode(self)
+ return funicodeOrEmpty(self._c_node.content)
+
+ def __set__(self, value):
+ cdef tree.xmlDict* c_dict
+ _assertValidNode(self)
+ if value is None:
+ c_text = NULL
+ else:
+ value = _utf8(value)
+ c_text = _xcstr(value)
+ tree.xmlNodeSetContent(self._c_node, c_text)
+
+ # ACCESSORS
+ def __getitem__(self, x):
+ "__getitem__(self, x)"
+ if isinstance(x, slice):
+ return []
+ else:
+ raise IndexError, "list index out of range"
+
+ def __len__(self):
+ "__len__(self)"
+ return 0
+
+ def get(self, key, default=None):
+ "get(self, key, default=None)"
+ return None
+
+ def keys(self):
+ "keys(self)"
+ return []
+
+ def items(self):
+ "items(self)"
+ return []
+
+ def values(self):
+ "values(self)"
+ return []
+
+cdef class _Comment(__ContentOnlyElement):
+ @property
+ def tag(self):
+ return Comment
+
+ def __repr__(self):
+ return "" % self.text
+
+cdef class _ProcessingInstruction(__ContentOnlyElement):
+ @property
+ def tag(self):
+ return ProcessingInstruction
+
+ property target:
+ # not in ElementTree
+ def __get__(self):
+ _assertValidNode(self)
+ return funicode(self._c_node.name)
+
+ def __set__(self, value):
+ _assertValidNode(self)
+ value = _utf8(value)
+ c_text = _xcstr(value)
+ tree.xmlNodeSetName(self._c_node, c_text)
+
+ def __repr__(self):
+ text = self.text
+        if text:
+            return "<?%s %s?>" % (self.target, text)
+        else:
+            return "<?%s?>" % self.target
+
+ def get(self, key, default=None):
+ """get(self, key, default=None)
+
+ Try to parse pseudo-attributes from the text content of the
+ processing instruction, search for one with the given key as
+ name and return its associated value.
+
+ Note that this is only a convenience method for the most
+ common case that all text content is structured in
+ attribute-like name-value pairs with properly quoted values.
+ It is not guaranteed to work for all possible text content.
+ """
+ return self.attrib.get(key, default)
+
+ @property
+ def attrib(self):
+ """Returns a dict containing all pseudo-attributes that can be
+ parsed from the text content of this processing instruction.
+ Note that modifying the dict currently has no effect on the
+ XML node, although this is not guaranteed to stay this way.
+ """
+ return { attr : (value1 or value2)
+ for attr, value1, value2 in _FIND_PI_ATTRIBUTES(' ' + self.text) }
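+
+    # For illustration: given a PI parsed from
+    #   <?xml-stylesheet href="style.css" type="text/css"?>
+    # its .attrib is {'href': 'style.css', 'type': 'text/css'} and
+    # .get('href') returns 'style.css'.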
+
+cdef object _FIND_PI_ATTRIBUTES = re.compile(r'\s+(\w+)\s*=\s*(?:\'([^\']*)\'|"([^"]*)")', re.U).findall
+
+cdef class _Entity(__ContentOnlyElement):
+ @property
+ def tag(self):
+ return Entity
+
+ property name:
+ # not in ElementTree
+ def __get__(self):
+ _assertValidNode(self)
+ return funicode(self._c_node.name)
+
+ def __set__(self, value):
+ _assertValidNode(self)
+ value_utf = _utf8(value)
+ if b'&' in value_utf or b';' in value_utf:
+ raise ValueError, f"Invalid entity name '{value}'"
+ tree.xmlNodeSetName(self._c_node, _xcstr(value_utf))
+
+ @property
+ def text(self):
+ # FIXME: should this be None or '&[VALUE];' or the resolved
+ # entity value ?
+ _assertValidNode(self)
+ return f'&{funicode(self._c_node.name)};'
+
+ def __repr__(self):
+ return "&%s;" % self.name
+
+
+cdef class QName:
+ """QName(text_or_uri_or_element, tag=None)
+
+ QName wrapper for qualified XML names.
+
+ Pass a tag name by itself or a namespace URI and a tag name to
+ create a qualified name. Alternatively, pass an Element to
+ extract its tag name. ``None`` as first argument is ignored in
+ order to allow for generic 2-argument usage.
+
+ The ``text`` property holds the qualified name in
+ ``{namespace}tagname`` notation. The ``namespace`` and
+ ``localname`` properties hold the respective parts of the tag
+ name.
+
+ You can pass QName objects wherever a tag name is expected. Also,
+ setting Element text from a QName will resolve the namespace prefix
+ on assignment and set a qualified text value. This is helpful in XML
+ languages like SOAP or XML-Schema that use prefixed tag names in
+ their text content.
+ """
+ cdef readonly unicode text
+ cdef readonly unicode localname
+ cdef readonly unicode namespace
+ def __init__(self, text_or_uri_or_element, tag=None):
+ if text_or_uri_or_element is None:
+ # Allow None as no namespace.
+ text_or_uri_or_element, tag = tag, None
+ if not _isString(text_or_uri_or_element):
+ if isinstance(text_or_uri_or_element, _Element):
+ text_or_uri_or_element = (<_Element>text_or_uri_or_element).tag
+ if not _isString(text_or_uri_or_element):
+ raise ValueError, f"Invalid input tag of type {type(text_or_uri_or_element)!r}"
+ elif isinstance(text_or_uri_or_element, QName):
+            text_or_uri_or_element = (<QName>text_or_uri_or_element).text
+ elif text_or_uri_or_element is not None:
+ text_or_uri_or_element = unicode(text_or_uri_or_element)
+ else:
+ raise ValueError, f"Invalid input tag of type {type(text_or_uri_or_element)!r}"
+
+ ns_utf, tag_utf = _getNsTag(text_or_uri_or_element)
+ if tag is not None:
+ # either ('ns', 'tag') or ('{ns}oldtag', 'newtag')
+ if ns_utf is None:
+ ns_utf = tag_utf # case 1: namespace ended up as tag name
+ tag_utf = _utf8(tag)
+ _tagValidOrRaise(tag_utf)
+        self.localname = (<bytes>tag_utf).decode('utf8')
+ if ns_utf is None:
+ self.namespace = None
+ self.text = self.localname
+ else:
+            self.namespace = (<bytes>ns_utf).decode('utf8')
+ self.text = "{%s}%s" % (self.namespace, self.localname)
+ def __str__(self):
+ return self.text
+ def __hash__(self):
+ return hash(self.text)
+ def __richcmp__(self, other, int op):
+ try:
+ if type(other) is QName:
+                other = (<QName>other).text
+ elif not isinstance(other, unicode):
+ other = unicode(other)
+ except (ValueError, UnicodeDecodeError):
+ return NotImplemented
+ return python.PyObject_RichCompare(self.text, other, op)
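+
+# A minimal usage sketch (assuming ``from lxml.etree import QName``):
+#   q = QName('http://ns', 'tag')       # or QName('{http://ns}tag')
+#   q.text, q.namespace, q.localname    # '{http://ns}tag', 'http://ns', 'tag'
+#   QName(None, 'tag').text             # 'tag' (no namespace)
+#   q == '{http://ns}tag'               # True (compares the .text form)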
+
+
+cdef public class _ElementTree [ type LxmlElementTreeType,
+ object LxmlElementTree ]:
+ cdef _Document _doc
+ cdef _Element _context_node
+
+ # Note that _doc is only used to store the original document if we do not
+ # have a _context_node. All methods should prefer self._context_node._doc
+ # to honour tree restructuring. _doc can happily be None!
+
+ @cython.final
+ cdef int _assertHasRoot(self) except -1:
+ """We have to take care here: the document may not have a root node!
+ This can happen if ElementTree() is called without any argument and
+ the caller 'forgets' to call parse() afterwards, so this is a bug in
+ the caller program.
+ """
+ assert self._context_node is not None, \
+ "ElementTree not initialized, missing root"
+ return 0
+
+ def parse(self, source, _BaseParser parser=None, *, base_url=None):
+ """parse(self, source, parser=None, base_url=None)
+
+ Updates self with the content of source and returns its root.
+ """
+ cdef _Document doc = None
+ try:
+ doc = _parseDocument(source, parser, base_url)
+ except _TargetParserResult as result_container:
+ # raises a TypeError if we don't get an _Element
+ self._context_node = result_container.result
+ else:
+ self._context_node = doc.getroot()
+ self._doc = None if self._context_node is not None else doc
+ return self._context_node
+
+ def _setroot(self, _Element root not None):
+ """_setroot(self, root)
+
+ Relocate the ElementTree to a new root node.
+ """
+ _assertValidNode(root)
+ if root._c_node.type != tree.XML_ELEMENT_NODE:
+ raise TypeError, "Only elements can be the root of an ElementTree"
+ self._context_node = root
+ self._doc = None
+
+ def getroot(self):
+ """getroot(self)
+
+ Gets the root element for this tree.
+ """
+ return self._context_node
+
+ def __copy__(self):
+ return _elementTreeFactory(self._doc, self._context_node)
+
+ def __deepcopy__(self, memo):
+ cdef _Element root
+ cdef _Document doc
+ cdef xmlDoc* c_doc
+ if self._context_node is not None:
+ root = self._context_node.__copy__()
+ assert root is not None
+ _assertValidNode(root)
+ _copyNonElementSiblings(self._context_node._c_node, root._c_node)
+ return _elementTreeFactory(None, root)
+ elif self._doc is not None:
+ _assertValidDoc(self._doc)
+ c_doc = tree.xmlCopyDoc(self._doc._c_doc, 1)
+ if c_doc is NULL:
+ raise MemoryError()
+ doc = _documentFactory(c_doc, self._doc._parser)
+ return _elementTreeFactory(doc, None)
+ else:
+ # so what ...
+ return self
+
+ # not in ElementTree
+ @property
+ def docinfo(self) -> DocInfo:
+ """Information about the document provided by parser and DTD."""
+ self._assertHasRoot()
+ return DocInfo(self._context_node._doc)
+
+ # not in ElementTree, read-only
+ @property
+ def parser(self):
+ """The parser that was used to parse the document in this ElementTree.
+ """
+ if self._context_node is not None and \
+ self._context_node._doc is not None:
+ return self._context_node._doc._parser
+ if self._doc is not None:
+ return self._doc._parser
+ return None
+
+ def write(self, file, *, encoding=None, method="xml",
+ bint pretty_print=False, xml_declaration=None, bint with_tail=True,
+ standalone=None, doctype=None, compression=0,
+ bint exclusive=False, inclusive_ns_prefixes=None,
+ bint with_comments=True, bint strip_text=False,
+ docstring=None):
+ """write(self, file, encoding=None, method="xml",
+ pretty_print=False, xml_declaration=None, with_tail=True,
+ standalone=None, doctype=None, compression=0,
+ exclusive=False, inclusive_ns_prefixes=None,
+ with_comments=True, strip_text=False)
+
+ Write the tree to a filename, file or file-like object.
+
+ Defaults to ASCII encoding and writing a declaration as needed.
+
+ The keyword argument 'method' selects the output method:
+ 'xml', 'html', 'text', 'c14n' or 'c14n2'. Default is 'xml'.
+
+ With ``method="c14n"`` (C14N version 1), the options ``exclusive``,
+ ``with_comments`` and ``inclusive_ns_prefixes`` request exclusive
+ C14N, include comments, and list the inclusive prefixes respectively.
+
+ With ``method="c14n2"`` (C14N version 2), the ``with_comments`` and
+ ``strip_text`` options control the output of comments and text space
+ according to C14N 2.0.
+
+ Passing a boolean value to the ``standalone`` option will
+ output an XML declaration with the corresponding
+ ``standalone`` flag.
+
+ The ``doctype`` option allows passing in a plain string that will
+ be serialised before the XML tree. Note that passing in non
+ well-formed content here will make the XML output non well-formed.
+ Also, an existing doctype in the document tree will not be removed
+ when serialising an ElementTree instance.
+
+ The ``compression`` option enables GZip compression level 1-9.
+
+        The ``inclusive_ns_prefixes`` should be a list of namespace strings
+        (e.g. ['xs', 'xsi']) that will be promoted to the top-level element
+        during exclusive C14N serialisation. This parameter is ignored if
+        exclusive=False.
+
+ If exclusive=True and no list is provided, a namespace will only be
+ rendered if it is used by the immediate parent or one of its attributes
+ and its prefix and values have not already been rendered by an ancestor
+ of the namespace node's parent element.
+ """
+ cdef bint write_declaration
+ cdef int is_standalone
+
+ self._assertHasRoot()
+ _assertValidNode(self._context_node)
+ if compression is None or compression < 0:
+ compression = 0
+
+ # C14N serialisation
+ if method in ('c14n', 'c14n2'):
+ if encoding is not None:
+ raise ValueError("Cannot specify encoding with C14N")
+ if xml_declaration:
+ raise ValueError("Cannot enable XML declaration in C14N")
+
+ if method == 'c14n':
+ _tofilelikeC14N(file, self._context_node, exclusive, with_comments,
+ compression, inclusive_ns_prefixes)
+ else: # c14n2
+ with _open_utf8_file(file, compression=compression) as f:
+ target = C14NWriterTarget(
+ f.write, with_comments=with_comments, strip_text=strip_text)
+ _tree_to_target(self, target)
+ return
+
+ if not with_comments:
+ raise ValueError("Can only discard comments in C14N serialisation")
+ # suppress decl. in default case (purely for ElementTree compatibility)
+ if xml_declaration is not None:
+ write_declaration = xml_declaration
+ if encoding is None:
+ encoding = 'ASCII'
+ else:
+ encoding = encoding.upper()
+ elif encoding is None:
+ encoding = 'ASCII'
+ write_declaration = 0
+ else:
+ encoding = encoding.upper()
+ write_declaration = encoding not in (
+ 'US-ASCII', 'ASCII', 'UTF8', 'UTF-8')
+ if standalone is None:
+ is_standalone = -1
+ elif standalone:
+ write_declaration = 1
+ is_standalone = 1
+ else:
+ write_declaration = 1
+ is_standalone = 0
+
+ if docstring is not None and doctype is None:
+ import warnings
+ warnings.warn(
+ "The 'docstring' option is deprecated. Use 'doctype' instead.",
+ DeprecationWarning)
+ doctype = docstring
+
+ _tofilelike(file, self._context_node, encoding, doctype, method,
+ write_declaration, 1, pretty_print, with_tail,
+ is_standalone, compression)
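+
+    # A minimal usage sketch (``tree`` being any lxml ElementTree):
+    #   tree.write('out.xml')                    # ASCII, declaration as needed
+    #   tree.write('out.xml', encoding='UTF-8',
+    #              xml_declaration=True, pretty_print=True)
+    #   tree.write('out.xml.gz', compression=9)  # gzip-compressed output
+    #   tree.write('out.c14n', method='c14n', exclusive=True)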
+
+ def getpath(self, _Element element not None):
+ """getpath(self, element)
+
+ Returns a structural, absolute XPath expression to find the element.
+
+ For namespaced elements, the expression uses prefixes from the
+ document, which therefore need to be provided in order to make any
+ use of the expression in XPath.
+
+ Also see the method getelementpath(self, element), which returns a
+ self-contained ElementPath expression.
+ """
+ cdef _Document doc
+ cdef _Element root
+ cdef xmlDoc* c_doc
+ _assertValidNode(element)
+ if self._context_node is not None:
+ root = self._context_node
+ doc = root._doc
+ elif self._doc is not None:
+ doc = self._doc
+ root = doc.getroot()
+ else:
+ raise ValueError, "Element is not in this tree."
+ _assertValidDoc(doc)
+ _assertValidNode(root)
+ if element._doc is not doc:
+ raise ValueError, "Element is not in this tree."
+
+ c_doc = _fakeRootDoc(doc._c_doc, root._c_node)
+ c_path = tree.xmlGetNodePath(element._c_node)
+ _destroyFakeDoc(doc._c_doc, c_doc)
+ if c_path is NULL:
+ raise MemoryError()
+ path = funicode(c_path)
+ tree.xmlFree(c_path)
+ return path
+
+ def getelementpath(self, _Element element not None):
+ """getelementpath(self, element)
+
+ Returns a structural, absolute ElementPath expression to find the
+ element. This path can be used in the .find() method to look up
+ the element, provided that the elements along the path and their
+ list of immediate children were not modified in between.
+
+ ElementPath has the advantage over an XPath expression (as returned
+ by the .getpath() method) that it does not require additional prefix
+ declarations. It is always self-contained.
+ """
+ cdef _Element root
+ cdef Py_ssize_t count
+ _assertValidNode(element)
+ if element._c_node.type != tree.XML_ELEMENT_NODE:
+ raise ValueError, "input is not an Element"
+ if self._context_node is not None:
+ root = self._context_node
+ elif self._doc is not None:
+ root = self._doc.getroot()
+ else:
+ raise ValueError, "Element is not in this tree"
+ _assertValidNode(root)
+ if element._doc is not root._doc:
+ raise ValueError, "Element is not in this tree"
+
+ path = []
+ c_element = element._c_node
+ while c_element is not root._c_node:
+ c_name = c_element.name
+ c_href = _getNs(c_element)
+ tag = _namespacedNameFromNsName(c_href, c_name)
+ if c_href is NULL:
+                c_href = <const_xmlChar*>b''  # no namespace (NULL is wildcard)
+ # use tag[N] if there are preceding siblings with the same tag
+ count = 0
+ c_node = c_element.prev
+ while c_node is not NULL:
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ if _tagMatches(c_node, c_href, c_name):
+ count += 1
+ c_node = c_node.prev
+ if count:
+ tag = f'{tag}[{count+1}]'
+ else:
+ # use tag[1] if there are following siblings with the same tag
+ c_node = c_element.next
+ while c_node is not NULL:
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ if _tagMatches(c_node, c_href, c_name):
+ tag += '[1]'
+ break
+ c_node = c_node.next
+
+ path.append(tag)
+ c_element = c_element.parent
+ if c_element is NULL or c_element.type != tree.XML_ELEMENT_NODE:
+ raise ValueError, "Element is not in this tree."
+ if not path:
+ return '.'
+ path.reverse()
+ return '/'.join(path)
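+
+    # For illustration, with root <a><b/><b/><c><d/></c></a> and d = root[2][0]:
+    #   tree.getpath(d)               # -> '/a/c/d' (XPath form, see getpath)
+    #   tree.getelementpath(d)        # -> 'c/d'
+    #   tree.getelementpath(root[1])  # -> 'b[2]' (positional predicate)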
+
+ def getiterator(self, tag=None, *tags):
+ """getiterator(self, *tags, tag=None)
+
+ Returns a sequence or iterator of all elements in document order
+ (depth first pre-order), starting with the root element.
+
+ Can be restricted to find only elements with specific tags,
+ see `_Element.iter`.
+
+ :deprecated: Note that this method is deprecated as of
+ ElementTree 1.3 and lxml 2.0. It returns an iterator in
+ lxml, which diverges from the original ElementTree
+ behaviour. If you want an efficient iterator, use the
+ ``tree.iter()`` method instead. You should only use this
+ method in new code if you require backwards compatibility
+ with older versions of lxml or ElementTree.
+ """
+ root = self.getroot()
+ if root is None:
+ return ITER_EMPTY
+ if tag is not None:
+ tags += (tag,)
+ return root.getiterator(*tags)
+
+ def iter(self, tag=None, *tags):
+ """iter(self, tag=None, *tags)
+
+ Creates an iterator for the root element. The iterator loops over
+ all elements in this tree, in document order. Note that siblings
+ of the root element (comments or processing instructions) are not
+ returned by the iterator.
+
+ Can be restricted to find only elements with specific tags,
+ see `_Element.iter`.
+ """
+ root = self.getroot()
+ if root is None:
+ return ITER_EMPTY
+ if tag is not None:
+ tags += (tag,)
+ return root.iter(*tags)
+
+ def find(self, path, namespaces=None):
+ """find(self, path, namespaces=None)
+
+        Finds the first toplevel element with the given tag. Same as
+ ``tree.getroot().find(path)``.
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ self._assertHasRoot()
+ root = self.getroot()
+ if _isString(path):
+ if path[:1] == "/":
+ path = "." + path
+ from warnings import warn
+ warn(
+ "This search incorrectly ignores the root element, and will be "
+ "fixed in a future version. If you rely on the current "
+ f"behaviour, change it to {path!r}",
+ FutureWarning, stacklevel=1
+ )
+ return root.find(path, namespaces)
+
+ def findtext(self, path, default=None, namespaces=None):
+ """findtext(self, path, default=None, namespaces=None)
+
+ Finds the text for the first element matching the ElementPath
+ expression. Same as getroot().findtext(path)
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ self._assertHasRoot()
+ root = self.getroot()
+ if _isString(path):
+ if path[:1] == "/":
+ path = "." + path
+ from warnings import warn
+ warn(
+ "This search incorrectly ignores the root element, and will be "
+ "fixed in a future version. If you rely on the current "
+ f"behaviour, change it to {path!r}",
+ FutureWarning, stacklevel=1
+ )
+ return root.findtext(path, default, namespaces)
+
+ def findall(self, path, namespaces=None):
+ """findall(self, path, namespaces=None)
+
+ Finds all elements matching the ElementPath expression. Same as
+ getroot().findall(path).
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ self._assertHasRoot()
+ root = self.getroot()
+ if _isString(path):
+ if path[:1] == "/":
+ path = "." + path
+ from warnings import warn
+ warn(
+ "This search incorrectly ignores the root element, and will be "
+ "fixed in a future version. If you rely on the current "
+ f"behaviour, change it to {path!r}",
+ FutureWarning, stacklevel=1
+ )
+ return root.findall(path, namespaces)
+
+ def iterfind(self, path, namespaces=None):
+ """iterfind(self, path, namespaces=None)
+
+ Iterates over all elements matching the ElementPath expression.
+ Same as getroot().iterfind(path).
+
+ The optional ``namespaces`` argument accepts a
+ prefix-to-namespace mapping that allows the usage of XPath
+ prefixes in the path expression.
+ """
+ self._assertHasRoot()
+ root = self.getroot()
+ if _isString(path):
+ if path[:1] == "/":
+ path = "." + path
+ from warnings import warn
+ warn(
+ "This search incorrectly ignores the root element, and will be "
+ "fixed in a future version. If you rely on the current "
+ f"behaviour, change it to {path!r}",
+ FutureWarning, stacklevel=1
+ )
+ return root.iterfind(path, namespaces)
+
+ def xpath(self, _path, *, namespaces=None, extensions=None,
+ smart_strings=True, **_variables):
+ """xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables)
+
+        Evaluate an XPath expression in the context of the document.
+
+ ``namespaces`` is an optional dictionary with prefix to namespace URI
+ mappings, used by XPath. ``extensions`` defines additional extension
+ functions.
+
+ Returns a list (nodeset), or bool, float or string.
+
+ In case of a list result, return Element for element nodes,
+ string for text and attribute values.
+
+ Note: if you are going to apply multiple XPath expressions
+ against the same document, it is more efficient to use
+ XPathEvaluator directly.
+ """
+ self._assertHasRoot()
+ evaluator = XPathDocumentEvaluator(self, namespaces=namespaces,
+ extensions=extensions,
+ smart_strings=smart_strings)
+ return evaluator(_path, **_variables)
+
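+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# How the ``xpath()`` method above and the faster ``XPathEvaluator`` reuse
+# pattern from its docstring look in user code (public lxml.etree API assumed):
+#
+#     from lxml import etree
+#     tree = etree.fromstring('<root><a>1</a><a>2</a></root>').getroottree()
+#     assert tree.xpath('count(//a)') == 2.0
+#     evaluate = etree.XPathEvaluator(tree)  # compile the context once, reuse it
+#     assert evaluate('//a/text()') == ['1', '2']
+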
+ def xslt(self, _xslt, extensions=None, access_control=None, **_kw):
+ """xslt(self, _xslt, extensions=None, access_control=None, **_kw)
+
+ Transform this document using another document.
+
+ ``_xslt`` is a tree that should contain an XSLT stylesheet.
+ Keyword parameters are passed on as XSLT transformation parameters.
+
+ Returns the transformed tree.
+
+ Note: if you are going to apply the same XSLT stylesheet against
+ multiple documents, it is more efficient to use the XSLT
+ class directly.
+ """
+ self._assertHasRoot()
+ style = XSLT(_xslt, extensions=extensions,
+ access_control=access_control)
+ return style(self, **_kw)
+
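+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# The ``xslt()`` method above versus the reusable ``XSLT`` class that its
+# docstring recommends for applying one stylesheet to many documents:
+#
+#     from lxml import etree
+#     stylesheet = etree.XML(
+#         '<xsl:stylesheet version="1.0"'
+#         '     xmlns:xsl="http://www.w3.org/1999/XSL/Transform">'
+#         '  <xsl:template match="/">'
+#         '    <out><xsl:value-of select="//a"/></out>'
+#         '  </xsl:template>'
+#         '</xsl:stylesheet>')
+#     doc = etree.fromstring('<root><a>hello</a></root>').getroottree()
+#     transform = etree.XSLT(stylesheet)  # compile once, apply many times
+#     assert '<out>hello</out>' in str(transform(doc))
+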
+ def relaxng(self, relaxng):
+ """relaxng(self, relaxng)
+
+ Validate this document using another document.
+
+ The relaxng argument is a tree that should contain a Relax NG schema.
+
+ Returns True or False, depending on whether validation
+ succeeded.
+
+ Note: if you are going to apply the same Relax NG schema against
+ multiple documents, it is more efficient to use the RelaxNG
+ class directly.
+ """
+ self._assertHasRoot()
+ schema = RelaxNG(relaxng)
+ return schema.validate(self)
+
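+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# Validating through the reusable ``RelaxNG`` class, as the note in the
+# ``relaxng()`` docstring above recommends:
+#
+#     from lxml import etree
+#     schema = etree.RelaxNG(etree.XML(
+#         '<element name="root" xmlns="http://relaxng.org/ns/structure/1.0">'
+#         '<empty/></element>'))
+#     assert schema.validate(etree.XML('<root/>'))
+#     assert not schema.validate(etree.XML('<other/>'))
+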
+ def xmlschema(self, xmlschema):
+ """xmlschema(self, xmlschema)
+
+ Validate this document using another document.
+
+ The xmlschema argument is a tree that should contain an XML Schema.
+
+ Returns True or False, depending on whether validation
+ succeeded.
+
+ Note: If you are going to apply the same XML Schema against
+ multiple documents, it is more efficient to use the XMLSchema
+ class directly.
+ """
+ self._assertHasRoot()
+ schema = XMLSchema(xmlschema)
+ return schema.validate(self)
+
+ def xinclude(self):
+ """xinclude(self)
+
+ Process the XInclude nodes in this document and include the
+ referenced XML fragments.
+
+ There is support for loading files through the file system, HTTP and
+ FTP.
+
+ Note that XInclude does not support custom resolvers in Python space
+ due to restrictions of libxml2 <= 2.6.29.
+ """
+ self._assertHasRoot()
+ XInclude()(self._context_node)
+
+ def write_c14n(self, file, *, bint exclusive=False, bint with_comments=True,
+ compression=0, inclusive_ns_prefixes=None):
+ """write_c14n(self, file, exclusive=False, with_comments=True,
+ compression=0, inclusive_ns_prefixes=None)
+
+ C14N write of document. Always writes UTF-8.
+
+ The ``compression`` option enables GZip compression level 1-9.
+
+ The ``inclusive_ns_prefixes`` should be a list of namespace prefix
+ strings (e.g. ['xs', 'xsi']) that will be promoted to the top-level
+ element during exclusive C14N serialisation. This parameter is
+ ignored in non-exclusive mode (``exclusive=False``).
+
+ If exclusive=True and no list is provided, a namespace will only be
+ rendered if it is used by the immediate parent or one of its attributes
+ and its prefix and values have not already been rendered by an ancestor
+ of the namespace node's parent element.
+
+ NOTE: This method is deprecated as of lxml 4.4 and will be removed in a
+ future release. Use ``.write(f, method="c14n")`` instead.
+ """
+ self._assertHasRoot()
+ _assertValidNode(self._context_node)
+ if compression is None or compression < 0:
+ compression = 0
+
+ _tofilelikeC14N(file, self._context_node, exclusive, with_comments,
+ compression, inclusive_ns_prefixes)
+
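+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# The replacement recommended by the deprecation note in ``write_c14n()``:
+#
+#     from io import BytesIO
+#     from lxml import etree
+#     tree = etree.fromstring('<root xmlns="urn:x"><a/></root>').getroottree()
+#     out = BytesIO()
+#     tree.write(out, method='c14n', exclusive=True)  # canonical XML output
+#     canonical = out.getvalue()
+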
+cdef _ElementTree _elementTreeFactory(_Document doc, _Element context_node):
+ return _newElementTree(doc, context_node, _ElementTree)
+
+cdef _ElementTree _newElementTree(_Document doc, _Element context_node,
+ object baseclass):
+ cdef _ElementTree result
+ result = baseclass()
+ if context_node is None and doc is not None:
+ context_node = doc.getroot()
+ if context_node is None:
+ _assertValidDoc(doc)
+ result._doc = doc
+ else:
+ _assertValidNode(context_node)
+ result._context_node = context_node
+ return result
+
+
+@cython.final
+@cython.freelist(16)
+cdef class _Attrib:
+ """A dict-like proxy for the ``Element.attrib`` property.
+ """
+ cdef _Element _element
+ def __cinit__(self, _Element element not None):
+ _assertValidNode(element)
+ self._element = element
+
+ # MANIPULATORS
+ def __setitem__(self, key, value):
+ _assertValidNode(self._element)
+ _setAttributeValue(self._element, key, value)
+
+ def __delitem__(self, key):
+ _assertValidNode(self._element)
+ _delAttribute(self._element, key)
+
+ def update(self, sequence_or_dict):
+ _assertValidNode(self._element)
+ if isinstance(sequence_or_dict, (dict, _Attrib)):
+ sequence_or_dict = sequence_or_dict.items()
+ for key, value in sequence_or_dict:
+ _setAttributeValue(self._element, key, value)
+
+ def pop(self, key, *default):
+ if len(default) > 1:
+ raise TypeError, f"pop expected at most 2 arguments, got {len(default)+1}"
+ _assertValidNode(self._element)
+ result = _getAttributeValue(self._element, key, None)
+ if result is None:
+ if not default:
+ raise KeyError, key
+ result = default[0]
+ else:
+ _delAttribute(self._element, key)
+ return result
+
+ def clear(self):
+ _assertValidNode(self._element)
+ c_attrs = self._element._c_node.properties
+ if c_attrs:
+ self._element._c_node.properties = NULL
+ tree.xmlFreePropList(c_attrs)
+
+ # ACCESSORS
+ def __repr__(self):
+ _assertValidNode(self._element)
+ return repr(dict( _collectAttributes(self._element._c_node, 3) ))
+
+ def __copy__(self):
+ _assertValidNode(self._element)
+ return dict(_collectAttributes(self._element._c_node, 3))
+
+ def __deepcopy__(self, memo):
+ _assertValidNode(self._element)
+ return dict(_collectAttributes(self._element._c_node, 3))
+
+ def __getitem__(self, key):
+ _assertValidNode(self._element)
+ result = _getAttributeValue(self._element, key, None)
+ if result is None:
+ raise KeyError, key
+ return result
+
+ def __bool__(self):
+ _assertValidNode(self._element)
+ cdef xmlAttr* c_attr = self._element._c_node.properties
+ while c_attr is not NULL:
+ if c_attr.type == tree.XML_ATTRIBUTE_NODE:
+ return 1
+ c_attr = c_attr.next
+ return 0
+
+ def __len__(self):
+ _assertValidNode(self._element)
+ cdef xmlAttr* c_attr = self._element._c_node.properties
+ cdef Py_ssize_t c = 0
+ while c_attr is not NULL:
+ if c_attr.type == tree.XML_ATTRIBUTE_NODE:
+ c += 1
+ c_attr = c_attr.next
+ return c
+
+ def get(self, key, default=None):
+ _assertValidNode(self._element)
+ return _getAttributeValue(self._element, key, default)
+
+ def keys(self):
+ _assertValidNode(self._element)
+ return _collectAttributes(self._element._c_node, 1)
+
+ def __iter__(self):
+ _assertValidNode(self._element)
+ return iter(_collectAttributes(self._element._c_node, 1))
+
+ def iterkeys(self):
+ _assertValidNode(self._element)
+ return iter(_collectAttributes(self._element._c_node, 1))
+
+ def values(self):
+ _assertValidNode(self._element)
+ return _collectAttributes(self._element._c_node, 2)
+
+ def itervalues(self):
+ _assertValidNode(self._element)
+ return iter(_collectAttributes(self._element._c_node, 2))
+
+ def items(self):
+ _assertValidNode(self._element)
+ return _collectAttributes(self._element._c_node, 3)
+
+ def iteritems(self):
+ _assertValidNode(self._element)
+ return iter(_collectAttributes(self._element._c_node, 3))
+
+ def has_key(self, key):
+ _assertValidNode(self._element)
+ return key in self
+
+ def __contains__(self, key):
+ _assertValidNode(self._element)
+ cdef xmlNode* c_node
+ ns, tag = _getNsTag(key)
+ c_node = self._element._c_node
+ c_href = NULL if ns is None else _xcstr(ns)
+ return 1 if tree.xmlHasNsProp(c_node, _xcstr(tag), c_href) else 0
+
+ def __richcmp__(self, other, int op):
+ try:
+ one = dict(self.items())
+ if not isinstance(other, dict):
+ other = dict(other)
+ except (TypeError, ValueError):
+ return NotImplemented
+ return python.PyObject_RichCompare(one, other, op)
+
+MutableMapping.register(_Attrib)
+
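+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# The dict-like ``_Attrib`` proxy defined above is reached via ``Element.attrib``:
+#
+#     from lxml import etree
+#     el = etree.Element('tag', id='1')
+#     el.attrib['class'] = 'x'        # __setitem__
+#     el.attrib.update({'id': '2'})   # accepts dicts and (key, value) sequences
+#     assert el.attrib.pop('class') == 'x'
+#     assert dict(el.attrib) == {'id': '2'}
+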
+
+@cython.final
+@cython.internal
+cdef class _AttribIterator:
+ """Attribute iterator - for internal use only!
+ """
+ # XML attributes must not be removed while running!
+ cdef _Element _node
+ cdef xmlAttr* _c_attr
+ cdef int _keysvalues # 1 - keys, 2 - values, 3 - items (key, value)
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ cdef xmlAttr* c_attr
+ if self._node is None:
+ raise StopIteration
+ c_attr = self._c_attr
+ while c_attr is not NULL and c_attr.type != tree.XML_ATTRIBUTE_NODE:
+ c_attr = c_attr.next
+ if c_attr is NULL:
+ self._node = None
+ raise StopIteration
+
+ self._c_attr = c_attr.next
+ if self._keysvalues == 1:
+ return _namespacedName(c_attr)
+ elif self._keysvalues == 2:
+ return _attributeValue(self._node._c_node, c_attr)
+ else:
+ return (_namespacedName(c_attr),
+ _attributeValue(self._node._c_node, c_attr))
+
+cdef object _attributeIteratorFactory(_Element element, int keysvalues):
+ cdef _AttribIterator attribs
+ if element._c_node.properties is NULL:
+ return ITER_EMPTY
+ attribs = _AttribIterator()
+ attribs._node = element
+ attribs._c_attr = element._c_node.properties
+ attribs._keysvalues = keysvalues
+ return attribs
+
+
+cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher,
+ type LxmlElementTagMatcherType ]:
+ """
+ Dead but public. :)
+ """
+ cdef object _pystrings
+ cdef int _node_type
+ cdef char* _href
+ cdef char* _name
+ cdef _initTagMatch(self, tag):
+ self._href = NULL
+ self._name = NULL
+ if tag is None:
+ self._node_type = 0
+ elif tag is Comment:
+ self._node_type = tree.XML_COMMENT_NODE
+ elif tag is ProcessingInstruction:
+ self._node_type = tree.XML_PI_NODE
+ elif tag is Entity:
+ self._node_type = tree.XML_ENTITY_REF_NODE
+ elif tag is Element:
+ self._node_type = tree.XML_ELEMENT_NODE
+ else:
+ self._node_type = tree.XML_ELEMENT_NODE
+ self._pystrings = _getNsTag(tag)
+ if self._pystrings[0] is not None:
+ self._href = _cstr(self._pystrings[0])
+ self._name = _cstr(self._pystrings[1])
+ if self._name[0] == c'*' and self._name[1] == c'\0':
+ self._name = NULL
+
+cdef public class _ElementIterator(_ElementTagMatcher) [
+ object LxmlElementIterator, type LxmlElementIteratorType ]:
+ """
+ Dead but public. :)
+ """
+ # we keep Python references here to control GC
+ cdef _Element _node
+ cdef _node_to_node_function _next_element
+ def __iter__(self):
+ return self
+
+ cdef void _storeNext(self, _Element node):
+ cdef xmlNode* c_node
+ c_node = self._next_element(node._c_node)
+ while c_node is not NULL and \
+ self._node_type != 0 and \
+ (self._node_type != c_node.type or
+ not _tagMatches(c_node, self._href, self._name)):
+ c_node = self._next_element(c_node)
+ if c_node is NULL:
+ self._node = None
+ else:
+ # Python ref:
+ self._node = _elementFactory(node._doc, c_node)
+
+ def __next__(self):
+ cdef xmlNode* c_node
+ cdef _Element current_node
+ if self._node is None:
+ raise StopIteration
+ # Python ref:
+ current_node = self._node
+ self._storeNext(current_node)
+ return current_node
+
+@cython.final
+@cython.internal
+cdef class _MultiTagMatcher:
+ """
+ Match an xmlNode against a list of tags.
+ """
+ cdef list _py_tags
+ cdef qname* _cached_tags
+ cdef size_t _tag_count
+ cdef size_t _cached_size
+ cdef _Document _cached_doc
+ cdef int _node_types
+
+ def __cinit__(self, tags):
+ self._py_tags = []
+ self.initTagMatch(tags)
+
+ def __dealloc__(self):
+ self._clear()
+
+ cdef bint rejectsAll(self) noexcept:
+ return not self._tag_count and not self._node_types
+
+ cdef bint rejectsAllAttributes(self) noexcept:
+ return not self._tag_count
+
+ cdef bint matchesType(self, int node_type) noexcept:
+ if node_type == tree.XML_ELEMENT_NODE and self._tag_count:
+ return True
+ return self._node_types & (1 << node_type)
+
+ cdef void _clear(self) noexcept:
+ cdef size_t i, count
+ count = self._tag_count
+ self._tag_count = 0
+ if self._cached_tags:
+ for i in range(count):
+ cpython.ref.Py_XDECREF(self._cached_tags[i].href)
+ python.lxml_free(self._cached_tags)
+ self._cached_tags = NULL
+
+ cdef initTagMatch(self, tags):
+ self._cached_doc = None
+ del self._py_tags[:]
+ self._clear()
+ if tags is None or tags == ():
+ # no selection in tags argument => match anything
+ self._node_types = (
+ 1 << tree.XML_COMMENT_NODE |
+ 1 << tree.XML_PI_NODE |
+ 1 << tree.XML_ENTITY_REF_NODE |
+ 1 << tree.XML_ELEMENT_NODE)
+ else:
+ self._node_types = 0
+ self._storeTags(tags, set())
+
+ cdef _storeTags(self, tag, set seen):
+ if tag is Comment:
+ self._node_types |= 1 << tree.XML_COMMENT_NODE
+ elif tag is ProcessingInstruction:
+ self._node_types |= 1 << tree.XML_PI_NODE
+ elif tag is Entity:
+ self._node_types |= 1 << tree.XML_ENTITY_REF_NODE
+ elif tag is Element:
+ self._node_types |= 1 << tree.XML_ELEMENT_NODE
+ elif python._isString(tag):
+ if tag in seen:
+ return
+ seen.add(tag)
+ if tag in ('*', '{*}*'):
+ self._node_types |= 1 << tree.XML_ELEMENT_NODE
+ else:
+ href, name = _getNsTag(tag)
+ if name == b'*':
+ name = None
+ if href is None:
+ href = b'' # no namespace
+ elif href == b'*':
+ href = None # wildcard: any namespace, including none
+ self._py_tags.append((href, name))
+ elif isinstance(tag, QName):
+ self._storeTags(tag.text, seen)
+ else:
+ # support a sequence of tags
+ for item in tag:
+ self._storeTags(item, seen)
+
+ cdef inline int cacheTags(self, _Document doc, bint force_into_dict=False) except -1:
+ """
+ Look up the tag names in the doc dict to enable string pointer comparisons.
+ """
+ cdef size_t dict_size = tree.xmlDictSize(doc._c_doc.dict)
+ if doc is self._cached_doc and dict_size == self._cached_size:
+ # doc and dict didn't change => names already cached
+ return 0
+ self._tag_count = 0
+ if not self._py_tags:
+ self._cached_doc = doc
+ self._cached_size = dict_size
+ return 0
+ if not self._cached_tags:
+ self._cached_tags = <qname*>python.lxml_malloc(len(self._py_tags), sizeof(qname))
+ if not self._cached_tags:
+ self._cached_doc = None
+ raise MemoryError()
+ self._tag_count = _mapTagsToQnameMatchArray(
+ doc._c_doc, self._py_tags, self._cached_tags, force_into_dict)
+ self._cached_doc = doc
+ self._cached_size = dict_size
+ return 0
+
+ cdef inline bint matches(self, xmlNode* c_node) noexcept:
+ cdef qname* c_qname
+ if self._node_types & (1 << c_node.type):
+ return True
+ elif c_node.type == tree.XML_ELEMENT_NODE:
+ for c_qname in self._cached_tags[:self._tag_count]:
+ if _tagMatchesExactly(c_node, c_qname):
+ return True
+ return False
+
+ cdef inline bint matchesNsTag(self, const_xmlChar* c_href,
+ const_xmlChar* c_name) noexcept:
+ cdef qname* c_qname
+ if self._node_types & (1 << tree.XML_ELEMENT_NODE):
+ return True
+ for c_qname in self._cached_tags[:self._tag_count]:
+ if _nsTagMatchesExactly(c_href, c_name, c_qname):
+ return True
+ return False
+
+ cdef inline bint matchesAttribute(self, xmlAttr* c_attr) noexcept:
+ """Attribute matches differ from Element matches in that they do
+ not care about node types.
+ """
+ cdef qname* c_qname
+ for c_qname in self._cached_tags[:self._tag_count]:
+ if _tagMatchesExactly(c_attr, c_qname):
+ return True
+ return False
+
+cdef class _ElementMatchIterator:
+ cdef _Element _node
+ cdef _node_to_node_function _next_element
+ cdef _MultiTagMatcher _matcher
+
+ @cython.final
+ cdef _initTagMatcher(self, tags):
+ self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tags)
+
+ def __iter__(self):
+ return self
+
+ @cython.final
+ cdef int _storeNext(self, _Element node) except -1:
+ self._matcher.cacheTags(node._doc)
+ c_node = self._next_element(node._c_node)
+ while c_node is not NULL and not self._matcher.matches(c_node):
+ c_node = self._next_element(c_node)
+ # store Python ref to next node to make sure it's kept alive
+ self._node = _elementFactory(node._doc, c_node) if c_node is not NULL else None
+ return 0
+
+ def __next__(self):
+ cdef _Element current_node = self._node
+ if current_node is None:
+ raise StopIteration
+ self._storeNext(current_node)
+ return current_node
+
+cdef class ElementChildIterator(_ElementMatchIterator):
+ """ElementChildIterator(self, node, tag=None, reversed=False)
+ Iterates over the children of an element.
+ """
+ def __cinit__(self, _Element node not None, tag=None, *, bint reversed=False):
+ cdef xmlNode* c_node
+ _assertValidNode(node)
+ self._initTagMatcher(tag)
+ if reversed:
+ c_node = _findChildBackwards(node._c_node, 0)
+ self._next_element = _previousElement
+ else:
+ c_node = _findChildForwards(node._c_node, 0)
+ self._next_element = _nextElement
+ self._matcher.cacheTags(node._doc)
+ while c_node is not NULL and not self._matcher.matches(c_node):
+ c_node = self._next_element(c_node)
+ # store Python ref to next node to make sure it's kept alive
+ self._node = _elementFactory(node._doc, c_node) if c_node is not NULL else None
+
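+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# Direct use of ``ElementChildIterator``; it also backs ``Element.iterchildren()``,
+# which is the usual way to reach it:
+#
+#     from lxml import etree
+#     root = etree.fromstring('<r><a/><b/></r>')
+#     assert [el.tag for el in etree.ElementChildIterator(root, tag='a')] == ['a']
+#     assert next(etree.ElementChildIterator(root, reversed=True)).tag == 'b'
+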
+cdef class SiblingsIterator(_ElementMatchIterator):
+ """SiblingsIterator(self, node, tag=None, preceding=False)
+ Iterates over the siblings of an element.
+
+ You can pass the boolean keyword ``preceding`` to specify the direction.
+ """
+ def __cinit__(self, _Element node not None, tag=None, *, bint preceding=False):
+ _assertValidNode(node)
+ self._initTagMatcher(tag)
+ if preceding:
+ self._next_element = _previousElement
+ else:
+ self._next_element = _nextElement
+ self._storeNext(node)
+
+cdef class AncestorsIterator(_ElementMatchIterator):
+ """AncestorsIterator(self, node, tag=None)
+ Iterates over the ancestors of an element (from parent to parent).
+ """
+ def __cinit__(self, _Element node not None, tag=None):
+ _assertValidNode(node)
+ self._initTagMatcher(tag)
+ self._next_element = _parentElement
+ self._storeNext(node)
+
+cdef class ElementDepthFirstIterator:
+ """ElementDepthFirstIterator(self, node, tag=None, inclusive=True)
+ Iterates over an element and its sub-elements in document order (depth
+ first pre-order).
+
+ Note that this also includes comments, entities and processing
+ instructions. To filter them out, check if the ``tag`` property
+ of the returned element is a string (i.e. not None and not a
+ factory function), or pass the ``Element`` factory for the ``tag``
+ argument to receive only Elements.
+
+ If the optional ``tag`` argument is not None, the iterator returns only
+ the elements that match the respective name and namespace.
+
+ The optional boolean argument 'inclusive' defaults to True and can be set
+ to False to exclude the start element itself.
+
+ Note that the behaviour of this iterator is completely undefined if the
+ tree it traverses is modified during iteration.
+ """
+ # we keep Python references here to control GC
+ # keep the next Element after the one we return, and the (s)top node
+ cdef _Element _next_node
+ cdef _Element _top_node
+ cdef _MultiTagMatcher _matcher
+ def __cinit__(self, _Element node not None, tag=None, *, bint inclusive=True):
+ _assertValidNode(node)
+ self._top_node = node
+ self._next_node = node
+ self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag)
+ self._matcher.cacheTags(node._doc)
+ if not inclusive or not self._matcher.matches(node._c_node):
+ # find start node (this cannot raise StopIteration, self._next_node != None)
+ next(self)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ cdef xmlNode* c_node
+ cdef _Element current_node = self._next_node
+ if current_node is None:
+ raise StopIteration
+ c_node = current_node._c_node
+ self._matcher.cacheTags(current_node._doc)
+ if not self._matcher._tag_count:
+ # no tag name was found in the dict => not in document either
+ # try to match by node type
+ c_node = self._nextNodeAnyTag(c_node)
+ else:
+ c_node = self._nextNodeMatchTag(c_node)
+ if c_node is NULL:
+ self._next_node = None
+ else:
+ self._next_node = _elementFactory(current_node._doc, c_node)
+ return current_node
+
+ @cython.final
+ cdef xmlNode* _nextNodeAnyTag(self, xmlNode* c_node) noexcept:
+ cdef int node_types = self._matcher._node_types
+ if not node_types:
+ return NULL
+ tree.BEGIN_FOR_EACH_ELEMENT_FROM(self._top_node._c_node, c_node, 0)
+ if node_types & (1 << c_node.type):
+ return c_node
+ tree.END_FOR_EACH_ELEMENT_FROM(c_node)
+ return NULL
+
+ @cython.final
+ cdef xmlNode* _nextNodeMatchTag(self, xmlNode* c_node) noexcept:
+ tree.BEGIN_FOR_EACH_ELEMENT_FROM(self._top_node._c_node, c_node, 0)
+ if self._matcher.matches(c_node):
+ return c_node
+ tree.END_FOR_EACH_ELEMENT_FROM(c_node)
+ return NULL
+
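+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# Depth-first iteration with and without tag filtering:
+#
+#     from lxml import etree
+#     root = etree.fromstring('<a><b><a/></b></a>')
+#     assert [el.tag for el in
+#             etree.ElementDepthFirstIterator(root, tag='a')] == ['a', 'a']
+#     assert [el.tag for el in
+#             etree.ElementDepthFirstIterator(root, inclusive=False)] == ['b', 'a']
+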
+
+cdef class ElementTextIterator:
+ """ElementTextIterator(self, element, tag=None, with_tail=True)
+ Iterates over the text content of a subtree.
+
+ You can pass the ``tag`` keyword argument to restrict text content to a
+ specific tag name.
+
+ You can set the ``with_tail`` keyword argument to ``False`` to skip over
+ tail text (e.g. if you know that it's only whitespace from pretty-printing).
+ """
+ cdef object _events
+ cdef _Element _start_element
+ def __cinit__(self, _Element element not None, tag=None, *, bint with_tail=True):
+ _assertValidNode(element)
+ if with_tail:
+ events = ("start", "comment", "pi", "end")
+ else:
+ events = ("start",)
+ self._start_element = element
+ self._events = iterwalk(element, events=events, tag=tag)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ cdef _Element element
+ result = None
+ while result is None:
+ event, element = next(self._events) # raises StopIteration
+ if event == "start":
+ result = element.text
+ elif element is not self._start_element:
+ result = element.tail
+ return result
+
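+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# Collecting the text content of a subtree with ``ElementTextIterator``:
+#
+#     from lxml import etree
+#     root = etree.fromstring('<r>one<x>two</x>three</r>')
+#     assert ''.join(etree.ElementTextIterator(root)) == 'onetwothree'
+#     assert ''.join(etree.ElementTextIterator(root, with_tail=False)) == 'onetwo'
+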
+
+cdef xmlNode* _createElement(xmlDoc* c_doc, object name_utf) except NULL:
+ cdef xmlNode* c_node
+ c_node = tree.xmlNewDocNode(c_doc, NULL, _xcstr(name_utf), NULL)
+ return c_node
+
+cdef xmlNode* _createComment(xmlDoc* c_doc, const_xmlChar* text) noexcept:
+ cdef xmlNode* c_node
+ c_node = tree.xmlNewDocComment(c_doc, text)
+ return c_node
+
+cdef xmlNode* _createPI(xmlDoc* c_doc, const_xmlChar* target, const_xmlChar* text) noexcept:
+ cdef xmlNode* c_node
+ c_node = tree.xmlNewDocPI(c_doc, target, text)
+ return c_node
+
+cdef xmlNode* _createEntity(xmlDoc* c_doc, const_xmlChar* name) noexcept:
+ cdef xmlNode* c_node
+ c_node = tree.xmlNewReference(c_doc, name)
+ return c_node
+
+# module-level API for ElementTree
+
+def Element(_tag, attrib=None, nsmap=None, **_extra):
+ """Element(_tag, attrib=None, nsmap=None, **_extra)
+
+ Element factory. This function returns an object implementing the
+ Element interface.
+
+ Also look at the `_Element.makeelement()` and
+ `_BaseParser.makeelement()` methods, which provide a faster way to
+ create an Element within a specific document or parser context.
+ """
+ return _makeElement(_tag, NULL, None, None, None, None,
+ attrib, nsmap, _extra)
+
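+# [editor's sketch -- illustrative only, not part of the upstream lxml sources;
+# the namespace URI is hypothetical]
+# The ``Element()`` factory with attributes and a namespace mapping:
+#
+#     from lxml import etree
+#     el = etree.Element('{urn:example}root', nsmap={'ex': 'urn:example'}, id='1')
+#     assert etree.tostring(el) == b'<ex:root xmlns:ex="urn:example" id="1"/>'
+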
+
+def Comment(text=None):
+ """Comment(text=None)
+
+ Comment element factory. This factory function creates a special element that will
+ be serialized as an XML comment.
+ """
+ cdef _Document doc
+ cdef xmlNode* c_node
+ cdef xmlDoc* c_doc
+
+ if text is None:
+ text = b''
+ else:
+ text = _utf8(text)
+ if b'--' in text or text.endswith(b'-'):
+ raise ValueError("Comment may not contain '--' or end with '-'")
+
+ c_doc = _newXMLDoc()
+ doc = _documentFactory(c_doc, None)
+ c_node = _createComment(c_doc, _xcstr(text))
+ tree.xmlAddChild(<xmlNode*>c_doc, c_node)  # xmlAddChild() expects an xmlNode*
+ return _elementFactory(doc, c_node)
+
+
+def ProcessingInstruction(target, text=None):
+ """ProcessingInstruction(target, text=None)
+
+ ProcessingInstruction element factory. This factory function creates a
+ special element that will be serialized as an XML processing instruction.
+ """
+ cdef _Document doc
+ cdef xmlNode* c_node
+ cdef xmlDoc* c_doc
+
+ target = _utf8(target)
+ _tagValidOrRaise(target)
+ if target.lower() == b'xml':
+ raise ValueError, f"Invalid PI name '{target}'"
+
+ if text is None:
+ text = b''
+ else:
+ text = _utf8(text)
+ if b'?>' in text:
+ raise ValueError, "PI text must not contain '?>'"
+
+ c_doc = _newXMLDoc()
+ doc = _documentFactory(c_doc, None)
+ c_node = _createPI(c_doc, _xcstr(target), _xcstr(text))
+ tree.xmlAddChild(<xmlNode*>c_doc, c_node)  # xmlAddChild() expects an xmlNode*
+ return _elementFactory(doc, c_node)
+
+PI = ProcessingInstruction
+
+
+cdef class CDATA:
+ """CDATA(data)
+
+ CDATA factory. This factory creates an opaque data object that
+ can be used to set Element text. The usual way to use it is::
+
+ >>> el = Element('content')
+ >>> el.text = CDATA('a string')
+
+ >>> print(el.text)
+ a string
+ >>> print(tostring(el, encoding="unicode"))
+
+ """
+ cdef bytes _utf8_data
+ def __cinit__(self, data):
+ _utf8_data = _utf8(data)
+ if b']]>' in _utf8_data:
+ raise ValueError, "']]>' not allowed inside CDATA"
+ self._utf8_data = _utf8_data
+
+
+def Entity(name):
+ """Entity(name)
+
+ Entity factory. This factory function creates a special element
+ that will be serialized as an XML entity reference or character
+ reference. Note, however, that entities will not be automatically
+ declared in the document. A document that uses entity references
+ requires a DTD to define the entities.
+ """
+ cdef _Document doc
+ cdef xmlNode* c_node
+ cdef xmlDoc* c_doc
+ name_utf = _utf8(name)
+ c_name = _xcstr(name_utf)
+ if c_name[0] == c'#':
+ if not _characterReferenceIsValid(c_name + 1):
+ raise ValueError, f"Invalid character reference: '{name}'"
+ elif not _xmlNameIsValid(c_name):
+ raise ValueError, f"Invalid entity reference: '{name}'"
+ c_doc = _newXMLDoc()
+ doc = _documentFactory(c_doc, None)
+ c_node = _createEntity(c_doc, c_name)
+ tree.xmlAddChild(<xmlNode*>c_doc, c_node)  # xmlAddChild() expects an xmlNode*
+ return _elementFactory(doc, c_node)
+
+
+def SubElement(_Element _parent not None, _tag,
+ attrib=None, nsmap=None, **_extra):
+ """SubElement(_parent, _tag, attrib=None, nsmap=None, **_extra)
+
+ Subelement factory. This function creates an element instance, and
+ appends it to an existing element.
+ """
+ return _makeSubElement(_parent, _tag, None, None, attrib, nsmap, _extra)
+
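+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# Building a small tree with ``SubElement()``:
+#
+#     from lxml import etree
+#     root = etree.Element('root')
+#     child = etree.SubElement(root, 'child', id='1')
+#     child.text = 'data'
+#     assert etree.tostring(root) == b'<root><child id="1">data</child></root>'
+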
+
+def ElementTree(_Element element=None, *, file=None, _BaseParser parser=None):
+ """ElementTree(element=None, file=None, parser=None)
+
+ ElementTree wrapper class.
+ """
+ cdef xmlNode* c_next
+ cdef xmlNode* c_node
+ cdef xmlNode* c_node_copy
+ cdef xmlDoc* c_doc
+ cdef _ElementTree etree
+ cdef _Document doc
+
+ if element is not None:
+ doc = element._doc
+ elif file is not None:
+ try:
+ doc = _parseDocument(file, parser, None)
+ except _TargetParserResult as result_container:
+ return result_container.result
+ else:
+ c_doc = _newXMLDoc()
+ doc = _documentFactory(c_doc, parser)
+
+ return _elementTreeFactory(doc, element)
+
+
+def HTML(text, _BaseParser parser=None, *, base_url=None):
+ """HTML(text, parser=None, base_url=None)
+
+ Parses an HTML document from a string constant. Returns the root
+ node (or the result returned by a parser target). This function
+ can be used to embed "HTML literals" in Python code.
+
+ To override the parser with a different ``HTMLParser`` you can pass it to
+ the ``parser`` keyword argument.
+
+ The ``base_url`` keyword argument allows setting the original base URL
+ of the document, which is needed to resolve relative paths when looking
+ up external entities (DTD, XInclude, ...).
+ """
+ cdef _Document doc
+ if parser is None:
+ parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser()
+ if not isinstance(parser, HTMLParser):
+ parser = __DEFAULT_HTML_PARSER
+ try:
+ doc = _parseMemoryDocument(text, base_url, parser)
+ return doc.getroot()
+ except _TargetParserResult as result_container:
+ return result_container.result
+
+
+def XML(text, _BaseParser parser=None, *, base_url=None):
+ """XML(text, parser=None, base_url=None)
+
+ Parses an XML document or fragment from a string constant.
+ Returns the root node (or the result returned by a parser target).
+ This function can be used to embed "XML literals" in Python code,
+ like in
+
+ >>> root = XML(" ")
+ >>> print(root.tag)
+ root
+
+ To override the parser with a different ``XMLParser`` you can pass it to
+ the ``parser`` keyword argument.
+
+ The ``base_url`` keyword argument allows setting the original base URL
+ of the document, which is needed to resolve relative paths when looking
+ up external entities (DTD, XInclude, ...).
+ """
+ cdef _Document doc
+ if parser is None:
+ parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser()
+ if not isinstance(parser, XMLParser):
+ parser = __DEFAULT_XML_PARSER
+ try:
+ doc = _parseMemoryDocument(text, base_url, parser)
+ return doc.getroot()
+ except _TargetParserResult as result_container:
+ return result_container.result
+
+
+def fromstring(text, _BaseParser parser=None, *, base_url=None):
+ """fromstring(text, parser=None, base_url=None)
+
+ Parses an XML document or fragment from a string. Returns the
+ root node (or the result returned by a parser target).
+
+ To override the default parser with a different parser you can pass it to
+ the ``parser`` keyword argument.
+
+ The ``base_url`` keyword argument allows setting the original base URL
+ of the document, which is needed to resolve relative paths when looking
+ up external entities (DTD, XInclude, ...).
+ """
+ cdef _Document doc
+ try:
+ doc = _parseMemoryDocument(text, base_url, parser)
+ return doc.getroot()
+ except _TargetParserResult as result_container:
+ return result_container.result
+
+
+def fromstringlist(strings, _BaseParser parser=None):
+ """fromstringlist(strings, parser=None)
+
+ Parses an XML document from a sequence of strings. Returns the
+ root node (or the result returned by a parser target).
+
+ To override the default parser with a different parser you can pass it to
+ the ``parser`` keyword argument.
+ """
+ cdef _Document doc
+ if isinstance(strings, (bytes, unicode)):
+ raise ValueError("passing a single string into fromstringlist() is not"
+ " efficient, use fromstring() instead")
+ if parser is None:
+ parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser()
+ feed = parser.feed
+ for data in strings:
+ feed(data)
+ return parser.close()
+
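+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# ``fromstringlist()`` feeds each chunk to the parser, as the implementation
+# above shows; the chunks may split the document at arbitrary points:
+#
+#     from lxml import etree
+#     root = etree.fromstringlist(['<root>', '<a/>', '</root>'])
+#     assert root.tag == 'root' and root[0].tag == 'a'
+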
+
+def iselement(element):
+ """iselement(element)
+
+ Checks if an object appears to be a valid element object.
+ """
+ return isinstance(element, _Element) and (<_Element>element)._c_node is not NULL
+
+
+def indent(tree, space="  ", *, Py_ssize_t level=0):
+ """indent(tree, space="  ", level=0)
+
+ Indent an XML document by inserting newlines and indentation space
+ after elements.
+
+ *tree* is the ElementTree or Element to modify. The (root) element
+ itself will not be changed, but the tail text of all elements in its
+ subtree will be adapted.
+
+ *space* is the whitespace to insert for each indentation level, two
+ space characters by default.
+
+ *level* is the initial indentation level. Setting this to a higher
+ value than 0 can be used for indenting subtrees that are more deeply
+ nested inside of a document.
+ """
+ root = _rootNodeOrRaise(tree)
+ if level < 0:
+ raise ValueError(f"Initial indentation level must be >= 0, got {level}")
+ if _hasChild(root._c_node):
+ space = _utf8(space)
+ indent = b"\n" + level * space
+ _indent_children(root._c_node, 1, space, [indent, indent + space])
+
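+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# ``indent()`` rewrites text and tail whitespace in place:
+#
+#     from lxml import etree
+#     root = etree.fromstring('<root><a><b/></a></root>')
+#     etree.indent(root, space='  ')
+#     assert etree.tostring(root) == b'<root>\n  <a>\n    <b/>\n  </a>\n</root>'
+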
+
+cdef int _indent_children(xmlNode* c_node, Py_ssize_t level, bytes one_space, list indentations) except -1:
+ # Reuse indentation strings for speed.
+ if len(indentations) <= level:
+ indentations.append(indentations[-1] + one_space)
+
+ # Start a new indentation level for the first child.
+ child_indentation = indentations[level]
+ if not _hasNonWhitespaceText(c_node):
+ _setNodeText(c_node, child_indentation)
+
+ # Recursively indent all children.
+ cdef xmlNode* c_child = _findChildForwards(c_node, 0)
+ while c_child is not NULL:
+ if _hasChild(c_child):
+ _indent_children(c_child, level+1, one_space, indentations)
+ c_next_child = _nextElement(c_child)
+ if not _hasNonWhitespaceTail(c_child):
+ if c_next_child is NULL:
+ # Dedent after the last child.
+ child_indentation = indentations[level-1]
+ _setTailText(c_child, child_indentation)
+ c_child = c_next_child
+ return 0
+
+
+def dump(_Element elem not None, *, bint pretty_print=True, bint with_tail=True):
+ """dump(elem, pretty_print=True, with_tail=True)
+
+ Writes an element tree or element structure to sys.stdout. This function
+ should be used for debugging only.
+ """
+ xml = tostring(elem, pretty_print=pretty_print, with_tail=with_tail, encoding='unicode')
+ if not pretty_print:
+ xml += '\n'
+ sys.stdout.write(xml)
+
+
+def tostring(element_or_tree, *, encoding=None, method="xml",
+ xml_declaration=None, bint pretty_print=False, bint with_tail=True,
+ standalone=None, doctype=None,
+ # method='c14n'
+ bint exclusive=False, inclusive_ns_prefixes=None,
+ # method='c14n2'
+ bint with_comments=True, bint strip_text=False,
+ ):
+ """tostring(element_or_tree, encoding=None, method="xml",
+ xml_declaration=None, pretty_print=False, with_tail=True,
+ standalone=None, doctype=None,
+ exclusive=False, inclusive_ns_prefixes=None,
+ with_comments=True, strip_text=False,
+ )
+
+ Serialize an element to an encoded string representation of its XML
+ tree.
+
+ Defaults to ASCII encoding without XML declaration. This
+ behaviour can be configured with the keyword arguments 'encoding'
+ (string) and 'xml_declaration' (bool). Note that changing the
+ encoding to a non UTF-8 compatible encoding will enable a
+ declaration by default.
+
+ You can also serialise to a Unicode string without declaration by
+ passing the name ``'unicode'`` as encoding (or the ``str`` function
+ in Py3 or ``unicode`` in Py2). This changes the return value from
+ a byte string to an unencoded unicode string.
+
+ The keyword argument 'pretty_print' (bool) enables formatted XML.
+
+ The keyword argument 'method' selects the output method: 'xml',
+ 'html', plain 'text' (text content without tags), 'c14n' or 'c14n2'.
+ Default is 'xml'.
+
+ With ``method="c14n"`` (C14N version 1), the options ``exclusive``,
+ ``with_comments`` and ``inclusive_ns_prefixes`` request exclusive
+ C14N, include comments, and list the inclusive prefixes respectively.
+
+ With ``method="c14n2"`` (C14N version 2), the ``with_comments`` and
+ ``strip_text`` options control the output of comments and text space
+ according to C14N 2.0.
+
+ Passing a boolean value to the ``standalone`` option will output
+ an XML declaration with the corresponding ``standalone`` flag.
+
+ The ``doctype`` option allows passing in a plain string that will
+ be serialised before the XML tree. Note that passing in
+ non-well-formed content here will make the XML output non-well-formed.
+ Also, an existing doctype in the document tree will not be removed
+ when serialising an ElementTree instance.
+
+ You can prevent the tail text of the element from being serialised
+ by passing the boolean ``with_tail`` option. This has no impact
+ on the tail text of children, which will always be serialised.
+ """
+ cdef bint write_declaration
+ cdef int is_standalone
+ # C14N serialisation
+ if method in ('c14n', 'c14n2'):
+ if encoding is not None:
+ raise ValueError("Cannot specify encoding with C14N")
+ if xml_declaration:
+ raise ValueError("Cannot enable XML declaration in C14N")
+ if method == 'c14n':
+ return _tostringC14N(element_or_tree, exclusive, with_comments, inclusive_ns_prefixes)
+ else:
+ out = BytesIO()
+ target = C14NWriterTarget(
+ utf8_writer(out).write,
+ with_comments=with_comments, strip_text=strip_text)
+ _tree_to_target(element_or_tree, target)
+ return out.getvalue()
+ if not with_comments:
+ raise ValueError("Can only discard comments in C14N serialisation")
+ if strip_text:
+ raise ValueError("Can only strip text in C14N 2.0 serialisation")
+ if encoding is unicode or (encoding is not None and encoding.lower() == 'unicode'):
+ if xml_declaration:
+ raise ValueError, \
+ "Serialisation to unicode must not request an XML declaration"
+ write_declaration = 0
+ encoding = unicode
+ elif xml_declaration is None:
+ # by default, write an XML declaration only for non-standard encodings
+ write_declaration = encoding is not None and encoding.upper() not in \
+ ('ASCII', 'UTF-8', 'UTF8', 'US-ASCII')
+ else:
+ write_declaration = xml_declaration
+ if encoding is None:
+ encoding = 'ASCII'
+ if standalone is None:
+ is_standalone = -1
+ elif standalone:
+ write_declaration = 1
+ is_standalone = 1
+ else:
+ write_declaration = 1
+ is_standalone = 0
+
+ if isinstance(element_or_tree, _Element):
+ return _tostring(<_Element>element_or_tree, encoding, doctype, method,
+ write_declaration, 0, pretty_print, with_tail,
+ is_standalone)
+ elif isinstance(element_or_tree, _ElementTree):
+ return _tostring((<_ElementTree>element_or_tree)._context_node,
+ encoding, doctype, method, write_declaration, 1,
+ pretty_print, with_tail, is_standalone)
+ else:
+ raise TypeError, f"Type '{python._fqtypename(element_or_tree).decode('utf8')}' cannot be serialized."
+
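+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# Common ``tostring()`` option combinations from the docstring above:
+#
+#     from lxml import etree
+#     el = etree.fromstring('<root><a/></root>')
+#     assert etree.tostring(el) == b'<root><a/></root>'    # ASCII bytes, no decl.
+#     assert etree.tostring(el, encoding='unicode') == '<root><a/></root>'
+#     latin = etree.tostring(el, encoding='iso-8859-1')    # non-UTF-8 compatible
+#     assert latin.startswith(b'<?xml')                    # -> declaration added
+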
+
+
+def tostringlist(element_or_tree, *args, **kwargs):
+ """tostringlist(element_or_tree, *args, **kwargs)
+
+ Serialize an element to an encoded string representation of its XML
+ tree, stored in a list of partial strings.
+
+ This is purely for ElementTree 1.3 compatibility. The result is a
+ single string wrapped in a list.
+ """
+ return [tostring(element_or_tree, *args, **kwargs)]
+
+
+def tounicode(element_or_tree, *, method="xml", bint pretty_print=False,
+ bint with_tail=True, doctype=None):
+ """tounicode(element_or_tree, method="xml", pretty_print=False,
+ with_tail=True, doctype=None)
+
+ Serialize an element to the Python unicode representation of its XML
+ tree.
+
+ :deprecated: use ``tostring(el, encoding='unicode')`` instead.
+
+ Note that the result does not carry an XML encoding declaration and is
+ therefore not necessarily suited for serialization to byte streams without
+ further treatment.
+
+ The boolean keyword argument 'pretty_print' enables formatted XML.
+
+ The keyword argument 'method' selects the output method: 'xml',
+ 'html' or plain 'text'.
+
+ You can prevent the tail text of the element from being serialised
+ by passing the boolean ``with_tail`` option. This has no impact
+ on the tail text of children, which will always be serialised.
+ """
+ if isinstance(element_or_tree, _Element):
+ return _tostring(<_Element>element_or_tree, unicode, doctype, method,
+ 0, 0, pretty_print, with_tail, -1)
+ elif isinstance(element_or_tree, _ElementTree):
+ return _tostring((<_ElementTree>element_or_tree)._context_node,
+ unicode, doctype, method, 0, 1, pretty_print,
+ with_tail, -1)
+ else:
+ raise TypeError, f"Type '{type(element_or_tree)}' cannot be serialized."
+
+
+def parse(source, _BaseParser parser=None, *, base_url=None):
+ """parse(source, parser=None, base_url=None)
+
+ Return an ElementTree object loaded with source elements. If no parser
+ is provided as second argument, the default parser is used.
+
+ The ``source`` can be any of the following:
+
+ - a file name/path
+ - a file object
+ - a file-like object
+ - a URL using the HTTP or FTP protocol
+
+ To parse from a string, use the ``fromstring()`` function instead.
+
+ Note that it is generally faster to parse from a file path or URL
+ than from an open file object or file-like object. Transparent
+ decompression from gzip compressed sources is supported (unless
+ explicitly disabled in libxml2).
+
+ The ``base_url`` keyword allows setting a URL for the document
+ when parsing from a file-like object. This is needed when looking
+ up external entities (DTD, XInclude, ...) with relative paths.
+ """
+ cdef _Document doc
+ try:
+ doc = _parseDocument(source, parser, base_url)
+ return _elementTreeFactory(doc, None)
+ except _TargetParserResult as result_container:
+ return result_container.result
+
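+# [editor's sketch -- illustrative only, not part of the upstream lxml sources]
+# ``parse()`` with a file-like object; ``base_url`` would only be needed for
+# resolving relative external references:
+#
+#     from io import BytesIO
+#     from lxml import etree
+#     tree = etree.parse(BytesIO(b'<root><a/></root>'))
+#     assert tree.getroot().tag == 'root'
+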
+
+def adopt_external_document(capsule, _BaseParser parser=None):
+ """adopt_external_document(capsule, parser=None)
+
+ Unpack a libxml2 document pointer from a PyCapsule and wrap it in an
+ lxml ElementTree object.
+
+ This allows external libraries to build XML/HTML trees using libxml2
+ and then pass them efficiently into lxml for further processing.
+
+ If a ``parser`` is provided, it will be used for configuring the
+ lxml document. No parsing will be done.
+
+ The capsule must have the name ``"libxml2:xmlDoc"`` and its pointer
+ value must reference a correct libxml2 document of type ``xmlDoc*``.
+ The creator of the capsule must take care to correctly clean up the
+ document using an appropriate capsule destructor. By default, the
+ libxml2 document will be copied to let lxml safely own the memory
+ of the internal tree that it uses.
+
+ If the capsule context is non-NULL, it must point to a C string that
+ can be compared using ``strcmp()``. If the context string equals
+ ``"destructor:xmlFreeDoc"``, the libxml2 document will not be copied
+ but the capsule invalidated instead by clearing its destructor and
+ name. That way, lxml takes ownership of the libxml2 document in memory
+ without creating a copy first, and the capsule destructor will not be
+ called. The document will then eventually be cleaned up by lxml using
+ the libxml2 API function ``xmlFreeDoc()`` once it is no longer used.
+
+ If no copy is made, the tree must not be modified outside of lxml
+ after the ownership has been transferred.
+ """
+ cdef xmlDoc* c_doc
+ cdef bint is_owned = False
+ c_doc = python.lxml_unpack_xmldoc_capsule(capsule, &is_owned)
+ doc = _adoptForeignDoc(c_doc, parser, is_owned)
+ return _elementTreeFactory(doc, None)
+
+
+################################################################################
+# Include submodules
+
+include "readonlytree.pxi" # Read-only implementation of Element proxies
+include "classlookup.pxi" # Element class lookup mechanisms
+include "nsclasses.pxi" # Namespace implementation and registry
+include "docloader.pxi" # Support for custom document loaders
+include "parser.pxi" # XML and HTML parsers
+include "saxparser.pxi" # SAX-like Parser interface and tree builder
+include "parsertarget.pxi" # ET Parser target
+include "serializer.pxi" # XML output functions
+include "iterparse.pxi" # incremental XML parsing
+include "xmlid.pxi" # XMLID and IDDict
+include "xinclude.pxi" # XInclude
+include "cleanup.pxi" # Cleanup and recursive element removal functions
+
+
+################################################################################
+# Include submodules for XPath and XSLT
+
+include "extensions.pxi" # XPath/XSLT extension functions
+include "xpath.pxi" # XPath evaluation
+include "xslt.pxi" # XSL transformations
+include "xsltext.pxi" # XSL extension elements
+
+
+################################################################################
+# Validation
+
+cdef class DocumentInvalid(LxmlError):
+ """Validation error.
+
+ Raised by all document validators when their ``assertValid(tree)``
+ method fails.
+ """
+
+
+cdef class _Validator:
+ "Base class for XML validators."
+ cdef _ErrorLog _error_log
+ def __cinit__(self):
+ self._error_log = _ErrorLog()
+
+ def validate(self, etree):
+ """validate(self, etree)
+
+ Validate the document using this schema.
+
+ Returns true if the document is valid, false if not.
+ """
+ return self(etree)
+
+ def assertValid(self, etree):
+ """assertValid(self, etree)
+
+ Raises `DocumentInvalid` if the document does not comply with the schema.
+ """
+ if not self(etree):
+ raise DocumentInvalid(self._error_log._buildExceptionMessage(
+ "Document does not comply with schema"),
+ self._error_log)
+
+ def assert_(self, etree):
+ """assert_(self, etree)
+
+ Raises `AssertionError` if the document does not comply with the schema.
+ """
+ if not self(etree):
+ raise AssertionError, self._error_log._buildExceptionMessage(
+ "Document does not comply with schema")
+
+ cpdef _append_log_message(self, int domain, int type, int level, int line,
+ message, filename):
+ self._error_log._receiveGeneric(domain, type, level, line, message,
+ filename)
+
+ cpdef _clear_error_log(self):
+ self._error_log.clear()
+
+ @property
+ def error_log(self):
+ """The log of validation errors and warnings."""
+ assert self._error_log is not None, "XPath evaluator not initialised"
+ return self._error_log.copy()
+
+include "dtd.pxi" # DTD
+include "relaxng.pxi" # RelaxNG
+include "xmlschema.pxi" # XMLSchema
+include "schematron.pxi" # Schematron (requires libxml2 2.6.21+)
+
+################################################################################
+# Public C API
+
+include "public-api.pxi"
+
+################################################################################
+# Other stuff
+
+include "debug.pxi"
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/extensions.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/extensions.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..2a2c94ecc7a6baae223ff7d613bf76d1b699be99
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/extensions.pxi
@@ -0,0 +1,833 @@
+# support for extension functions in XPath and XSLT
+
+cdef class XPathError(LxmlError):
+ """Base class of all XPath errors.
+ """
+
+cdef class XPathEvalError(XPathError):
+ """Error during XPath evaluation.
+ """
+
+cdef class XPathFunctionError(XPathEvalError):
+ """Internal error looking up an XPath extension function.
+ """
+
+cdef class XPathResultError(XPathEvalError):
+ """Error handling an XPath result.
+ """
+
+
+# forward declarations
+
+ctypedef int (*_register_function)(void* ctxt, name_utf, ns_uri_utf)
+cdef class _ExsltRegExp
+
+################################################################################
+# Base class for XSLT and XPath evaluation contexts: functions, namespaces, ...
+
+@cython.internal
+cdef class _BaseContext:
+ cdef xpath.xmlXPathContext* _xpathCtxt
+ cdef _Document _doc
+ cdef dict _extensions
+ cdef list _namespaces
+ cdef list _global_namespaces
+ cdef dict _utf_refs
+ cdef dict _function_cache
+ cdef dict _eval_context_dict
+ cdef bint _build_smart_strings
+ # for exception handling and temporary reference keeping:
+ cdef _TempStore _temp_refs
+ cdef set _temp_documents
+ cdef _ExceptionContext _exc
+ cdef _ErrorLog _error_log
+
+ def __cinit__(self):
+ self._xpathCtxt = NULL
+
+ def __init__(self, namespaces, extensions, error_log, enable_regexp,
+ build_smart_strings):
+ cdef _ExsltRegExp _regexp
+ cdef dict new_extensions
+ cdef list ns
+ self._utf_refs = {}
+ self._global_namespaces = []
+ self._function_cache = {}
+ self._eval_context_dict = None
+ self._error_log = error_log
+
+ if extensions is not None:
+ # convert extensions to UTF-8
+ if isinstance(extensions, dict):
+ extensions = (extensions,)
+ # format: [ {(ns, name):function} ] -> {(ns_utf, name_utf):function}
+ new_extensions = {}
+ for extension in extensions:
+ for (ns_uri, name), function in extension.items():
+ if name is None:
+ raise ValueError, "extensions must have non empty names"
+ ns_utf = self._to_utf(ns_uri)
+ name_utf = self._to_utf(name)
+ new_extensions[(ns_utf, name_utf)] = function
+ extensions = new_extensions or None
+
+ if namespaces is not None:
+ if isinstance(namespaces, dict):
+ namespaces = namespaces.items()
+ if namespaces:
+ ns = []
+ for prefix, ns_uri in namespaces:
+ if prefix is None or not prefix:
+ raise TypeError, \
+ "empty namespace prefix is not supported in XPath"
+ if ns_uri is None or not ns_uri:
+ raise TypeError, \
+ "setting default namespace is not supported in XPath"
+ prefix_utf = self._to_utf(prefix)
+ ns_uri_utf = self._to_utf(ns_uri)
+ ns.append( (prefix_utf, ns_uri_utf) )
+ namespaces = ns
+ else:
+ namespaces = None
+
+ self._doc = None
+ self._exc = _ExceptionContext()
+ self._extensions = extensions
+ self._namespaces = namespaces
+ self._temp_refs = _TempStore()
+ self._temp_documents = set()
+ self._build_smart_strings = build_smart_strings
+
+ if enable_regexp:
+ _regexp = _ExsltRegExp()
+ _regexp._register_in_context(self)
+
+ cdef _BaseContext _copy(self):
+ cdef _BaseContext context
+ if self._namespaces is not None:
+ namespaces = self._namespaces[:]
+ else:
+ namespaces = None
+ context = self.__class__(namespaces, None, self._error_log, False,
+ self._build_smart_strings)
+ if self._extensions is not None:
+ context._extensions = self._extensions.copy()
+ return context
+
+ cdef bytes _to_utf(self, s):
+ "Convert to UTF-8 and keep a reference to the encoded string"
+ cdef python.PyObject* dict_result
+ if s is None:
+ return None
+ dict_result = python.PyDict_GetItem(self._utf_refs, s)
+ if dict_result is not NULL:
+ return <bytes>dict_result  # cast the borrowed PyObject* back to bytes
+ utf = _utf8(s)
+ self._utf_refs[s] = utf
+ if python.IS_PYPY:
+ # use C level refs, PyPy refs are not enough!
+ python.Py_INCREF(utf)
+ return utf
+
+ cdef void _set_xpath_context(self, xpath.xmlXPathContext* xpathCtxt) noexcept:
+ self._xpathCtxt = xpathCtxt
+ xpathCtxt.userData = self
+ # Need a cast here because older libxml2 releases do not use 'const' in the functype.
+ xpathCtxt.error = <xmlerror.xmlStructuredErrorFunc>_receiveXPathError
+
+ @cython.final
+ cdef _register_context(self, _Document doc):
+ self._doc = doc
+ self._exc.clear()
+
+ @cython.final
+ cdef _cleanup_context(self):
+ #xpath.xmlXPathRegisteredNsCleanup(self._xpathCtxt)
+ #self.unregisterGlobalNamespaces()
+ if python.IS_PYPY:
+ # clean up double refs in PyPy (see "_to_utf()" method)
+ for ref in self._utf_refs.itervalues():
+ python.Py_DECREF(ref)
+ self._utf_refs.clear()
+ self._eval_context_dict = None
+ self._doc = None
+
+ @cython.final
+ cdef _release_context(self):
+ if self._xpathCtxt is not NULL:
+ self._xpathCtxt.userData = NULL
+ self._xpathCtxt = NULL
+
+ # namespaces (internal UTF-8 methods with leading '_')
+
+ cdef addNamespace(self, prefix, ns_uri):
+ cdef list namespaces
+ if prefix is None:
+ raise TypeError, "empty prefix is not supported in XPath"
+ prefix_utf = self._to_utf(prefix)
+ ns_uri_utf = self._to_utf(ns_uri)
+ new_item = (prefix_utf, ns_uri_utf)
+ if self._namespaces is None:
+ self._namespaces = [new_item]
+ else:
+ namespaces = []
+ for item in self._namespaces:
+ if item[0] == prefix_utf:
+ item = new_item
+ new_item = None
+ namespaces.append(item)
+ if new_item is not None:
+ namespaces.append(new_item)
+ self._namespaces = namespaces
+ if self._xpathCtxt is not NULL:
+ xpath.xmlXPathRegisterNs(
+ self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf))
+
+ cdef registerNamespace(self, prefix, ns_uri):
+ if prefix is None:
+ raise TypeError, "empty prefix is not supported in XPath"
+ prefix_utf = self._to_utf(prefix)
+ ns_uri_utf = self._to_utf(ns_uri)
+ self._global_namespaces.append(prefix_utf)
+ xpath.xmlXPathRegisterNs(self._xpathCtxt,
+ _xcstr(prefix_utf), _xcstr(ns_uri_utf))
+
+ cdef registerLocalNamespaces(self):
+ if self._namespaces is None:
+ return
+ for prefix_utf, ns_uri_utf in self._namespaces:
+ xpath.xmlXPathRegisterNs(
+ self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf))
+
+ cdef registerGlobalNamespaces(self):
+ cdef list ns_prefixes = _find_all_extension_prefixes()
+ if python.PyList_GET_SIZE(ns_prefixes) > 0:
+ for prefix_utf, ns_uri_utf in ns_prefixes:
+ self._global_namespaces.append(prefix_utf)
+ xpath.xmlXPathRegisterNs(
+ self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf))
+
+ cdef unregisterGlobalNamespaces(self):
+ if python.PyList_GET_SIZE(self._global_namespaces) > 0:
+ for prefix_utf in self._global_namespaces:
+ xpath.xmlXPathRegisterNs(self._xpathCtxt,
+ _xcstr(prefix_utf), NULL)
+ del self._global_namespaces[:]
+
+ cdef void _unregisterNamespace(self, prefix_utf) noexcept:
+ xpath.xmlXPathRegisterNs(self._xpathCtxt,
+ _xcstr(prefix_utf), NULL)
+
+ # extension functions
+
+ cdef int _addLocalExtensionFunction(self, ns_utf, name_utf, function) except -1:
+ if self._extensions is None:
+ self._extensions = {}
+ self._extensions[(ns_utf, name_utf)] = function
+ return 0
+
+ cdef registerGlobalFunctions(self, void* ctxt,
+ _register_function reg_func):
+ cdef python.PyObject* dict_result
+ cdef dict d
+ for ns_utf, ns_functions in __FUNCTION_NAMESPACE_REGISTRIES.iteritems():
+ dict_result = python.PyDict_GetItem(
+ self._function_cache, ns_utf)
+ if dict_result is not NULL:
+ d = <dict>dict_result
+ else:
+ d = {}
+ self._function_cache[ns_utf] = d
+ for name_utf, function in ns_functions.iteritems():
+ d[name_utf] = function
+ reg_func(ctxt, name_utf, ns_utf)
+
+ cdef registerLocalFunctions(self, void* ctxt,
+ _register_function reg_func):
+ cdef python.PyObject* dict_result
+ cdef dict d
+ if self._extensions is None:
+ return # done
+ last_ns = None
+ d = None
+ for (ns_utf, name_utf), function in self._extensions.iteritems():
+ if ns_utf is not last_ns or d is None:
+ last_ns = ns_utf
+ dict_result = python.PyDict_GetItem(
+ self._function_cache, ns_utf)
+ if dict_result is not NULL:
+ d = <dict>dict_result
+ else:
+ d = {}
+ self._function_cache[ns_utf] = d
+ d[name_utf] = function
+ reg_func(ctxt, name_utf, ns_utf)
+
+ cdef unregisterAllFunctions(self, void* ctxt,
+ _register_function unreg_func):
+ for ns_utf, functions in self._function_cache.iteritems():
+ for name_utf in functions:
+ unreg_func(ctxt, name_utf, ns_utf)
+
+ cdef unregisterGlobalFunctions(self, void* ctxt,
+ _register_function unreg_func):
+ for ns_utf, functions in self._function_cache.items():
+ for name_utf in functions:
+ if self._extensions is None or \
+ (ns_utf, name_utf) not in self._extensions:
+ unreg_func(ctxt, name_utf, ns_utf)
+
+ @cython.final
+ cdef _find_cached_function(self, const_xmlChar* c_ns_uri, const_xmlChar* c_name):
+ """Lookup an extension function in the cache and return it.
+
+ Parameters: c_ns_uri may be NULL, c_name must not be NULL
+ """
+ cdef python.PyObject* c_dict
+ cdef python.PyObject* dict_result
+ c_dict = python.PyDict_GetItem(
+ self._function_cache, None if c_ns_uri is NULL else c_ns_uri)
+ if c_dict is not NULL:
+ dict_result = python.PyDict_GetItem(
+ <object>c_dict, c_name)
+ if dict_result is not NULL:
+ return <object>dict_result
+ return None
+
+ # Python access to the XPath context for extension functions
+
+ @property
+ def context_node(self):
+ cdef xmlNode* c_node
+ if self._xpathCtxt is NULL:
+ raise XPathError, \
+ "XPath context is only usable during the evaluation"
+ c_node = self._xpathCtxt.node
+ if c_node is NULL:
+ raise XPathError, "no context node"
+ if c_node.doc != self._xpathCtxt.doc:
+ raise XPathError, \
+ "document-external context nodes are not supported"
+ if self._doc is None:
+ raise XPathError, "document context is missing"
+ return _elementFactory(self._doc, c_node)
+
+ @property
+ def eval_context(self):
+ if self._eval_context_dict is None:
+ self._eval_context_dict = {}
+ return self._eval_context_dict
+
+ # Python reference keeping during XPath function evaluation
+
+ @cython.final
+ cdef _release_temp_refs(self):
+ "Free temporarily referenced objects from this context."
+ self._temp_refs.clear()
+ self._temp_documents.clear()
+
+ @cython.final
+ cdef _hold(self, obj):
+ """A way to temporarily hold references to nodes in the evaluator.
+
+ This is needed because otherwise nodes created in XPath extension
+ functions would be reference counted too soon, during the XPath
+ evaluation. This is most important in the case of exceptions.
+ """
+ cdef _Element element
+ if isinstance(obj, _Element):
+ self._temp_refs.add(obj)
+ self._temp_documents.add((<_Element>obj)._doc)
+ return
+ elif _isString(obj) or not python.PySequence_Check(obj):
+ return
+ for o in obj:
+ if isinstance(o, _Element):
+ #print "Holding element:", element._c_node
+ self._temp_refs.add(o)
+ #print "Holding document:", element._doc._c_doc
+ self._temp_documents.add((<_Element>o)._doc)
+
+ @cython.final
+ cdef _Document _findDocumentForNode(self, xmlNode* c_node):
+ """If an XPath expression returns an element from a different
+ document than the current context document, we call this to
+ see if it was possibly created by an extension and is a known
+ document instance.
+ """
+ cdef _Document doc
+ for doc in self._temp_documents:
+ if doc is not None and doc._c_doc is c_node.doc:
+ return doc
+ return None
+
+
+# libxml2 keeps these error messages in a static array in its code
+# and doesn't give us access to them ...
+
+cdef tuple LIBXML2_XPATH_ERROR_MESSAGES = (
+ b"Ok",
+ b"Number encoding",
+ b"Unfinished literal",
+ b"Start of literal",
+ b"Expected $ for variable reference",
+ b"Undefined variable",
+ b"Invalid predicate",
+ b"Invalid expression",
+ b"Missing closing curly brace",
+ b"Unregistered function",
+ b"Invalid operand",
+ b"Invalid type",
+ b"Invalid number of arguments",
+ b"Invalid context size",
+ b"Invalid context position",
+ b"Memory allocation error",
+ b"Syntax error",
+ b"Resource error",
+ b"Sub resource error",
+ b"Undefined namespace prefix",
+ b"Encoding error",
+ b"Char out of XML range",
+ b"Invalid or incomplete context",
+ b"Stack usage error",
+ b"Forbidden variable\n",
+ b"?? Unknown error ??\n",
+)
+
+cdef void _forwardXPathError(void* c_ctxt, const xmlerror.xmlError* c_error) noexcept with gil:
+ cdef xmlerror.xmlError error
+ cdef int xpath_code
+ if c_error.message is not NULL:
+ error.message = c_error.message
+ else:
+ xpath_code = c_error.code - xmlerror.XML_XPATH_EXPRESSION_OK
+ if 0 <= xpath_code < len(LIBXML2_XPATH_ERROR_MESSAGES):
+ error.message = _cstr(LIBXML2_XPATH_ERROR_MESSAGES[xpath_code])
+ else:
+ error.message = b"unknown error"
+ error.domain = c_error.domain
+ error.code = c_error.code
+ error.level = c_error.level
+ error.line = c_error.line
+ error.int2 = c_error.int1 # column
+ error.file = c_error.file
+ error.node = NULL
+
+ (<_BaseContext>c_ctxt)._error_log._receive(&error)
+
+cdef void _receiveXPathError(void* c_context, const xmlerror.xmlError* error) noexcept nogil:
+ if not __DEBUG:
+ return
+ if c_context is NULL:
+ _forwardError(NULL, error)
+ else:
+ _forwardXPathError(c_context, error)
+
+
+def Extension(module, function_mapping=None, *, ns=None):
+ """Extension(module, function_mapping=None, ns=None)
+
+ Build a dictionary of extension functions from the functions
+ defined in a module or the methods of an object.
+
+ As the second argument, you can pass an additional mapping of
+ attribute names to XPath function names, or a list of function
+ names that should be taken.
+
+ The ``ns`` keyword argument accepts a namespace URI for the XPath
+ functions.
+ """
+ cdef dict functions = {}
+ if isinstance(function_mapping, dict):
+ for function_name, xpath_name in function_mapping.items():
+ functions[(ns, xpath_name)] = getattr(module, function_name)
+ else:
+ if function_mapping is None:
+ function_mapping = [ name for name in dir(module)
+ if not name.startswith('_') ]
+ for function_name in function_mapping:
+ functions[(ns, function_name)] = getattr(module, function_name)
+ return functions
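+
+ # Usage sketch (illustrative only; 'mymodule' and the namespace URI are
+ # hypothetical placeholders). Note that the evaluator passes a context
+ # object as the first argument to every extension function (see
+ # _extension_function_call below):
+ #
+ # functions = Extension(mymodule, ('hello',), ns='http://example.com/f')
+ # root.xpath('f:hello(.)', namespaces={'f': 'http://example.com/f'},
+ # extensions=functions)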
+
+################################################################################
+# EXSLT regexp implementation
+
+@cython.final
+@cython.internal
+cdef class _ExsltRegExp:
+ cdef dict _compile_map
+ def __cinit__(self):
+ self._compile_map = {}
+
+ cdef _make_string(self, value):
+ if _isString(value):
+ return value
+ elif isinstance(value, list):
+ # node set: take recursive text concatenation of first element
+ if python.PyList_GET_SIZE(value) == 0:
+ return ''
+ firstnode = value[0]
+ if _isString(firstnode):
+ return firstnode
+ elif isinstance(firstnode, _Element):
+ c_text = tree.xmlNodeGetContent((<_Element>firstnode)._c_node)
+ if c_text is NULL:
+ raise MemoryError()
+ try:
+ return funicode(c_text)
+ finally:
+ tree.xmlFree(c_text)
+ else:
+ return unicode(firstnode)
+ else:
+ return unicode(value)
+
+ cdef _compile(self, rexp, ignore_case):
+ cdef python.PyObject* c_result
+ rexp = self._make_string(rexp)
+ key = (rexp, ignore_case)
+ c_result = python.PyDict_GetItem(self._compile_map, key)
+ if c_result is not NULL:
+ return <object>c_result
+ py_flags = re.UNICODE
+ if ignore_case:
+ py_flags = py_flags | re.IGNORECASE
+ rexp_compiled = re.compile(rexp, py_flags)
+ self._compile_map[key] = rexp_compiled
+ return rexp_compiled
+
+ def test(self, ctxt, s, rexp, flags=''):
+ flags = self._make_string(flags)
+ s = self._make_string(s)
+ rexpc = self._compile(rexp, 'i' in flags)
+ if rexpc.search(s) is None:
+ return False
+ else:
+ return True
+
+ def match(self, ctxt, s, rexp, flags=''):
+ cdef list result_list
+ flags = self._make_string(flags)
+ s = self._make_string(s)
+ rexpc = self._compile(rexp, 'i' in flags)
+ if 'g' in flags:
+ results = rexpc.findall(s)
+ if not results:
+ return ()
+ else:
+ result = rexpc.search(s)
+ if not result:
+ return ()
+ results = [ result.group() ]
+ results.extend( result.groups('') )
+ result_list = []
+ root = Element('matches')
+ for s_match in results:
+ if python.PyTuple_CheckExact(s_match):
+ s_match = ''.join(s_match)
+ elem = SubElement(root, 'match')
+ elem.text = s_match
+ result_list.append(elem)
+ return result_list
+
+ def replace(self, ctxt, s, rexp, flags, replacement):
+ replacement = self._make_string(replacement)
+ flags = self._make_string(flags)
+ s = self._make_string(s)
+ rexpc = self._compile(rexp, 'i' in flags)
+ count: object = 0 if 'g' in flags else 1
+ return rexpc.sub(replacement, s, count)
+
+ cdef _register_in_context(self, _BaseContext context):
+ ns = b"http://exslt.org/regular-expressions"
+ context._addLocalExtensionFunction(ns, b"test", self.test)
+ context._addLocalExtensionFunction(ns, b"match", self.match)
+ context._addLocalExtensionFunction(ns, b"replace", self.replace)
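+
+ # Usage sketch (illustrative only): these methods implement the EXSLT
+ # regular expression functions for XPath/XSLT evaluation, e.g.:
+ #
+ # REGEXP_NS = "http://exslt.org/regular-expressions"
+ # find = etree.XPath("//*[re:test(text(), '^abc$', 'i')]",
+ # namespaces={'re': REGEXP_NS})
+ # matching = find(root)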
+
+
+################################################################################
+# helper functions
+
+cdef xpath.xmlXPathObject* _wrapXPathObject(object obj, _Document doc,
+ _BaseContext context) except NULL:
+ cdef xpath.xmlNodeSet* resultSet
+ cdef _Element fake_node = None
+ cdef xmlNode* c_node
+
+ if isinstance(obj, unicode):
+ obj = _utf8(obj)
+ if isinstance(obj, bytes):
+ # libxml2 copies the string value
+ return xpath.xmlXPathNewCString(_cstr(obj))
+ if isinstance(obj, bool):
+ return xpath.xmlXPathNewBoolean(obj)
+ if python.PyNumber_Check(obj):
+ return xpath.xmlXPathNewFloat(obj)
+ if obj is None:
+ resultSet = xpath.xmlXPathNodeSetCreate(NULL)
+ elif isinstance(obj, _Element):
+ resultSet = xpath.xmlXPathNodeSetCreate((<_Element>obj)._c_node)
+ elif python.PySequence_Check(obj):
+ resultSet = xpath.xmlXPathNodeSetCreate(NULL)
+ try:
+ for value in obj:
+ if isinstance(value, _Element):
+ if context is not None:
+ context._hold(value)
+ xpath.xmlXPathNodeSetAdd(resultSet, (<_Element>value)._c_node)
+ else:
+ if context is None or doc is None:
+ raise XPathResultError, \
+ f"Non-Element values not supported at this point - got {value!r}"
+ # support strings by appending text nodes to an Element
+ if isinstance(value, unicode):
+ value = _utf8(value)
+ if isinstance(value, bytes):
+ if fake_node is None:
+ fake_node = _makeElement("text-root", NULL, doc, None,
+ None, None, None, None, None)
+ context._hold(fake_node)
+ else:
+ # append a comment node to keep the text nodes separate
+ c_node = tree.xmlNewDocComment(doc._c_doc, "")
+ if c_node is NULL:
+ raise MemoryError()
+ tree.xmlAddChild(fake_node._c_node, c_node)
+ context._hold(value)
+ c_node = tree.xmlNewDocText(doc._c_doc, _xcstr(value))
+ if c_node is NULL:
+ raise MemoryError()
+ tree.xmlAddChild(fake_node._c_node, c_node)
+ xpath.xmlXPathNodeSetAdd(resultSet, c_node)
+ else:
+ raise XPathResultError, \
+ f"This is not a supported node-set result: {value!r}"
+ except:
+ xpath.xmlXPathFreeNodeSet(resultSet)
+ raise
+ else:
+ raise XPathResultError, f"Unknown return type: {python._fqtypename(obj).decode('utf8')}"
+ return xpath.xmlXPathWrapNodeSet(resultSet)
+
+cdef object _unwrapXPathObject(xpath.xmlXPathObject* xpathObj,
+ _Document doc, _BaseContext context):
+ if xpathObj.type == xpath.XPATH_UNDEFINED:
+ raise XPathResultError, "Undefined xpath result"
+ elif xpathObj.type == xpath.XPATH_NODESET:
+ return _createNodeSetResult(xpathObj, doc, context)
+ elif xpathObj.type == xpath.XPATH_BOOLEAN:
+ return xpathObj.boolval
+ elif xpathObj.type == xpath.XPATH_NUMBER:
+ return xpathObj.floatval
+ elif xpathObj.type == xpath.XPATH_STRING:
+ stringval = funicode(xpathObj.stringval)
+ if context._build_smart_strings:
+ stringval = _elementStringResultFactory(
+ stringval, None, None, False)
+ return stringval
+ elif xpathObj.type == xpath.XPATH_POINT:
+ raise NotImplementedError, "XPATH_POINT"
+ elif xpathObj.type == xpath.XPATH_RANGE:
+ raise NotImplementedError, "XPATH_RANGE"
+ elif xpathObj.type == xpath.XPATH_LOCATIONSET:
+ raise NotImplementedError, "XPATH_LOCATIONSET"
+ elif xpathObj.type == xpath.XPATH_USERS:
+ raise NotImplementedError, "XPATH_USERS"
+ elif xpathObj.type == xpath.XPATH_XSLT_TREE:
+ return _createNodeSetResult(xpathObj, doc, context)
+ else:
+ raise XPathResultError, f"Unknown xpath result {xpathObj.type}"
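+
+ # Net effect (sketch): node-sets come back as lists, booleans as bool,
+ # numbers as float, strings as (smart) unicode strings, e.g.:
+ #
+ # root.xpath('count(*)') # -> 2.0 for an element with two children
+ # root.xpath('name()') # -> 'root'
+ # root.xpath('*') # -> list of _Element instances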
+
+cdef object _createNodeSetResult(xpath.xmlXPathObject* xpathObj, _Document doc,
+ _BaseContext context):
+ cdef xmlNode* c_node
+ cdef int i
+ cdef list result
+ result = []
+ if xpathObj.nodesetval is NULL:
+ return result
+ for i in range(xpathObj.nodesetval.nodeNr):
+ c_node = xpathObj.nodesetval.nodeTab[i]
+ _unpackNodeSetEntry(result, c_node, doc, context,
+ xpathObj.type == xpath.XPATH_XSLT_TREE)
+ return result
+
+cdef _unpackNodeSetEntry(list results, xmlNode* c_node, _Document doc,
+ _BaseContext context, bint is_fragment):
+ cdef xmlNode* c_child
+ if _isElement(c_node):
+ if c_node.doc != doc._c_doc and c_node.doc._private is NULL:
+ # XXX: works, but maybe not always the right thing to do?
+ # XPath: only runs when extensions create or copy trees
+ # -> we store Python refs to these, so that is OK
+ # XSLT: can it leak when merging trees from multiple sources?
+ c_node = tree.xmlDocCopyNode(c_node, doc._c_doc, 1)
+ # FIXME: call _instantiateElementFromXPath() instead?
+ results.append(
+ _fakeDocElementFactory(doc, c_node))
+ elif c_node.type == tree.XML_TEXT_NODE or \
+ c_node.type == tree.XML_CDATA_SECTION_NODE or \
+ c_node.type == tree.XML_ATTRIBUTE_NODE:
+ results.append(
+ _buildElementStringResult(doc, c_node, context))
+ elif c_node.type == tree.XML_NAMESPACE_DECL:
+ results.append( (funicodeOrNone((c_node).prefix),
+ funicodeOrNone((c_node).href)) )
+ elif c_node.type == tree.XML_DOCUMENT_NODE or \
+ c_node.type == tree.XML_HTML_DOCUMENT_NODE:
+ # ignored for everything but result tree fragments
+ if is_fragment:
+ c_child = c_node.children
+ while c_child is not NULL:
+ _unpackNodeSetEntry(results, c_child, doc, context, 0)
+ c_child = c_child.next
+ elif c_node.type == tree.XML_XINCLUDE_START or \
+ c_node.type == tree.XML_XINCLUDE_END:
+ pass
+ else:
+ raise NotImplementedError, \
+ f"Not yet implemented result node type: {c_node.type}"
+
+cdef void _freeXPathObject(xpath.xmlXPathObject* xpathObj) noexcept:
+ """Free the XPath object, but *never* free the *content* of node sets.
+ Python dealloc will do that for us.
+ """
+ if xpathObj.nodesetval is not NULL:
+ xpath.xmlXPathFreeNodeSet(xpathObj.nodesetval)
+ xpathObj.nodesetval = NULL
+ xpath.xmlXPathFreeObject(xpathObj)
+
+cdef _Element _instantiateElementFromXPath(xmlNode* c_node, _Document doc,
+ _BaseContext context):
+ # NOTE: this may copy the element - only call this when it can't leak
+ if c_node.doc != doc._c_doc and c_node.doc._private is NULL:
+ # not from the context document and not from a fake document
+ # either => may still be from a known document, e.g. one
+ # created by an extension function
+ node_doc = context._findDocumentForNode(c_node)
+ if node_doc is None:
+ # not from a known document at all! => can only make a
+ # safety copy here
+ c_node = tree.xmlDocCopyNode(c_node, doc._c_doc, 1)
+ else:
+ doc = node_doc
+ return _fakeDocElementFactory(doc, c_node)
+
+################################################################################
+# special str/unicode subclasses
+
+@cython.final
+cdef class _ElementUnicodeResult(unicode):
+ cdef _Element _parent
+ cdef readonly object attrname
+ cdef readonly bint is_tail
+
+ def getparent(self):
+ return self._parent
+
+ @property
+ def is_text(self):
+ return self._parent is not None and not (self.is_tail or self.attrname is not None)
+
+ @property
+ def is_attribute(self):
+ return self.attrname is not None
+
+cdef object _elementStringResultFactory(string_value, _Element parent,
+ attrname, bint is_tail):
+ result = _ElementUnicodeResult(string_value)
+ result._parent = parent
+ result.is_tail = is_tail
+ result.attrname = attrname
+ return result
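+
+ # "Smart string" usage sketch (illustrative; 'title' is a hypothetical
+ # tag): string results remember where they came from.
+ #
+ # s = root.xpath('//title/text()')[0]
+ # s.getparent() # the <title> element carrying the text
+ # s.is_text, s.is_tail, s.is_attribute # origin flags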
+
+cdef object _buildElementStringResult(_Document doc, xmlNode* c_node,
+ _BaseContext context):
+ cdef _Element parent = None
+ cdef object attrname = None
+ cdef xmlNode* c_element
+ cdef bint is_tail
+
+ if c_node.type == tree.XML_ATTRIBUTE_NODE:
+ attrname = _namespacedName(c_node)
+ is_tail = 0
+ s = tree.xmlNodeGetContent(c_node)
+ try:
+ value = funicode(s)
+ finally:
+ tree.xmlFree(s)
+ c_element = NULL
+ else:
+ #assert c_node.type == tree.XML_TEXT_NODE or c_node.type == tree.XML_CDATA_SECTION_NODE, "invalid node type"
+ # may be tail text or normal text
+ value = funicode(c_node.content)
+ c_element = _previousElement(c_node)
+ is_tail = c_element is not NULL
+
+ if not context._build_smart_strings:
+ return value
+
+ if c_element is NULL:
+ # non-tail text or attribute text
+ c_element = c_node.parent
+ while c_element is not NULL and not _isElement(c_element):
+ c_element = c_element.parent
+
+ if c_element is not NULL:
+ parent = _instantiateElementFromXPath(c_element, doc, context)
+
+ return _elementStringResultFactory(
+ value, parent, attrname, is_tail)
+
+################################################################################
+# callbacks for XPath/XSLT extension functions
+
+cdef void _extension_function_call(_BaseContext context, function,
+ xpath.xmlXPathParserContext* ctxt, int nargs) noexcept:
+ cdef _Document doc
+ cdef xpath.xmlXPathObject* obj
+ cdef list args
+ cdef int i
+ doc = context._doc
+ try:
+ args = []
+ for i in range(nargs):
+ obj = xpath.valuePop(ctxt)
+ o = _unwrapXPathObject(obj, doc, context)
+ _freeXPathObject(obj)
+ args.append(o)
+ args.reverse()
+
+ res = function(context, *args)
+ # wrap result for XPath consumption
+ obj = _wrapXPathObject(res, doc, context)
+ # prevent Python from deallocating elements handed to libxml2
+ context._hold(res)
+ xpath.valuePush(ctxt, obj)
+ except:
+ xpath.xmlXPathErr(ctxt, xpath.XPATH_EXPR_ERROR)
+ context._exc._store_raised()
+ finally:
+ return # swallow any further exceptions
+
+# lookup the function by name and call it
+
+cdef void _xpath_function_call(xpath.xmlXPathParserContext* ctxt,
+ int nargs) noexcept with gil:
+ cdef _BaseContext context
+ cdef xpath.xmlXPathContext* rctxt = ctxt.context
+ context = <_BaseContext> rctxt.userData
+ try:
+ function = context._find_cached_function(rctxt.functionURI, rctxt.function)
+ if function is not None:
+ _extension_function_call(context, function, ctxt, nargs)
+ else:
+ xpath.xmlXPathErr(ctxt, xpath.XPATH_UNKNOWN_FUNC_ERROR)
+ context._exc._store_exception(XPathFunctionError(
+ f"XPath function '{_namespacedNameFromNsName(rctxt.functionURI, rctxt.function)}' not found"))
+ except:
+ # may not be the right error, but we need to tell libxml2 *something*
+ xpath.xmlXPathErr(ctxt, xpath.XPATH_UNKNOWN_FUNC_ERROR)
+ context._exc._store_raised()
+ finally:
+ return # swallow any further exceptions
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/lxml.etree.h b/llmeval-env/lib/python3.10/site-packages/lxml/lxml.etree.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ffc7ba32670f056a6415ab60ffb8240fb6d4a28
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/lxml.etree.h
@@ -0,0 +1,248 @@
+/* Generated by Cython 3.0.10 */
+
+#ifndef __PYX_HAVE__lxml__etree
+#define __PYX_HAVE__lxml__etree
+
+#include "Python.h"
+struct LxmlDocument;
+struct LxmlElement;
+struct LxmlElementTree;
+struct LxmlElementTagMatcher;
+struct LxmlElementIterator;
+struct LxmlElementBase;
+struct LxmlElementClassLookup;
+struct LxmlFallbackElementClassLookup;
+
+/* "lxml/etree.pyx":333
+ *
+ * # type of a function that steps from node to node
+ * ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+typedef xmlNode *(*_node_to_node_function)(xmlNode *);
+
+/* "lxml/etree.pyx":349
+ * @cython.final
+ * @cython.freelist(8)
+ * cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]: # <<<<<<<<<<<<<<
+ * """Internal base class to reference a libxml document.
+ *
+ */
+struct LxmlDocument {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4lxml_5etree__Document *__pyx_vtab;
+ int _ns_counter;
+ PyObject *_prefix_tail;
+ xmlDoc *_c_doc;
+ struct __pyx_obj_4lxml_5etree__BaseParser *_parser;
+};
+
+/* "lxml/etree.pyx":698
+ *
+ * @cython.no_gc_clear
+ * cdef public class _Element [ type LxmlElementType, object LxmlElement ]: # <<<<<<<<<<<<<<
+ * """Element class.
+ *
+ */
+struct LxmlElement {
+ PyObject_HEAD
+ struct LxmlDocument *_doc;
+ xmlNode *_c_node;
+ PyObject *_tag;
+};
+
+/* "lxml/etree.pyx":1872
+ *
+ *
+ * cdef public class _ElementTree [ type LxmlElementTreeType, # <<<<<<<<<<<<<<
+ * object LxmlElementTree ]:
+ * cdef _Document _doc
+ */
+struct LxmlElementTree {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTree *__pyx_vtab;
+ struct LxmlDocument *_doc;
+ struct LxmlElement *_context_node;
+};
+
+/* "lxml/etree.pyx":2646
+ *
+ *
+ * cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher, # <<<<<<<<<<<<<<
+ * type LxmlElementTagMatcherType ]:
+ * """
+ */
+struct LxmlElementTagMatcher {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4lxml_5etree__ElementTagMatcher *__pyx_vtab;
+ PyObject *_pystrings;
+ int _node_type;
+ char *_href;
+ char *_name;
+};
+
+/* "lxml/etree.pyx":2677
+ * self._name = NULL
+ *
+ * cdef public class _ElementIterator(_ElementTagMatcher) [ # <<<<<<<<<<<<<<
+ * object LxmlElementIterator, type LxmlElementIteratorType ]:
+ * """
+ */
+struct LxmlElementIterator {
+ struct LxmlElementTagMatcher __pyx_base;
+ struct LxmlElement *_node;
+ _node_to_node_function _next_element;
+};
+
+/* "src/lxml/classlookup.pxi":6
+ * # Custom Element classes
+ *
+ * cdef public class ElementBase(_Element) [ type LxmlElementBaseType, # <<<<<<<<<<<<<<
+ * object LxmlElementBase ]:
+ * """ElementBase(*children, attrib=None, nsmap=None, **_extra)
+ */
+struct LxmlElementBase {
+ struct LxmlElement __pyx_base;
+};
+
+/* "src/lxml/classlookup.pxi":210
+ * # Element class lookup
+ *
+ * ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*) # <<<<<<<<<<<<<<
+ *
+ * # class to store element class lookup functions
+ */
+typedef PyObject *(*_element_class_lookup_function)(PyObject *, struct LxmlDocument *, xmlNode *);
+
+/* "src/lxml/classlookup.pxi":213
+ *
+ * # class to store element class lookup functions
+ * cdef public class ElementClassLookup [ type LxmlElementClassLookupType, # <<<<<<<<<<<<<<
+ * object LxmlElementClassLookup ]:
+ * """ElementClassLookup(self)
+ */
+struct LxmlElementClassLookup {
+ PyObject_HEAD
+ _element_class_lookup_function _lookup_function;
+};
+
+/* "src/lxml/classlookup.pxi":221
+ *
+ *
+ * cdef public class FallbackElementClassLookup(ElementClassLookup) \ # <<<<<<<<<<<<<<
+ * [ type LxmlFallbackElementClassLookupType,
+ * object LxmlFallbackElementClassLookup ]:
+ */
+struct LxmlFallbackElementClassLookup {
+ struct LxmlElementClassLookup __pyx_base;
+ struct __pyx_vtabstruct_4lxml_5etree_FallbackElementClassLookup *__pyx_vtab;
+ struct LxmlElementClassLookup *fallback;
+ _element_class_lookup_function _fallback_function;
+};
+
+#ifndef __PYX_HAVE_API__lxml__etree
+
+#ifdef CYTHON_EXTERN_C
+ #undef __PYX_EXTERN_C
+ #define __PYX_EXTERN_C CYTHON_EXTERN_C
+#elif defined(__PYX_EXTERN_C)
+ #ifdef _MSC_VER
+ #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
+ #else
+ #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
+ #endif
+#else
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(_T) _T
+#endif
+
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlDocumentType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTreeType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTagMatcherType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementIteratorType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementBaseType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementClassLookupType;
+__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlFallbackElementClassLookupType;
+
+__PYX_EXTERN_C struct LxmlElement *deepcopyNodeToDocument(struct LxmlDocument *, xmlNode *);
+__PYX_EXTERN_C struct LxmlElementTree *elementTreeFactory(struct LxmlElement *);
+__PYX_EXTERN_C struct LxmlElementTree *newElementTree(struct LxmlElement *, PyObject *);
+__PYX_EXTERN_C struct LxmlElementTree *adoptExternalDocument(xmlDoc *, PyObject *, int);
+__PYX_EXTERN_C struct LxmlElement *elementFactory(struct LxmlDocument *, xmlNode *);
+__PYX_EXTERN_C struct LxmlElement *makeElement(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
+__PYX_EXTERN_C struct LxmlElement *makeSubElement(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *);
+__PYX_EXTERN_C void setElementClassLookupFunction(_element_class_lookup_function, PyObject *);
+__PYX_EXTERN_C PyObject *lookupDefaultElementClass(PyObject *, PyObject *, xmlNode *);
+__PYX_EXTERN_C PyObject *lookupNamespaceElementClass(PyObject *, PyObject *, xmlNode *);
+__PYX_EXTERN_C PyObject *callLookupFallback(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *);
+__PYX_EXTERN_C int tagMatches(xmlNode *, const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C struct LxmlDocument *documentOrRaise(PyObject *);
+__PYX_EXTERN_C struct LxmlElement *rootNodeOrRaise(PyObject *);
+__PYX_EXTERN_C int hasText(xmlNode *);
+__PYX_EXTERN_C int hasTail(xmlNode *);
+__PYX_EXTERN_C PyObject *textOf(xmlNode *);
+__PYX_EXTERN_C PyObject *tailOf(xmlNode *);
+__PYX_EXTERN_C int setNodeText(xmlNode *, PyObject *);
+__PYX_EXTERN_C int setTailText(xmlNode *, PyObject *);
+__PYX_EXTERN_C PyObject *attributeValue(xmlNode *, xmlAttr *);
+__PYX_EXTERN_C PyObject *attributeValueFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C PyObject *getAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
+__PYX_EXTERN_C PyObject *iterattributes(struct LxmlElement *, int);
+__PYX_EXTERN_C PyObject *collectAttributes(xmlNode *, int);
+__PYX_EXTERN_C int setAttributeValue(struct LxmlElement *, PyObject *, PyObject *);
+__PYX_EXTERN_C int delAttribute(struct LxmlElement *, PyObject *);
+__PYX_EXTERN_C int delAttributeFromNsName(xmlNode *, const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C int hasChild(xmlNode *);
+__PYX_EXTERN_C xmlNode *findChild(xmlNode *, Py_ssize_t);
+__PYX_EXTERN_C xmlNode *findChildForwards(xmlNode *, Py_ssize_t);
+__PYX_EXTERN_C xmlNode *findChildBackwards(xmlNode *, Py_ssize_t);
+__PYX_EXTERN_C xmlNode *nextElement(xmlNode *);
+__PYX_EXTERN_C xmlNode *previousElement(xmlNode *);
+__PYX_EXTERN_C void appendChild(struct LxmlElement *, struct LxmlElement *);
+__PYX_EXTERN_C int appendChildToElement(struct LxmlElement *, struct LxmlElement *);
+__PYX_EXTERN_C PyObject *pyunicode(const xmlChar *);
+__PYX_EXTERN_C PyObject *utf8(PyObject *);
+__PYX_EXTERN_C PyObject *getNsTag(PyObject *);
+__PYX_EXTERN_C PyObject *getNsTagWithEmptyNs(PyObject *);
+__PYX_EXTERN_C PyObject *namespacedName(xmlNode *);
+__PYX_EXTERN_C PyObject *namespacedNameFromNsName(const xmlChar *, const xmlChar *);
+__PYX_EXTERN_C void iteratorStoreNext(struct LxmlElementIterator *, struct LxmlElement *);
+__PYX_EXTERN_C void initTagMatch(struct LxmlElementTagMatcher *, PyObject *);
+__PYX_EXTERN_C xmlNs *findOrBuildNodeNsPrefix(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *);
+
+#endif /* !__PYX_HAVE_API__lxml__etree */
+
+/* WARNING: the interface of the module init function changed in CPython 3.5. */
+/* It now returns a PyModuleDef instance instead of a PyModule instance. */
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC initetree(void);
+#else
+/* WARNING: Use PyImport_AppendInittab("etree", PyInit_etree) instead of calling PyInit_etree directly from Python 3.5 */
+PyMODINIT_FUNC PyInit_etree(void);
+
+#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L))
+#if defined(__cplusplus) && __cplusplus >= 201402L
+[[deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")]] inline
+#elif defined(__GNUC__) || defined(__clang__)
+__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly."), __unused__)) __inline__
+#elif defined(_MSC_VER)
+__declspec(deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")) __inline
+#endif
+static PyObject* __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyObject* res) {
+ return res;
+}
+#define PyInit_etree() __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyInit_etree())
+#endif
+#endif
+
+#endif /* !__PYX_HAVE__lxml__etree */
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/nsclasses.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/nsclasses.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..a3c86f0e0140557c0f3f2c5a2557dab1c62bfe3c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/nsclasses.pxi
@@ -0,0 +1,281 @@
+# module-level API for namespace implementations
+
+cdef class LxmlRegistryError(LxmlError):
+ """Base class of lxml registry errors.
+ """
+
+cdef class NamespaceRegistryError(LxmlRegistryError):
+ """Error registering a namespace extension.
+ """
+
+
+@cython.internal
+cdef class _NamespaceRegistry:
+ "Dictionary-like namespace registry"
+ cdef object _ns_uri
+ cdef bytes _ns_uri_utf
+ cdef dict _entries
+ cdef char* _c_ns_uri_utf
+ def __cinit__(self, ns_uri):
+ self._ns_uri = ns_uri
+ if ns_uri is None:
+ self._ns_uri_utf = None
+ self._c_ns_uri_utf = NULL
+ else:
+ self._ns_uri_utf = _utf8(ns_uri)
+ self._c_ns_uri_utf = _cstr(self._ns_uri_utf)
+ self._entries = {}
+
+ def update(self, class_dict_iterable):
+ """update(self, class_dict_iterable)
+
+ Forgivingly update the registry.
+
+ ``class_dict_iterable`` may be a dict or some other iterable
+ that yields (name, value) pairs.
+
+ If a value does not match the required type for this registry,
+ or if the name starts with '_', it will be silently discarded.
+ This allows registrations at the module or class level using
+ vars(), globals() etc."""
+ if hasattr(class_dict_iterable, 'items'):
+ class_dict_iterable = class_dict_iterable.items()
+ for name, item in class_dict_iterable:
+ if (name is None or name[:1] != '_') and callable(item):
+ self[name] = item
+
+ def __getitem__(self, name):
+ if name is not None:
+ name = _utf8(name)
+ return self._get(name)
+
+ def __delitem__(self, name):
+ if name is not None:
+ name = _utf8(name)
+ del self._entries[name]
+
+ cdef object _get(self, object name):
+ cdef python.PyObject* dict_result
+ dict_result = python.PyDict_GetItem(self._entries, name)
+ if dict_result is NULL:
+ raise KeyError, "Name not registered."
+ return <object>dict_result
+
+ cdef object _getForString(self, char* name):
+ cdef python.PyObject* dict_result
+ dict_result = python.PyDict_GetItem(self._entries, name)
+ if dict_result is NULL:
+ raise KeyError, "Name not registered."
+ return <object>dict_result
+
+ def __iter__(self):
+ return iter(self._entries)
+
+ def items(self):
+ return list(self._entries.items())
+
+ def iteritems(self):
+ return iter(self._entries.items())
+
+ def clear(self):
+ self._entries.clear()
+
+ def __call__(self, obj):
+ # Usage as decorator:
+ # ns = lookup.get_namespace("...")
+ # @ns('abc')
+ # class element(ElementBase): pass
+ #
+ # @ns
+ # class elementname(ElementBase): pass
+
+ if obj is None or python._isString(obj):
+ # @ns(None) or @ns('tag')
+ return partial(self.__deco, obj)
+ # plain @ns decorator
+ self[obj.__name__] = obj
+ return obj
+
+ def __deco(self, name, obj):
+ self[name] = obj
+ return obj
+
+
+@cython.final
+@cython.internal
+cdef class _ClassNamespaceRegistry(_NamespaceRegistry):
+ "Dictionary-like registry for namespace implementation classes"
+ def __setitem__(self, name, item):
+ if not isinstance(item, type) or not issubclass(item, ElementBase):
+ raise NamespaceRegistryError, \
+ "Registered element classes must be subtypes of ElementBase"
+ if name is not None:
+ name = _utf8(name)
+ self._entries[name] = item
+
+ def __repr__(self):
+ return "Namespace(%r)" % self._ns_uri
+
+
+cdef class ElementNamespaceClassLookup(FallbackElementClassLookup):
+ """ElementNamespaceClassLookup(self, fallback=None)
+
+ Element class lookup scheme that searches the Element class in the
+ Namespace registry.
+
+ Usage:
+
+ >>> lookup = ElementNamespaceClassLookup()
+ >>> ns_elements = lookup.get_namespace("http://schema.org/Movie")
+
+ >>> @ns_elements
+ ... class movie(ElementBase):
+ ... "Element implementation for 'movie' tag (using class name) in schema namespace."
+
+ >>> @ns_elements("movie")
+ ... class MovieElement(ElementBase):
+ ... "Element implementation for 'movie' tag (explicit tag name) in schema namespace."
+ """
+ cdef dict _namespace_registries
+ def __cinit__(self):
+ self._namespace_registries = {}
+
+ def __init__(self, ElementClassLookup fallback=None):
+ FallbackElementClassLookup.__init__(self, fallback)
+ self._lookup_function = _find_nselement_class
+
+ def get_namespace(self, ns_uri):
+ """get_namespace(self, ns_uri)
+
+ Retrieve the namespace object associated with the given URI.
+ Pass None for the empty namespace.
+
+ Creates a new namespace object if it does not yet exist."""
+ if ns_uri:
+ ns_utf = _utf8(ns_uri)
+ else:
+ ns_utf = None
+ try:
+ return self._namespace_registries[ns_utf]
+ except KeyError:
+ registry = self._namespace_registries[ns_utf] = \
+ _ClassNamespaceRegistry(ns_uri)
+ return registry
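+
+ # Wiring sketch (illustrative only): from user code, the lookup is
+ # typically installed on a parser so that parsed elements use the
+ # registered classes.
+ #
+ # lookup = ElementNamespaceClassLookup()
+ # parser = etree.XMLParser()
+ # parser.set_element_class_lookup(lookup)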
+
+cdef object _find_nselement_class(state, _Document doc, xmlNode* c_node):
+ cdef python.PyObject* dict_result
+ cdef ElementNamespaceClassLookup lookup
+ cdef _NamespaceRegistry registry
+ if state is None:
+ return _lookupDefaultElementClass(None, doc, c_node)
+
+ lookup = state
+ if c_node.type != tree.XML_ELEMENT_NODE:
+ return _callLookupFallback(lookup, doc, c_node)
+
+ c_namespace_utf = _getNs(c_node)
+ if c_namespace_utf is not NULL:
+ dict_result = python.PyDict_GetItem(
+ lookup._namespace_registries, c_namespace_utf)
+ else:
+ dict_result = python.PyDict_GetItem(
+ lookup._namespace_registries, None)
+ if dict_result is not NULL:
+ registry = <_NamespaceRegistry>dict_result
+ classes = registry._entries
+
+ if c_node.name is not NULL:
+ dict_result = python.PyDict_GetItem(
+ classes, c_node.name)
+ else:
+ dict_result = NULL
+
+ if dict_result is NULL:
+ dict_result = python.PyDict_GetItem(classes, None)
+
+ if dict_result is not NULL:
+ return <object>dict_result
+ return _callLookupFallback(lookup, doc, c_node)
+
+
+################################################################################
+# XPath extension functions
+
+cdef dict __FUNCTION_NAMESPACE_REGISTRIES
+__FUNCTION_NAMESPACE_REGISTRIES = {}
+
+def FunctionNamespace(ns_uri):
+ """FunctionNamespace(ns_uri)
+
+ Retrieve the function namespace object associated with the given
+ URI.
+
+ Creates a new one if it does not yet exist. A function namespace
+ can only be used to register extension functions.
+
+ Usage:
+
+ >>> ns_functions = FunctionNamespace("http://schema.org/Movie")
+
+ >>> @ns_functions # uses function name
+ ... def add2(x):
+ ... return x + 2
+
+ >>> @ns_functions("add3") # uses explicit name
+ ... def add_three(x):
+ ... return x + 3
+ """
+ ns_utf = _utf8(ns_uri) if ns_uri else None
+ try:
+ return __FUNCTION_NAMESPACE_REGISTRIES[ns_utf]
+ except KeyError:
+ registry = __FUNCTION_NAMESPACE_REGISTRIES[ns_utf] = \
+ _XPathFunctionNamespaceRegistry(ns_uri)
+ return registry
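+
+ # Wiring sketch (illustrative only; the URI and names are hypothetical).
+ # The XPath evaluator passes a context object as the first argument to
+ # registered functions, which become callable once a prefix is mapped
+ # to the namespace URI:
+ #
+ # @FunctionNamespace('http://example.com/f')
+ # def double(context, x):
+ # return 2 * x
+ #
+ # root.xpath('f:double(21)', namespaces={'f': 'http://example.com/f'})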
+
+@cython.internal
+cdef class _FunctionNamespaceRegistry(_NamespaceRegistry):
+ def __setitem__(self, name, item):
+ if not callable(item):
+ raise NamespaceRegistryError, \
+ "Registered functions must be callable."
+ if not name:
+ raise ValueError, \
+ "extensions must have non-empty names"
+ self._entries[_utf8(name)] = item
+
+ def __repr__(self):
+ return "FunctionNamespace(%r)" % self._ns_uri
+
+@cython.final
+@cython.internal
+cdef class _XPathFunctionNamespaceRegistry(_FunctionNamespaceRegistry):
+ cdef object _prefix
+ cdef bytes _prefix_utf
+
+ property prefix:
+ "Namespace prefix for extension functions."
+ def __del__(self):
+ self._prefix = None # no prefix configured
+ self._prefix_utf = None
+ def __get__(self):
+ if self._prefix is None:
+ return ''
+ else:
+ return self._prefix
+ def __set__(self, prefix):
+ if prefix == '':
+ prefix = None # empty prefix
+ self._prefix_utf = _utf8(prefix) if prefix is not None else None
+ self._prefix = prefix
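+
+ # Sketch (illustrative only): assigning a prefix makes the namespace
+ # usable in every evaluation without a per-call prefix mapping.
+ #
+ # FunctionNamespace('http://example.com/f').prefix = 'f'
+ # root.xpath('f:double(21)') # no namespaces= argument needed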
+
+cdef list _find_all_extension_prefixes():
+ "Internal lookup function to find all function prefixes for XSLT/XPath."
+ cdef _XPathFunctionNamespaceRegistry registry
+ cdef list ns_prefixes = []
+ for registry in __FUNCTION_NAMESPACE_REGISTRIES.itervalues():
+ if registry._prefix_utf is not None:
+ if registry._ns_uri_utf is not None:
+ ns_prefixes.append(
+ (registry._prefix_utf, registry._ns_uri_utf))
+ return ns_prefixes
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/objectpath.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/objectpath.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..e562a365015830bfd3d24650d1109fe891c31039
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/objectpath.pxi
@@ -0,0 +1,332 @@
+################################################################################
+# ObjectPath
+
+ctypedef struct _ObjectPath:
+ const_xmlChar* href
+ const_xmlChar* name
+ Py_ssize_t index
+
+
+cdef object _NO_DEFAULT = object()
+
+
+cdef class ObjectPath:
+ """ObjectPath(path)
+ Immutable object that represents a compiled object path.
+
+ Example for a path: 'root.child[1].{other}child[25]'
+ """
+ cdef readonly object find
+ cdef list _path
+ cdef object _path_str
+ cdef _ObjectPath* _c_path
+ cdef Py_ssize_t _path_len
+ def __init__(self, path):
+ if python._isString(path):
+ self._path = _parse_object_path_string(path)
+ self._path_str = path
+ else:
+ self._path = _parse_object_path_list(path)
+ self._path_str = '.'.join(path)
+ self._path_len = len(self._path)
+ self._c_path = _build_object_path_segments(self._path)
+ self.find = self.__call__
+
+ def __dealloc__(self):
+ if self._c_path is not NULL:
+ python.lxml_free(self._c_path)
+
+ def __str__(self):
+ return self._path_str
+
+ def __call__(self, _Element root not None, *_default):
+ """Follow the attribute path in the object structure and return the
+ target attribute value.
+
+ If it is not found, either returns a default value (if one was passed
+ as the second argument) or raises AttributeError.
+ """
+ if _default:
+ if len(_default) > 1:
+ raise TypeError, "invalid number of arguments: needs one or two"
+ default = _default[0]
+ else:
+ default = _NO_DEFAULT
+ return _find_object_path(root, self._c_path, self._path_len, default)
+
+ def hasattr(self, _Element root not None):
+ "hasattr(self, root)"
+ try:
+ _find_object_path(root, self._c_path, self._path_len, _NO_DEFAULT)
+ except AttributeError:
+ return False
+ return True
+
+ def setattr(self, _Element root not None, value):
+ """setattr(self, root, value)
+
+ Set the value of the target element in a subtree.
+
+ If any of the children on the path does not exist, it is created.
+ """
+ _create_object_path(root, self._c_path, self._path_len, 1, value)
+
+ def addattr(self, _Element root not None, value):
+ """addattr(self, root, value)
+
+ Append a value to the target element in a subtree.
+
+ If any of the children on the path does not exist, it is created.
+ """
+ _create_object_path(root, self._c_path, self._path_len, 0, value)
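+
+ # Usage sketch (illustrative only), following the docstring example:
+ #
+ # path = ObjectPath('root.child[1]')
+ # el = path.find(root) # same as path(root) / path(root, default)
+ # path.hasattr(root) # True if the path resolves
+ # path.setattr(root, 'xyz') # creates missing children on the way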
+
+
+cdef object __MATCH_PATH_SEGMENT = re.compile(
+ r"(\.?)\s*(?:\{([^}]*)\})?\s*([^.{}\[\]\s]+)\s*(?:\[\s*([-0-9]+)\s*\])?",
+ re.U).match
+
+cdef tuple _RELATIVE_PATH_SEGMENT = (None, None, 0)
+
+
+cdef list _parse_object_path_string(_path):
+ """Parse object path string into a (ns, name, index) list.
+ """
+ cdef bint has_dot
+ cdef unicode path
+ new_path = []
+ if isinstance(_path, bytes):
+ path = (<bytes>_path).decode('ascii')
+ elif type(_path) is not unicode:
+ path = unicode(_path)
+ else:
+ path = _path
+ path = path.strip()
+ if path == '.':
+ return [_RELATIVE_PATH_SEGMENT]
+ path_pos = 0
+ while path:
+ match = __MATCH_PATH_SEGMENT(path, path_pos)
+ if match is None:
+ break
+
+ dot, ns, name, index = match.groups()
+ index = int(index) if index else 0
+ has_dot = dot == '.'
+ if not new_path:
+ if has_dot:
+ # path '.child' => ignore root
+ new_path.append(_RELATIVE_PATH_SEGMENT)
+ elif index:
+ raise ValueError, "index not allowed on root node"
+ elif not has_dot:
+ raise ValueError, "invalid path"
+ if ns is not None:
+ ns = python.PyUnicode_AsUTF8String(ns)
+ name = python.PyUnicode_AsUTF8String(name)
+ new_path.append( (ns, name, index) )
+
+ path_pos = match.end()
+ if not new_path or len(path) > path_pos:
+ raise ValueError, "invalid path"
+ return new_path
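+
+ # For example (sketch): 'root.{ns}child[1]' parses to the segments
+ # [(None, b'root', 0), (b'ns', b'child', 1)].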
+
+
+cdef list _parse_object_path_list(path):
+ """Parse object path sequence into a (ns, name, index) list.
+ """
+ new_path = []
+ for item in path:
+ item = item.strip()
+ if not new_path and item == '':
+ # path '.child' => ignore root
+ ns = name = None
+ index = 0
+ else:
+ ns, name = cetree.getNsTag(item)
+ c_name = _xcstr(name)
+ index_pos = tree.xmlStrchr(c_name, c'[')
+ if index_pos is NULL:
+ index = 0
+ else:
+ index_end = tree.xmlStrchr(index_pos + 1, c']')
+ if index_end is NULL:
+ raise ValueError, "index must be enclosed in []"
+ index = int(index_pos[1:index_end - index_pos])
+ if not new_path and index != 0:
+ raise ValueError, "index not allowed on root node"
+ name = <bytes>c_name[:index_pos - c_name]
+ new_path.append( (ns, name, index) )
+ if not new_path:
+ raise ValueError, "invalid path"
+ return new_path
+
+
+cdef _ObjectPath* _build_object_path_segments(list path_list) except NULL:
+ cdef _ObjectPath* c_path
+ cdef _ObjectPath* c_path_segments
+ c_path_segments = <_ObjectPath*>python.lxml_malloc(len(path_list), sizeof(_ObjectPath))
+ if c_path_segments is NULL:
+ raise MemoryError()
+ c_path = c_path_segments
+ for href, name, index in path_list:
+ c_path[0].href = _xcstr(href) if href is not None else NULL
+ c_path[0].name = _xcstr(name) if name is not None else NULL
+ c_path[0].index = index
+ c_path += 1
+ return c_path_segments
+
+
+cdef _find_object_path(_Element root, _ObjectPath* c_path, Py_ssize_t c_path_len, default_value):
+ """Follow the path to find the target element.
+ """
+ cdef tree.xmlNode* c_node
+ cdef Py_ssize_t c_index
+ c_node = root._c_node
+ c_name = c_path[0].name
+ c_href = c_path[0].href
+ if c_href is NULL or c_href[0] == c'\0':
+ c_href = tree._getNs(c_node)
+ if not cetree.tagMatches(c_node, c_href, c_name):
+ if default_value is not _NO_DEFAULT:
+ return default_value
+ else:
+ raise ValueError(
+ f"root element does not match: need {cetree.namespacedNameFromNsName(c_href, c_name)}, got {root.tag}")
+
+ while c_node is not NULL:
+ c_path_len -= 1
+ if c_path_len <= 0:
+ break
+
+ c_path += 1
+ if c_path[0].href is not NULL:
+ c_href = c_path[0].href # otherwise: keep parent namespace
+ c_name = tree.xmlDictExists(c_node.doc.dict, c_path[0].name, -1)
+ if c_name is NULL:
+ c_name = c_path[0].name
+ c_node = NULL
+ break
+ c_index = c_path[0].index
+ c_node = c_node.last if c_index < 0 else c_node.children
+ c_node = _findFollowingSibling(c_node, c_href, c_name, c_index)
+
+ if c_node is not NULL:
+ return cetree.elementFactory(root._doc, c_node)
+ elif default_value is not _NO_DEFAULT:
+ return default_value
+ else:
+ tag = cetree.namespacedNameFromNsName(c_href, c_name)
+ raise AttributeError, f"no such child: {tag}"
+
+
+cdef _create_object_path(_Element root, _ObjectPath* c_path,
+ Py_ssize_t c_path_len, int replace, value):
+ """Follow the path to find the target element, build the missing children
+ as needed and set the target element to 'value'. If replace is true, an
+ existing value is replaced, otherwise the new value is added.
+ """
+ cdef _Element child
+ cdef tree.xmlNode* c_node
+ cdef tree.xmlNode* c_child
+ cdef Py_ssize_t c_index
+ if c_path_len == 1:
+ raise TypeError, "cannot update root node"
+
+ c_node = root._c_node
+ c_name = c_path[0].name
+ c_href = c_path[0].href
+ if c_href is NULL or c_href[0] == c'\0':
+ c_href = tree._getNs(c_node)
+ if not cetree.tagMatches(c_node, c_href, c_name):
+ raise ValueError(
+ f"root element does not match: need {cetree.namespacedNameFromNsName(c_href, c_name)}, got {root.tag}")
+
+ while c_path_len > 1:
+ c_path_len -= 1
+ c_path += 1
+ if c_path[0].href is not NULL:
+ c_href = c_path[0].href # otherwise: keep parent namespace
+ c_index = c_path[0].index
+ c_name = tree.xmlDictExists(c_node.doc.dict, c_path[0].name, -1)
+ if c_name is NULL:
+ c_name = c_path[0].name
+ c_child = NULL
+ else:
+ c_child = c_node.last if c_index < 0 else c_node.children
+ c_child = _findFollowingSibling(c_child, c_href, c_name, c_index)
+
+ if c_child is not NULL:
+ c_node = c_child
+ elif c_index != 0:
+ raise TypeError, "creating indexed path attributes is not supported"
+ elif c_path_len == 1:
+ _appendValue(cetree.elementFactory(root._doc, c_node),
+ cetree.namespacedNameFromNsName(c_href, c_name),
+ value)
+ return
+ else:
+ child = cetree.makeSubElement(
+ cetree.elementFactory(root._doc, c_node),
+ cetree.namespacedNameFromNsName(c_href, c_name),
+ None, None, None, None)
+ c_node = child._c_node
+
+ # if we get here, the entire path was already there
+ if replace:
+ element = cetree.elementFactory(root._doc, c_node)
+ _replaceElement(element, value)
+ else:
+ _appendValue(cetree.elementFactory(root._doc, c_node.parent),
+ cetree.namespacedName(c_node), value)
+
+
+cdef list _build_descendant_paths(tree.xmlNode* c_node, prefix_string):
+ """Returns a list of all descendant paths.
+ """
+ cdef list path, path_list
+ tag = cetree.namespacedName(c_node)
+ if prefix_string:
+ if prefix_string[-1] != '.':
+ prefix_string += '.'
+ prefix_string = prefix_string + tag
+ else:
+ prefix_string = tag
+ path = [prefix_string]
+ path_list = []
+ _recursive_build_descendant_paths(c_node, path, path_list)
+ return path_list
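+
+ # Sketch: for a document like <root><a/><a/><b/></root> this yields
+ # ['root', 'root.a', 'root.a[1]', 'root.b'].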
+
+
+cdef int _recursive_build_descendant_paths(tree.xmlNode* c_node,
+ list path, list path_list) except -1:
+ """Fills the list 'path_list' with all descendant paths, initial prefix
+ being in the list 'path'.
+ """
+ cdef tree.xmlNode* c_child
+ tags = {}
+ path_list.append('.'.join(path))
+ c_href = tree._getNs(c_node)
+ c_child = c_node.children
+ while c_child is not NULL:
+ while c_child.type != tree.XML_ELEMENT_NODE:
+ c_child = c_child.next
+ if c_child is NULL:
+ return 0
+ if c_href is tree._getNs(c_child):
+ tag = pyunicode(c_child.name)
+ elif c_href is not NULL and tree._getNs(c_child) is NULL:
+ # special case: parent has namespace, child does not
+ tag = '{}' + pyunicode(c_child.name)
+ else:
+ tag = cetree.namespacedName(c_child)
+ count = tags.get(tag)
+ if count is None:
+ tags[tag] = 1
+ else:
+ tags[tag] = count + 1
+ tag += f'[{count}]'
+ path.append(tag)
+ _recursive_build_descendant_paths(c_child, path, path_list)
+ del path[-1]
+ c_child = c_child.next
+ return 0
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/parsertarget.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/parsertarget.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..37c29957dd12e7a685a6450c408baa68e2c3de02
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/parsertarget.pxi
@@ -0,0 +1,180 @@
+# Parser target context (ET target interface)
+
+cdef object inspect_getargspec
+try:
+ from inspect import getfullargspec as inspect_getargspec
+except ImportError:
+ from inspect import getargspec as inspect_getargspec
+
+
+class _TargetParserResult(Exception):
+ # Admittedly, this is somewhat ugly, but it's the easiest way
+ # to push the Python level parser result through the parser
+ # machinery towards the API level functions
+ def __init__(self, result):
+ self.result = result
+
+
+@cython.final
+@cython.internal
+cdef class _PythonSaxParserTarget(_SaxParserTarget):
+ cdef object _target_start
+ cdef object _target_end
+ cdef object _target_data
+ cdef object _target_start_ns
+ cdef object _target_end_ns
+ cdef object _target_doctype
+ cdef object _target_pi
+ cdef object _target_comment
+ cdef bint _start_takes_nsmap
+
+ def __cinit__(self, target):
+ cdef int event_filter
+ event_filter = 0
+ self._start_takes_nsmap = 0
+ try:
+ self._target_start = target.start
+ if self._target_start is not None:
+ event_filter |= SAX_EVENT_START
+ except AttributeError:
+ pass
+ else:
+ try:
+ arguments = inspect_getargspec(self._target_start)
+ if len(arguments[0]) > 3 or arguments[1] is not None:
+ self._start_takes_nsmap = 1
+ except TypeError:
+ pass
+ try:
+ self._target_end = target.end
+ if self._target_end is not None:
+ event_filter |= SAX_EVENT_END
+ except AttributeError:
+ pass
+ try:
+ self._target_start_ns = target.start_ns
+ if self._target_start_ns is not None:
+ event_filter |= SAX_EVENT_START_NS
+ except AttributeError:
+ pass
+ try:
+ self._target_end_ns = target.end_ns
+ if self._target_end_ns is not None:
+ event_filter |= SAX_EVENT_END_NS
+ except AttributeError:
+ pass
+ try:
+ self._target_data = target.data
+ if self._target_data is not None:
+ event_filter |= SAX_EVENT_DATA
+ except AttributeError:
+ pass
+ try:
+ self._target_doctype = target.doctype
+ if self._target_doctype is not None:
+ event_filter |= SAX_EVENT_DOCTYPE
+ except AttributeError:
+ pass
+ try:
+ self._target_pi = target.pi
+ if self._target_pi is not None:
+ event_filter |= SAX_EVENT_PI
+ except AttributeError:
+ pass
+ try:
+ self._target_comment = target.comment
+ if self._target_comment is not None:
+ event_filter |= SAX_EVENT_COMMENT
+ except AttributeError:
+ pass
+ self._sax_event_filter = event_filter
+
+ cdef _handleSaxStart(self, tag, attrib, nsmap):
+ if self._start_takes_nsmap:
+ return self._target_start(tag, attrib, nsmap)
+ else:
+ return self._target_start(tag, attrib)
+
+ cdef _handleSaxEnd(self, tag):
+ return self._target_end(tag)
+
+ cdef _handleSaxStartNs(self, prefix, uri):
+ return self._target_start_ns(prefix, uri)
+
+ cdef _handleSaxEndNs(self, prefix):
+ return self._target_end_ns(prefix)
+
+ cdef int _handleSaxData(self, data) except -1:
+ self._target_data(data)
+
+ cdef int _handleSaxDoctype(self, root_tag, public_id, system_id) except -1:
+ self._target_doctype(root_tag, public_id, system_id)
+
+ cdef _handleSaxPi(self, target, data):
+ return self._target_pi(target, data)
+
+ cdef _handleSaxComment(self, comment):
+ return self._target_comment(comment)
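+
+ # A minimal ET-style target that the wrapper above would map to SAX
+ # events (illustrative only):
+ #
+ # class EchoTarget:
+ # def start(self, tag, attrib): print('start', tag)
+ # def end(self, tag): print('end', tag)
+ # def data(self, text): print('data', text)
+ # def close(self): return 'done'
+ #
+ # parser = etree.XMLParser(target=EchoTarget())
+ # etree.fromstring('<a>hi</a>', parser) # returns 'done' from close()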
+
+
+@cython.final
+@cython.internal
+@cython.no_gc_clear # Required because parent class uses it - Cython bug.
+cdef class _TargetParserContext(_SaxParserContext):
+ """This class maps SAX2 events to the ET parser target interface.
+ """
+ cdef object _python_target
+ cdef int _setTarget(self, target) except -1:
+ self._python_target = target
+ if not isinstance(target, _SaxParserTarget) or \
+ hasattr(target, '__dict__'):
+ target = _PythonSaxParserTarget(target)
+ self._setSaxParserTarget(target)
+ return 0
+
+ cdef _ParserContext _copy(self):
+ cdef _TargetParserContext context
+ context = _ParserContext._copy(self)
+ context._setTarget(self._python_target)
+ return context
+
+ cdef void _cleanupTargetParserContext(self, xmlDoc* result) noexcept:
+ if self._c_ctxt.myDoc is not NULL:
+ if self._c_ctxt.myDoc is not result and \
+ self._c_ctxt.myDoc._private is NULL:
+ # no _Document proxy => orphan
+ tree.xmlFreeDoc(self._c_ctxt.myDoc)
+ self._c_ctxt.myDoc = NULL
+
+ cdef object _handleParseResult(self, _BaseParser parser, xmlDoc* result,
+ filename):
+ cdef bint recover
+ recover = parser._parse_options & xmlparser.XML_PARSE_RECOVER
+ try:
+ if self._has_raised():
+ self._cleanupTargetParserContext(result)
+ self._raise_if_stored()
+ if not self._c_ctxt.wellFormed and not recover:
+ _raiseParseError(self._c_ctxt, filename, self._error_log)
+ except:
+ self._python_target.close()
+ raise
+ return self._python_target.close()
+
+ cdef xmlDoc* _handleParseResultDoc(self, _BaseParser parser,
+ xmlDoc* result, filename) except NULL:
+ cdef bint recover
+ recover = parser._parse_options & xmlparser.XML_PARSE_RECOVER
+ if result is not NULL and result._private is NULL:
+ # no _Document proxy => orphan
+ tree.xmlFreeDoc(result)
+ try:
+ self._cleanupTargetParserContext(result)
+ self._raise_if_stored()
+ if not self._c_ctxt.wellFormed and not recover:
+ _raiseParseError(self._c_ctxt, filename, self._error_log)
+ except:
+ self._python_target.close()
+ raise
+ parse_result = self._python_target.close()
+ raise _TargetParserResult(parse_result)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/proxy.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/proxy.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..f7b47a73a47d3f0792365796df1b80ffd60f6e34
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/proxy.pxi
@@ -0,0 +1,619 @@
+# Proxy functions and low level node allocation stuff
+
+# Proxies represent elements, their reference is stored in the C
+# structure of the respective node to avoid multiple instantiation of
+# the Python class.
+
+@cython.linetrace(False)
+@cython.profile(False)
+cdef inline _Element getProxy(xmlNode* c_node):
+ """Get a proxy for a given node.
+ """
+ #print "getProxy for:", c_node
+ if c_node is not NULL and c_node._private is not NULL:
+ return <_Element>c_node._private
+ else:
+ return None
+
+
+@cython.linetrace(False)
+@cython.profile(False)
+cdef inline bint hasProxy(xmlNode* c_node):
+ if c_node._private is NULL:
+ return False
+ return True
+
+
+@cython.linetrace(False)
+@cython.profile(False)
+cdef inline int _registerProxy(_Element proxy, _Document doc,
+ xmlNode* c_node) except -1:
+ """Register a proxy and type for the node it's proxying for.
+ """
+ #print "registering for:", proxy._c_node
+ assert not hasProxy(c_node), "double registering proxy!"
+ proxy._doc = doc
+ proxy._c_node = c_node
+ c_node._private = proxy
+ return 0
+
+
+@cython.linetrace(False)
+@cython.profile(False)
+cdef inline int _unregisterProxy(_Element proxy) except -1:
+ """Unregister a proxy for the node it's proxying for.
+ """
+ cdef xmlNode* c_node = proxy._c_node
+ assert c_node._private is proxy, "Tried to unregister unknown proxy"
+ c_node._private = NULL
+ return 0
+
+
+################################################################################
+# temporarily make a node the root node of its document
+
+cdef xmlDoc* _fakeRootDoc(xmlDoc* c_base_doc, xmlNode* c_node) except NULL:
+ return _plainFakeRootDoc(c_base_doc, c_node, 1)
+
+cdef xmlDoc* _plainFakeRootDoc(xmlDoc* c_base_doc, xmlNode* c_node,
+ bint with_siblings) except NULL:
+ # build a temporary document that has the given node as root node
+ # note that copy and original must not be modified during its lifetime!!
+ # always call _destroyFakeDoc() after use!
+ cdef xmlNode* c_child
+ cdef xmlNode* c_root
+ cdef xmlNode* c_new_root
+ cdef xmlDoc* c_doc
+ if with_siblings or (c_node.prev is NULL and c_node.next is NULL):
+ c_root = tree.xmlDocGetRootElement(c_base_doc)
+ if c_root is c_node:
+ # already the root node, no siblings
+ return c_base_doc
+
+ c_doc = _copyDoc(c_base_doc, 0) # non recursive!
+ c_new_root = tree.xmlDocCopyNode(c_node, c_doc, 2) # non recursive!
+ tree.xmlDocSetRootElement(c_doc, c_new_root)
+ _copyParentNamespaces(c_node, c_new_root)
+
+ c_new_root.children = c_node.children
+ c_new_root.last = c_node.last
+ c_new_root.next = c_new_root.prev = NULL
+
+ # store original node
+ c_doc._private = c_node
+
+ # divert parent pointers of children
+ c_child = c_new_root.children
+ while c_child is not NULL:
+ c_child.parent = c_new_root
+ c_child = c_child.next
+
+ c_doc.children = c_new_root
+ return c_doc
+
+cdef void _destroyFakeDoc(xmlDoc* c_base_doc, xmlDoc* c_doc) noexcept:
+ # delete a temporary document
+ cdef xmlNode* c_child
+ cdef xmlNode* c_parent
+ cdef xmlNode* c_root
+ if c_doc is c_base_doc:
+ return
+ c_root = tree.xmlDocGetRootElement(c_doc)
+
+ # restore parent pointers of children
+ c_parent = <xmlNode*>c_doc._private
+ c_child = c_root.children
+ while c_child is not NULL:
+ c_child.parent = c_parent
+ c_child = c_child.next
+
+ # prevent recursive removal of children
+ c_root.children = c_root.last = NULL
+ tree.xmlFreeDoc(c_doc)
+
+cdef _Element _fakeDocElementFactory(_Document doc, xmlNode* c_element):
+ """Special element factory for cases where we need to create a fake
+ root document, but still need to instantiate arbitrary nodes from
+ it. If we instantiate the fake root node, things will turn bad
+ when it's destroyed.
+
+ Instead, if we are asked to instantiate the fake root node, we
+ instantiate the original node instead.
+ """
+ if c_element.doc is not doc._c_doc:
+ if c_element.doc._private is not NULL:
+ if c_element is c_element.doc.children:
+ c_element = <xmlNode*>c_element.doc._private
+ #assert c_element.type == tree.XML_ELEMENT_NODE
+ return _elementFactory(doc, c_element)
+
+################################################################################
+# support for freeing tree elements when proxy objects are destroyed
+
+cdef int attemptDeallocation(xmlNode* c_node) noexcept:
+ """Attempt deallocation of c_node (or higher up in tree).
+ """
+ cdef xmlNode* c_top
+ # could be we actually aren't referring to the tree at all
+ if c_node is NULL:
+ #print "not freeing, node is NULL"
+ return 0
+ c_top = getDeallocationTop(c_node)
+ if c_top is not NULL:
+ #print "freeing:", c_top.name
+ _removeText(c_top.next) # tail
+ tree.xmlFreeNode(c_top)
+ return 1
+ return 0
+
+cdef xmlNode* getDeallocationTop(xmlNode* c_node) noexcept:
+ """Return the top of the tree that can be deallocated, or NULL.
+ """
+ cdef xmlNode* c_next
+ #print "trying to do deallocating:", c_node.type
+ if hasProxy(c_node):
+ #print "Not freeing: proxies still exist"
+ return NULL
+ while c_node.parent is not NULL:
+ c_node = c_node.parent
+ #print "checking:", c_current.type
+ if c_node.type == tree.XML_DOCUMENT_NODE or \
+ c_node.type == tree.XML_HTML_DOCUMENT_NODE:
+ #print "not freeing: still in doc"
+ return NULL
+ # if we're still attached to the document, don't deallocate
+ if hasProxy(c_node):
+ #print "Not freeing: proxies still exist"
+ return NULL
+ # see whether we have children to deallocate
+ if not canDeallocateChildNodes(c_node):
+ return NULL
+ # see whether we have siblings to deallocate
+ c_next = c_node.prev
+ while c_next:
+ if _isElement(c_next):
+ if hasProxy(c_next) or not canDeallocateChildNodes(c_next):
+ return NULL
+ c_next = c_next.prev
+ c_next = c_node.next
+ while c_next:
+ if _isElement(c_next):
+ if hasProxy(c_next) or not canDeallocateChildNodes(c_next):
+ return NULL
+ c_next = c_next.next
+ return c_node
+
+cdef int canDeallocateChildNodes(xmlNode* c_parent) noexcept:
+ cdef xmlNode* c_node
+ c_node = c_parent.children
+ tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_parent, c_node, 1)
+ if hasProxy(c_node):
+ return 0
+ tree.END_FOR_EACH_ELEMENT_FROM(c_node)
+ return 1
+
+################################################################################
+# fix _Document references and namespaces when a node changes documents
+
+cdef void _copyParentNamespaces(xmlNode* c_from_node, xmlNode* c_to_node) noexcept nogil:
+ """Copy the namespaces of all ancestors of c_from_node to c_to_node.
+ """
+ cdef xmlNode* c_parent
+ cdef xmlNs* c_ns
+ cdef xmlNs* c_new_ns
+ cdef int prefix_known
+ c_parent = c_from_node.parent
+ while c_parent and (tree._isElementOrXInclude(c_parent) or
+ c_parent.type == tree.XML_DOCUMENT_NODE):
+ c_new_ns = c_parent.nsDef
+ while c_new_ns:
+ # libxml2 will check if the prefix is already defined
+ tree.xmlNewNs(c_to_node, c_new_ns.href, c_new_ns.prefix)
+ c_new_ns = c_new_ns.next
+ c_parent = c_parent.parent
+
+
+ctypedef struct _ns_update_map:
+ xmlNs* old
+ xmlNs* new
+
+
+ctypedef struct _nscache:
+ _ns_update_map* ns_map
+ size_t size
+ size_t last
+
+
+cdef int _growNsCache(_nscache* c_ns_cache) except -1:
+ cdef _ns_update_map* ns_map_ptr
+ if c_ns_cache.size == 0:
+ c_ns_cache.size = 20
+ else:
+ c_ns_cache.size *= 2
+ ns_map_ptr = <_ns_update_map*> python.lxml_realloc(
+ c_ns_cache.ns_map, c_ns_cache.size, sizeof(_ns_update_map))
+ if not ns_map_ptr:
+ python.lxml_free(c_ns_cache.ns_map)
+ c_ns_cache.ns_map = NULL
+ raise MemoryError()
+ c_ns_cache.ns_map = ns_map_ptr
+ return 0
+
+
+cdef inline int _appendToNsCache(_nscache* c_ns_cache,
+ xmlNs* c_old_ns, xmlNs* c_new_ns) except -1:
+ if c_ns_cache.last >= c_ns_cache.size:
+ _growNsCache(c_ns_cache)
+ c_ns_cache.ns_map[c_ns_cache.last] = _ns_update_map(old=c_old_ns, new=c_new_ns)
+ c_ns_cache.last += 1
+
+
+cdef int _stripRedundantNamespaceDeclarations(xmlNode* c_element, _nscache* c_ns_cache,
+ xmlNs** c_del_ns_list) except -1:
+ """Removes namespace declarations from an element that are already
+ defined in its parents. Does not free the xmlNs's, just prepends
+ them to the c_del_ns_list.
+ """
+ cdef xmlNs* c_ns
+ cdef xmlNs* c_ns_next
+ cdef xmlNs** c_nsdef
+ # use a xmlNs** to handle assignments to "c_element.nsDef" correctly
+ c_nsdef = &c_element.nsDef
+ while c_nsdef[0] is not NULL:
+ c_ns = tree.xmlSearchNsByHref(
+ c_element.doc, c_element.parent, c_nsdef[0].href)
+ if c_ns is NULL:
+ # new namespace href => keep and cache the ns declaration
+ _appendToNsCache(c_ns_cache, c_nsdef[0], c_nsdef[0])
+ c_nsdef = &c_nsdef[0].next
+ else:
+ # known namespace href => cache mapping and strip old ns
+ _appendToNsCache(c_ns_cache, c_nsdef[0], c_ns)
+ # cut out c_nsdef.next and prepend it to garbage chain
+ c_ns_next = c_nsdef[0].next
+ c_nsdef[0].next = c_del_ns_list[0]
+ c_del_ns_list[0] = c_nsdef[0]
+ c_nsdef[0] = c_ns_next
+ return 0
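+
+# Example (illustrative): when <b xmlns:p="urn:x"/> is moved below a parent
+# that already declares xmlns:p="urn:x", the child's declaration is unlinked
+# into c_del_ns_list and the old->new mapping is cached, so the rest of the
+# subtree can later be repointed to the parent's xmlNs.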
+
+
+cdef void _cleanUpFromNamespaceAdaptation(xmlNode* c_start_node,
+ _nscache* c_ns_cache, xmlNs* c_del_ns_list) noexcept:
+ # Try to recover from exceptions with really bad timing. We were in the middle
+ # of ripping out xmlNS-es and likely ran out of memory. Try to fix up the tree
+ # by re-adding the original xmlNs declarations (which might still be used in some
+ # places).
+ if c_ns_cache.ns_map:
+ python.lxml_free(c_ns_cache.ns_map)
+ if c_del_ns_list:
+ if not c_start_node.nsDef:
+ c_start_node.nsDef = c_del_ns_list
+ else:
+ c_ns = c_start_node.nsDef
+ while c_ns.next:
+ c_ns = c_ns.next
+ c_ns.next = c_del_ns_list
+
+
+cdef int moveNodeToDocument(_Document doc, xmlDoc* c_source_doc,
+ xmlNode* c_element) except -1:
+ """Fix the xmlNs pointers of a node and its subtree that were moved.
+
+ Originally copied from libxml2's xmlReconciliateNs(). Expects
+ libxml2 doc pointers of node to be correct already, but fixes
+ _Document references.
+
+ For each node in the subtree, we do this:
+
+ 1) Remove redundant declarations of namespace that are already
+ defined in its parents.
+
+ 2) Replace namespaces that are *not* defined on the node or its
+ parents by the equivalent namespace declarations that *are*
+ defined on the node or its parents (possibly using a different
+ prefix). If a namespace is unknown, declare a new one on the
+ node.
+
+    3) Reassign the names of tags and attributes from the dict of the
+ target document *iff* it is different from the dict used in the
+ source subtree.
+
+ 4) Set the Document reference to the new Document (if different).
+ This is done on backtracking to keep the original Document
+ alive as long as possible, until all its elements are updated.
+
+ Note that the namespace declarations are removed from the tree in
+ step 1), but freed only after the complete subtree was traversed
+ and all occurrences were replaced by tree-internal pointers.
+ """
+ cdef xmlNode* c_start_node
+ cdef xmlNode* c_node
+ cdef xmlDoc* c_doc = doc._c_doc
+ cdef tree.xmlAttr* c_attr
+ cdef char* c_name
+ cdef _nscache c_ns_cache = [NULL, 0, 0]
+ cdef xmlNs* c_del_ns_list = NULL
+ cdef proxy_count = 0
+
+ if not tree._isElementOrXInclude(c_element):
+ return 0
+
+ c_start_node = c_element
+
+ tree.BEGIN_FOR_EACH_FROM(c_element, c_element, 1)
+ if tree._isElementOrXInclude(c_element):
+ if hasProxy(c_element):
+ proxy_count += 1
+
+ # 1) cut out namespaces defined here that are already known by
+ # the ancestors
+ if c_element.nsDef is not NULL:
+ try:
+ _stripRedundantNamespaceDeclarations(c_element, &c_ns_cache, &c_del_ns_list)
+ except:
+ _cleanUpFromNamespaceAdaptation(c_start_node, &c_ns_cache, c_del_ns_list)
+ raise
+
+ # 2) make sure the namespaces of an element and its attributes
+ # are declared in this document (i.e. on the node or its parents)
+ if c_element.ns is not NULL:
+ _fixCNs(doc, c_start_node, c_element, &c_ns_cache, c_del_ns_list)
+
+        c_node = <xmlNode*>c_element.properties
+ while c_node is not NULL:
+ if c_node.ns is not NULL:
+ _fixCNs(doc, c_start_node, c_node, &c_ns_cache, c_del_ns_list)
+ c_node = c_node.next
+
+ tree.END_FOR_EACH_FROM(c_element)
+
+ # free now unused namespace declarations
+ if c_del_ns_list is not NULL:
+ tree.xmlFreeNsList(c_del_ns_list)
+
+ # cleanup
+ if c_ns_cache.ns_map is not NULL:
+ python.lxml_free(c_ns_cache.ns_map)
+
+ # 3) fix the names in the tree if we moved it from a different thread
+ if doc._c_doc.dict is not c_source_doc.dict:
+ fixThreadDictNames(c_start_node, c_source_doc.dict, doc._c_doc.dict)
+
+ # 4) fix _Document references
+ # (and potentially deallocate the source document)
+ if proxy_count > 0:
+ if proxy_count == 1 and c_start_node._private is not NULL:
+ proxy = getProxy(c_start_node)
+ if proxy is not None:
+ if proxy._doc is not doc:
+ proxy._doc = doc
+ else:
+ fixElementDocument(c_start_node, doc, proxy_count)
+ else:
+ fixElementDocument(c_start_node, doc, proxy_count)
+
+ return 0
+
+
+cdef void _setTreeDoc(xmlNode* c_node, xmlDoc* c_doc) noexcept:
+ """Adaptation of 'xmlSetTreeDoc()' that deep-fixes the document links iteratively.
+ It avoids https://gitlab.gnome.org/GNOME/libxml2/issues/42
+ """
+ tree.BEGIN_FOR_EACH_FROM(c_node, c_node, 1)
+ if c_node.type == tree.XML_ELEMENT_NODE:
+ c_attr = c_node.properties
+ while c_attr:
+ if c_attr.atype == tree.XML_ATTRIBUTE_ID:
+ tree.xmlRemoveID(c_node.doc, c_attr)
+ c_attr.doc = c_doc
+ _fixDocChildren(c_attr.children, c_doc)
+ c_attr = c_attr.next
+ # Set doc link for all nodes, not only elements.
+ c_node.doc = c_doc
+ tree.END_FOR_EACH_FROM(c_node)
+
+
+cdef inline void _fixDocChildren(xmlNode* c_child, xmlDoc* c_doc) noexcept:
+ while c_child:
+ c_child.doc = c_doc
+ if c_child.children:
+ _fixDocChildren(c_child.children, c_doc)
+ c_child = c_child.next
+
+
+cdef int _fixCNs(_Document doc, xmlNode* c_start_node, xmlNode* c_node,
+ _nscache* c_ns_cache, xmlNs* c_del_ns_list) except -1:
+ cdef xmlNs* c_ns = NULL
+ cdef bint is_prefixed_attr = (c_node.type == tree.XML_ATTRIBUTE_NODE and c_node.ns.prefix)
+
+ for ns_map in c_ns_cache.ns_map[:c_ns_cache.last]:
+ if c_node.ns is ns_map.old:
+ if is_prefixed_attr and not ns_map.new.prefix:
+ # avoid dropping prefix from attributes
+ continue
+ c_ns = ns_map.new
+ break
+
+ if c_ns:
+ c_node.ns = c_ns
+ else:
+ # not in cache or not acceptable
+ # => find a replacement from this document
+ try:
+ c_ns = doc._findOrBuildNodeNs(
+ c_start_node, c_node.ns.href, c_node.ns.prefix,
+ c_node.type == tree.XML_ATTRIBUTE_NODE)
+ c_node.ns = c_ns
+ _appendToNsCache(c_ns_cache, c_node.ns, c_ns)
+ except:
+ _cleanUpFromNamespaceAdaptation(c_start_node, c_ns_cache, c_del_ns_list)
+ raise
+ return 0
+
+
+cdef int fixElementDocument(xmlNode* c_element, _Document doc,
+ size_t proxy_count) except -1:
+ cdef xmlNode* c_node = c_element
+ cdef _Element proxy = None # init-to-None required due to fake-loop below
+ tree.BEGIN_FOR_EACH_FROM(c_element, c_node, 1)
+ if c_node._private is not NULL:
+ proxy = getProxy(c_node)
+ if proxy is not None:
+ if proxy._doc is not doc:
+ proxy._doc = doc
+ proxy_count -= 1
+ if proxy_count == 0:
+ return 0
+ tree.END_FOR_EACH_FROM(c_node)
+
+
+cdef void fixThreadDictNames(xmlNode* c_element,
+ tree.xmlDict* c_src_dict,
+ tree.xmlDict* c_dict) noexcept nogil:
+ # re-assign the names of tags and attributes
+ #
+ # this should only be called when the element is based on a
+ # different libxml2 tag name dictionary
+ if c_element.type == tree.XML_DOCUMENT_NODE or \
+ c_element.type == tree.XML_HTML_DOCUMENT_NODE:
+ # may define "xml" namespace
+ fixThreadDictNsForNode(c_element, c_src_dict, c_dict)
+ if c_element.doc.extSubset:
+ fixThreadDictNamesForDtd(c_element.doc.extSubset, c_src_dict, c_dict)
+ if c_element.doc.intSubset:
+ fixThreadDictNamesForDtd(c_element.doc.intSubset, c_src_dict, c_dict)
+ c_element = c_element.children
+ while c_element is not NULL:
+ fixThreadDictNamesForNode(c_element, c_src_dict, c_dict)
+ c_element = c_element.next
+ elif tree._isElementOrXInclude(c_element):
+ fixThreadDictNamesForNode(c_element, c_src_dict, c_dict)
+
+
+cdef inline void _fixThreadDictPtr(const_xmlChar** c_ptr,
+ tree.xmlDict* c_src_dict,
+ tree.xmlDict* c_dict) noexcept nogil:
+ c_str = c_ptr[0]
+ if c_str and c_src_dict and tree.xmlDictOwns(c_src_dict, c_str):
+ # return value can be NULL on memory error, but we don't handle that here
+ c_str = tree.xmlDictLookup(c_dict, c_str, -1)
+ if c_str:
+ c_ptr[0] = c_str
+
+
+cdef void fixThreadDictNamesForNode(xmlNode* c_element,
+ tree.xmlDict* c_src_dict,
+ tree.xmlDict* c_dict) noexcept nogil:
+ cdef xmlNode* c_node = c_element
+ tree.BEGIN_FOR_EACH_FROM(c_element, c_node, 1)
+ if c_node.type in (tree.XML_ELEMENT_NODE, tree.XML_XINCLUDE_START):
+ fixThreadDictNamesForAttributes(
+ c_node.properties, c_src_dict, c_dict)
+ fixThreadDictNsForNode(c_node, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict)
+ elif c_node.type == tree.XML_TEXT_NODE:
+ # libxml2's SAX2 parser interns some indentation space
+ fixThreadDictContentForNode(c_node, c_src_dict, c_dict)
+ elif c_node.type == tree.XML_COMMENT_NODE:
+ pass # don't touch c_node.name
+ else:
+ _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict)
+ tree.END_FOR_EACH_FROM(c_node)
+
+
+cdef inline void fixThreadDictNamesForAttributes(tree.xmlAttr* c_attr,
+ tree.xmlDict* c_src_dict,
+ tree.xmlDict* c_dict) noexcept nogil:
+ cdef xmlNode* c_child
+    cdef xmlNode* c_node = <xmlNode*>c_attr
+ while c_node is not NULL:
+ if c_node.type not in (tree.XML_TEXT_NODE, tree.XML_COMMENT_NODE):
+ _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict)
+ # libxml2 keeps some (!) attribute values in the dict
+ c_child = c_node.children
+ while c_child is not NULL:
+ fixThreadDictContentForNode(c_child, c_src_dict, c_dict)
+ c_child = c_child.next
+ c_node = c_node.next
+
+
+cdef inline void fixThreadDictContentForNode(xmlNode* c_node,
+ tree.xmlDict* c_src_dict,
+ tree.xmlDict* c_dict) noexcept nogil:
+ if c_node.content is not NULL and \
+           c_node.content is not <xmlChar*>&c_node.properties:
+ if tree.xmlDictOwns(c_src_dict, c_node.content):
+ # result can be NULL on memory error, but we don't handle that here
+ c_node.content = tree.xmlDictLookup(c_dict, c_node.content, -1)
+
+
+cdef inline void fixThreadDictNsForNode(xmlNode* c_node,
+ tree.xmlDict* c_src_dict,
+ tree.xmlDict* c_dict) noexcept nogil:
+ cdef xmlNs* c_ns = c_node.nsDef
+ while c_ns is not NULL:
+ _fixThreadDictPtr(&c_ns.href, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_ns.prefix, c_src_dict, c_dict)
+ c_ns = c_ns.next
+
+
+cdef void fixThreadDictNamesForDtd(tree.xmlDtd* c_dtd,
+ tree.xmlDict* c_src_dict,
+ tree.xmlDict* c_dict) noexcept nogil:
+ cdef xmlNode* c_node
+ cdef tree.xmlElement* c_element
+ cdef tree.xmlAttribute* c_attribute
+ cdef tree.xmlEntity* c_entity
+
+ c_node = c_dtd.children
+ while c_node:
+ if c_node.type == tree.XML_ELEMENT_DECL:
+            c_element = <tree.xmlElement*>c_node
+ if c_element.content:
+ _fixThreadDictPtr(&c_element.content.name, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_element.content.prefix, c_src_dict, c_dict)
+ c_attribute = c_element.attributes
+ while c_attribute:
+ _fixThreadDictPtr(&c_attribute.defaultValue, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_attribute.name, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_attribute.prefix, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_attribute.elem, c_src_dict, c_dict)
+ c_attribute = c_attribute.nexth
+ elif c_node.type == tree.XML_ENTITY_DECL:
+            c_entity = <tree.xmlEntity*>c_node
+ _fixThreadDictPtr(&c_entity.name, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_entity.ExternalID, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_entity.SystemID, c_src_dict, c_dict)
+ _fixThreadDictPtr(&c_entity.content, c_src_dict, c_dict)
+ c_node = c_node.next
+
+
+################################################################################
+# adopt an xmlDoc from an external libxml2 document source
+
+cdef _Document _adoptForeignDoc(xmlDoc* c_doc, _BaseParser parser=None, bint is_owned=True):
+ """Convert and wrap an externally produced xmlDoc for use in lxml.
+ Assures that all '_private' pointers are NULL to prevent accidental
+ dereference into lxml proxy objects.
+ """
+ if c_doc is NULL:
+ raise ValueError("Illegal document provided: NULL")
+ if c_doc.type not in (tree.XML_DOCUMENT_NODE, tree.XML_HTML_DOCUMENT_NODE):
+ doc_type = c_doc.type
+ if is_owned:
+ tree.xmlFreeDoc(c_doc)
+ raise ValueError(f"Illegal document provided: expected XML or HTML, found {doc_type}")
+
+    cdef xmlNode* c_node = <xmlNode*>c_doc
+
+ if is_owned:
+        tree.BEGIN_FOR_EACH_FROM(<xmlNode*>c_doc, c_node, 1)
+ c_node._private = NULL
+ tree.END_FOR_EACH_FROM(c_node)
+ else:
+ # create a fresh copy that lxml owns
+ c_doc = tree.xmlCopyDoc(c_doc, 1)
+ if c_doc is NULL:
+ raise MemoryError()
+
+ return _documentFactory(c_doc, parser)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/pyclasslookup.py b/llmeval-env/lib/python3.10/site-packages/lxml/pyclasslookup.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e1496dfb762108154a0c6c321a5e8fcf73de909
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/pyclasslookup.py
@@ -0,0 +1,3 @@
+# dummy module for backwards compatibility
+
+from lxml.etree import PythonElementClassLookup
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..e01f7c9f85b11f983f2642a154b62a51b8f416ef
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/saxparser.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/saxparser.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..dc03df9af112bf093b6027a52c1760b83ab1b5c6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/saxparser.pxi
@@ -0,0 +1,875 @@
+# SAX-like interfaces
+
+class XMLSyntaxAssertionError(XMLSyntaxError, AssertionError):
+ """
+ An XMLSyntaxError that additionally inherits from AssertionError for
+ ElementTree / backwards compatibility reasons.
+
+ This class may get replaced by a plain XMLSyntaxError in a future version.
+ """
+ def __init__(self, message):
+ XMLSyntaxError.__init__(self, message, None, 0, 1)
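+
+# Raised e.g. by TreeBuilder.close() further below when 'end' calls are
+# missing (illustrative sketch):
+#
+#   builder = TreeBuilder()
+#   builder.start('root', {})
+#   builder.close()   # raises XMLSyntaxAssertionError("missing end tags")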
+
+
+ctypedef enum _SaxParserEvents:
+ SAX_EVENT_START = 1 << 0
+ SAX_EVENT_END = 1 << 1
+ SAX_EVENT_DATA = 1 << 2
+ SAX_EVENT_DOCTYPE = 1 << 3
+ SAX_EVENT_PI = 1 << 4
+ SAX_EVENT_COMMENT = 1 << 5
+ SAX_EVENT_START_NS = 1 << 6
+ SAX_EVENT_END_NS = 1 << 7
+
+ctypedef enum _ParseEventFilter:
+ PARSE_EVENT_FILTER_START = 1 << 0
+ PARSE_EVENT_FILTER_END = 1 << 1
+ PARSE_EVENT_FILTER_START_NS = 1 << 2
+ PARSE_EVENT_FILTER_END_NS = 1 << 3
+ PARSE_EVENT_FILTER_COMMENT = 1 << 4
+ PARSE_EVENT_FILTER_PI = 1 << 5
+
+
+cdef int _buildParseEventFilter(events) except -1:
+ cdef int event_filter = 0
+ for event in events:
+ if event == 'start':
+ event_filter |= PARSE_EVENT_FILTER_START
+ elif event == 'end':
+ event_filter |= PARSE_EVENT_FILTER_END
+ elif event == 'start-ns':
+ event_filter |= PARSE_EVENT_FILTER_START_NS
+ elif event == 'end-ns':
+ event_filter |= PARSE_EVENT_FILTER_END_NS
+ elif event == 'comment':
+ event_filter |= PARSE_EVENT_FILTER_COMMENT
+ elif event == 'pi':
+ event_filter |= PARSE_EVENT_FILTER_PI
+ else:
+ raise ValueError, f"invalid event name '{event}'"
+ return event_filter
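+
+# e.g. _buildParseEventFilter(('start', 'end-ns')) returns
+# PARSE_EVENT_FILTER_START | PARSE_EVENT_FILTER_END_NS (== 1 | 8).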
+
+
+cdef class _SaxParserTarget:
+ cdef int _sax_event_filter
+
+ cdef _handleSaxStart(self, tag, attrib, nsmap):
+ return None
+ cdef _handleSaxEnd(self, tag):
+ return None
+ cdef int _handleSaxData(self, data) except -1:
+ return 0
+ cdef int _handleSaxDoctype(self, root_tag, public_id, system_id) except -1:
+ return 0
+ cdef _handleSaxPi(self, target, data):
+ return None
+ cdef _handleSaxComment(self, comment):
+ return None
+ cdef _handleSaxStartNs(self, prefix, uri):
+ return None
+ cdef _handleSaxEndNs(self, prefix):
+ return None
+
+
+#@cython.final
+@cython.internal
+@cython.no_gc_clear # Required because parent class uses it - Cython bug.
+cdef class _SaxParserContext(_ParserContext):
+ """This class maps SAX2 events to parser target events.
+ """
+ cdef _SaxParserTarget _target
+ cdef _BaseParser _parser
+ cdef xmlparser.startElementNsSAX2Func _origSaxStart
+ cdef xmlparser.endElementNsSAX2Func _origSaxEnd
+ cdef xmlparser.startElementSAXFunc _origSaxStartNoNs
+ cdef xmlparser.endElementSAXFunc _origSaxEndNoNs
+ cdef xmlparser.charactersSAXFunc _origSaxData
+ cdef xmlparser.cdataBlockSAXFunc _origSaxCData
+ cdef xmlparser.internalSubsetSAXFunc _origSaxDoctype
+ cdef xmlparser.commentSAXFunc _origSaxComment
+ cdef xmlparser.processingInstructionSAXFunc _origSaxPI
+ cdef xmlparser.startDocumentSAXFunc _origSaxStartDocument
+
+ # for event collecting
+ cdef int _event_filter
+ cdef list _ns_stack
+ cdef list _node_stack
+ cdef _ParseEventsIterator events_iterator
+
+ # for iterparse
+ cdef _Element _root
+ cdef _MultiTagMatcher _matcher
+
+ def __cinit__(self, _BaseParser parser):
+ self._ns_stack = []
+ self._node_stack = []
+ self._parser = parser
+ self.events_iterator = _ParseEventsIterator()
+
+ cdef void _setSaxParserTarget(self, _SaxParserTarget target) noexcept:
+ self._target = target
+
+ cdef void _initParserContext(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
+ _ParserContext._initParserContext(self, c_ctxt)
+ if self._target is not None:
+ self._connectTarget(c_ctxt)
+ elif self._event_filter:
+ self._connectEvents(c_ctxt)
+
+ cdef void _connectTarget(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
+ """Wrap original SAX2 callbacks to call into parser target.
+ """
+ sax = c_ctxt.sax
+ self._origSaxStart = sax.startElementNs = NULL
+ self._origSaxStartNoNs = sax.startElement = NULL
+ if self._target._sax_event_filter & (SAX_EVENT_START |
+ SAX_EVENT_START_NS |
+ SAX_EVENT_END_NS):
+ # intercept => overwrite orig callback
+            # FIXME: also intercept when collecting END events
+ if sax.initialized == xmlparser.XML_SAX2_MAGIC:
+ sax.startElementNs = _handleSaxTargetStart
+ if self._target._sax_event_filter & SAX_EVENT_START:
+ sax.startElement = _handleSaxTargetStartNoNs
+
+ self._origSaxEnd = sax.endElementNs = NULL
+ self._origSaxEndNoNs = sax.endElement = NULL
+ if self._target._sax_event_filter & (SAX_EVENT_END |
+ SAX_EVENT_END_NS):
+ if sax.initialized == xmlparser.XML_SAX2_MAGIC:
+ sax.endElementNs = _handleSaxEnd
+ if self._target._sax_event_filter & SAX_EVENT_END:
+ sax.endElement = _handleSaxEndNoNs
+
+ self._origSaxData = sax.characters = sax.cdataBlock = NULL
+ if self._target._sax_event_filter & SAX_EVENT_DATA:
+ sax.characters = sax.cdataBlock = _handleSaxData
+
+ # doctype propagation is always required for entity replacement
+ self._origSaxDoctype = sax.internalSubset
+ if self._target._sax_event_filter & SAX_EVENT_DOCTYPE:
+ sax.internalSubset = _handleSaxTargetDoctype
+
+ self._origSaxPI = sax.processingInstruction = NULL
+ if self._target._sax_event_filter & SAX_EVENT_PI:
+ sax.processingInstruction = _handleSaxTargetPI
+
+ self._origSaxComment = sax.comment = NULL
+ if self._target._sax_event_filter & SAX_EVENT_COMMENT:
+ sax.comment = _handleSaxTargetComment
+
+ # enforce entity replacement
+ sax.reference = NULL
+ c_ctxt.replaceEntities = 1
+
+ cdef void _connectEvents(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
+ """Wrap original SAX2 callbacks to collect parse events without parser target.
+ """
+ sax = c_ctxt.sax
+ self._origSaxStartDocument = sax.startDocument
+ sax.startDocument = _handleSaxStartDocument
+
+ # only override "start" event handler if needed
+ self._origSaxStart = sax.startElementNs
+ if self._event_filter == 0 or c_ctxt.html or \
+ self._event_filter & (PARSE_EVENT_FILTER_START |
+ PARSE_EVENT_FILTER_END |
+ PARSE_EVENT_FILTER_START_NS |
+ PARSE_EVENT_FILTER_END_NS):
+ sax.startElementNs = _handleSaxStart
+
+ self._origSaxStartNoNs = sax.startElement
+ if self._event_filter == 0 or c_ctxt.html or \
+ self._event_filter & (PARSE_EVENT_FILTER_START |
+ PARSE_EVENT_FILTER_END):
+ sax.startElement = _handleSaxStartNoNs
+
+ # only override "end" event handler if needed
+ self._origSaxEnd = sax.endElementNs
+ if self._event_filter == 0 or \
+ self._event_filter & (PARSE_EVENT_FILTER_END |
+ PARSE_EVENT_FILTER_END_NS):
+ sax.endElementNs = _handleSaxEnd
+
+ self._origSaxEndNoNs = sax.endElement
+ if self._event_filter == 0 or \
+ self._event_filter & PARSE_EVENT_FILTER_END:
+ sax.endElement = _handleSaxEndNoNs
+
+ self._origSaxComment = sax.comment
+ if self._event_filter & PARSE_EVENT_FILTER_COMMENT:
+ sax.comment = _handleSaxComment
+
+ self._origSaxPI = sax.processingInstruction
+ if self._event_filter & PARSE_EVENT_FILTER_PI:
+ sax.processingInstruction = _handleSaxPIEvent
+
+ cdef _setEventFilter(self, events, tag):
+ self._event_filter = _buildParseEventFilter(events)
+ if not self._event_filter or tag is None or tag == '*':
+ self._matcher = None
+ else:
+ self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag)
+
+ cdef int startDocument(self, xmlDoc* c_doc) except -1:
+ try:
+ self._doc = _documentFactory(c_doc, self._parser)
+ finally:
+ self._parser = None # clear circular reference ASAP
+ if self._matcher is not None:
+ self._matcher.cacheTags(self._doc, True) # force entry in libxml2 dict
+ return 0
+
+ cdef int pushEvent(self, event, xmlNode* c_node) except -1:
+ cdef _Element root
+ if self._root is None:
+ root = self._doc.getroot()
+ if root is not None and root._c_node.type == tree.XML_ELEMENT_NODE:
+ self._root = root
+ node = _elementFactory(self._doc, c_node)
+ self.events_iterator._events.append( (event, node) )
+ return 0
+
+ cdef int flushEvents(self) except -1:
+ events = self.events_iterator._events
+ while self._node_stack:
+ events.append( ('end', self._node_stack.pop()) )
+ _pushSaxNsEndEvents(self)
+ while self._ns_stack:
+ _pushSaxNsEndEvents(self)
+
+ cdef void _handleSaxException(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept:
+ if c_ctxt.errNo == xmlerror.XML_ERR_OK:
+ c_ctxt.errNo = xmlerror.XML_ERR_INTERNAL_ERROR
+ # stop parsing immediately
+ c_ctxt.wellFormed = 0
+ c_ctxt.disableSAX = 1
+ c_ctxt.instate = xmlparser.XML_PARSER_EOF
+ self._store_raised()
+
+
+@cython.final
+@cython.internal
+cdef class _ParseEventsIterator:
+ """A reusable parse events iterator"""
+ cdef list _events
+ cdef int _event_index
+
+ def __cinit__(self):
+ self._events = []
+ self._event_index = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ cdef int event_index = self._event_index
+ events = self._events
+ if event_index >= 2**10 or event_index * 2 >= len(events):
+ if event_index:
+ # clean up from time to time
+ del events[:event_index]
+ self._event_index = event_index = 0
+ if event_index >= len(events):
+ raise StopIteration
+ item = events[event_index]
+ self._event_index = event_index + 1
+ return item
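+    # Cleanup amortisation sketch: once ~2**10 events were consumed (or at
+    # least half of the list), the consumed prefix is dropped in a single
+    # "del events[:event_index]" instead of popping per event, which keeps
+    # __next__() amortised O(1).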
+
+
+cdef list _build_prefix_uri_list(_SaxParserContext context, int c_nb_namespaces,
+ const_xmlChar** c_namespaces):
+ "Build [(prefix, uri)] list of declared namespaces."
+ cdef int i
+ namespaces = []
+ for i in xrange(c_nb_namespaces):
+ namespaces.append((funicodeOrEmpty(c_namespaces[0]), funicode(c_namespaces[1])))
+ c_namespaces += 2
+ return namespaces
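+
+# e.g. for xmlns="urn:a" xmlns:p="urn:b", libxml2 passes the flat pair array
+# [NULL, "urn:a", "p", "urn:b"], which becomes [('', 'urn:a'), ('p', 'urn:b')]
+# (funicodeOrEmpty() turns the NULL default-namespace prefix into '').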
+
+
+cdef void _handleSaxStart(
+ void* ctxt, const_xmlChar* c_localname, const_xmlChar* c_prefix,
+ const_xmlChar* c_namespace, int c_nb_namespaces,
+ const_xmlChar** c_namespaces,
+ int c_nb_attributes, int c_nb_defaulted,
+ const_xmlChar** c_attributes) noexcept with gil:
+ cdef int i
+ cdef size_t c_len
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ cdef int event_filter = context._event_filter
+ try:
+ if (c_nb_namespaces and
+ event_filter & (PARSE_EVENT_FILTER_START_NS |
+ PARSE_EVENT_FILTER_END_NS)):
+ declared_namespaces = _build_prefix_uri_list(
+ context, c_nb_namespaces, c_namespaces)
+ if event_filter & PARSE_EVENT_FILTER_START_NS:
+ for prefix_uri_tuple in declared_namespaces:
+ context.events_iterator._events.append(("start-ns", prefix_uri_tuple))
+ else:
+ declared_namespaces = None
+
+ context._origSaxStart(c_ctxt, c_localname, c_prefix, c_namespace,
+ c_nb_namespaces, c_namespaces, c_nb_attributes,
+ c_nb_defaulted, c_attributes)
+ if c_ctxt.html:
+ _fixHtmlDictNodeNames(c_ctxt.dict, c_ctxt.node)
+ # The HTML parser in libxml2 reports the missing opening tags when it finds
+ # misplaced ones, but with tag names from C string constants that ignore the
+ # parser dict. Thus, we need to intern the name ourselves.
+ c_localname = tree.xmlDictLookup(c_ctxt.dict, c_localname, -1)
+ if c_localname is NULL:
+ raise MemoryError()
+
+ if event_filter & PARSE_EVENT_FILTER_END_NS:
+ context._ns_stack.append(declared_namespaces)
+ if event_filter & (PARSE_EVENT_FILTER_END |
+ PARSE_EVENT_FILTER_START):
+ _pushSaxStartEvent(context, c_ctxt, c_namespace, c_localname, None)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxTargetStart(
+ void* ctxt, const_xmlChar* c_localname, const_xmlChar* c_prefix,
+ const_xmlChar* c_namespace, int c_nb_namespaces,
+ const_xmlChar** c_namespaces,
+ int c_nb_attributes, int c_nb_defaulted,
+ const_xmlChar** c_attributes) noexcept with gil:
+ cdef int i
+ cdef size_t c_len
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+
+ cdef int event_filter = context._event_filter
+ cdef int sax_event_filter = context._target._sax_event_filter
+ try:
+ if c_nb_namespaces:
+ declared_namespaces = _build_prefix_uri_list(
+ context, c_nb_namespaces, c_namespaces)
+
+ if event_filter & PARSE_EVENT_FILTER_START_NS:
+ for prefix_uri_tuple in declared_namespaces:
+ context.events_iterator._events.append(("start-ns", prefix_uri_tuple))
+
+ if sax_event_filter & SAX_EVENT_START_NS:
+ for prefix, uri in declared_namespaces:
+ context._target._handleSaxStartNs(prefix, uri)
+ else:
+ declared_namespaces = None
+
+ if sax_event_filter & SAX_EVENT_START:
+ if c_nb_defaulted > 0:
+ # only add default attributes if we asked for them
+ if c_ctxt.loadsubset & xmlparser.XML_COMPLETE_ATTRS == 0:
+ c_nb_attributes -= c_nb_defaulted
+ if c_nb_attributes == 0:
+ attrib = IMMUTABLE_EMPTY_MAPPING
+ else:
+ attrib = {}
+ for i in xrange(c_nb_attributes):
+ name = _namespacedNameFromNsName(
+ c_attributes[2], c_attributes[0])
+ if c_attributes[3] is NULL:
+ value = ''
+ else:
+ c_len = c_attributes[4] - c_attributes[3]
+ value = c_attributes[3][:c_len].decode('utf8')
+ attrib[name] = value
+ c_attributes += 5
+
+ nsmap = dict(declared_namespaces) if c_nb_namespaces else IMMUTABLE_EMPTY_MAPPING
+
+ element = _callTargetSaxStart(
+ context, c_ctxt,
+ _namespacedNameFromNsName(c_namespace, c_localname),
+ attrib, nsmap)
+ else:
+ element = None
+
+ if (event_filter & PARSE_EVENT_FILTER_END_NS or
+ sax_event_filter & SAX_EVENT_END_NS):
+ context._ns_stack.append(declared_namespaces)
+ if event_filter & (PARSE_EVENT_FILTER_END |
+ PARSE_EVENT_FILTER_START):
+ _pushSaxStartEvent(context, c_ctxt, c_namespace,
+ c_localname, element)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxStartNoNs(void* ctxt, const_xmlChar* c_name,
+ const_xmlChar** c_attributes) noexcept with gil:
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ context._origSaxStartNoNs(c_ctxt, c_name, c_attributes)
+ if c_ctxt.html:
+ _fixHtmlDictNodeNames(c_ctxt.dict, c_ctxt.node)
+ # The HTML parser in libxml2 reports the missing opening tags when it finds
+ # misplaced ones, but with tag names from C string constants that ignore the
+ # parser dict. Thus, we need to intern the name ourselves.
+ c_name = tree.xmlDictLookup(c_ctxt.dict, c_name, -1)
+ if c_name is NULL:
+ raise MemoryError()
+ if context._event_filter & (PARSE_EVENT_FILTER_END |
+ PARSE_EVENT_FILTER_START):
+ _pushSaxStartEvent(context, c_ctxt, NULL, c_name, None)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxTargetStartNoNs(void* ctxt, const_xmlChar* c_name,
+ const_xmlChar** c_attributes) noexcept with gil:
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ if c_attributes is NULL:
+ attrib = IMMUTABLE_EMPTY_MAPPING
+ else:
+ attrib = {}
+ while c_attributes[0] is not NULL:
+ name = funicode(c_attributes[0])
+ attrib[name] = funicodeOrEmpty(c_attributes[1])
+ c_attributes += 2
+ element = _callTargetSaxStart(
+ context, c_ctxt, funicode(c_name),
+ attrib, IMMUTABLE_EMPTY_MAPPING)
+ if context._event_filter & (PARSE_EVENT_FILTER_END |
+ PARSE_EVENT_FILTER_START):
+ _pushSaxStartEvent(context, c_ctxt, NULL, c_name, element)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef _callTargetSaxStart(_SaxParserContext context,
+ xmlparser.xmlParserCtxt* c_ctxt,
+ tag, attrib, nsmap):
+ element = context._target._handleSaxStart(tag, attrib, nsmap)
+ if element is not None and c_ctxt.input is not NULL:
+ if isinstance(element, _Element):
+ (<_Element>element)._c_node.line = (
+                <unsigned short>c_ctxt.input.line
+ if c_ctxt.input.line < 65535 else 65535)
+ return element
+
+
+cdef int _pushSaxStartEvent(_SaxParserContext context,
+ xmlparser.xmlParserCtxt* c_ctxt,
+ const_xmlChar* c_href,
+ const_xmlChar* c_name, node) except -1:
+ if (context._matcher is None or
+ context._matcher.matchesNsTag(c_href, c_name)):
+ if node is None and context._target is None:
+ assert context._doc is not None
+ node = _elementFactory(context._doc, c_ctxt.node)
+ if context._event_filter & PARSE_EVENT_FILTER_START:
+ context.events_iterator._events.append(('start', node))
+ if (context._target is None and
+ context._event_filter & PARSE_EVENT_FILTER_END):
+ context._node_stack.append(node)
+ return 0
+
+
+cdef void _handleSaxEnd(void* ctxt, const_xmlChar* c_localname,
+ const_xmlChar* c_prefix,
+ const_xmlChar* c_namespace) noexcept with gil:
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ if context._target is not None:
+ if context._target._sax_event_filter & SAX_EVENT_END:
+ node = context._target._handleSaxEnd(
+ _namespacedNameFromNsName(c_namespace, c_localname))
+ else:
+ node = None
+ else:
+ context._origSaxEnd(c_ctxt, c_localname, c_prefix, c_namespace)
+ node = None
+ _pushSaxEndEvent(context, c_namespace, c_localname, node)
+ _pushSaxNsEndEvents(context)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxEndNoNs(void* ctxt, const_xmlChar* c_name) noexcept with gil:
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ if context._target is not None:
+ node = context._target._handleSaxEnd(funicode(c_name))
+ else:
+ context._origSaxEndNoNs(c_ctxt, c_name)
+ node = None
+ _pushSaxEndEvent(context, NULL, c_name, node)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef int _pushSaxNsEndEvents(_SaxParserContext context) except -1:
+ cdef bint build_events = context._event_filter & PARSE_EVENT_FILTER_END_NS
+ cdef bint call_target = (
+ context._target is not None
+ and context._target._sax_event_filter & SAX_EVENT_END_NS)
+ if not build_events and not call_target:
+ return 0
+
+ cdef list declared_namespaces = context._ns_stack.pop()
+ if declared_namespaces is None:
+ return 0
+
+ cdef tuple prefix_uri
+ for prefix_uri in reversed(declared_namespaces):
+ if call_target:
+ context._target._handleSaxEndNs(prefix_uri[0])
+ if build_events:
+ context.events_iterator._events.append(('end-ns', None))
+
+ return 0
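+
+# e.g. closing <a xmlns="urn:a" xmlns:p="urn:b"> emits one ('end-ns', None)
+# event per declaration, walking the declarations in reverse order.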
+
+
+cdef int _pushSaxEndEvent(_SaxParserContext context,
+ const_xmlChar* c_href,
+ const_xmlChar* c_name, node) except -1:
+ if context._event_filter & PARSE_EVENT_FILTER_END:
+ if (context._matcher is None or
+ context._matcher.matchesNsTag(c_href, c_name)):
+ if context._target is None:
+ node = context._node_stack.pop()
+ context.events_iterator._events.append(('end', node))
+ return 0
+
+
+cdef void _handleSaxData(void* ctxt, const_xmlChar* c_data, int data_len) noexcept with gil:
+ # can only be called if parsing with a target
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ context._target._handleSaxData(
+ c_data[:data_len].decode('utf8'))
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxTargetDoctype(void* ctxt, const_xmlChar* c_name,
+ const_xmlChar* c_public,
+ const_xmlChar* c_system) noexcept with gil:
+ # can only be called if parsing with a target
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ context._target._handleSaxDoctype(
+ funicodeOrNone(c_name),
+ funicodeOrNone(c_public),
+ funicodeOrNone(c_system))
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxStartDocument(void* ctxt) noexcept with gil:
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ context._origSaxStartDocument(ctxt)
+ c_doc = c_ctxt.myDoc
+ try:
+ context.startDocument(c_doc)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxTargetPI(void* ctxt, const_xmlChar* c_target,
+ const_xmlChar* c_data) noexcept with gil:
+ # can only be called if parsing with a target
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ pi = context._target._handleSaxPi(
+ funicodeOrNone(c_target),
+ funicodeOrEmpty(c_data))
+ if context._event_filter & PARSE_EVENT_FILTER_PI:
+ context.events_iterator._events.append(('pi', pi))
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxPIEvent(void* ctxt, const_xmlChar* target,
+ const_xmlChar* data) noexcept with gil:
+ # can only be called when collecting pi events
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ context._origSaxPI(ctxt, target, data)
+ c_node = _findLastEventNode(c_ctxt)
+ if c_node is NULL:
+ return
+ try:
+ context.pushEvent('pi', c_node)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxTargetComment(void* ctxt, const_xmlChar* c_data) noexcept with gil:
+ # can only be called if parsing with a target
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ try:
+ comment = context._target._handleSaxComment(funicodeOrEmpty(c_data))
+ if context._event_filter & PARSE_EVENT_FILTER_COMMENT:
+ context.events_iterator._events.append(('comment', comment))
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef void _handleSaxComment(void* ctxt, const_xmlChar* text) noexcept with gil:
+ # can only be called when collecting comment events
+    c_ctxt = <xmlparser.xmlParserCtxt*>ctxt
+ if c_ctxt._private is NULL or c_ctxt.disableSAX:
+ return
+ context = <_SaxParserContext>c_ctxt._private
+ context._origSaxComment(ctxt, text)
+ c_node = _findLastEventNode(c_ctxt)
+ if c_node is NULL:
+ return
+ try:
+ context.pushEvent('comment', c_node)
+ except:
+ context._handleSaxException(c_ctxt)
+ finally:
+ return # swallow any further exceptions
+
+
+cdef inline xmlNode* _findLastEventNode(xmlparser.xmlParserCtxt* c_ctxt):
+ # this mimics what libxml2 creates for comments/PIs
+ if c_ctxt.inSubset == 1:
+ return c_ctxt.myDoc.intSubset.last
+ elif c_ctxt.inSubset == 2:
+ return c_ctxt.myDoc.extSubset.last
+ elif c_ctxt.node is NULL:
+ return c_ctxt.myDoc.last
+ elif c_ctxt.node.type == tree.XML_ELEMENT_NODE:
+ return c_ctxt.node.last
+ else:
+ return c_ctxt.node.next
+
+
+############################################################
+## ET compatible XML tree builder
+############################################################
+
+cdef class TreeBuilder(_SaxParserTarget):
+ """TreeBuilder(self, element_factory=None, parser=None,
+ comment_factory=None, pi_factory=None,
+ insert_comments=True, insert_pis=True)
+
+ Parser target that builds a tree from parse event callbacks.
+
+ The factory arguments can be used to influence the creation of
+ elements, comments and processing instructions.
+
+ By default, comments and processing instructions are inserted into
+ the tree, but they can be ignored by passing the respective flags.
+
+ The final tree is returned by the ``close()`` method.
+ """
+ cdef _BaseParser _parser
+ cdef object _factory
+ cdef object _comment_factory
+ cdef object _pi_factory
+ cdef list _data
+ cdef list _element_stack
+ cdef object _element_stack_pop
+ cdef _Element _last # may be None
+ cdef bint _in_tail
+ cdef bint _insert_comments
+ cdef bint _insert_pis
+
+ def __init__(self, *, element_factory=None, parser=None,
+ comment_factory=None, pi_factory=None,
+ bint insert_comments=True, bint insert_pis=True):
+ self._sax_event_filter = \
+ SAX_EVENT_START | SAX_EVENT_END | SAX_EVENT_DATA | \
+ SAX_EVENT_PI | SAX_EVENT_COMMENT
+ self._data = [] # data collector
+ self._element_stack = [] # element stack
+ self._element_stack_pop = self._element_stack.pop
+ self._last = None # last element
+ self._in_tail = 0 # true if we're after an end tag
+ self._factory = element_factory
+ self._comment_factory = comment_factory if comment_factory is not None else Comment
+ self._pi_factory = pi_factory if pi_factory is not None else ProcessingInstruction
+ self._insert_comments = insert_comments
+ self._insert_pis = insert_pis
+ self._parser = parser
+
+ @cython.final
+ cdef int _flush(self) except -1:
+ if self._data:
+ if self._last is not None:
+ text = "".join(self._data)
+ if self._in_tail:
+ assert self._last.tail is None, "internal error (tail)"
+ self._last.tail = text
+ else:
+ assert self._last.text is None, "internal error (text)"
+ self._last.text = text
+ del self._data[:]
+ return 0
+
+ # internal SAX event handlers
+
+ @cython.final
+ cdef _handleSaxStart(self, tag, attrib, nsmap):
+ self._flush()
+ if self._factory is not None:
+ self._last = self._factory(tag, attrib)
+ if self._element_stack:
+ _appendChild(self._element_stack[-1], self._last)
+ elif self._element_stack:
+ self._last = _makeSubElement(
+ self._element_stack[-1], tag, None, None, attrib, nsmap, None)
+ else:
+ self._last = _makeElement(
+ tag, NULL, None, self._parser, None, None, attrib, nsmap, None)
+ self._element_stack.append(self._last)
+ self._in_tail = 0
+ return self._last
+
+ @cython.final
+ cdef _handleSaxEnd(self, tag):
+ self._flush()
+ self._last = self._element_stack_pop()
+ self._in_tail = 1
+ return self._last
+
+ @cython.final
+ cdef int _handleSaxData(self, data) except -1:
+ self._data.append(data)
+
+ @cython.final
+ cdef _handleSaxPi(self, target, data):
+ elem = self._pi_factory(target, data)
+ if self._insert_pis:
+ self._flush()
+ self._last = elem
+ if self._element_stack:
+ _appendChild(self._element_stack[-1], self._last)
+ self._in_tail = 1
+ return self._last
+
+ @cython.final
+ cdef _handleSaxComment(self, comment):
+ elem = self._comment_factory(comment)
+ if self._insert_comments:
+ self._flush()
+ self._last = elem
+ if self._element_stack:
+ _appendChild(self._element_stack[-1], self._last)
+ self._in_tail = 1
+ return elem
+
+ # Python level event handlers
+
+ def close(self):
+ """close(self)
+
+ Flushes the builder buffers, and returns the toplevel document
+ element. Raises XMLSyntaxError on inconsistencies.
+ """
+ if self._element_stack:
+ raise XMLSyntaxAssertionError("missing end tags")
+ # TODO: this does not necessarily seem like an error case. Why not just return None?
+ if self._last is None:
+ raise XMLSyntaxAssertionError("missing toplevel element")
+ return self._last
+
+ def data(self, data):
+ """data(self, data)
+
+ Adds text to the current element. The value should be either an
+ 8-bit string containing ASCII text, or a Unicode string.
+ """
+ self._handleSaxData(data)
+
+ def start(self, tag, attrs, nsmap=None):
+ """start(self, tag, attrs, nsmap=None)
+
+ Opens a new element.
+ """
+ if nsmap is None:
+ nsmap = IMMUTABLE_EMPTY_MAPPING
+ return self._handleSaxStart(tag, attrs, nsmap)
+
+ def end(self, tag):
+ """end(self, tag)
+
+ Closes the current element.
+ """
+ element = self._handleSaxEnd(tag)
+ assert self._last.tag == tag,\
+ f"end tag mismatch (expected {self._last.tag}, got {tag})"
+ return element
+
+ def pi(self, target, data=None):
+ """pi(self, target, data=None)
+
+ Creates a processing instruction using the factory, appends it
+ (unless disabled) and returns it.
+ """
+ return self._handleSaxPi(target, data)
+
+ def comment(self, comment):
+ """comment(self, comment)
+
+ Creates a comment using the factory, appends it (unless disabled)
+ and returns it.
+ """
+ return self._handleSaxComment(comment)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/serializer.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/serializer.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..0a7a1e43649912603e98105e4e248b9eba3af6da
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/serializer.pxi
@@ -0,0 +1,1871 @@
+# XML serialization and output functions
+
+cdef object GzipFile
+from gzip import GzipFile
+
+
+cdef class SerialisationError(LxmlError):
+ """A libxml2 error that occurred during serialisation.
+ """
+
+
+cdef enum _OutputMethods:
+ OUTPUT_METHOD_XML
+ OUTPUT_METHOD_HTML
+ OUTPUT_METHOD_TEXT
+
+
+cdef int _findOutputMethod(method) except -1:
+ if method is None:
+ return OUTPUT_METHOD_XML
+ method = method.lower()
+ if method == "xml":
+ return OUTPUT_METHOD_XML
+ if method == "html":
+ return OUTPUT_METHOD_HTML
+ if method == "text":
+ return OUTPUT_METHOD_TEXT
+ raise ValueError(f"unknown output method {method!r}")
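+
+# e.g. _findOutputMethod('HTML') == OUTPUT_METHOD_HTML; None defaults to XML
+# and anything unknown raises ValueError.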
+
+
+cdef _textToString(xmlNode* c_node, encoding, bint with_tail):
+ cdef bint needs_conversion
+ cdef const_xmlChar* c_text
+ cdef xmlNode* c_text_node
+ cdef tree.xmlBuffer* c_buffer
+ cdef int error_result
+
+ c_buffer = tree.xmlBufferCreate()
+ if c_buffer is NULL:
+ raise MemoryError()
+
+ with nogil:
+ error_result = tree.xmlNodeBufGetContent(c_buffer, c_node)
+ if with_tail:
+ c_text_node = _textNodeOrSkip(c_node.next)
+ while c_text_node is not NULL:
+                tree.xmlBufferWriteChar(c_buffer, <const_char*>c_text_node.content)
+ c_text_node = _textNodeOrSkip(c_text_node.next)
+ c_text = tree.xmlBufferContent(c_buffer)
+
+ if error_result < 0 or c_text is NULL:
+ tree.xmlBufferFree(c_buffer)
+ raise SerialisationError, "Error during serialisation (out of memory?)"
+
+ try:
+ needs_conversion = 0
+ if encoding is unicode:
+ needs_conversion = 1
+ elif encoding is not None:
+ # Python prefers lower case encoding names
+ encoding = encoding.lower()
+ if encoding not in ('utf8', 'utf-8'):
+ if encoding == 'ascii':
+ if isutf8l(c_text, tree.xmlBufferLength(c_buffer)):
+ # will raise a decode error below
+ needs_conversion = 1
+ else:
+ needs_conversion = 1
+
+ if needs_conversion:
+            text = (<unsigned char*>c_text)[:tree.xmlBufferLength(c_buffer)].decode('utf8')
+ if encoding is not unicode:
+ encoding = _utf8(encoding)
+ text = python.PyUnicode_AsEncodedString(
+ text, encoding, 'strict')
+ else:
+            text = (<unsigned char*>c_text)[:tree.xmlBufferLength(c_buffer)]
+ finally:
+ tree.xmlBufferFree(c_buffer)
+ return text
+
+
+cdef _tostring(_Element element, encoding, doctype, method,
+ bint write_xml_declaration, bint write_complete_document,
+ bint pretty_print, bint with_tail, int standalone):
+ """Serialize an element to an encoded string representation of its XML
+ tree.
+ """
+ cdef tree.xmlOutputBuffer* c_buffer
+ cdef tree.xmlBuf* c_result_buffer
+ cdef tree.xmlCharEncodingHandler* enchandler
+ cdef const_char* c_enc
+ cdef const_xmlChar* c_version
+ cdef const_xmlChar* c_doctype
+ cdef int c_method
+ cdef int error_result
+ if element is None:
+ return None
+ _assertValidNode(element)
+ c_method = _findOutputMethod(method)
+ if c_method == OUTPUT_METHOD_TEXT:
+ return _textToString(element._c_node, encoding, with_tail)
+ if encoding is None or encoding is unicode:
+ c_enc = NULL
+ else:
+ encoding = _utf8(encoding)
+ c_enc = _cstr(encoding)
+ if doctype is None:
+ c_doctype = NULL
+ else:
+ doctype = _utf8(doctype)
+ c_doctype = _xcstr(doctype)
+    # note: we must *both* find the encoding handler *and* use the
+    # encoding during output
+ enchandler = tree.xmlFindCharEncodingHandler(c_enc)
+ if enchandler is NULL and c_enc is not NULL:
+ if encoding is not None:
+ encoding = encoding.decode('UTF-8')
+ raise LookupError, f"unknown encoding: '{encoding}'"
+ c_buffer = tree.xmlAllocOutputBuffer(enchandler)
+ if c_buffer is NULL:
+ tree.xmlCharEncCloseFunc(enchandler)
+ raise MemoryError()
+
+ with nogil:
+ _writeNodeToBuffer(c_buffer, element._c_node, c_enc, c_doctype, c_method,
+ write_xml_declaration, write_complete_document,
+ pretty_print, with_tail, standalone)
+ tree.xmlOutputBufferFlush(c_buffer)
+ if c_buffer.conv is not NULL:
+ c_result_buffer = c_buffer.conv
+ else:
+ c_result_buffer = c_buffer.buffer
+
+ error_result = c_buffer.error
+ if error_result != xmlerror.XML_ERR_OK:
+ tree.xmlOutputBufferClose(c_buffer)
+ _raiseSerialisationError(error_result)
+
+ try:
+ if encoding is unicode:
+            result = (<char*>tree.xmlBufContent(
+                c_result_buffer))[:tree.xmlBufUse(c_result_buffer)].decode('UTF-8')
+        else:
+            result = <bytes>(<char*>tree.xmlBufContent(
+                c_result_buffer))[:tree.xmlBufUse(c_result_buffer)]
+ finally:
+ error_result = tree.xmlOutputBufferClose(c_buffer)
+ if error_result == -1:
+ _raiseSerialisationError(error_result)
+ return result
+
+cdef bytes _tostringC14N(element_or_tree, bint exclusive, bint with_comments, inclusive_ns_prefixes):
+ cdef xmlDoc* c_doc
+ cdef xmlChar* c_buffer = NULL
+ cdef int byte_count = -1
+ cdef bytes result
+ cdef _Document doc
+ cdef _Element element
+ cdef xmlChar **c_inclusive_ns_prefixes
+
+ if isinstance(element_or_tree, _Element):
+ _assertValidNode(<_Element>element_or_tree)
+ doc = (<_Element>element_or_tree)._doc
+ c_doc = _plainFakeRootDoc(doc._c_doc, (<_Element>element_or_tree)._c_node, 0)
+ else:
+ doc = _documentOrRaise(element_or_tree)
+ _assertValidDoc(doc)
+ c_doc = doc._c_doc
+
+ c_inclusive_ns_prefixes = _convert_ns_prefixes(c_doc.dict, inclusive_ns_prefixes) if inclusive_ns_prefixes else NULL
+ try:
+ with nogil:
+ byte_count = c14n.xmlC14NDocDumpMemory(
+ c_doc, NULL, exclusive, c_inclusive_ns_prefixes, with_comments, &c_buffer)
+
+ finally:
+ _destroyFakeDoc(doc._c_doc, c_doc)
+ if c_inclusive_ns_prefixes is not NULL:
+ python.lxml_free(c_inclusive_ns_prefixes)
+
+ if byte_count < 0 or c_buffer is NULL:
+ if c_buffer is not NULL:
+ tree.xmlFree(c_buffer)
+ raise C14NError, "C14N failed"
+ try:
+ result = c_buffer[:byte_count]
+ finally:
+ tree.xmlFree(c_buffer)
+ return result
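+
+# Reached e.g. through the public API (sketch):
+#   etree.tostring(doc, method='c14n', exclusive=True, with_comments=False)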
+
+cdef _raiseSerialisationError(int error_result):
+ if error_result == xmlerror.XML_ERR_NO_MEMORY:
+ raise MemoryError()
+ message = ErrorTypes._getName(error_result)
+ if message is None:
+ message = f"unknown error {error_result}"
+ raise SerialisationError, message
+
+############################################################
+# low-level serialisation functions
+
+cdef void _writeDoctype(tree.xmlOutputBuffer* c_buffer,
+ const_xmlChar* c_doctype) noexcept nogil:
+ tree.xmlOutputBufferWrite(c_buffer, tree.xmlStrlen(c_doctype),
+                              <const_char*>c_doctype)
+ tree.xmlOutputBufferWriteString(c_buffer, "\n")
+
+cdef void _writeNodeToBuffer(tree.xmlOutputBuffer* c_buffer,
+ xmlNode* c_node, const_char* encoding, const_xmlChar* c_doctype,
+ int c_method, bint write_xml_declaration,
+ bint write_complete_document,
+ bint pretty_print, bint with_tail,
+ int standalone) noexcept nogil:
+ cdef xmlNode* c_nsdecl_node
+ cdef xmlDoc* c_doc = c_node.doc
+ if write_xml_declaration and c_method == OUTPUT_METHOD_XML:
+ _writeDeclarationToBuffer(c_buffer, c_doc.version, encoding, standalone)
+
+ # comments/processing instructions before doctype declaration
+ if write_complete_document and not c_buffer.error and c_doc.intSubset:
+        _writePrevSiblings(c_buffer, <xmlNode*>c_doc.intSubset, encoding, pretty_print)
+
+ if c_doctype:
+ _writeDoctype(c_buffer, c_doctype)
+ # write internal DTD subset, preceding PIs/comments, etc.
+ if write_complete_document and not c_buffer.error:
+ if c_doctype is NULL:
+ _writeDtdToBuffer(c_buffer, c_doc, c_node.name, c_method, encoding)
+ _writePrevSiblings(c_buffer, c_node, encoding, pretty_print)
+
+ c_nsdecl_node = c_node
+ if not c_node.parent or c_node.parent.type != tree.XML_DOCUMENT_NODE:
+ # copy the node and add namespaces from parents
+ # this is required to make libxml write them
+ c_nsdecl_node = tree.xmlCopyNode(c_node, 2)
+ if not c_nsdecl_node:
+ c_buffer.error = xmlerror.XML_ERR_NO_MEMORY
+ return
+ _copyParentNamespaces(c_node, c_nsdecl_node)
+
+ c_nsdecl_node.parent = c_node.parent
+ c_nsdecl_node.children = c_node.children
+ c_nsdecl_node.last = c_node.last
+
+ # write node
+ if c_method == OUTPUT_METHOD_HTML:
+ tree.htmlNodeDumpFormatOutput(
+ c_buffer, c_doc, c_nsdecl_node, encoding, pretty_print)
+ else:
+ tree.xmlNodeDumpOutput(
+ c_buffer, c_doc, c_nsdecl_node, 0, pretty_print, encoding)
+
+ if c_nsdecl_node is not c_node:
+ # clean up
+ c_nsdecl_node.children = c_nsdecl_node.last = NULL
+ tree.xmlFreeNode(c_nsdecl_node)
+
+ if c_buffer.error:
+ return
+
+ # write tail, trailing comments, etc.
+ if with_tail:
+ _writeTail(c_buffer, c_node, encoding, c_method, pretty_print)
+ if write_complete_document:
+ _writeNextSiblings(c_buffer, c_node, encoding, pretty_print)
+ if pretty_print:
+ tree.xmlOutputBufferWrite(c_buffer, 1, "\n")
+
+cdef void _writeDeclarationToBuffer(tree.xmlOutputBuffer* c_buffer,
+ const_xmlChar* version, const_char* encoding,
+ int standalone) noexcept nogil:
+ if version is NULL:
+ version = "1.0"
+ tree.xmlOutputBufferWrite(c_buffer, 15, "version)
+ tree.xmlOutputBufferWrite(c_buffer, 12, "' encoding='")
+ tree.xmlOutputBufferWriteString(c_buffer, encoding)
+ if standalone == 0:
+ tree.xmlOutputBufferWrite(c_buffer, 20, "' standalone='no'?>\n")
+ elif standalone == 1:
+ tree.xmlOutputBufferWrite(c_buffer, 21, "' standalone='yes'?>\n")
+ else:
+ tree.xmlOutputBufferWrite(c_buffer, 4, "'?>\n")
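+
+# e.g. standalone == 1 and encoding "UTF-8" produce:
+#   <?xml version='1.0' encoding='UTF-8' standalone='yes'?>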
+
+cdef void _writeDtdToBuffer(tree.xmlOutputBuffer* c_buffer,
+ xmlDoc* c_doc, const_xmlChar* c_root_name,
+ int c_method, const_char* encoding) noexcept nogil:
+ cdef tree.xmlDtd* c_dtd
+ cdef xmlNode* c_node
+ cdef char* quotechar
+ c_dtd = c_doc.intSubset
+ if not c_dtd or not c_dtd.name:
+ return
+
+ # Name in document type declaration must match the root element tag.
+ # For XML, case sensitive match, for HTML insensitive.
+ if c_method == OUTPUT_METHOD_HTML:
+ if tree.xmlStrcasecmp(c_root_name, c_dtd.name) != 0:
+ return
+ else:
+ if tree.xmlStrcmp(c_root_name, c_dtd.name) != 0:
+ return
+
+ tree.xmlOutputBufferWrite(c_buffer, 10, "c_dtd.name)
+
+ cdef const_xmlChar* public_id = c_dtd.ExternalID
+ cdef const_xmlChar* sys_url = c_dtd.SystemID
+ if public_id and public_id[0] == b'\0':
+ public_id = NULL
+ if sys_url and sys_url[0] == b'\0':
+ sys_url = NULL
+
+ if public_id:
+ tree.xmlOutputBufferWrite(c_buffer, 9, ' PUBLIC "')
+        tree.xmlOutputBufferWriteString(c_buffer, <const_char*>public_id)
+ if sys_url:
+ tree.xmlOutputBufferWrite(c_buffer, 2, '" ')
+ else:
+ tree.xmlOutputBufferWrite(c_buffer, 1, '"')
+ elif sys_url:
+ tree.xmlOutputBufferWrite(c_buffer, 8, ' SYSTEM ')
+
+ if sys_url:
+ if tree.xmlStrchr(sys_url, b'"'):
+ quotechar = '\''
+ else:
+ quotechar = '"'
+ tree.xmlOutputBufferWrite(c_buffer, 1, quotechar)
+        tree.xmlOutputBufferWriteString(c_buffer, <const_char*>sys_url)
+ tree.xmlOutputBufferWrite(c_buffer, 1, quotechar)
+
+ if (not c_dtd.entities and not c_dtd.elements and
+ not c_dtd.attributes and not c_dtd.notations and
+ not c_dtd.pentities):
+ tree.xmlOutputBufferWrite(c_buffer, 2, '>\n')
+ return
+
+ tree.xmlOutputBufferWrite(c_buffer, 3, ' [\n')
+ if c_dtd.notations and not c_buffer.error:
+ c_buf = tree.xmlBufferCreate()
+ if not c_buf:
+ c_buffer.error = xmlerror.XML_ERR_NO_MEMORY
+ return
+ tree.xmlDumpNotationTable(c_buf, c_dtd.notations)
+ tree.xmlOutputBufferWrite(
+ c_buffer, tree.xmlBufferLength(c_buf),
+            <const_char*>tree.xmlBufferContent(c_buf))
+ tree.xmlBufferFree(c_buf)
+ c_node = c_dtd.children
+ while c_node and not c_buffer.error:
+ tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_node, 0, 0, encoding)
+ c_node = c_node.next
+ tree.xmlOutputBufferWrite(c_buffer, 3, "]>\n")
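+
+# e.g. a DTD carrying only a system ID serialises as
+#   <!DOCTYPE root SYSTEM "root.dtd">
+# while notations, entities etc. additionally trigger the "[...]" subset form.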
+
+cdef void _writeTail(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node,
+ const_char* encoding, int c_method, bint pretty_print) noexcept nogil:
+ "Write the element tail."
+ c_node = c_node.next
+ while c_node and not c_buffer.error and c_node.type in (
+ tree.XML_TEXT_NODE, tree.XML_CDATA_SECTION_NODE):
+ if c_method == OUTPUT_METHOD_HTML:
+ tree.htmlNodeDumpFormatOutput(
+ c_buffer, c_node.doc, c_node, encoding, pretty_print)
+ else:
+ tree.xmlNodeDumpOutput(
+ c_buffer, c_node.doc, c_node, 0, pretty_print, encoding)
+ c_node = c_node.next
+
+cdef void _writePrevSiblings(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node,
+ const_char* encoding, bint pretty_print) noexcept nogil:
+ cdef xmlNode* c_sibling
+ if c_node.parent and _isElement(c_node.parent):
+ return
+ # we are at a root node, so add PI and comment siblings
+ c_sibling = c_node
+ while c_sibling.prev and \
+ (c_sibling.prev.type == tree.XML_PI_NODE or
+ c_sibling.prev.type == tree.XML_COMMENT_NODE):
+ c_sibling = c_sibling.prev
+ while c_sibling is not c_node and not c_buffer.error:
+ tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_sibling, 0,
+ pretty_print, encoding)
+ if pretty_print:
+ tree.xmlOutputBufferWriteString(c_buffer, "\n")
+ c_sibling = c_sibling.next
+
+cdef void _writeNextSiblings(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node,
+ const_char* encoding, bint pretty_print) noexcept nogil:
+ cdef xmlNode* c_sibling
+ if c_node.parent and _isElement(c_node.parent):
+ return
+ # we are at a root node, so add PI and comment siblings
+ c_sibling = c_node.next
+ while not c_buffer.error and c_sibling and \
+ (c_sibling.type == tree.XML_PI_NODE or
+ c_sibling.type == tree.XML_COMMENT_NODE):
+ if pretty_print:
+ tree.xmlOutputBufferWriteString(c_buffer, "\n")
+ tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_sibling, 0,
+ pretty_print, encoding)
+ c_sibling = c_sibling.next
+
+
+# copied and adapted from libxml2
+cdef unsigned char *xmlSerializeHexCharRef(unsigned char *out, int val) noexcept:
+ cdef xmlChar *ptr
+ cdef const xmlChar* hexdigits = b"0123456789ABCDEF"
+
+ out[0] = b'&'
+ out += 1
+ out[0] = b'#'
+ out += 1
+ out[0] = b'x'
+ out += 1
+
+ if val < 0x10:
+ ptr = out
+ elif val < 0x100:
+ ptr = out + 1
+ elif val < 0x1000:
+ ptr = out + 2
+ elif val < 0x10000:
+ ptr = out + 3
+ elif val < 0x100000:
+ ptr = out + 4
+ else:
+ ptr = out + 5
+
+ out = ptr + 1
+ while val > 0:
+ ptr[0] = hexdigits[val & 0xF]
+ ptr -= 1
+ val >>= 4
+
+ out[0] = b';'
+ out += 1
+ out[0] = 0
+
+ return out
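+
+# e.g. xmlSerializeHexCharRef(out, 0x20AC) writes "&#x20AC;" plus a
+# terminating NUL into 'out' and returns a pointer to that NUL.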
+
+
+# copied and adapted from libxml2 (xmlBufAttrSerializeTxtContent())
+cdef _write_attr_string(tree.xmlOutputBuffer* buf, const char *string):
+ cdef const char *base
+ cdef const char *cur
+ cdef const unsigned char *ucur
+
+ cdef unsigned char tmp[12]
+ cdef int val = 0
+ cdef int l
+
+ if string == NULL:
+ return
+
+ base = cur = string
+ while cur[0] != 0:
+ if cur[0] == b'\n':
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+ tree.xmlOutputBufferWrite(buf, 5, "
")
+ cur += 1
+ base = cur
+
+ elif cur[0] == b'\r':
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+ tree.xmlOutputBufferWrite(buf, 5, "
")
+ cur += 1
+ base = cur
+
+ elif cur[0] == b'\t':
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+            tree.xmlOutputBufferWrite(buf, 4, "&#9;")
+ cur += 1
+ base = cur
+
+ elif cur[0] == b'"':
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+            tree.xmlOutputBufferWrite(buf, 6, "&quot;")
+ cur += 1
+ base = cur
+
+ elif cur[0] == b'<':
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+            tree.xmlOutputBufferWrite(buf, 4, "&lt;")
+ cur += 1
+ base = cur
+
+ elif cur[0] == b'>':
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+            tree.xmlOutputBufferWrite(buf, 4, "&gt;")
+ cur += 1
+ base = cur
+ elif cur[0] == b'&':
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+            tree.xmlOutputBufferWrite(buf, 5, "&amp;")
+ cur += 1
+ base = cur
+
+        elif (<unsigned char>cur[0] >= 0x80) and (cur[1] != 0):
+
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+ ucur = cur
+
+ if ucur[0] < 0xC0:
+ # invalid UTF-8 sequence
+ val = ucur[0]
+ l = 1
+
+ elif ucur[0] < 0xE0:
+ val = (ucur[0]) & 0x1F
+ val <<= 6
+ val |= (ucur[1]) & 0x3F
+ l = 2
+
+ elif (ucur[0] < 0xF0) and (ucur[2] != 0):
+ val = (ucur[0]) & 0x0F
+ val <<= 6
+ val |= (ucur[1]) & 0x3F
+ val <<= 6
+ val |= (ucur[2]) & 0x3F
+ l = 3
+
+ elif (ucur[0] < 0xF8) and (ucur[2] != 0) and (ucur[3] != 0):
+ val = (ucur[0]) & 0x07
+ val <<= 6
+ val |= (ucur[1]) & 0x3F
+ val <<= 6
+ val |= (ucur[2]) & 0x3F
+ val <<= 6
+ val |= (ucur[3]) & 0x3F
+ l = 4
+ else:
+ # invalid UTF-8 sequence
+ val = ucur[0]
+ l = 1
+
+ if (l == 1) or (not tree.xmlIsCharQ(val)):
+ raise ValueError(f"Invalid character: {val:X}")
+
+ # We could do multiple things here. Just save
+ # as a char ref
+ xmlSerializeHexCharRef(tmp, val)
+ tree.xmlOutputBufferWrite(buf, len(tmp), tmp)
+ cur += l
+ base = cur
+
+ else:
+ cur += 1
+
+ if base != cur:
+ tree.xmlOutputBufferWrite(buf, cur - base, base)
+
+
+############################################################
+# output to file-like objects
+
+cdef object io_open
+from io import open as io_open
+
+cdef object gzip
+import gzip
+
+cdef object getwriter
+from codecs import getwriter
+cdef object utf8_writer = getwriter('utf8')
+
+cdef object contextmanager
+from contextlib import contextmanager
+
+cdef object _open_utf8_file
+
+@contextmanager
+def _open_utf8_file(file, compression=0):
+ file = _getFSPathOrObject(file)
+ if _isString(file):
+ if compression:
+ with gzip.GzipFile(file, mode='wb', compresslevel=compression) as zf:
+ yield utf8_writer(zf)
+ else:
+ with io_open(file, 'w', encoding='utf8') as f:
+ yield f
+ else:
+ if compression:
+ with gzip.GzipFile(fileobj=file, mode='wb', compresslevel=compression) as zf:
+ yield utf8_writer(zf)
+ else:
+ yield utf8_writer(file)
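+
+# Usage sketch (illustrative): the context manager yields a text stream that
+# encodes to UTF-8 and optionally gzip-compresses, e.g.:
+#
+#     with _open_utf8_file("out.xml.gz", compression=9) as f:
+#         f.write(u"<root/>")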
+
+
+@cython.final
+@cython.internal
+cdef class _FilelikeWriter:
+ cdef object _filelike
+ cdef object _close_filelike
+ cdef _ExceptionContext _exc_context
+ cdef _ErrorLog error_log
+ def __cinit__(self, filelike, exc_context=None, compression=None, close=False):
+ if compression is not None and compression > 0:
+ filelike = GzipFile(
+ fileobj=filelike, mode='wb', compresslevel=compression)
+ self._close_filelike = filelike.close
+ elif close:
+ self._close_filelike = filelike.close
+ self._filelike = filelike
+ if exc_context is None:
+ self._exc_context = _ExceptionContext()
+ else:
+ self._exc_context = exc_context
+ self.error_log = _ErrorLog()
+
+ cdef tree.xmlOutputBuffer* _createOutputBuffer(
+ self, tree.xmlCharEncodingHandler* enchandler) except NULL:
+ cdef tree.xmlOutputBuffer* c_buffer
+ c_buffer = tree.xmlOutputBufferCreateIO(
+ _writeFilelikeWriter, _closeFilelikeWriter,
+ <python.PyObject*>self, enchandler)
+ if c_buffer is NULL:
+ raise IOError, "Could not create I/O writer context."
+ return c_buffer
+
+ cdef int write(self, char* c_buffer, int size) noexcept:
+ try:
+ if self._filelike is None:
+ raise IOError, "File is already closed"
+ py_buffer = <bytes>c_buffer[:size]
+ self._filelike.write(py_buffer)
+ except:
+ size = -1
+ self._exc_context._store_raised()
+ finally:
+ return size # and swallow any further exceptions
+
+ cdef int close(self) noexcept:
+ retval = 0
+ try:
+ if self._close_filelike is not None:
+ self._close_filelike()
+ # we should not close the file here as we didn't open it
+ self._filelike = None
+ except:
+ retval = -1
+ self._exc_context._store_raised()
+ finally:
+ return retval # and swallow any further exceptions
+
+cdef int _writeFilelikeWriter(void* ctxt, char* c_buffer, int length) noexcept:
+ return (<_FilelikeWriter>ctxt).write(c_buffer, length)
+
+cdef int _closeFilelikeWriter(void* ctxt) noexcept:
+ return (<_FilelikeWriter>ctxt).close()
+
+cdef _tofilelike(f, _Element element, encoding, doctype, method,
+ bint write_xml_declaration, bint write_doctype,
+ bint pretty_print, bint with_tail, int standalone,
+ int compression):
+ cdef _FilelikeWriter writer = None
+ cdef tree.xmlOutputBuffer* c_buffer
+ cdef tree.xmlCharEncodingHandler* enchandler
+ cdef const_char* c_enc
+ cdef const_xmlChar* c_doctype
+ cdef int error_result
+
+ c_method = _findOutputMethod(method)
+ if c_method == OUTPUT_METHOD_TEXT:
+ data = _textToString(element._c_node, encoding, with_tail)
+ if compression:
+ bytes_out = BytesIO()
+ with GzipFile(fileobj=bytes_out, mode='wb', compresslevel=compression) as gzip_file:
+ gzip_file.write(data)
+ data = bytes_out.getvalue()
+ f = _getFSPathOrObject(f)
+ if _isString(f):
+ filename8 = _encodeFilename(f)
+ with open(filename8, 'wb') as f:
+ f.write(data)
+ else:
+ f.write(data)
+ return
+
+ if encoding is None:
+ c_enc = NULL
+ else:
+ encoding = _utf8(encoding)
+ c_enc = _cstr(encoding)
+ if doctype is None:
+ c_doctype = NULL
+ else:
+ doctype = _utf8(doctype)
+ c_doctype = _xcstr(doctype)
+
+ writer = _create_output_buffer(f, c_enc, compression, &c_buffer, close=False)
+ if writer is None:
+ with nogil:
+ error_result = _serialise_node(
+ c_buffer, c_doctype, c_enc, element._c_node, c_method,
+ write_xml_declaration, write_doctype, pretty_print, with_tail, standalone)
+ else:
+ error_result = _serialise_node(
+ c_buffer, c_doctype, c_enc, element._c_node, c_method,
+ write_xml_declaration, write_doctype, pretty_print, with_tail, standalone)
+
+ if writer is not None:
+ writer._exc_context._raise_if_stored()
+ if error_result != xmlerror.XML_ERR_OK:
+ _raiseSerialisationError(error_result)
+
+
+cdef int _serialise_node(tree.xmlOutputBuffer* c_buffer, const_xmlChar* c_doctype,
+ const_char* c_enc, xmlNode* c_node, int c_method,
+ bint write_xml_declaration, bint write_doctype, bint pretty_print,
+ bint with_tail, int standalone) noexcept nogil:
+ _writeNodeToBuffer(
+ c_buffer, c_node, c_enc, c_doctype, c_method,
+ write_xml_declaration, write_doctype, pretty_print, with_tail, standalone)
+ error_result = c_buffer.error
+ if error_result == xmlerror.XML_ERR_OK:
+ error_result = tree.xmlOutputBufferClose(c_buffer)
+ if error_result != -1:
+ error_result = xmlerror.XML_ERR_OK
+ else:
+ tree.xmlOutputBufferClose(c_buffer)
+ return error_result
+
+
+cdef _FilelikeWriter _create_output_buffer(
+ f, const_char* c_enc, int c_compression,
+ tree.xmlOutputBuffer** c_buffer_ret, bint close):
+ cdef tree.xmlOutputBuffer* c_buffer
+ cdef _FilelikeWriter writer
+ cdef bytes filename8
+ enchandler = tree.xmlFindCharEncodingHandler(c_enc)
+ if enchandler is NULL:
+ raise LookupError(
+ f"unknown encoding: '{c_enc.decode('UTF-8') if c_enc is not NULL else u''}'")
+ try:
+ f = _getFSPathOrObject(f)
+ if _isString(f):
+ filename8 = _encodeFilename(f)
+ if b'%' in filename8 and (
+ # Exclude absolute Windows paths and file:// URLs.
+ _isFilePath(filename8) not in (NO_FILE_PATH, ABS_WIN_FILE_PATH)
+ or filename8[:7].lower() == b'file://'):
+ # A file path (not a URL) containing the '%' URL escape character.
+ # libxml2 uses URL-unescaping on these, so escape the path before passing it in.
+ filename8 = filename8.replace(b'%', b'%25')
+ c_buffer = tree.xmlOutputBufferCreateFilename(
+ _cstr(filename8), enchandler, c_compression)
+ if c_buffer is NULL:
+ python.PyErr_SetFromErrno(IOError) # raises IOError
+ writer = None
+ elif hasattr(f, 'write'):
+ writer = _FilelikeWriter(f, compression=c_compression, close=close)
+ c_buffer = writer._createOutputBuffer(enchandler)
+ else:
+ raise TypeError(
+ f"File or filename expected, got '{python._fqtypename(f).decode('UTF-8')}'")
+ except:
+ tree.xmlCharEncCloseFunc(enchandler)
+ raise
+ c_buffer_ret[0] = c_buffer
+ return writer
+
+cdef xmlChar **_convert_ns_prefixes(tree.xmlDict* c_dict, ns_prefixes) except NULL:
+ cdef size_t i, num_ns_prefixes = len(ns_prefixes)
+ # Need to allocate one extra memory block to handle last NULL entry
+ c_ns_prefixes = python.lxml_malloc(num_ns_prefixes + 1, sizeof(xmlChar*))
+ if not c_ns_prefixes:
+ raise MemoryError()
+ i = 0
+ try:
+ for prefix in ns_prefixes:
+ prefix_utf = _utf8(prefix)
+ c_prefix = tree.xmlDictExists(c_dict, _xcstr(prefix_utf), len(prefix_utf))
+ if c_prefix:
+ # unknown prefixes do not need to get serialised
+ c_ns_prefixes[i] = c_prefix
+ i += 1
+ except:
+ python.lxml_free(c_ns_prefixes)
+ raise
+
+ c_ns_prefixes[i] = NULL # append end marker
+ return c_ns_prefixes
+
+cdef _tofilelikeC14N(f, _Element element, bint exclusive, bint with_comments,
+ int compression, inclusive_ns_prefixes):
+ cdef _FilelikeWriter writer = None
+ cdef tree.xmlOutputBuffer* c_buffer
+ cdef xmlChar **c_inclusive_ns_prefixes = NULL
+ cdef char* c_filename
+ cdef xmlDoc* c_base_doc
+ cdef xmlDoc* c_doc
+ cdef int bytes_count, error = 0
+
+ c_base_doc = element._c_node.doc
+ c_doc = _fakeRootDoc(c_base_doc, element._c_node)
+ try:
+ c_inclusive_ns_prefixes = (
+ _convert_ns_prefixes(c_doc.dict, inclusive_ns_prefixes)
+ if inclusive_ns_prefixes else NULL)
+
+ f = _getFSPathOrObject(f)
+ if _isString(f):
+ filename8 = _encodeFilename(f)
+ c_filename = _cstr(filename8)
+ with nogil:
+ error = c14n.xmlC14NDocSave(
+ c_doc, NULL, exclusive, c_inclusive_ns_prefixes,
+ with_comments, c_filename, compression)
+ elif hasattr(f, 'write'):
+ writer = _FilelikeWriter(f, compression=compression)
+ c_buffer = writer._createOutputBuffer(NULL)
+ try:
+ with writer.error_log:
+ bytes_count = c14n.xmlC14NDocSaveTo(
+ c_doc, NULL, exclusive, c_inclusive_ns_prefixes,
+ with_comments, c_buffer)
+ finally:
+ error = tree.xmlOutputBufferClose(c_buffer)
+ if bytes_count < 0:
+ error = bytes_count
+ elif error != -1:
+ error = xmlerror.XML_ERR_OK
+ else:
+ raise TypeError(f"File or filename expected, got '{python._fqtypename(f).decode('UTF-8')}'")
+ finally:
+ _destroyFakeDoc(c_base_doc, c_doc)
+ if c_inclusive_ns_prefixes is not NULL:
+ python.lxml_free(c_inclusive_ns_prefixes)
+
+ if writer is not None:
+ writer._exc_context._raise_if_stored()
+
+ if error < 0:
+ message = "C14N failed"
+ if writer is not None:
+ errors = writer.error_log
+ if len(errors):
+ message = errors[0].message
+ raise C14NError(message)
+
+
+# C14N 2.0
+
+def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
+ """Convert XML to its C14N 2.0 serialised form.
+
+ If *out* is provided, it must be a file or file-like object that receives
+ the serialised canonical XML output (text, not bytes) through its ``.write()``
+ method. To write to a file, open it in text mode with encoding "utf-8".
+ If *out* is not provided, this function returns the output as a text string.
+
+ Either *xml_data* (an XML string, tree or Element) or *from_file*
+ (a file path or file-like object) must be provided as input.
+
+ The configuration options are the same as for the ``C14NWriterTarget``.
+ """
+ if xml_data is None and from_file is None:
+ raise ValueError("Either 'xml_data' or 'from_file' must be provided as input")
+
+ sio = None
+ if out is None:
+ sio = out = StringIO()
+
+ target = C14NWriterTarget(out.write, **options)
+
+ if xml_data is not None and not isinstance(xml_data, basestring):
+ _tree_to_target(xml_data, target)
+ return sio.getvalue() if sio is not None else None
+
+ cdef _FeedParser parser = XMLParser(
+ target=target,
+ attribute_defaults=True,
+ collect_ids=False,
+ )
+
+ if xml_data is not None:
+ parser.feed(xml_data)
+ parser.close()
+ elif from_file is not None:
+ try:
+ _parseDocument(from_file, parser, base_url=None)
+ except _TargetParserResult:
+ pass
+
+ return sio.getvalue() if sio is not None else None
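+
+# Usage sketch (doctest-style, illustrative; output shown under the assumption
+# that this function is used as lxml.etree.canonicalize):
+#
+#     >>> canonicalize('<r b="2" a="1"/>')
+#     '<r a="1" b="2"></r>'
+#
+# C14N 2.0 expands empty-element tags and serialises attributes in sorted order.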
+
+
+cdef _tree_to_target(element, target):
+ for event, elem in iterwalk(element, events=('start', 'end', 'start-ns', 'comment', 'pi')):
+ text = None
+ if event == 'start':
+ target.start(elem.tag, elem.attrib)
+ text = elem.text
+ elif event == 'end':
+ target.end(elem.tag)
+ text = elem.tail
+ elif event == 'start-ns':
+ target.start_ns(*elem)
+ continue
+ elif event == 'comment':
+ target.comment(elem.text)
+ text = elem.tail
+ elif event == 'pi':
+ target.pi(elem.target, elem.text)
+ text = elem.tail
+ if text:
+ target.data(text)
+ return target.close()
+
+
+cdef object _looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match
+
+
+cdef class C14NWriterTarget:
+ """
+ Canonicalization writer target for the XMLParser.
+
+ Serialises parse events to XML C14N 2.0.
+
+ Configuration options:
+
+ - *with_comments*: set to true to include comments
+ - *strip_text*: set to true to strip whitespace before and after text content
+ - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}"
+ - *qname_aware_tags*: a set of qname aware tag names in which prefixes
+ should be replaced in text content
+ - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes
+ should be replaced in text content
+ - *exclude_attrs*: a set of attribute names that should not be serialised
+ - *exclude_tags*: a set of tag names that should not be serialised
+ """
+ cdef object _write
+ cdef list _data
+ cdef set _qname_aware_tags
+ cdef object _find_qname_aware_attrs
+ cdef list _declared_ns_stack
+ cdef list _ns_stack
+ cdef dict _prefix_map
+ cdef list _preserve_space
+ cdef tuple _pending_start
+ cdef set _exclude_tags
+ cdef set _exclude_attrs
+ cdef Py_ssize_t _ignored_depth
+ cdef bint _with_comments
+ cdef bint _strip_text
+ cdef bint _rewrite_prefixes
+ cdef bint _root_seen
+ cdef bint _root_done
+
+ def __init__(self, write, *,
+ with_comments=False, strip_text=False, rewrite_prefixes=False,
+ qname_aware_tags=None, qname_aware_attrs=None,
+ exclude_attrs=None, exclude_tags=None):
+ self._write = write
+ self._data = []
+ self._with_comments = with_comments
+ self._strip_text = strip_text
+ self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None
+ self._exclude_tags = set(exclude_tags) if exclude_tags else None
+
+ self._rewrite_prefixes = rewrite_prefixes
+ if qname_aware_tags:
+ self._qname_aware_tags = set(qname_aware_tags)
+ else:
+ self._qname_aware_tags = None
+ if qname_aware_attrs:
+ self._find_qname_aware_attrs = set(qname_aware_attrs).intersection
+ else:
+ self._find_qname_aware_attrs = None
+
+ # Stack with globally and newly declared namespaces as (uri, prefix) pairs.
+ self._declared_ns_stack = [[
+ ("http://www.w3.org/XML/1998/namespace", "xml"),
+ ]]
+ # Stack with user declared namespace prefixes as (uri, prefix) pairs.
+ self._ns_stack = []
+ if not rewrite_prefixes:
+ self._ns_stack.append(_DEFAULT_NAMESPACE_PREFIXES_ITEMS)
+ self._ns_stack.append([])
+ self._prefix_map = {}
+ self._preserve_space = [False]
+ self._pending_start = None
+ self._ignored_depth = 0
+ self._root_seen = False
+ self._root_done = False
+
+ def _iter_namespaces(self, ns_stack):
+ for namespaces in reversed(ns_stack):
+ if namespaces: # almost no element declares new namespaces
+ yield from namespaces
+
+ cdef _resolve_prefix_name(self, prefixed_name):
+ prefix, name = prefixed_name.split(':', 1)
+ for uri, p in self._iter_namespaces(self._ns_stack):
+ if p == prefix:
+ return f'{{{uri}}}{name}'
+ raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope')
+
+ cdef _qname(self, qname, uri=None):
+ if uri is None:
+ uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname)
+ else:
+ tag = qname
+
+ prefixes_seen = set()
+ for u, prefix in self._iter_namespaces(self._declared_ns_stack):
+ if u == uri and prefix not in prefixes_seen:
+ return f'{prefix}:{tag}' if prefix else tag, tag, uri
+ prefixes_seen.add(prefix)
+
+ # Not declared yet => add new declaration.
+ if self._rewrite_prefixes:
+ if uri in self._prefix_map:
+ prefix = self._prefix_map[uri]
+ else:
+ prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}'
+ self._declared_ns_stack[-1].append((uri, prefix))
+ return f'{prefix}:{tag}', tag, uri
+
+ if not uri and '' not in prefixes_seen:
+ # No default namespace declared => no prefix needed.
+ return tag, tag, uri
+
+ for u, prefix in self._iter_namespaces(self._ns_stack):
+ if u == uri:
+ self._declared_ns_stack[-1].append((uri, prefix))
+ return f'{prefix}:{tag}' if prefix else tag, tag, uri
+
+ if not uri:
+ # As soon as a default namespace is defined,
+ # anything that has no namespace (and thus, no prefix) goes there.
+ return tag, tag, uri
+
+ raise ValueError(f'Namespace "{uri}" of name "{tag}" is not declared in scope')
+
+ def data(self, data):
+ if not self._ignored_depth:
+ self._data.append(data)
+
+ cdef _flush(self):
+ cdef unicode data = ''.join(self._data)
+ del self._data[:]
+ if self._strip_text and not self._preserve_space[-1]:
+ data = data.strip()
+ if self._pending_start is not None:
+ (tag, attrs, new_namespaces), self._pending_start = self._pending_start, None
+ qname_text = data if ':' in data and _looks_like_prefix_name(data) else None
+ self._start(tag, attrs, new_namespaces, qname_text)
+ if qname_text is not None:
+ return
+ if data and self._root_seen:
+ self._write(_escape_cdata_c14n(data))
+
+ def start_ns(self, prefix, uri):
+ if self._ignored_depth:
+ return
+ # we may have to resolve qnames in text content
+ if self._data:
+ self._flush()
+ self._ns_stack[-1].append((uri, prefix))
+
+ def start(self, tag, attrs):
+ if self._exclude_tags is not None and (
+ self._ignored_depth or tag in self._exclude_tags):
+ self._ignored_depth += 1
+ return
+ if self._data:
+ self._flush()
+
+ new_namespaces = []
+ self._declared_ns_stack.append(new_namespaces)
+
+ if self._qname_aware_tags is not None and tag in self._qname_aware_tags:
+ # Need to parse text first to see if it requires a prefix declaration.
+ self._pending_start = (tag, attrs, new_namespaces)
+ return
+ self._start(tag, attrs, new_namespaces)
+
+ cdef _start(self, tag, attrs, new_namespaces, qname_text=None):
+ if self._exclude_attrs is not None and attrs:
+ attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs}
+
+ qnames = {tag, *attrs}
+ resolved_names = {}
+
+ # Resolve prefixes in attribute and tag text.
+ if qname_text is not None:
+ qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
+ qnames.add(qname)
+ if self._find_qname_aware_attrs is not None and attrs:
+ qattrs = self._find_qname_aware_attrs(attrs)
+ if qattrs:
+ for attr_name in qattrs:
+ value = attrs[attr_name]
+ if _looks_like_prefix_name(value):
+ qname = resolved_names[value] = self._resolve_prefix_name(value)
+ qnames.add(qname)
+ else:
+ qattrs = None
+ else:
+ qattrs = None
+
+ # Assign prefixes in lexicographical order of used URIs.
+ parsed_qnames = {n: self._qname(n) for n in sorted(
+ qnames, key=lambda n: n.split('}', 1))}
+
+ # Write namespace declarations in prefix order ...
+ if new_namespaces:
+ attr_list = [
+ ('xmlns:' + prefix if prefix else 'xmlns', uri)
+ for uri, prefix in new_namespaces
+ ]
+ attr_list.sort()
+ else:
+ # almost always empty
+ attr_list = []
+
+ # ... followed by attributes in URI+name order
+ if attrs:
+ for k, v in sorted(attrs.items()):
+ if qattrs is not None and k in qattrs and v in resolved_names:
+ v = parsed_qnames[resolved_names[v]][0]
+ attr_qname, attr_name, uri = parsed_qnames[k]
+ # No prefix for attributes in default ('') namespace.
+ attr_list.append((attr_qname if uri else attr_name, v))
+
+ # Honour xml:space attributes.
+ space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
+ self._preserve_space.append(
+ space_behaviour == 'preserve' if space_behaviour
+ else self._preserve_space[-1])
+
+ # Write the tag.
+ write = self._write
+ write('<' + parsed_qnames[tag][0])
+ if attr_list:
+ write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
+ write('>')
+
+ # Write the resolved qname text content.
+ if qname_text is not None:
+ write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
+
+ self._root_seen = True
+ self._ns_stack.append([])
+
+ def end(self, tag):
+ if self._ignored_depth:
+ self._ignored_depth -= 1
+ return
+ if self._data:
+ self._flush()
+ self._write(f'</{self._qname(tag)[0]}>')
+ self._preserve_space.pop()
+ self._root_done = len(self._preserve_space) == 1
+ self._declared_ns_stack.pop()
+ self._ns_stack.pop()
+
+ def comment(self, text):
+ if not self._with_comments:
+ return
+ if self._ignored_depth:
+ return
+ if self._root_done:
+ self._write('\n')
+ elif self._root_seen and self._data:
+ self._flush()
+ self._write(f'<!--{_escape_cdata_c14n(text)}-->')
+ if not self._root_seen:
+ self._write('\n')
+
+ def pi(self, target, data):
+ if self._ignored_depth:
+ return
+ if self._root_done:
+ self._write('\n')
+ elif self._root_seen and self._data:
+ self._flush()
+ self._write(
+ f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>')
+ if not self._root_seen:
+ self._write('\n')
+
+ def close(self):
+ return None
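+
+# Usage sketch (illustrative): the target can be driven by a parser to stream
+# canonical XML to any callable, mirroring what canonicalize() does above:
+#
+#     chunks = []
+#     parser = XMLParser(target=C14NWriterTarget(chunks.append),
+#                        attribute_defaults=True, collect_ids=False)
+#     parser.feed('<r b="2" a="1"/>')
+#     parser.close()
+#     result = ''.join(chunks)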
+
+
+cdef _raise_serialization_error(text):
+ raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
+
+
+cdef unicode _escape_cdata_c14n(stext):
+ # escape character data
+ cdef unicode text
+ cdef Py_UCS4 ch
+ cdef Py_ssize_t start = 0, pos = 0
+ cdef list substrings = None
+ try:
+ text = unicode(stext)
+ except (TypeError, AttributeError):
+ return _raise_serialization_error(stext)
+
+ for pos, ch in enumerate(text):
+ if ch == '&':
+ escape = '&amp;'
+ elif ch == '<':
+ escape = '&lt;'
+ elif ch == '>':
+ escape = '&gt;'
+ elif ch == '\r':
+ escape = '&#xD;'
+ else:
+ continue
+
+ if substrings is None:
+ substrings = []
+ if pos > start:
+ substrings.append(text[start:pos])
+ substrings.append(escape)
+ start = pos + 1
+
+ if substrings is None:
+ return text
+ if pos >= start:
+ substrings.append(text[start:pos+1])
+ return ''.join(substrings)
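+
+# Illustrative behaviour: _escape_cdata_c14n('a<b&c\r') == 'a&lt;b&amp;c&#xD;'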
+
+
+cdef unicode _escape_attrib_c14n(stext):
+ # escape attribute value
+ cdef unicode text
+ cdef Py_UCS4 ch
+ cdef Py_ssize_t start = 0, pos = 0
+ cdef list substrings = None
+ try:
+ text = unicode(stext)
+ except (TypeError, AttributeError):
+ return _raise_serialization_error(stext)
+
+ for pos, ch in enumerate(text):
+ if ch == '&':
+ escape = '&amp;'
+ elif ch == '<':
+ escape = '&lt;'
+ elif ch == '"':
+ escape = '&quot;'
+ elif ch == '\t':
+ escape = '&#x9;'
+ elif ch == '\n':
+ escape = '&#xA;'
+ elif ch == '\r':
+ escape = '&#xD;'
+ else:
+ continue
+
+ if substrings is None:
+ substrings = []
+ if pos > start:
+ substrings.append(text[start:pos])
+ substrings.append(escape)
+ start = pos + 1
+
+ if substrings is None:
+ return text
+ if pos >= start:
+ substrings.append(text[start:pos+1])
+ return ''.join(substrings)
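+
+# Illustrative behaviour: _escape_attrib_c14n('a"b\tc') == 'a&quot;b&#x9;c'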
+
+
+# incremental serialisation
+
+cdef class xmlfile:
+ """xmlfile(self, output_file, encoding=None, compression=None, close=False, buffered=True)
+
+ A simple mechanism for incremental XML serialisation.
+
+ Usage example::
+
+ with xmlfile("somefile.xml", encoding='utf-8') as xf:
+ xf.write_declaration(standalone=True)
+ xf.write_doctype('<!DOCTYPE root SYSTEM "some.dtd">')
+
+ # generate an element (the root element)
+ with xf.element('root'):
+ # write a complete Element into the open root element
+ xf.write(etree.Element('test'))
+
+ # generate and write more Elements, e.g. through iterparse
+ for element in generate_some_elements():
+ # serialise generated elements into the XML file
+ xf.write(element)
+
+ # or write multiple Elements or strings at once
+ xf.write(etree.Element('start'), "text", etree.Element('end'))
+
+ If 'output_file' is a file(-like) object, passing ``close=True`` will
+ close it when exiting the context manager. By default, it is left
+ to the owner to do that. When a file path is used, lxml will take care
+ of opening and closing the file itself. Also, when a compression level
+ is set, lxml will deliberately close the file to make sure all data gets
+ compressed and written.
+
+ Setting ``buffered=False`` will flush the output after each operation,
+ such as opening or closing an ``xf.element()`` block or calling
+ ``xf.write()``. Alternatively, calling ``xf.flush()`` can be used to
+ explicitly flush any pending output when buffering is enabled.
+ """
+ cdef object output_file
+ cdef bytes encoding
+ cdef _IncrementalFileWriter writer
+ cdef _AsyncIncrementalFileWriter async_writer
+ cdef int compresslevel
+ cdef bint close
+ cdef bint buffered
+ cdef int method
+
+ def __init__(self, output_file not None, encoding=None, compression=None,
+ close=False, buffered=True):
+ self.output_file = output_file
+ self.encoding = _utf8orNone(encoding)
+ self.compresslevel = compression or 0
+ self.close = close
+ self.buffered = buffered
+ self.method = OUTPUT_METHOD_XML
+
+ def __enter__(self):
+ assert self.output_file is not None
+ self.writer = _IncrementalFileWriter(
+ self.output_file, self.encoding, self.compresslevel,
+ self.close, self.buffered, self.method)
+ return self.writer
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self.writer is not None:
+ old_writer, self.writer = self.writer, None
+ raise_on_error = exc_type is None
+ old_writer._close(raise_on_error)
+ if self.close:
+ self.output_file = None
+
+ async def __aenter__(self):
+ assert self.output_file is not None
+ if isinstance(self.output_file, basestring):
+ raise TypeError("Cannot asynchronously write to a plain file")
+ if not hasattr(self.output_file, 'write'):
+ raise TypeError("Output file needs an async .write() method")
+ self.async_writer = _AsyncIncrementalFileWriter(
+ self.output_file, self.encoding, self.compresslevel,
+ self.close, self.buffered, self.method)
+ return self.async_writer
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ if self.async_writer is not None:
+ old_writer, self.async_writer = self.async_writer, None
+ raise_on_error = exc_type is None
+ await old_writer._close(raise_on_error)
+ if self.close:
+ self.output_file = None
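+
+# Async usage sketch (illustrative; the output object is assumed to expose
+# coroutine .write()/.close() methods, e.g. an aiofiles handle):
+#
+#     async def dump(out):
+#         async with xmlfile(out, encoding='utf-8') as xf:
+#             async with xf.element('root'):
+#                 await xf.write('some text')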
+
+
+cdef class htmlfile(xmlfile):
+ """htmlfile(self, output_file, encoding=None, compression=None, close=False, buffered=True)
+
+ A simple mechanism for incremental HTML serialisation. Works the same as
+ xmlfile.
+ """
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.method = OUTPUT_METHOD_HTML
+
+
+cdef enum _IncrementalFileWriterStatus:
+ WRITER_STARTING = 0
+ WRITER_DECL_WRITTEN = 1
+ WRITER_DTD_WRITTEN = 2
+ WRITER_IN_ELEMENT = 3
+ WRITER_FINISHED = 4
+
+
+@cython.final
+@cython.internal
+cdef class _IncrementalFileWriter:
+ cdef tree.xmlOutputBuffer* _c_out
+ cdef bytes _encoding
+ cdef const_char* _c_encoding
+ cdef _FilelikeWriter _target
+ cdef list _element_stack
+ cdef int _status
+ cdef int _method
+ cdef bint _buffered
+
+ def __cinit__(self, outfile, bytes encoding, int compresslevel, bint close,
+ bint buffered, int method):
+ self._status = WRITER_STARTING
+ self._element_stack = []
+ if encoding is None:
+ encoding = b'ASCII'
+ self._encoding = encoding
+ self._c_encoding = _cstr(encoding) if encoding is not None else NULL
+ self._buffered = buffered
+ self._target = _create_output_buffer(
+ outfile, self._c_encoding, compresslevel, &self._c_out, close)
+ self._method = method
+
+ def __dealloc__(self):
+ if self._c_out is not NULL:
+ tree.xmlOutputBufferClose(self._c_out)
+
+ def write_declaration(self, version=None, standalone=None, doctype=None):
+ """write_declaration(self, version=None, standalone=None, doctype=None)
+
+ Write an XML declaration and (optionally) a doctype into the file.
+ """
+ assert self._c_out is not NULL
+ cdef const_xmlChar* c_version
+ cdef int c_standalone
+ if self._method != OUTPUT_METHOD_XML:
+ raise LxmlSyntaxError("only XML documents have declarations")
+ if self._status >= WRITER_DECL_WRITTEN:
+ raise LxmlSyntaxError("XML declaration already written")
+ version = _utf8orNone(version)
+ c_version = _xcstr(version) if version is not None else NULL
+ doctype = _utf8orNone(doctype)
+ if standalone is None:
+ c_standalone = -1
+ else:
+ c_standalone = 1 if standalone else 0
+ _writeDeclarationToBuffer(self._c_out, c_version, self._c_encoding, c_standalone)
+ if doctype is not None:
+ _writeDoctype(self._c_out, _xcstr(doctype))
+ self._status = WRITER_DTD_WRITTEN
+ else:
+ self._status = WRITER_DECL_WRITTEN
+ if not self._buffered:
+ tree.xmlOutputBufferFlush(self._c_out)
+ self._handle_error(self._c_out.error)
+
+ def write_doctype(self, doctype):
+ """write_doctype(self, doctype)
+
+ Writes the given doctype declaration verbatim into the file.
+ """
+ assert self._c_out is not NULL
+ if doctype is None:
+ return
+ if self._status >= WRITER_DTD_WRITTEN:
+ raise LxmlSyntaxError("DOCTYPE already written or cannot write it here")
+ doctype = _utf8(doctype)
+ _writeDoctype(self._c_out, _xcstr(doctype))
+ self._status = WRITER_DTD_WRITTEN
+ if not self._buffered:
+ tree.xmlOutputBufferFlush(self._c_out)
+ self._handle_error(self._c_out.error)
+
+ def method(self, method):
+ """method(self, method)
+
+ Returns a context manager that overrides and restores the output method.
+ method is one of (None, 'xml', 'html') where None means 'xml'.
+ """
+ assert self._c_out is not NULL
+ c_method = self._method if method is None else _findOutputMethod(method)
+ return _MethodChanger(self, c_method)
+
+ def element(self, tag, attrib=None, nsmap=None, method=None, **_extra):
+ """element(self, tag, attrib=None, nsmap=None, method, **_extra)
+
+ Returns a context manager that writes an opening and closing tag.
+ method is one of (None, 'xml', 'html') where None means 'xml'.
+ """
+ assert self._c_out is not NULL
+ attributes = []
+ if attrib is not None:
+ for name, value in _iter_attrib(attrib):
+ if name not in _extra:
+ ns, name = _getNsTag(name)
+ attributes.append((ns, name, _utf8(value)))
+ if _extra:
+ for name, value in _extra.iteritems():
+ ns, name = _getNsTag(name)
+ attributes.append((ns, name, _utf8(value)))
+ reversed_nsmap = {}
+ if nsmap:
+ for prefix, ns in nsmap.items():
+ if prefix is not None:
+ prefix = _utf8(prefix)
+ _prefixValidOrRaise(prefix)
+ reversed_nsmap[_utf8(ns)] = prefix
+ ns, name = _getNsTag(tag)
+
+ c_method = self._method if method is None else _findOutputMethod(method)
+
+ return _FileWriterElement(self, (ns, name, attributes, reversed_nsmap), c_method)
+
+ cdef _write_qname(self, bytes name, bytes prefix):
+ if prefix: # empty bytes for no prefix (not None to allow sorting)
+ tree.xmlOutputBufferWrite(self._c_out, len(prefix), _cstr(prefix))
+ tree.xmlOutputBufferWrite(self._c_out, 1, ':')
+ tree.xmlOutputBufferWrite(self._c_out, len(name), _cstr(name))
+
+ cdef _write_start_element(self, element_config):
+ if self._status > WRITER_IN_ELEMENT:
+ raise LxmlSyntaxError("cannot append trailing element to complete XML document")
+ ns, name, attributes, nsmap = element_config
+ flat_namespace_map, new_namespaces = self._collect_namespaces(nsmap)
+ prefix = self._find_prefix(ns, flat_namespace_map, new_namespaces)
+ tree.xmlOutputBufferWrite(self._c_out, 1, '<')
+ self._write_qname(name, prefix)
+
+ self._write_attributes_and_namespaces(
+ attributes, flat_namespace_map, new_namespaces)
+
+ tree.xmlOutputBufferWrite(self._c_out, 1, '>')
+ if not self._buffered:
+ tree.xmlOutputBufferFlush(self._c_out)
+ self._handle_error(self._c_out.error)
+
+ self._element_stack.append((ns, name, prefix, flat_namespace_map))
+ self._status = WRITER_IN_ELEMENT
+
+ cdef _write_attributes_and_namespaces(self, list attributes,
+ dict flat_namespace_map,
+ list new_namespaces):
+ if attributes:
+ # _find_prefix() may append to new_namespaces => build them first
+ attributes = [
+ (self._find_prefix(ns, flat_namespace_map, new_namespaces), name, value)
+ for ns, name, value in attributes ]
+ if new_namespaces:
+ new_namespaces.sort()
+ self._write_attributes_list(new_namespaces)
+ if attributes:
+ self._write_attributes_list(attributes)
+
+ cdef _write_attributes_list(self, list attributes):
+ for prefix, name, value in attributes:
+ tree.xmlOutputBufferWrite(self._c_out, 1, ' ')
+ self._write_qname(name, prefix)
+ tree.xmlOutputBufferWrite(self._c_out, 2, '="')
+ _write_attr_string(self._c_out, _cstr(value))
+
+ tree.xmlOutputBufferWrite(self._c_out, 1, '"')
+
+ cdef _write_end_element(self, element_config):
+ if self._status != WRITER_IN_ELEMENT:
+ raise LxmlSyntaxError("not in an element")
+ if not self._element_stack or self._element_stack[-1][:2] != element_config[:2]:
+ raise LxmlSyntaxError("inconsistent exit action in context manager")
+
+ # If previous write operations failed, the context manager exit might still call us.
+ # That is ok, but we stop writing closing tags and handling errors in that case.
+ # For all non-I/O errors, we continue writing closing tags if we can.
+ ok_to_write = self._c_out.error == xmlerror.XML_ERR_OK
+
+ name, prefix = self._element_stack.pop()[1:3]
+ if ok_to_write:
+ tree.xmlOutputBufferWrite(self._c_out, 2, '</')
+ self._write_qname(name, prefix)
+ tree.xmlOutputBufferWrite(self._c_out, 1, '>')
+
+ if not self._element_stack:
+ self._status = WRITER_FINISHED
+ if ok_to_write:
+ if not self._buffered:
+ tree.xmlOutputBufferFlush(self._c_out)
+ self._handle_error(self._c_out.error)
+
+ cdef _find_prefix(self, bytes href, dict flat_namespaces_map, list new_namespaces):
+ if href is None:
+ return None
+ if href in flat_namespaces_map:
+ return flat_namespaces_map[href]
+ # need to create a new prefix
+ prefixes = flat_namespaces_map.values()
+ i = 0
+ while True:
+ prefix = _utf8('ns%d' % i)
+ if prefix not in prefixes:
+ new_namespaces.append((b'xmlns', prefix, href))
+ flat_namespaces_map[href] = prefix
+ return prefix
+ i += 1
+
+ cdef _collect_namespaces(self, dict nsmap):
+ new_namespaces = []
+ flat_namespaces_map = {}
+ for ns, prefix in nsmap.iteritems():
+ flat_namespaces_map[ns] = prefix
+ if prefix is None:
+ # use empty bytes rather than None to allow sorting
+ new_namespaces.append((b'', b'xmlns', ns))
+ else:
+ new_namespaces.append((b'xmlns', prefix, ns))
+ # merge in flat namespace map of parent
+ if self._element_stack:
+ for ns, prefix in (<dict>self._element_stack[-1][-1]).iteritems():
+ if flat_namespaces_map.get(ns) is None:
+ # unknown or empty prefix => prefer a 'real' prefix
+ flat_namespaces_map[ns] = prefix
+ return flat_namespaces_map, new_namespaces
+
+ def write(self, *args, bint with_tail=True, bint pretty_print=False, method=None):
+ """write(self, *args, with_tail=True, pretty_print=False, method=None)
+
+ Write subtrees or strings into the file.
+
+ If method is not None, it should be one of ('html', 'xml', 'text')
+ to temporarily override the output method.
+ """
+ assert self._c_out is not NULL
+ c_method = self._method if method is None else _findOutputMethod(method)
+
+ for content in args:
+ if _isString(content):
+ if self._status != WRITER_IN_ELEMENT:
+ if self._status > WRITER_IN_ELEMENT or content.strip():
+ raise LxmlSyntaxError("not in an element")
+ bstring = _utf8(content)
+ if not bstring:
+ continue
+
+ ns, name, _, _ = self._element_stack[-1]
+ if (c_method == OUTPUT_METHOD_HTML and
+ ns in (None, b'http://www.w3.org/1999/xhtml') and
+ name in (b'script', b'style')):
+ tree.xmlOutputBufferWrite(self._c_out, len(bstring), _cstr(bstring))
+
+ else:
+ tree.xmlOutputBufferWriteEscape(self._c_out, _xcstr(bstring), NULL)
+
+ elif iselement(content):
+ if self._status > WRITER_IN_ELEMENT:
+ raise LxmlSyntaxError("cannot append trailing element to complete XML document")
+ _writeNodeToBuffer(self._c_out, (<_Element>content)._c_node,
+ self._c_encoding, NULL, c_method,
+ False, False, pretty_print, with_tail, False)
+ if (<_Element>content)._c_node.type == tree.XML_ELEMENT_NODE:
+ if not self._element_stack:
+ self._status = WRITER_FINISHED
+
+ elif content is not None:
+ raise TypeError(
+ f"got invalid input value of type {type(content)}, expected string or Element")
+ self._handle_error(self._c_out.error)
+ if not self._buffered:
+ tree.xmlOutputBufferFlush(self._c_out)
+ self._handle_error(self._c_out.error)
+
+ def flush(self):
+ """flush(self)
+
+ Write any pending content of the current output buffer to the stream.
+ """
+ assert self._c_out is not NULL
+ tree.xmlOutputBufferFlush(self._c_out)
+ self._handle_error(self._c_out.error)
+
+ cdef _close(self, bint raise_on_error):
+ if raise_on_error:
+ if self._status < WRITER_IN_ELEMENT:
+ raise LxmlSyntaxError("no content written")
+ if self._element_stack:
+ raise LxmlSyntaxError("pending open tags on close")
+ error_result = self._c_out.error
+ if error_result == xmlerror.XML_ERR_OK:
+ error_result = tree.xmlOutputBufferClose(self._c_out)
+ if error_result != -1:
+ error_result = xmlerror.XML_ERR_OK
+ else:
+ tree.xmlOutputBufferClose(self._c_out)
+ self._status = WRITER_FINISHED
+ self._c_out = NULL
+ del self._element_stack[:]
+ if raise_on_error:
+ self._handle_error(error_result)
+
+ cdef _handle_error(self, int error_result):
+ if error_result != xmlerror.XML_ERR_OK:
+ if self._target is not None:
+ self._target._exc_context._raise_if_stored()
+ _raiseSerialisationError(error_result)
+
+
+@cython.final
+@cython.internal
+cdef class _AsyncDataWriter:
+ cdef list _data
+ def __cinit__(self):
+ self._data = []
+
+ cdef bytes collect(self):
+ data = b''.join(self._data)
+ del self._data[:]
+ return data
+
+ def write(self, data):
+ self._data.append(data)
+
+ def close(self):
+ pass
+
+
+@cython.final
+@cython.internal
+cdef class _AsyncIncrementalFileWriter:
+ cdef _IncrementalFileWriter _writer
+ cdef _AsyncDataWriter _buffer
+ cdef object _async_outfile
+ cdef int _flush_after_writes
+ cdef bint _should_close
+ cdef bint _buffered
+
+ def __cinit__(self, async_outfile, bytes encoding, int compresslevel, bint close,
+ bint buffered, int method):
+ self._flush_after_writes = 20
+ self._async_outfile = async_outfile
+ self._should_close = close
+ self._buffered = buffered
+ self._buffer = _AsyncDataWriter()
+ self._writer = _IncrementalFileWriter(
+ self._buffer, encoding, compresslevel, close=True, buffered=False, method=method)
+
+ cdef bytes _flush(self):
+ if not self._buffered or len(self._buffer._data) > self._flush_after_writes:
+ return self._buffer.collect()
+ return None
+
+ async def flush(self):
+ self._writer.flush()
+ data = self._buffer.collect()
+ if data:
+ await self._async_outfile.write(data)
+
+ async def write_declaration(self, version=None, standalone=None, doctype=None):
+ self._writer.write_declaration(version, standalone, doctype)
+ data = self._flush()
+ if data:
+ await self._async_outfile.write(data)
+
+ async def write_doctype(self, doctype):
+ self._writer.write_doctype(doctype)
+ data = self._flush()
+ if data:
+ await self._async_outfile.write(data)
+
+ async def write(self, *args, with_tail=True, pretty_print=False, method=None):
+ self._writer.write(*args, with_tail=with_tail, pretty_print=pretty_print, method=method)
+ data = self._flush()
+ if data:
+ await self._async_outfile.write(data)
+
+ def method(self, method):
+ return self._writer.method(method)
+
+ def element(self, tag, attrib=None, nsmap=None, method=None, **_extra):
+ element_writer = self._writer.element(tag, attrib, nsmap, method, **_extra)
+ return _AsyncFileWriterElement(element_writer, self)
+
+ async def _close(self, bint raise_on_error):
+ self._writer._close(raise_on_error)
+ data = self._buffer.collect()
+ if data:
+ await self._async_outfile.write(data)
+ if self._should_close:
+ await self._async_outfile.close()
+
+
+@cython.final
+@cython.internal
+cdef class _AsyncFileWriterElement:
+ cdef _FileWriterElement _element_writer
+ cdef _AsyncIncrementalFileWriter _writer
+
+ def __cinit__(self, _FileWriterElement element_writer not None,
+ _AsyncIncrementalFileWriter writer not None):
+ self._element_writer = element_writer
+ self._writer = writer
+
+ async def __aenter__(self):
+ self._element_writer.__enter__()
+ data = self._writer._flush()
+ if data:
+ await self._writer._async_outfile.write(data)
+
+ async def __aexit__(self, *args):
+ self._element_writer.__exit__(*args)
+ data = self._writer._flush()
+ if data:
+ await self._writer._async_outfile.write(data)
+
+
+@cython.final
+@cython.internal
+@cython.freelist(8)
+cdef class _FileWriterElement:
+ cdef _IncrementalFileWriter _writer
+ cdef object _element
+ cdef int _new_method
+ cdef int _old_method
+
+ def __cinit__(self, _IncrementalFileWriter writer not None, element_config, int method):
+ self._writer = writer
+ self._element = element_config
+ self._new_method = method
+ self._old_method = writer._method
+
+ def __enter__(self):
+ self._writer._method = self._new_method
+ self._writer._write_start_element(self._element)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._writer._write_end_element(self._element)
+ self._writer._method = self._old_method
+
+
+@cython.final
+@cython.internal
+@cython.freelist(8)
+cdef class _MethodChanger:
+ cdef _IncrementalFileWriter _writer
+ cdef int _new_method
+ cdef int _old_method
+ cdef bint _entered
+ cdef bint _exited
+
+ def __cinit__(self, _IncrementalFileWriter writer not None, int method):
+ self._writer = writer
+ self._new_method = method
+ self._old_method = writer._method
+ self._entered = False
+ self._exited = False
+
+ def __enter__(self):
+ if self._entered:
+ raise LxmlSyntaxError("Inconsistent enter action in context manager")
+ self._writer._method = self._new_method
+ self._entered = True
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self._exited:
+ raise LxmlSyntaxError("Inconsistent exit action in context manager")
+ if self._writer._method != self._new_method:
+ raise LxmlSyntaxError("Method changed outside of context manager")
+ self._writer._method = self._old_method
+ self._exited = True
+
+ async def __aenter__(self):
+ # for your async convenience
+ return self.__enter__()
+
+ async def __aexit__(self, *args):
+ # for your async convenience
+ return self.__exit__(*args)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/usedoctest.py b/llmeval-env/lib/python3.10/site-packages/lxml/usedoctest.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1da8cadfe710e215330c8ae3d28dfb7190dfd6b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/usedoctest.py
@@ -0,0 +1,13 @@
+"""Doctest module for XML comparison.
+
+Usage::
+
+ >>> import lxml.usedoctest
+ >>> # now do your XML doctests ...
+
+See `lxml.doctestcompare`
+"""
+
+from lxml import doctestcompare
+
+doctestcompare.temp_install(del_module=__name__)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/xmlid.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/xmlid.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..1531f6d98fdde3319e453826e12e375a80fafcc2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/xmlid.pxi
@@ -0,0 +1,179 @@
+cdef object _find_id_attributes
+
+def XMLID(text, parser=None, *, base_url=None):
+ """XMLID(text, parser=None, base_url=None)
+
+ Parse the text and return a tuple (root node, ID dictionary). The root
+ node is the same as returned by the XML() function. The dictionary
+ contains string-element pairs. The dictionary keys are the values of 'id'
+ attributes. The elements referenced by the ID are stored as dictionary
+ values.
+ """
+ cdef dict dic
+ global _find_id_attributes
+ if _find_id_attributes is None:
+ _find_id_attributes = XPath('//*[string(@id)]')
+
+ # ElementTree compatible implementation: parse and look for 'id' attributes
+ root = XML(text, parser, base_url=base_url)
+ dic = {}
+ for elem in _find_id_attributes(root):
+ dic[elem.get('id')] = elem
+ return root, dic
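+
+# Usage sketch (doctest-style, illustrative):
+#
+#     >>> root, ids = XMLID('<root><a id="chapter1"/></root>')
+#     >>> ids['chapter1'].tag
+#     'a'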
+
+def XMLDTDID(text, parser=None, *, base_url=None):
+ """XMLDTDID(text, parser=None, base_url=None)
+
+ Parse the text and return a tuple (root node, ID dictionary). The root
+ node is the same as returned by the XML() function. The dictionary
+ contains string-element pairs. The dictionary keys are the values of ID
+ attributes as defined by the DTD. The elements referenced by the ID are
+ stored as dictionary values.
+
+ Note that you must not modify the XML tree if you use the ID dictionary.
+ The results are undefined.
+ """
+ cdef _Element root
+ root = XML(text, parser, base_url=base_url)
+ # xml:id spec compatible implementation: use DTD ID attributes from libxml2
+ if root._doc._c_doc.ids is NULL:
+ return root, {}
+ else:
+ return root, _IDDict(root)
+
+def parseid(source, parser=None, *, base_url=None):
+ """parseid(source, parser=None)
+
+ Parses the source into a tuple containing an ElementTree object and an
+ ID dictionary. If no parser is provided as second argument, the default
+ parser is used.
+
+ Note that you must not modify the XML tree if you use the ID dictionary.
+ The results are undefined.
+ """
+ cdef _Document doc
+ doc = _parseDocument(source, parser, base_url)
+ return _elementTreeFactory(doc, None), _IDDict(doc)
+
+cdef class _IDDict:
+ """IDDict(self, etree)
+ A dictionary-like proxy class that maps ID attributes to elements.
+
+ The dictionary must be instantiated with the root element of a parsed XML
+ document, otherwise the behaviour is undefined. Elements and XML trees
+ that were created or modified 'by hand' are not supported.
+ """
+ cdef _Document _doc
+ cdef object _keys
+ cdef object _items
+ def __cinit__(self, etree):
+ cdef _Document doc
+ doc = _documentOrRaise(etree)
+ if doc._c_doc.ids is NULL:
+ raise ValueError, "No ID dictionary available."
+ self._doc = doc
+ self._keys = None
+ self._items = None
+
+ def copy(self):
+ return _IDDict(self._doc)
+
+ def __getitem__(self, id_name):
+ cdef tree.xmlHashTable* c_ids
+ cdef tree.xmlID* c_id
+ cdef xmlAttr* c_attr
+ c_ids = self._doc._c_doc.ids
+ id_utf = _utf8(id_name)
+ c_id = tree.xmlHashLookup(c_ids, _xcstr(id_utf))
+ if c_id is NULL:
+ raise KeyError, "key not found."
+ c_attr = c_id.attr
+ if c_attr is NULL or c_attr.parent is NULL:
+ raise KeyError, "ID attribute not found."
+ return _elementFactory(self._doc, c_attr.parent)
+
+ def get(self, id_name):
+ return self[id_name]
+
+ def __contains__(self, id_name):
+ cdef tree.xmlID* c_id
+ id_utf = _utf8(id_name)
+ c_id = tree.xmlHashLookup(
+ self._doc._c_doc.ids, _xcstr(id_utf))
+ return c_id is not NULL
+
+ def has_key(self, id_name):
+ return id_name in self
+
+ def __repr__(self):
+ return repr(dict(self))
+
+ def keys(self):
+ if self._keys is None:
+ self._keys = self._build_keys()
+ return self._keys[:]
+
+ def __iter__(self):
+ if self._keys is None:
+ self._keys = self._build_keys()
+ return iter(self._keys)
+
+ def iterkeys(self):
+ return self
+
+ def __len__(self):
+ if self._keys is None:
+ self._keys = self._build_keys()
+ return len(self._keys)
+
+ def items(self):
+ if self._items is None:
+ self._items = self._build_items()
+ return self._items[:]
+
+ def iteritems(self):
+ if self._items is None:
+ self._items = self._build_items()
+ return iter(self._items)
+
+ def values(self):
+ cdef list values = []
+ if self._items is None:
+ self._items = self._build_items()
+ for item in self._items:
+ value = <object>python.PyTuple_GET_ITEM(item, 1)
+ python.Py_INCREF(value)
+ values.append(value)
+ return values
+
+ def itervalues(self):
+ return iter(self.values())
+
+ cdef object _build_keys(self):
+ keys = []
+ tree.xmlHashScan(self._doc._c_doc.ids,
+ _collectIdHashKeys, <python.PyObject*>keys)
+ return keys
+
+ cdef object _build_items(self):
+ items = []
+ context = (items, self._doc)
+ tree.xmlHashScan(self._doc._c_doc.ids,
+ _collectIdHashItemList, <python.PyObject*>context)
+ return items
+
+cdef void _collectIdHashItemList(void* payload, void* context, xmlChar* name) noexcept:
+ # collect elements from ID attribute hash table
+ cdef list lst
+ c_id = <tree.xmlID*>payload
+ if c_id is NULL or c_id.attr is NULL or c_id.attr.parent is NULL:
+ return
+ lst, doc = <tuple>context
+ element = _elementFactory(doc, c_id.attr.parent)
+ lst.append( (funicode(name), element) )
+
+cdef void _collectIdHashKeys(void* payload, void* collect_list, xmlChar* name) noexcept:
+ c_id = <tree.xmlID*>payload
+ if c_id is NULL or c_id.attr is NULL or c_id.attr.parent is NULL:
+ return
+ (<list>collect_list).append(funicode(name))
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/xmlschema.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/xmlschema.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..ac5f95876e06147f481ca3f52a3185c6478428d8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/xmlschema.pxi
@@ -0,0 +1,215 @@
+# support for XMLSchema validation
+from lxml.includes cimport xmlschema
+
+
+cdef class XMLSchemaError(LxmlError):
+ """Base class of all XML Schema errors
+ """
+
+cdef class XMLSchemaParseError(XMLSchemaError):
+ """Error while parsing an XML document as XML Schema.
+ """
+
+cdef class XMLSchemaValidateError(XMLSchemaError):
+ """Error while validating an XML document with an XML Schema.
+ """
+
+
+################################################################################
+# XMLSchema
+
+cdef XPath _check_for_default_attributes = XPath(
+ "boolean(//xs:attribute[@default or @fixed][1])",
+ namespaces={'xs': 'http://www.w3.org/2001/XMLSchema'})
+
+
+cdef class XMLSchema(_Validator):
+ """XMLSchema(self, etree=None, file=None)
+ Turn a document into an XML Schema validator.
+
+ Either pass a schema as Element or ElementTree, or pass a file or
+ filename through the ``file`` keyword argument.
+
+ Passing the ``attribute_defaults`` boolean option will make the
+ schema insert default/fixed attributes into validated documents.
+ """
+ cdef xmlschema.xmlSchema* _c_schema
+ cdef _Document _doc
+ cdef bint _has_default_attributes
+ cdef bint _add_attribute_defaults
+
+ def __cinit__(self):
+ self._has_default_attributes = True # play it safe
+ self._add_attribute_defaults = False
+
+ def __init__(self, etree=None, *, file=None, bint attribute_defaults=False):
+ cdef xmlschema.xmlSchemaParserCtxt* parser_ctxt
+ cdef xmlDoc* c_doc
+
+ self._add_attribute_defaults = attribute_defaults
+ _Validator.__init__(self)
+ c_doc = NULL
+ if etree is not None:
+ doc = _documentOrRaise(etree)
+ root_node = _rootNodeOrRaise(etree)
+ c_doc = _copyDocRoot(doc._c_doc, root_node._c_node)
+ self._doc = _documentFactory(c_doc, doc._parser)
+ parser_ctxt = xmlschema.xmlSchemaNewDocParserCtxt(c_doc)
+ elif file is not None:
+ file = _getFSPathOrObject(file)
+ if _isString(file):
+ filename = _encodeFilename(file)
+ parser_ctxt = xmlschema.xmlSchemaNewParserCtxt(_cstr(filename))
+ else:
+ self._doc = _parseDocument(file, None, None)
+ parser_ctxt = xmlschema.xmlSchemaNewDocParserCtxt(self._doc._c_doc)
+ else:
+ raise XMLSchemaParseError, "No tree or file given"
+
+ if parser_ctxt is NULL:
+ raise MemoryError()
+
+ # Need a cast here because older libxml2 releases do not use 'const' in the functype.
+ xmlschema.xmlSchemaSetParserStructuredErrors(
+ parser_ctxt, <xmlerror.xmlStructuredErrorFunc>_receiveError, <void*>self._error_log)
+ if self._doc is not None:
+ # calling xmlSchemaParse on a schema with imports or
+ # includes will cause libxml2 to create an internal
+ # context for parsing, so push an implied context to route
+ # resolve requests to the document's parser
+ __GLOBAL_PARSER_CONTEXT.pushImpliedContextFromParser(self._doc._parser)
+ with nogil:
+ orig_loader = _register_document_loader()
+ self._c_schema = xmlschema.xmlSchemaParse(parser_ctxt)
+ _reset_document_loader(orig_loader)
+ if self._doc is not None:
+ __GLOBAL_PARSER_CONTEXT.popImpliedContext()
+ xmlschema.xmlSchemaFreeParserCtxt(parser_ctxt)
+
+ if self._c_schema is NULL:
+ raise XMLSchemaParseError(
+ self._error_log._buildExceptionMessage(
+ "Document is not valid XML Schema"),
+ self._error_log)
+
+ if self._doc is not None:
+ self._has_default_attributes = _check_for_default_attributes(self._doc)
+ self._add_attribute_defaults = attribute_defaults and self._has_default_attributes
+
+ def __dealloc__(self):
+ xmlschema.xmlSchemaFree(self._c_schema)
+
+ def __call__(self, etree):
+ """__call__(self, etree)
+
+ Validate doc using XML Schema.
+
+ Returns true if document is valid, false if not.
+ """
+ cdef xmlschema.xmlSchemaValidCtxt* valid_ctxt
+ cdef _Document doc
+ cdef _Element root_node
+ cdef xmlDoc* c_doc
+ cdef int ret
+
+ assert self._c_schema is not NULL, "Schema instance not initialised"
+ doc = _documentOrRaise(etree)
+ root_node = _rootNodeOrRaise(etree)
+
+ valid_ctxt = xmlschema.xmlSchemaNewValidCtxt(self._c_schema)
+ if valid_ctxt is NULL:
+ raise MemoryError()
+
+ try:
+ if self._add_attribute_defaults:
+ xmlschema.xmlSchemaSetValidOptions(
+ valid_ctxt, xmlschema.XML_SCHEMA_VAL_VC_I_CREATE)
+
+ self._error_log.clear()
+ # Need a cast here because older libxml2 releases do not use 'const' in the functype.
+ xmlschema.xmlSchemaSetValidStructuredErrors(
+ valid_ctxt, <xmlerror.xmlStructuredErrorFunc>_receiveError, <void*>self._error_log)
+
+ c_doc = _fakeRootDoc(doc._c_doc, root_node._c_node)
+ with nogil:
+ ret = xmlschema.xmlSchemaValidateDoc(valid_ctxt, c_doc)
+ _destroyFakeDoc(doc._c_doc, c_doc)
+ finally:
+ xmlschema.xmlSchemaFreeValidCtxt(valid_ctxt)
+
+ if ret == -1:
+ raise XMLSchemaValidateError(
+ "Internal error in XML Schema validation.",
+ self._error_log)
+ if ret == 0:
+ return True
+ else:
+ return False
+
+ cdef _ParserSchemaValidationContext _newSaxValidator(
+ self, bint add_default_attributes):
+ cdef _ParserSchemaValidationContext context
+ context = _ParserSchemaValidationContext.__new__(_ParserSchemaValidationContext)
+ context._schema = self
+ context._add_default_attributes = (self._has_default_attributes and (
+ add_default_attributes or self._add_attribute_defaults))
+ return context
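+
+# Usage sketch for XMLSchema (illustrative):
+#
+#     schema = XMLSchema(XML('''
+#         <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
+#           <xs:element name="a" type="xs:string"/>
+#         </xs:schema>'''))
+#     schema(XML('<a>text</a>'))   # True
+#     schema(XML('<b/>'))          # False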
+
+@cython.final
+@cython.internal
+cdef class _ParserSchemaValidationContext:
+ cdef XMLSchema _schema
+ cdef xmlschema.xmlSchemaValidCtxt* _valid_ctxt
+ cdef xmlschema.xmlSchemaSAXPlugStruct* _sax_plug
+ cdef bint _add_default_attributes
+ def __cinit__(self):
+ self._valid_ctxt = NULL
+ self._sax_plug = NULL
+ self._add_default_attributes = False
+
+ def __dealloc__(self):
+ self.disconnect()
+ if self._valid_ctxt:
+ xmlschema.xmlSchemaFreeValidCtxt(self._valid_ctxt)
+
+ cdef _ParserSchemaValidationContext copy(self):
+ assert self._schema is not None, "_ParserSchemaValidationContext not initialised"
+ return self._schema._newSaxValidator(
+ self._add_default_attributes)
+
+ cdef void inject_default_attributes(self, xmlDoc* c_doc) noexcept:
+ # we currently need to insert default attributes manually
+ # after parsing, as libxml2 does not support this at parse
+ # time
+ if self._add_default_attributes:
+ with nogil:
+ xmlschema.xmlSchemaValidateDoc(self._valid_ctxt, c_doc)
+
+ cdef int connect(self, xmlparser.xmlParserCtxt* c_ctxt, _BaseErrorLog error_log) except -1:
+ if self._valid_ctxt is NULL:
+ self._valid_ctxt = xmlschema.xmlSchemaNewValidCtxt(
+ self._schema._c_schema)
+ if self._valid_ctxt is NULL:
+ raise MemoryError()
+ if self._add_default_attributes:
+ xmlschema.xmlSchemaSetValidOptions(
+ self._valid_ctxt, xmlschema.XML_SCHEMA_VAL_VC_I_CREATE)
+ if error_log is not None:
+ # Need a cast here because older libxml2 releases do not use 'const' in the functype.
+ xmlschema.xmlSchemaSetValidStructuredErrors(
+ self._valid_ctxt, <xmlerror.xmlStructuredErrorFunc>_receiveError, <void*>error_log)
+ self._sax_plug = xmlschema.xmlSchemaSAXPlug(
+ self._valid_ctxt, &c_ctxt.sax, &c_ctxt.userData)
+
+ cdef void disconnect(self) noexcept:
+ if self._sax_plug is not NULL:
+ xmlschema.xmlSchemaSAXUnplug(self._sax_plug)
+ self._sax_plug = NULL
+ if self._valid_ctxt is not NULL:
+ xmlschema.xmlSchemaSetValidStructuredErrors(
+ self._valid_ctxt, NULL, NULL)
+
+ cdef bint isvalid(self) noexcept:
+ if self._valid_ctxt is NULL:
+ return 1 # valid
+ return xmlschema.xmlSchemaIsValid(self._valid_ctxt)
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/xpath.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/xpath.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..352f63134734780e5a9c869ccb59b4cb4e4ade40
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/xpath.pxi
@@ -0,0 +1,487 @@
+# XPath evaluation
+
+class XPathSyntaxError(LxmlSyntaxError, XPathError):
+ pass
+
+################################################################################
+# XPath
+
+cdef object _XPATH_SYNTAX_ERRORS = (
+ xmlerror.XML_XPATH_NUMBER_ERROR,
+ xmlerror.XML_XPATH_UNFINISHED_LITERAL_ERROR,
+ xmlerror.XML_XPATH_VARIABLE_REF_ERROR,
+ xmlerror.XML_XPATH_INVALID_PREDICATE_ERROR,
+ xmlerror.XML_XPATH_UNCLOSED_ERROR,
+ xmlerror.XML_XPATH_INVALID_CHAR_ERROR
+)
+
+cdef object _XPATH_EVAL_ERRORS = (
+ xmlerror.XML_XPATH_UNDEF_VARIABLE_ERROR,
+ xmlerror.XML_XPATH_UNDEF_PREFIX_ERROR,
+ xmlerror.XML_XPATH_UNKNOWN_FUNC_ERROR,
+ xmlerror.XML_XPATH_INVALID_OPERAND,
+ xmlerror.XML_XPATH_INVALID_TYPE,
+ xmlerror.XML_XPATH_INVALID_ARITY,
+ xmlerror.XML_XPATH_INVALID_CTXT_SIZE,
+ xmlerror.XML_XPATH_INVALID_CTXT_POSITION
+)
+
+cdef int _register_xpath_function(void* ctxt, name_utf, ns_utf) noexcept:
+ if ns_utf is None:
+ return xpath.xmlXPathRegisterFunc(
+ <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf),
+ _xpath_function_call)
+ else:
+ return xpath.xmlXPathRegisterFuncNS(
+ <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf), _xcstr(ns_utf),
+ _xpath_function_call)
+
+cdef int _unregister_xpath_function(void* ctxt, name_utf, ns_utf) noexcept:
+ if ns_utf is None:
+ return xpath.xmlXPathRegisterFunc(
+ <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf), NULL)
+ else:
+ return xpath.xmlXPathRegisterFuncNS(
+ <xpath.xmlXPathContext*>ctxt, _xcstr(name_utf), _xcstr(ns_utf), NULL)
+
+
+@cython.final
+@cython.internal
+cdef class _XPathContext(_BaseContext):
+ cdef object _variables
+ def __init__(self, namespaces, extensions, error_log, enable_regexp, variables,
+ build_smart_strings):
+ self._variables = variables
+ _BaseContext.__init__(self, namespaces, extensions, error_log, enable_regexp,
+ build_smart_strings)
+
+ cdef set_context(self, xpath.xmlXPathContext* xpathCtxt):
+ self._set_xpath_context(xpathCtxt)
+ # This would be a good place to set up the XPath parser dict, but
+ # we cannot use the current thread dict as we do not know which
+ # thread will execute the XPath evaluator - so, no dict for now.
+ self.registerLocalNamespaces()
+ self.registerLocalFunctions(xpathCtxt, _register_xpath_function)
+
+ cdef register_context(self, _Document doc):
+ self._register_context(doc)
+ self.registerGlobalNamespaces()
+ self.registerGlobalFunctions(self._xpathCtxt, _register_xpath_function)
+ self.registerExsltFunctions()
+ if self._variables is not None:
+ self.registerVariables(self._variables)
+
+ cdef unregister_context(self):
+ self.unregisterGlobalFunctions(
+ self._xpathCtxt, _unregister_xpath_function)
+ self.unregisterGlobalNamespaces()
+ xpath.xmlXPathRegisteredVariablesCleanup(self._xpathCtxt)
+ self._cleanup_context()
+
+ cdef void registerExsltFunctions(self) noexcept:
+ if xslt.LIBXSLT_VERSION < 10125:
+ # we'd only execute dummy functions anyway
+ return
+ tree.xmlHashScan(
+ self._xpathCtxt.nsHash, _registerExsltFunctionsForNamespaces,
+ self._xpathCtxt)
+
+ cdef registerVariables(self, variable_dict):
+ for name, value in variable_dict.items():
+ name_utf = self._to_utf(name)
+ xpath.xmlXPathRegisterVariable(
+ self._xpathCtxt, _xcstr(name_utf), _wrapXPathObject(value, None, None))
+
+ cdef registerVariable(self, name, value):
+ name_utf = self._to_utf(name)
+ xpath.xmlXPathRegisterVariable(
+ self._xpathCtxt, _xcstr(name_utf), _wrapXPathObject(value, None, None))
+
+
+cdef void _registerExsltFunctionsForNamespaces(
+ void* _c_href, void* _ctxt, const_xmlChar* c_prefix) noexcept:
+ c_href = _c_href
+ ctxt = _ctxt
+
+ if tree.xmlStrcmp(c_href, xslt.EXSLT_DATE_NAMESPACE) == 0:
+ xslt.exsltDateXpathCtxtRegister(ctxt, c_prefix)
+ elif tree.xmlStrcmp(c_href, xslt.EXSLT_SETS_NAMESPACE) == 0:
+ xslt.exsltSetsXpathCtxtRegister(ctxt, c_prefix)
+ elif tree.xmlStrcmp(c_href, xslt.EXSLT_MATH_NAMESPACE) == 0:
+ xslt.exsltMathXpathCtxtRegister(ctxt, c_prefix)
+ elif tree.xmlStrcmp(c_href, xslt.EXSLT_STRINGS_NAMESPACE) == 0:
+ xslt.exsltStrXpathCtxtRegister(ctxt, c_prefix)
+
+
+cdef class _XPathEvaluatorBase:
+ cdef xpath.xmlXPathContext* _xpathCtxt
+ cdef _XPathContext _context
+ cdef python.PyThread_type_lock _eval_lock
+ cdef _ErrorLog _error_log
+ def __cinit__(self):
+ self._xpathCtxt = NULL
+ if config.ENABLE_THREADING:
+ self._eval_lock = python.PyThread_allocate_lock()
+ if self._eval_lock is NULL:
+ raise MemoryError()
+ self._error_log = _ErrorLog()
+
+ def __init__(self, namespaces, extensions, enable_regexp,
+ smart_strings):
+ self._context = _XPathContext(namespaces, extensions, self._error_log,
+ enable_regexp, None, smart_strings)
+
+ @property
+ def error_log(self):
+ assert self._error_log is not None, "XPath evaluator not initialised"
+ return self._error_log.copy()
+
+ def __dealloc__(self):
+ if self._xpathCtxt is not NULL:
+ xpath.xmlXPathFreeContext(self._xpathCtxt)
+ if config.ENABLE_THREADING:
+ if self._eval_lock is not NULL:
+ python.PyThread_free_lock(self._eval_lock)
+
+ cdef set_context(self, xpath.xmlXPathContext* xpathCtxt):
+ self._xpathCtxt = xpathCtxt
+ self._context.set_context(xpathCtxt)
+
+ cdef bint _checkAbsolutePath(self, char* path) noexcept:
+ cdef char c
+ if path is NULL:
+ return 0
+ c = path[0]
+ while c == c' ' or c == c'\t':
+ path = path + 1
+ c = path[0]
+ return c == c'/'
+
+ @cython.final
+ cdef int _lock(self) except -1:
+ cdef int result
+ if config.ENABLE_THREADING and self._eval_lock != NULL:
+ with nogil:
+ result = python.PyThread_acquire_lock(
+ self._eval_lock, python.WAIT_LOCK)
+ if result == 0:
+ raise XPathError, "XPath evaluator locking failed"
+ return 0
+
+ @cython.final
+ cdef void _unlock(self) noexcept:
+ if config.ENABLE_THREADING and self._eval_lock != NULL:
+ python.PyThread_release_lock(self._eval_lock)
+
+ cdef _build_parse_error(self):
+ cdef _BaseErrorLog entries
+ entries = self._error_log.filter_types(_XPATH_SYNTAX_ERRORS)
+ if entries:
+ message = entries._buildExceptionMessage(None)
+ if message is not None:
+ return XPathSyntaxError(message, self._error_log)
+ return XPathSyntaxError(
+ self._error_log._buildExceptionMessage("Error in xpath expression"),
+ self._error_log)
+
+ cdef _build_eval_error(self):
+ cdef _BaseErrorLog entries
+ entries = self._error_log.filter_types(_XPATH_EVAL_ERRORS)
+ if not entries:
+ entries = self._error_log.filter_types(_XPATH_SYNTAX_ERRORS)
+ if entries:
+ message = entries._buildExceptionMessage(None)
+ if message is not None:
+ return XPathEvalError(message, self._error_log)
+ return XPathEvalError(
+ self._error_log._buildExceptionMessage("Error in xpath expression"),
+ self._error_log)
+
+ cdef object _handle_result(self, xpath.xmlXPathObject* xpathObj, _Document doc):
+ if self._context._exc._has_raised():
+ if xpathObj is not NULL:
+ _freeXPathObject(xpathObj)
+ xpathObj = NULL
+ self._context._release_temp_refs()
+ self._context._exc._raise_if_stored()
+
+ if xpathObj is NULL:
+ self._context._release_temp_refs()
+ raise self._build_eval_error()
+
+ try:
+ result = _unwrapXPathObject(xpathObj, doc, self._context)
+ finally:
+ _freeXPathObject(xpathObj)
+ self._context._release_temp_refs()
+
+ return result
+
+
+cdef class XPathElementEvaluator(_XPathEvaluatorBase):
+ """XPathElementEvaluator(self, element, namespaces=None, extensions=None, regexp=True, smart_strings=True)
+ Create an XPath evaluator for an element.
+
+ Absolute XPath expressions (starting with '/') will be evaluated against
+ the ElementTree as returned by getroottree().
+
+ Additional namespace declarations can be passed with the
+ 'namespaces' keyword argument. EXSLT regular expression support
+ can be disabled with the 'regexp' boolean keyword (defaults to
+ True). Smart strings will be returned for string results unless
+ you pass ``smart_strings=False``.
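+
+ For illustration, a minimal usage sketch (``root`` stands for any
+ Element parsed elsewhere; the expression is made up)::
+
+ evaluator = etree.XPathElementEvaluator(root)
+ results = evaluator('child::*/text()')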
+ """
+ cdef _Element _element
+ def __init__(self, _Element element not None, *, namespaces=None,
+ extensions=None, regexp=True, smart_strings=True):
+ cdef xpath.xmlXPathContext* xpathCtxt
+ cdef int ns_register_status
+ cdef _Document doc
+ _assertValidNode(element)
+ _assertValidDoc(element._doc)
+ self._element = element
+ doc = element._doc
+ _XPathEvaluatorBase.__init__(self, namespaces, extensions,
+ regexp, smart_strings)
+ xpathCtxt = xpath.xmlXPathNewContext(doc._c_doc)
+ if xpathCtxt is NULL:
+ raise MemoryError()
+ self.set_context(xpathCtxt)
+
+ def register_namespace(self, prefix, uri):
+ """Register a namespace with the XPath context.
+ """
+ assert self._xpathCtxt is not NULL, "XPath context not initialised"
+ self._context.addNamespace(prefix, uri)
+
+ def register_namespaces(self, namespaces):
+ """Register a prefix -> uri dict.
+ """
+ assert self._xpathCtxt is not NULL, "XPath context not initialised"
+ for prefix, uri in namespaces.items():
+ self._context.addNamespace(prefix, uri)
+
+ def __call__(self, _path, **_variables):
+ """__call__(self, _path, **_variables)
+
+ Evaluate an XPath expression on the document.
+
+ Variables may be provided as keyword arguments. Note that namespaces
+ are currently not supported for variables.
+
+ Absolute XPath expressions (starting with '/') will be evaluated
+ against the ElementTree as returned by getroottree().
+ """
+ cdef xpath.xmlXPathObject* xpathObj
+ cdef _Document doc
+ assert self._xpathCtxt is not NULL, "XPath context not initialised"
+ path = _utf8(_path)
+ doc = self._element._doc
+
+ self._lock()
+ self._xpathCtxt.node = self._element._c_node
+ try:
+ self._context.register_context(doc)
+ self._context.registerVariables(_variables)
+ c_path = _xcstr(path)
+ with nogil:
+ xpathObj = xpath.xmlXPathEvalExpression(
+ c_path, self._xpathCtxt)
+ result = self._handle_result(xpathObj, doc)
+ finally:
+ self._context.unregister_context()
+ self._unlock()
+
+ return result
+
+
+cdef class XPathDocumentEvaluator(XPathElementEvaluator):
+ """XPathDocumentEvaluator(self, etree, namespaces=None, extensions=None, regexp=True, smart_strings=True)
+ Create an XPath evaluator for an ElementTree.
+
+ Additional namespace declarations can be passed with the
+ 'namespaces' keyword argument. EXSLT regular expression support
+ can be disabled with the 'regexp' boolean keyword (defaults to
+ True). Smart strings will be returned for string results unless
+ you pass ``smart_strings=False``.
+ """
+ def __init__(self, _ElementTree etree not None, *, namespaces=None,
+ extensions=None, regexp=True, smart_strings=True):
+ XPathElementEvaluator.__init__(
+ self, etree._context_node, namespaces=namespaces,
+ extensions=extensions, regexp=regexp,
+ smart_strings=smart_strings)
+
+ def __call__(self, _path, **_variables):
+ """__call__(self, _path, **_variables)
+
+ Evaluate an XPath expression on the document.
+
+ Variables may be provided as keyword arguments. Note that namespaces
+ are currently not supported for variables.
+ """
+ cdef xpath.xmlXPathObject* xpathObj
+ cdef xmlDoc* c_doc
+ cdef _Document doc
+ assert self._xpathCtxt is not NULL, "XPath context not initialised"
+ path = _utf8(_path)
+ doc = self._element._doc
+
+ self._lock()
+ try:
+ self._context.register_context(doc)
+ c_doc = _fakeRootDoc(doc._c_doc, self._element._c_node)
+ try:
+ self._context.registerVariables(_variables)
+ c_path = _xcstr(path)
+ with nogil:
+ self._xpathCtxt.doc = c_doc
+ self._xpathCtxt.node = tree.xmlDocGetRootElement(c_doc)
+ xpathObj = xpath.xmlXPathEvalExpression(
+ c_path, self._xpathCtxt)
+ result = self._handle_result(xpathObj, doc)
+ finally:
+ _destroyFakeDoc(doc._c_doc, c_doc)
+ self._context.unregister_context()
+ finally:
+ self._unlock()
+
+ return result
+
+
+def XPathEvaluator(etree_or_element, *, namespaces=None, extensions=None,
+ regexp=True, smart_strings=True):
+ """XPathEvaluator(etree_or_element, namespaces=None, extensions=None, regexp=True, smart_strings=True)
+
+ Creates an XPath evaluator for an ElementTree or an Element.
+
+ The resulting object can be called with an XPath expression as argument
+ and XPath variables provided as keyword arguments.
+
+ Additional namespace declarations can be passed with the
+ 'namespaces' keyword argument. EXSLT regular expression support
+ can be disabled with the 'regexp' boolean keyword (defaults to
+ True). Smart strings will be returned for string results unless
+ you pass ``smart_strings=False``.
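+
+ As a sketch of the call pattern (the namespace mapping and variable
+ name here are invented)::
+
+ find = etree.XPathEvaluator(tree, namespaces={'x': 'http://example.com/ns'})
+ nodes = find('//x:item[@id = $id]', id='42')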
+ """
+ if isinstance(etree_or_element, _ElementTree):
+ return XPathDocumentEvaluator(
+ etree_or_element, namespaces=namespaces,
+ extensions=extensions, regexp=regexp, smart_strings=smart_strings)
+ else:
+ return XPathElementEvaluator(
+ etree_or_element, namespaces=namespaces,
+ extensions=extensions, regexp=regexp, smart_strings=smart_strings)
+
+
+cdef class XPath(_XPathEvaluatorBase):
+ """XPath(self, path, namespaces=None, extensions=None, regexp=True, smart_strings=True)
+ A compiled XPath expression that can be called on Elements and ElementTrees.
+
+ Besides the XPath expression, you can pass prefix-namespace
+ mappings and extension functions to the constructor through the
+ keyword arguments ``namespaces`` and ``extensions``. EXSLT
+ regular expression support can be disabled with the 'regexp'
+ boolean keyword (defaults to True). Smart strings will be
+ returned for string results unless you pass
+ ``smart_strings=False``.
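+
+ A compiled expression is built once and can then be applied to any
+ number of trees; a hedged sketch (element name invented)::
+
+ count_items = etree.XPath('count(//item)')
+ total = count_items(doc)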
+ """
+ cdef xpath.xmlXPathCompExpr* _xpath
+ cdef bytes _path
+ def __cinit__(self):
+ self._xpath = NULL
+
+ def __init__(self, path, *, namespaces=None, extensions=None,
+ regexp=True, smart_strings=True):
+ cdef xpath.xmlXPathContext* xpathCtxt
+ _XPathEvaluatorBase.__init__(self, namespaces, extensions,
+ regexp, smart_strings)
+ self._path = _utf8(path)
+ xpathCtxt = xpath.xmlXPathNewContext(NULL)
+ if xpathCtxt is NULL:
+ raise MemoryError()
+ self.set_context(xpathCtxt)
+ self._xpath = xpath.xmlXPathCtxtCompile(xpathCtxt, _xcstr(self._path))
+ if self._xpath is NULL:
+ raise self._build_parse_error()
+
+ def __call__(self, _etree_or_element, **_variables):
+ "__call__(self, _etree_or_element, **_variables)"
+ cdef xpath.xmlXPathObject* xpathObj
+ cdef _Document document
+ cdef _Element element
+
+ assert self._xpathCtxt is not NULL, "XPath context not initialised"
+ document = _documentOrRaise(_etree_or_element)
+ element = _rootNodeOrRaise(_etree_or_element)
+
+ self._lock()
+ self._xpathCtxt.doc = document._c_doc
+ self._xpathCtxt.node = element._c_node
+
+ try:
+ self._context.register_context(document)
+ self._context.registerVariables(_variables)
+ with nogil:
+ xpathObj = xpath.xmlXPathCompiledEval(
+ self._xpath, self._xpathCtxt)
+ result = self._handle_result(xpathObj, document)
+ finally:
+ self._context.unregister_context()
+ self._unlock()
+ return result
+
+ @property
+ def path(self):
+ """The literal XPath expression.
+ """
+ return self._path.decode('UTF-8')
+
+ def __dealloc__(self):
+ if self._xpath is not NULL:
+ xpath.xmlXPathFreeCompExpr(self._xpath)
+
+ def __repr__(self):
+ return self.path
+
+
+cdef object _replace_strings = re.compile(b'("[^"]*")|(\'[^\']*\')').sub
+cdef object _find_namespaces = re.compile(b'({[^}]+})').findall
+
+cdef class ETXPath(XPath):
+ """ETXPath(self, path, extensions=None, regexp=True, smart_strings=True)
+ Special XPath class that supports the ElementTree {uri} notation for namespaces.
+
+ Note that this class does not accept the ``namespaces`` keyword
+ argument. All namespaces must be passed as part of the path
+ string. Smart strings will be returned for string results unless
+ you pass ``smart_strings=False``.
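+
+ For example, with the namespace URI spelled inline (URI invented)::
+
+ find = etree.ETXPath('//{http://example.com/ns}item')
+ items = find(tree)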
+ """
+ def __init__(self, path, *, extensions=None, regexp=True,
+ smart_strings=True):
+ path, namespaces = self._nsextract_path(path)
+ XPath.__init__(self, path, namespaces=namespaces,
+ extensions=extensions, regexp=regexp,
+ smart_strings=smart_strings)
+
+ cdef _nsextract_path(self, path):
+ # replace {namespaces} by new prefixes
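+ # e.g. '//{http://example.com/ns}item' becomes '//__xpp01:item' and
+ # namespaces maps '__xpp01' to 'http://example.com/ns'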
+ cdef dict namespaces = {}
+ cdef list namespace_defs = []
+ cdef int i
+ path_utf = _utf8(path)
+ stripped_path = _replace_strings(b'', path_utf) # remove string literals
+ i = 1
+ for namespace_def in _find_namespaces(stripped_path):
+ if namespace_def not in namespace_defs:
+ prefix = python.PyBytes_FromFormat("__xpp%02d", i)
+ i += 1
+ namespace_defs.append(namespace_def)
+ namespace = namespace_def[1:-1] # remove '{}'
+ namespace = (<bytes>namespace).decode('utf8')
+ namespaces[prefix.decode('utf8')] = namespace
+ prefix_str = prefix + b':'
+ # FIXME: this also replaces {namespaces} within strings!
+ path_utf = path_utf.replace(namespace_def, prefix_str)
+ path = path_utf.decode('utf8')
+ return path, namespaces
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/xslt.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/xslt.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..f7a7be294edf2ae69f4e37895e1f7a143b5b79cb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/xslt.pxi
@@ -0,0 +1,950 @@
+# XSLT
+from lxml.includes cimport xslt
+
+
+cdef class XSLTError(LxmlError):
+ """Base class of all XSLT errors.
+ """
+
+cdef class XSLTParseError(XSLTError):
+ """Error parsing a stylesheet document.
+ """
+
+cdef class XSLTApplyError(XSLTError):
+ """Error running an XSL transformation.
+ """
+
+class XSLTSaveError(XSLTError, SerialisationError):
+ """Error serialising an XSLT result.
+ """
+
+cdef class XSLTExtensionError(XSLTError):
+ """Error registering an XSLT extension.
+ """
+
+
+# version information
+LIBXSLT_COMPILED_VERSION = __unpackIntVersion(xslt.LIBXSLT_VERSION)
+LIBXSLT_VERSION = __unpackIntVersion(xslt.xsltLibxsltVersion)
+
+
+################################################################################
+# Where do we store what?
+#
+# xsltStylesheet->doc->_private
+# == _XSLTResolverContext for XSL stylesheet
+#
+# xsltTransformContext->_private
+# == _XSLTResolverContext for transformed document
+#
+################################################################################
+
+
+################################################################################
+# XSLT document loaders
+
+@cython.final
+@cython.internal
+cdef class _XSLTResolverContext(_ResolverContext):
+ cdef xmlDoc* _c_style_doc
+ cdef _BaseParser _parser
+
+ cdef _XSLTResolverContext _copy(self):
+ cdef _XSLTResolverContext context
+ context = _XSLTResolverContext()
+ _initXSLTResolverContext(context, self._parser)
+ context._c_style_doc = self._c_style_doc
+ return context
+
+cdef _initXSLTResolverContext(_XSLTResolverContext context,
+ _BaseParser parser):
+ _initResolverContext(context, parser.resolvers)
+ context._parser = parser
+ context._c_style_doc = NULL
+
+cdef xmlDoc* _xslt_resolve_from_python(const_xmlChar* c_uri, void* c_context,
+ int parse_options, int* error) with gil:
+ # call the Python document loaders
+ cdef _XSLTResolverContext context
+ cdef _ResolverRegistry resolvers
+ cdef _InputDocument doc_ref
+ cdef xmlDoc* c_doc
+ cdef xmlDoc* c_return_doc = NULL
+
+ error[0] = 0
+ context = <_XSLTResolverContext>c_context
+
+ # shortcut if we resolve the stylesheet itself
+ c_doc = context._c_style_doc
+ try:
+ if c_doc is not NULL and c_doc.URL is not NULL:
+ if tree.xmlStrcmp(c_uri, c_doc.URL) == 0:
+ c_return_doc = _copyDoc(c_doc, 1)
+ return c_return_doc # 'goto', see 'finally' below
+
+ # delegate to the Python resolvers
+ resolvers = context._resolvers
+ if tree.xmlStrncmp(<unsigned char*>'string://__STRING__XSLT__/', c_uri, 26) == 0:
+ c_uri += 26
+ uri = _decodeFilename(c_uri)
+ doc_ref = resolvers.resolve(uri, None, context)
+
+ if doc_ref is not None:
+ if doc_ref._type == PARSER_DATA_STRING:
+ c_return_doc = _parseDoc(
+ doc_ref._data_bytes, doc_ref._filename, context._parser)
+ elif doc_ref._type == PARSER_DATA_FILENAME:
+ c_return_doc = _parseDocFromFile(
+ doc_ref._filename, context._parser)
+ elif doc_ref._type == PARSER_DATA_FILE:
+ c_return_doc = _parseDocFromFilelike(
+ doc_ref._file, doc_ref._filename, context._parser)
+ elif doc_ref._type == PARSER_DATA_EMPTY:
+ c_return_doc = _newXMLDoc()
+ if c_return_doc is not NULL and c_return_doc.URL is NULL:
+ c_return_doc.URL = tree.xmlStrdup(c_uri)
+ except:
+ error[0] = 1
+ context._store_raised()
+ finally:
+ return c_return_doc # and swallow any further exceptions
+
+
+cdef void _xslt_store_resolver_exception(const_xmlChar* c_uri, void* context,
+ xslt.xsltLoadType c_type) noexcept with gil:
+ try:
+ message = f"Cannot resolve URI {_decodeFilename(c_uri)}"
+ if c_type == xslt.XSLT_LOAD_DOCUMENT:
+ exception = XSLTApplyError(message)
+ else:
+ exception = XSLTParseError(message)
+ (<_XSLTResolverContext>context)._store_exception(exception)
+ except BaseException as e:
+ (<_XSLTResolverContext>context)._store_exception(e)
+ finally:
+ return # and swallow any further exceptions
+
+
+cdef xmlDoc* _xslt_doc_loader(const_xmlChar* c_uri, tree.xmlDict* c_dict,
+ int parse_options, void* c_ctxt,
+ xslt.xsltLoadType c_type) noexcept nogil:
+ # nogil => no Python objects here, may be called without thread context !
+ cdef xmlDoc* c_doc
+ cdef xmlDoc* result
+ cdef void* c_pcontext
+ cdef int error = 0
+ # find resolver contexts of stylesheet and transformed doc
+ if c_type == xslt.XSLT_LOAD_DOCUMENT:
+ # transformation time
+ c_pcontext = (<xslt.xsltTransformContext*>c_ctxt)._private
+ elif c_type == xslt.XSLT_LOAD_STYLESHEET:
+ # include/import resolution while parsing
+ c_pcontext = (<xslt.xsltStylesheet*>c_ctxt).doc._private
+ else:
+ c_pcontext = NULL
+
+ if c_pcontext is NULL:
+ # can't call Python without context, fall back to default loader
+ return XSLT_DOC_DEFAULT_LOADER(
+ c_uri, c_dict, parse_options, c_ctxt, c_type)
+
+ c_doc = _xslt_resolve_from_python(c_uri, c_pcontext, parse_options, &error)
+ if c_doc is NULL and not error:
+ c_doc = XSLT_DOC_DEFAULT_LOADER(
+ c_uri, c_dict, parse_options, c_ctxt, c_type)
+ if c_doc is NULL:
+ _xslt_store_resolver_exception(c_uri, c_pcontext, c_type)
+
+ if c_doc is not NULL and c_type == xslt.XSLT_LOAD_STYLESHEET:
+ c_doc._private = c_pcontext
+ return c_doc
+
+cdef xslt.xsltDocLoaderFunc XSLT_DOC_DEFAULT_LOADER = xslt.xsltDocDefaultLoader
+ xslt.xsltSetLoaderFunc(<xslt.xsltDocLoaderFunc>_xslt_doc_loader)
+
+################################################################################
+# XSLT file/network access control
+
+cdef class XSLTAccessControl:
+ """XSLTAccessControl(self, read_file=True, write_file=True, create_dir=True, read_network=True, write_network=True)
+
+ Access control for XSLT: reading/writing files, directories and
+ network I/O. Access to a type of resource is granted or denied by
+ passing any of the following boolean keyword arguments. All of
+ them default to True to allow access.
+
+ - read_file
+ - write_file
+ - create_dir
+ - read_network
+ - write_network
+
+ For convenience, there is also a class member `DENY_ALL` that
+ provides an XSLTAccessControl instance that is readily configured
+ to deny everything, and a `DENY_WRITE` member that denies all
+ write access but allows read access.
+
+ See `XSLT`.
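+
+ A short usage sketch (the stylesheet tree is assumed to exist)::
+
+ ac = etree.XSLTAccessControl(read_network=False, write_network=False)
+ transform = etree.XSLT(xsl_tree, access_control=ac)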
+ """
+ cdef xslt.xsltSecurityPrefs* _prefs
+ def __cinit__(self):
+ self._prefs = xslt.xsltNewSecurityPrefs()
+ if self._prefs is NULL:
+ raise MemoryError()
+
+ def __init__(self, *, bint read_file=True, bint write_file=True, bint create_dir=True,
+ bint read_network=True, bint write_network=True):
+ self._setAccess(xslt.XSLT_SECPREF_READ_FILE, read_file)
+ self._setAccess(xslt.XSLT_SECPREF_WRITE_FILE, write_file)
+ self._setAccess(xslt.XSLT_SECPREF_CREATE_DIRECTORY, create_dir)
+ self._setAccess(xslt.XSLT_SECPREF_READ_NETWORK, read_network)
+ self._setAccess(xslt.XSLT_SECPREF_WRITE_NETWORK, write_network)
+
+ DENY_ALL = XSLTAccessControl(
+ read_file=False, write_file=False, create_dir=False,
+ read_network=False, write_network=False)
+
+ DENY_WRITE = XSLTAccessControl(
+ read_file=True, write_file=False, create_dir=False,
+ read_network=True, write_network=False)
+
+ def __dealloc__(self):
+ if self._prefs is not NULL:
+ xslt.xsltFreeSecurityPrefs(self._prefs)
+
+ @cython.final
+ cdef _setAccess(self, xslt.xsltSecurityOption option, bint allow):
+ cdef xslt.xsltSecurityCheck function
+ if allow:
+ function = xslt.xsltSecurityAllow
+ else:
+ function = xslt.xsltSecurityForbid
+ xslt.xsltSetSecurityPrefs(self._prefs, option, function)
+
+ @cython.final
+ cdef void _register_in_context(self, xslt.xsltTransformContext* ctxt) noexcept:
+ xslt.xsltSetCtxtSecurityPrefs(self._prefs, ctxt)
+
+ @property
+ def options(self):
+ """The access control configuration as a map of options."""
+ return {
+ 'read_file': self._optval(xslt.XSLT_SECPREF_READ_FILE),
+ 'write_file': self._optval(xslt.XSLT_SECPREF_WRITE_FILE),
+ 'create_dir': self._optval(xslt.XSLT_SECPREF_CREATE_DIRECTORY),
+ 'read_network': self._optval(xslt.XSLT_SECPREF_READ_NETWORK),
+ 'write_network': self._optval(xslt.XSLT_SECPREF_WRITE_NETWORK),
+ }
+
+ @cython.final
+ cdef _optval(self, xslt.xsltSecurityOption option):
+ cdef xslt.xsltSecurityCheck function
+ function = xslt.xsltGetSecurityPrefs(self._prefs, option)
+ if function is xslt.xsltSecurityAllow:
+ return True
+ elif function is xslt.xsltSecurityForbid:
+ return False
+ else:
+ return None
+
+ def __repr__(self):
+ items = sorted(self.options.items())
+ return "%s(%s)" % (
+ python._fqtypename(self).decode('UTF-8').split('.')[-1],
+ ', '.join(["%s=%r" % item for item in items]))
+
+################################################################################
+# XSLT
+
+cdef int _register_xslt_function(void* ctxt, name_utf, ns_utf) noexcept:
+ if ns_utf is None:
+ return 0
+ # libxml2 internalises the strings if ctxt has a dict
+ return xslt.xsltRegisterExtFunction(
+ ctxt, _xcstr(name_utf), _xcstr(ns_utf),
+ _xpath_function_call)
+
+cdef dict EMPTY_DICT = {}
+
+@cython.final
+@cython.internal
+cdef class _XSLTContext(_BaseContext):
+ cdef xslt.xsltTransformContext* _xsltCtxt
+ cdef _ReadOnlyElementProxy _extension_element_proxy
+ cdef dict _extension_elements
+ def __cinit__(self):
+ self._xsltCtxt = NULL
+ self._extension_elements = EMPTY_DICT
+
+ def __init__(self, namespaces, extensions, error_log, enable_regexp,
+ build_smart_strings):
+ if extensions is not None and extensions:
+ for ns_name_tuple, extension in extensions.items():
+ if ns_name_tuple[0] is None:
+ raise XSLTExtensionError, \
+ "extensions must not have empty namespaces"
+ if isinstance(extension, XSLTExtension):
+ if self._extension_elements is EMPTY_DICT:
+ self._extension_elements = {}
+ extensions = extensions.copy()
+ ns_utf = _utf8(ns_name_tuple[0])
+ name_utf = _utf8(ns_name_tuple[1])
+ self._extension_elements[(ns_utf, name_utf)] = extension
+ del extensions[ns_name_tuple]
+ _BaseContext.__init__(self, namespaces, extensions, error_log, enable_regexp,
+ build_smart_strings)
+
+ cdef _BaseContext _copy(self):
+ cdef _XSLTContext context
+ context = <_XSLTContext>_BaseContext._copy(self)
+ context._extension_elements = self._extension_elements
+ return context
+
+ cdef register_context(self, xslt.xsltTransformContext* xsltCtxt,
+ _Document doc):
+ self._xsltCtxt = xsltCtxt
+ self._set_xpath_context(xsltCtxt.xpathCtxt)
+ self._register_context(doc)
+ self.registerLocalFunctions(xsltCtxt, _register_xslt_function)
+ self.registerGlobalFunctions(xsltCtxt, _register_xslt_function)
+ _registerXSLTExtensions(xsltCtxt, self._extension_elements)
+
+ cdef free_context(self):
+ self._cleanup_context()
+ self._release_context()
+ if self._xsltCtxt is not NULL:
+ xslt.xsltFreeTransformContext(self._xsltCtxt)
+ self._xsltCtxt = NULL
+ self._release_temp_refs()
+
+
+@cython.final
+@cython.internal
+@cython.freelist(8)
+cdef class _XSLTQuotedStringParam:
+ """A wrapper class for literal XSLT string parameters that require
+ quote escaping.
+ """
+ cdef bytes strval
+ def __cinit__(self, strval):
+ self.strval = _utf8(strval)
+
+
+@cython.no_gc_clear
+cdef class XSLT:
+ """XSLT(self, xslt_input, extensions=None, regexp=True, access_control=None)
+
+ Turn an XSL document into an XSLT object.
+
+ Calling this object on a tree or Element will execute the XSLT::
+
+ transform = etree.XSLT(xsl_tree)
+ result = transform(xml_tree)
+
+ Keyword arguments of the constructor:
+
+ - extensions: a dict mapping ``(namespace, name)`` pairs to
+ extension functions or extension elements
+ - regexp: enable exslt regular expression support in XPath
+ (default: True)
+ - access_control: access restrictions for network or file
+ system (see `XSLTAccessControl`)
+
+ Keyword arguments of the XSLT call:
+
+ - profile_run: enable XSLT profiling and make the profile available
+ as an XML document in ``result.xslt_profile`` (default: False)
+
+ Other keyword arguments of the call are passed to the stylesheet
+ as parameters.
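+
+ For illustration, parameter values are evaluated as XPath expressions,
+ so literal strings need quoting (or ``XSLT.strparam()``); the parameter
+ name here is invented::
+
+ result = transform(xml_tree, greeting="'hello'")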
+ """
+ cdef _XSLTContext _context
+ cdef xslt.xsltStylesheet* _c_style
+ cdef _XSLTResolverContext _xslt_resolver_context
+ cdef XSLTAccessControl _access_control
+ cdef _ErrorLog _error_log
+
+ def __cinit__(self):
+ self._c_style = NULL
+
+ def __init__(self, xslt_input, *, extensions=None, regexp=True,
+ access_control=None):
+ cdef xslt.xsltStylesheet* c_style = NULL
+ cdef xmlDoc* c_doc
+ cdef _Document doc
+ cdef _Element root_node
+
+ doc = _documentOrRaise(xslt_input)
+ root_node = _rootNodeOrRaise(xslt_input)
+
+ # set access control or raise TypeError
+ self._access_control = access_control
+
+ # make a copy of the document as stylesheet parsing modifies it
+ c_doc = _copyDocRoot(doc._c_doc, root_node._c_node)
+
+ # make sure we always have a stylesheet URL
+ if c_doc.URL is NULL:
+ doc_url_utf = python.PyUnicode_AsASCIIString(
+ f"string://__STRING__XSLT__/{id(self)}.xslt")
+ c_doc.URL = tree.xmlStrdup(_xcstr(doc_url_utf))
+
+ self._error_log = _ErrorLog()
+ self._xslt_resolver_context = _XSLTResolverContext()
+ _initXSLTResolverContext(self._xslt_resolver_context, doc._parser)
+ # keep a copy in case we need to access the stylesheet via 'document()'
+ self._xslt_resolver_context._c_style_doc = _copyDoc(c_doc, 1)
+ c_doc._private = self._xslt_resolver_context
+
+ with self._error_log:
+ orig_loader = _register_document_loader()
+ c_style = xslt.xsltParseStylesheetDoc(c_doc)
+ _reset_document_loader(orig_loader)
+
+ if c_style is NULL or c_style.errors:
+ tree.xmlFreeDoc(c_doc)
+ if c_style is not NULL:
+ xslt.xsltFreeStylesheet(c_style)
+ self._xslt_resolver_context._raise_if_stored()
+ # last error seems to be the most accurate here
+ if self._error_log.last_error is not None and \
+ self._error_log.last_error.message:
+ raise XSLTParseError(self._error_log.last_error.message,
+ self._error_log)
+ else:
+ raise XSLTParseError(
+ self._error_log._buildExceptionMessage(
+ "Cannot parse stylesheet"),
+ self._error_log)
+
+ c_doc._private = NULL # no longer used!
+ self._c_style = c_style
+ self._context = _XSLTContext(None, extensions, self._error_log, regexp, True)
+
+ def __dealloc__(self):
+ if self._xslt_resolver_context is not None and \
+ self._xslt_resolver_context._c_style_doc is not NULL:
+ tree.xmlFreeDoc(self._xslt_resolver_context._c_style_doc)
+ # this cleans up the doc copy as well
+ if self._c_style is not NULL:
+ xslt.xsltFreeStylesheet(self._c_style)
+
+ @property
+ def error_log(self):
+ """The log of errors and warnings of an XSLT execution."""
+ return self._error_log.copy()
+
+ @staticmethod
+ def strparam(strval):
+ """strparam(strval)
+
+ Mark an XSLT string parameter that requires quote escaping
+ before passing it into the transformation. Use it like this::
+
+ result = transform(doc, some_strval = XSLT.strparam(
+ '''it's \"Monty Python's\" ...'''))
+
+ Escaped string parameters can be reused without restriction.
+ """
+ return _XSLTQuotedStringParam(strval)
+
+ @staticmethod
+ def set_global_max_depth(int max_depth):
+ """set_global_max_depth(max_depth)
+
+ The maximum traversal depth that the stylesheet engine will allow.
+ This does not only count the template recursion depth but also takes
+ the number of variables/parameters into account. The required setting
+ for a run depends on both the stylesheet and the input data.
+
+ Example::
+
+ XSLT.set_global_max_depth(5000)
+
+ Note that this is currently a global, module-wide setting because
+ libxslt does not support it at a per-stylesheet level.
+ """
+ if max_depth < 0:
+ raise ValueError("cannot set a maximum stylesheet traversal depth < 0")
+ xslt.xsltMaxDepth = max_depth
+
+ def tostring(self, _ElementTree result_tree):
+ """tostring(self, result_tree)
+
+ Save result doc to string based on stylesheet output method.
+
+ :deprecated: use str(result_tree) instead.
+ """
+ return str(result_tree)
+
+ def __deepcopy__(self, memo):
+ return self.__copy__()
+
+ def __copy__(self):
+ return _copyXSLT(self)
+
+ def __call__(self, _input, *, profile_run=False, **kw):
+ """__call__(self, _input, profile_run=False, **kw)
+
+ Execute the XSL transformation on a tree or Element.
+
+ Pass the ``profile_run`` option to get profile information
+ about the XSLT. The result of the XSLT will have a property
+ xslt_profile that holds an XML tree with profiling data.
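+
+ A sketch of a profiled run (``transform`` and ``tree`` built elsewhere)::
+
+ result = transform(tree, profile_run=True)
+ profile_tree = result.xslt_profile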
+ """
+ cdef _XSLTContext context = None
+ cdef _XSLTResolverContext resolver_context
+ cdef _Document input_doc
+ cdef _Element root_node
+ cdef _Document result_doc
+ cdef _Document profile_doc = None
+ cdef xmlDoc* c_profile_doc
+ cdef xslt.xsltTransformContext* transform_ctxt
+ cdef xmlDoc* c_result = NULL
+ cdef xmlDoc* c_doc
+ cdef tree.xmlDict* c_dict
+ cdef const_char** params = NULL
+
+ assert self._c_style is not NULL, "XSLT stylesheet not initialised"
+ input_doc = _documentOrRaise(_input)
+ root_node = _rootNodeOrRaise(_input)
+
+ c_doc = _fakeRootDoc(input_doc._c_doc, root_node._c_node)
+
+ transform_ctxt = xslt.xsltNewTransformContext(self._c_style, c_doc)
+ if transform_ctxt is NULL:
+ _destroyFakeDoc(input_doc._c_doc, c_doc)
+ raise MemoryError()
+
+ # using the stylesheet dict is safer than using a possibly
+ # unrelated dict from the current thread. Almost all
+ # non-input tag/attr names will come from the stylesheet
+ # anyway.
+ if transform_ctxt.dict is not NULL:
+ xmlparser.xmlDictFree(transform_ctxt.dict)
+ if kw:
+ # parameter values are stored in the dict
+ # => avoid unnecessarily cluttering the global dict
+ transform_ctxt.dict = xmlparser.xmlDictCreateSub(self._c_style.doc.dict)
+ if transform_ctxt.dict is NULL:
+ xslt.xsltFreeTransformContext(transform_ctxt)
+ raise MemoryError()
+ else:
+ transform_ctxt.dict = self._c_style.doc.dict
+ xmlparser.xmlDictReference(transform_ctxt.dict)
+
+ xslt.xsltSetCtxtParseOptions(
+ transform_ctxt, input_doc._parser._parse_options)
+
+ if profile_run:
+ transform_ctxt.profile = 1
+
+ try:
+ context = self._context._copy()
+ context.register_context(transform_ctxt, input_doc)
+
+ resolver_context = self._xslt_resolver_context._copy()
+ transform_ctxt._private = resolver_context
+
+ _convert_xslt_parameters(transform_ctxt, kw, &params)
+ c_result = self._run_transform(
+ c_doc, params, context, transform_ctxt)
+ if params is not NULL:
+ # deallocate space for parameters
+ python.lxml_free(params)
+
+ if transform_ctxt.state != xslt.XSLT_STATE_OK:
+ if c_result is not NULL:
+ tree.xmlFreeDoc(c_result)
+ c_result = NULL
+
+ if transform_ctxt.profile:
+ c_profile_doc = xslt.xsltGetProfileInformation(transform_ctxt)
+ if c_profile_doc is not NULL:
+ profile_doc = _documentFactory(
+ c_profile_doc, input_doc._parser)
+ finally:
+ if context is not None:
+ context.free_context()
+ _destroyFakeDoc(input_doc._c_doc, c_doc)
+
+ try:
+ if resolver_context is not None and resolver_context._has_raised():
+ if c_result is not NULL:
+ tree.xmlFreeDoc(c_result)
+ c_result = NULL
+ resolver_context._raise_if_stored()
+
+ if context._exc._has_raised():
+ if c_result is not NULL:
+ tree.xmlFreeDoc(c_result)
+ c_result = NULL
+ context._exc._raise_if_stored()
+
+ if c_result is NULL:
+ # last error seems to be the most accurate here
+ error = self._error_log.last_error
+ if error is not None and error.message:
+ if error.line > 0:
+ message = f"{error.message}, line {error.line}"
+ else:
+ message = error.message
+ elif error is not None and error.line > 0:
+ message = f"Error applying stylesheet, line {error.line}"
+ else:
+ message = "Error applying stylesheet"
+ raise XSLTApplyError(message, self._error_log)
+ finally:
+ if resolver_context is not None:
+ resolver_context.clear()
+
+ result_doc = _documentFactory(c_result, input_doc._parser)
+
+ c_dict = c_result.dict
+ xmlparser.xmlDictReference(c_dict)
+ __GLOBAL_PARSER_CONTEXT.initThreadDictRef(&c_result.dict)
+ if c_dict is not c_result.dict or \
+ self._c_style.doc.dict is not c_result.dict or \
+ input_doc._c_doc.dict is not c_result.dict:
+ with nogil:
+ if c_dict is not c_result.dict:
+ fixThreadDictNames(c_result,
+ c_dict, c_result.dict)
+ if self._c_style.doc.dict is not c_result.dict:
+ fixThreadDictNames(c_result,
+ self._c_style.doc.dict, c_result.dict)
+ if input_doc._c_doc.dict is not c_result.dict:
+ fixThreadDictNames(c_result,
+ input_doc._c_doc.dict, c_result.dict)
+ xmlparser.xmlDictFree(c_dict)
+
+ return _xsltResultTreeFactory(result_doc, self, profile_doc)
+
+ cdef xmlDoc* _run_transform(self, xmlDoc* c_input_doc,
+ const_char** params, _XSLTContext context,
+ xslt.xsltTransformContext* transform_ctxt):
+ cdef xmlDoc* c_result
+ xslt.xsltSetTransformErrorFunc(transform_ctxt, self._error_log,
+ _receiveXSLTError)
+ if self._access_control is not None:
+ self._access_control._register_in_context(transform_ctxt)
+ with self._error_log, nogil:
+ orig_loader = _register_document_loader()
+ c_result = xslt.xsltApplyStylesheetUser(
+ self._c_style, c_input_doc, params, NULL, NULL, transform_ctxt)
+ _reset_document_loader(orig_loader)
+ return c_result
+
+
+cdef _convert_xslt_parameters(xslt.xsltTransformContext* transform_ctxt,
+ dict parameters, const_char*** params_ptr):
+ cdef Py_ssize_t i, parameter_count
+ cdef const_char** params
+ cdef tree.xmlDict* c_dict = transform_ctxt.dict
+ params_ptr[0] = NULL
+ parameter_count = len(parameters)
+ if parameter_count == 0:
+ return
+ # allocate space for parameters
+ # * 2 as we want an entry for both key and value,
+ # and + 1 as array is NULL terminated
+ params = <const_char**>python.lxml_malloc(parameter_count * 2 + 1, sizeof(const_char*))
+ if not params:
+ raise MemoryError()
+ try:
+ i = 0
+ for key, value in parameters.iteritems():
+ k = _utf8(key)
+ if isinstance(value, _XSLTQuotedStringParam):
+ v = (<_XSLTQuotedStringParam>value).strval
+ xslt.xsltQuoteOneUserParam(
+ transform_ctxt, _xcstr(k), _xcstr(v))
+ else:
+ if isinstance(value, XPath):
+ v = (<XPath>value)._path
+ else:
+ v = _utf8(value)
+ params[i] = tree.xmlDictLookup(c_dict, _xcstr(k), len(k))
+ i += 1
+ params[i] = tree.xmlDictLookup(c_dict, _xcstr(v), len(v))
+ i += 1
+ except:
+ python.lxml_free(params)
+ raise
+ params[i] = NULL
+ params_ptr[0] = params
+
+cdef XSLT _copyXSLT(XSLT stylesheet):
+ cdef XSLT new_xslt
+ cdef xmlDoc* c_doc
+ assert stylesheet._c_style is not NULL, "XSLT stylesheet not initialised"
+ new_xslt = XSLT.__new__(XSLT)
+ new_xslt._access_control = stylesheet._access_control
+ new_xslt._error_log = _ErrorLog()
+ new_xslt._context = stylesheet._context._copy()
+
+ new_xslt._xslt_resolver_context = stylesheet._xslt_resolver_context._copy()
+ new_xslt._xslt_resolver_context._c_style_doc = _copyDoc(
+ stylesheet._xslt_resolver_context._c_style_doc, 1)
+
+ c_doc = _copyDoc(stylesheet._c_style.doc, 1)
+ new_xslt._c_style = xslt.xsltParseStylesheetDoc(c_doc)
+ if new_xslt._c_style is NULL:
+ tree.xmlFreeDoc(c_doc)
+ raise MemoryError()
+
+ return new_xslt
+
+@cython.final
+cdef class _XSLTResultTree(_ElementTree):
+ """The result of an XSLT evaluation.
+
+ Use ``str()`` or ``bytes()`` (or ``unicode()`` in Python 2.x) to serialise to a string,
+ and the ``.write_output()`` method to serialise to a file.
+ """
+ cdef XSLT _xslt
+ cdef _Document _profile
+ cdef xmlChar* _buffer
+ cdef Py_ssize_t _buffer_len
+ cdef Py_ssize_t _buffer_refcnt
+
+ def write_output(self, file, *, compression=0):
+ """write_output(self, file, *, compression=0)
+
+ Serialise the XSLT output to a file or file-like object.
+
+ As opposed to the generic ``.write()`` method, ``.write_output()`` serialises
+ the result as defined by the ``<xsl:output>`` tag.
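+
+ For example (the output file name is invented; the file is opened in
+ binary mode since serialised bytes are written)::
+
+ with open('output.xml', 'wb') as f:
+ result.write_output(f)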
+ """
+ cdef _FilelikeWriter writer = None
+ cdef _Document doc
+ cdef int r, rclose, c_compression
+ cdef const_xmlChar* c_encoding = NULL
+ cdef tree.xmlOutputBuffer* c_buffer
+
+ if self._context_node is not None:
+ doc = self._context_node._doc
+ else:
+ doc = None
+ if doc is None:
+ doc = self._doc
+ if doc is None:
+ raise XSLTSaveError("No document to serialise")
+ c_compression = compression or 0
+ xslt.LXML_GET_XSLT_ENCODING(c_encoding, self._xslt._c_style)
+ writer = _create_output_buffer(file, c_encoding, compression, &c_buffer, close=False)
+ if writer is None:
+ with nogil:
+ r = xslt.xsltSaveResultTo(c_buffer, doc._c_doc, self._xslt._c_style)
+ rclose = tree.xmlOutputBufferClose(c_buffer)
+ else:
+ r = xslt.xsltSaveResultTo(c_buffer, doc._c_doc, self._xslt._c_style)
+ rclose = tree.xmlOutputBufferClose(c_buffer)
+ if writer is not None:
+ writer._exc_context._raise_if_stored()
+ if r < 0 or rclose == -1:
+ python.PyErr_SetFromErrno(IOError) # raises IOError
+
+ cdef _saveToStringAndSize(self, xmlChar** s, int* l):
+ cdef _Document doc
+ cdef int r
+ if self._context_node is not None:
+ doc = self._context_node._doc
+ else:
+ doc = None
+ if doc is None:
+ doc = self._doc
+ if doc is None:
+ s[0] = NULL
+ return
+ with nogil:
+ r = xslt.xsltSaveResultToString(s, l, doc._c_doc,
+ self._xslt._c_style)
+ if r == -1:
+ raise MemoryError()
+
+ def __str__(self):
+ cdef xmlChar* encoding
+ cdef xmlChar* s = NULL
+ cdef int l = 0
+ self._saveToStringAndSize(&s, &l)
+ if s is NULL:
+ return ''
+ encoding = self._xslt._c_style.encoding
+ try:
+ if encoding is NULL:
+ result = s[:l].decode('UTF-8')
+ else:
+ result = s[:l].decode(encoding)
+ finally:
+ tree.xmlFree(s)
+ return _stripEncodingDeclaration(result)
+
+ def __getbuffer__(self, Py_buffer* buffer, int flags):
+ cdef int l = 0
+ if buffer is NULL:
+ return
+ if self._buffer is NULL or flags & python.PyBUF_WRITABLE:
+ self._saveToStringAndSize(<xmlChar**>&buffer.buf, &l)
+ buffer.len = l
+ if self._buffer is NULL and not flags & python.PyBUF_WRITABLE:
+ self._buffer = buffer.buf
+ self._buffer_len = l
+ self._buffer_refcnt = 1
+ else:
+ buffer.buf = self._buffer
+ buffer.len = self._buffer_len
+ self._buffer_refcnt += 1
+ if flags & python.PyBUF_WRITABLE:
+ buffer.readonly = 0
+ else:
+ buffer.readonly = 1
+ if flags & python.PyBUF_FORMAT:
+ buffer.format = "B"
+ else:
+ buffer.format = NULL
+ buffer.ndim = 0
+ buffer.shape = NULL
+ buffer.strides = NULL
+ buffer.suboffsets = NULL
+ buffer.itemsize = 1
+ buffer.internal = NULL
+ if buffer.obj is not self: # set by Cython?
+ buffer.obj = self
+
+ def __releasebuffer__(self, Py_buffer* buffer):
+ if buffer is NULL:
+ return
+ if buffer.buf is self._buffer:
+ self._buffer_refcnt -= 1
+ if self._buffer_refcnt == 0:
+ tree.xmlFree(self._buffer)
+ self._buffer = NULL
+ else:
+ tree.xmlFree(buffer.buf)
+ buffer.buf = NULL
+
+ property xslt_profile:
+ """Return an ElementTree with profiling data for the stylesheet run.
+ """
+ def __get__(self):
+ cdef object root
+ if self._profile is None:
+ return None
+ root = self._profile.getroot()
+ if root is None:
+ return None
+ return ElementTree(root)
+
+ def __del__(self):
+ self._profile = None
+
+cdef _xsltResultTreeFactory(_Document doc, XSLT xslt, _Document profile):
+ cdef _XSLTResultTree result
+ result = <_XSLTResultTree>_newElementTree(doc, None, _XSLTResultTree)
+ result._xslt = xslt
+ result._profile = profile
+ return result
+
+# functions like "output" and "write" are a potential security risk, but we
+# rely on the user to configure XSLTAccessControl as needed
+xslt.xsltRegisterAllExtras()
+
+# enable EXSLT support for XSLT
+xslt.exsltRegisterAll()
+
+
+################################################################################
+# XSLT PI support
+
+cdef object _RE_PI_HREF = re.compile(r'\s+href\s*=\s*(?:\'([^\']*)\'|"([^"]*)")')
+cdef object _FIND_PI_HREF = _RE_PI_HREF.findall
+cdef object _REPLACE_PI_HREF = _RE_PI_HREF.sub
+cdef XPath __findStylesheetByID = None
+
+cdef _findStylesheetByID(_Document doc, id):
+ global __findStylesheetByID
+ if __findStylesheetByID is None:
+ __findStylesheetByID = XPath(
+ "//xsl:stylesheet[@xml:id = $id]",
+ namespaces={"xsl" : "http://www.w3.org/1999/XSL/Transform"})
+ return __findStylesheetByID(doc, id=id)
+
+cdef class _XSLTProcessingInstruction(PIBase):
+ def parseXSL(self, parser=None):
+ """parseXSL(self, parser=None)
+
+ Try to parse the stylesheet referenced by this PI and return
+ an ElementTree for it. If the stylesheet is embedded in the
+ same document (referenced via xml:id), find and return an
+ ElementTree for the stylesheet Element.
+
+ The optional ``parser`` keyword argument can be passed to specify the
+ parser used to read from external stylesheet URLs.
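+
+ A hedged usage sketch, assuming the stylesheet PI precedes the root
+ element of a parsed document::
+
+ pi = xml_doc.getroot().getprevious()
+ transform = etree.XSLT(pi.parseXSL())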
+ """
+ cdef _Document result_doc
+ cdef _Element result_node
+ cdef bytes href_utf
+ cdef const_xmlChar* c_href
+ cdef xmlAttr* c_attr
+ _assertValidNode(self)
+ if self._c_node.content is NULL:
+ raise ValueError, "PI lacks content"
+ hrefs = _FIND_PI_HREF(' ' + (<unsigned char*>self._c_node.content).decode('UTF-8'))
+ if len(hrefs) != 1:
+ raise ValueError, "malformed PI attributes"
+ hrefs = hrefs[0]
+ href_utf = utf8(hrefs[0] or hrefs[1])
+ c_href = _xcstr(href_utf)
+
+ if c_href[0] != c'#':
+ # normal URL, try to parse from it
+ c_href = tree.xmlBuildURI(
+ c_href,
+ tree.xmlNodeGetBase(self._c_node.doc, self._c_node))
+ if c_href is not NULL:
+ try:
+ href_utf = c_href
+ finally:
+ tree.xmlFree(c_href)
+ result_doc = _parseDocumentFromURL(href_utf, parser)
+ return _elementTreeFactory(result_doc, None)
+
+ # ID reference to embedded stylesheet
+ # try XML:ID lookup
+ _assertValidDoc(self._doc)
+ c_href += 1 # skip leading '#'
+ c_attr = tree.xmlGetID(self._c_node.doc, c_href)
+ if c_attr is not NULL and c_attr.doc is self._c_node.doc:
+ result_node = _elementFactory(self._doc, c_attr.parent)
+ return _elementTreeFactory(result_node._doc, result_node)
+
+ # try XPath search
+ root = _findStylesheetByID(self._doc, funicode(c_href))
+ if not root:
+ raise ValueError, "reference to non-existing embedded stylesheet"
+ elif len(root) > 1:
+ raise ValueError, "ambiguous reference to embedded stylesheet"
+ result_node = root[0]
+ return _elementTreeFactory(result_node._doc, result_node)
+
+ def set(self, key, value):
+ """set(self, key, value)
+
+ Supports setting the 'href' pseudo-attribute in the text of
+ the processing instruction.
+ """
+ if key != "href":
+ raise AttributeError, \
+ "only setting the 'href' attribute is supported on XSLT-PIs"
+ if value is None:
+ attrib = ""
+ elif '"' in value or '>' in value:
+ raise ValueError, "Invalid URL, must not contain '\"' or '>'"
+ else:
+ attrib = f' href="{value}"'
+ text = ' ' + self.text
+ if _FIND_PI_HREF(text):
+ self.text = _REPLACE_PI_HREF(attrib, text)
+ else:
+ self.text = text + attrib
diff --git a/llmeval-env/lib/python3.10/site-packages/lxml/xsltext.pxi b/llmeval-env/lib/python3.10/site-packages/lxml/xsltext.pxi
new file mode 100644
index 0000000000000000000000000000000000000000..21894b9ef5859a455fd2f9f4443e805818b94517
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/lxml/xsltext.pxi
@@ -0,0 +1,242 @@
+# XSLT extension elements
+
+cdef class XSLTExtension:
+ """Base class of an XSLT extension element.
+ """
+ def execute(self, context, self_node, input_node, output_parent):
+ """execute(self, context, self_node, input_node, output_parent)
+ Execute this extension element.
+
+ Subclasses must override this method. They may append
+ elements to the `output_parent` element here, or set its text
+ content. To this end, the `input_node` provides read-only
+ access to the current node in the input document, and the
+ `self_node` points to the extension element in the stylesheet.
+
+ Note that the `output_parent` parameter may be `None` if there
+ is no parent element in the current context (e.g. no content
+ was added to the output tree yet).
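+
+ A minimal subclass sketch (the text content here is invented)::
+
+ class HelloExtension(etree.XSLTExtension):
+ def execute(self, context, self_node, input_node, output_parent):
+ if output_parent is not None:
+ output_parent.text = 'hello'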
+ """
+ pass
+
+ def apply_templates(self, _XSLTContext context not None, node, output_parent=None,
+ *, elements_only=False, remove_blank_text=False):
+ """apply_templates(self, context, node, output_parent=None, elements_only=False, remove_blank_text=False)
+
+ Call this method to retrieve the result of applying templates
+ to an element.
+
+ The return value is a list of elements or text strings that
+ were generated by the XSLT processor. If you pass
+ ``elements_only=True``, strings will be discarded from the result
+ list. The option ``remove_blank_text=True`` will only discard
+ strings that consist entirely of whitespace (e.g. formatting).
+ These options do not apply to Elements, only to bare string results.
+
+ If you pass an Element as `output_parent` parameter, the result
+ will instead be appended to the element (including attributes
+ etc.) and the return value will be `None`. This is a safe way
+ to generate content into the output document directly, without
+ having to take care of special values like text or attributes.
+ Note that the string discarding options will be ignored in this
+ case.
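+
+ For illustration, inside an ``execute()`` implementation one might
+ write (the options shown are the ones documented above)::
+
+ results = self.apply_templates(context, input_node, elements_only=True)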
+ """
+ cdef xmlNode* c_parent
+ cdef xmlNode* c_node
+ cdef xmlNode* c_context_node
+ assert context._xsltCtxt is not NULL, "XSLT context not initialised"
+ c_context_node = _roNodeOf(node)
+ #assert c_context_node.doc is context._xsltContext.node.doc, \
+ # "switching input documents during transformation is not currently supported"
+
+ if output_parent is not None:
+ c_parent = _nonRoNodeOf(output_parent)
+ else:
+ c_parent = tree.xmlNewDocNode(
+ context._xsltCtxt.output, NULL, <unsigned char*>"fake-parent", NULL)
+
+ c_node = context._xsltCtxt.insert
+ context._xsltCtxt.insert = c_parent
+ xslt.xsltProcessOneNode(
+ context._xsltCtxt, c_context_node, NULL)
+ context._xsltCtxt.insert = c_node
+
+ if output_parent is not None:
+ return None
+
+ try:
+ return self._collectXSLTResultContent(
+ context, c_parent, elements_only, remove_blank_text)
+ finally:
+ # free all intermediate nodes that will not be freed by proxies
+ tree.xmlFreeNode(c_parent)
+
+ def process_children(self, _XSLTContext context not None, output_parent=None,
+ *, elements_only=False, remove_blank_text=False):
+ """process_children(self, context, output_parent=None, elements_only=False, remove_blank_text=False)
+
+ Call this method to process the XSLT content of the extension
+ element itself.
+
+ The return value is a list of elements or text strings that
+ were generated by the XSLT processor. If you pass
+ ``elements_only=True``, strings will be discarded from the result
+ list. The option ``remove_blank_text=True`` will only discard
+ strings that consist entirely of whitespace (e.g. formatting).
+ These options do not apply to Elements, only to bare string results.
+
+ If you pass an Element as `output_parent` parameter, the result
+ will instead be appended to the element (including attributes
+ etc.) and the return value will be `None`. This is a safe way
+ to generate content into the output document directly, without
+ having to take care of special values like text or attributes.
+ Note that the string discarding options will be ignored in this
+ case.
+ """
+ cdef xmlNode* c_parent
+ cdef xslt.xsltTransformContext* c_ctxt = context._xsltCtxt
+ cdef xmlNode* c_old_output_parent = c_ctxt.insert
+ assert context._xsltCtxt is not NULL, "XSLT context not initialised"
+
+ # The output_parent node is used for collecting results, instead of the
+ # elements list used in apply_templates; that's easier and allows us to
+ # use attributes added to the extension element with <xsl:attribute>.
+
+ if output_parent is not None:
+ c_parent = _nonRoNodeOf(output_parent)
+ else:
+ c_parent = tree.xmlNewDocNode(
+ context._xsltCtxt.output, NULL, <unsigned char*>"fake-parent", NULL)
+
+ c_ctxt.insert = c_parent
+ xslt.xsltApplyOneTemplate(c_ctxt,
+ c_ctxt.node, c_ctxt.inst.children, NULL, NULL)
+ c_ctxt.insert = c_old_output_parent
+
+ if output_parent is not None:
+ return None
+
+ try:
+ return self._collectXSLTResultContent(
+ context, c_parent, elements_only, remove_blank_text)
+ finally:
+ # free all intermediate nodes that will not be freed by proxies
+ tree.xmlFreeNode(c_parent)
+
+ cdef _collectXSLTResultContent(self, _XSLTContext context, xmlNode* c_parent,
+ bint elements_only, bint remove_blank_text):
+ cdef xmlNode* c_node
+ cdef xmlNode* c_next
+ cdef _ReadOnlyProxy proxy
+ cdef list results = [] # or maybe _collectAttributes(c_parent, 2) ?
+ c_node = c_parent.children
+ while c_node is not NULL:
+ c_next = c_node.next
+ if c_node.type == tree.XML_TEXT_NODE:
+ if not elements_only:
+ s = funicode(c_node.content)
+ if not remove_blank_text or s.strip():
+ results.append(s)
+ s = None
+ elif c_node.type == tree.XML_ELEMENT_NODE:
+ proxy = _newReadOnlyProxy(
+ context._extension_element_proxy, c_node)
+ results.append(proxy)
+ # unlink node and make sure it will be freed later on
+ tree.xmlUnlinkNode(c_node)
+ proxy.free_after_use()
+ else:
+ raise TypeError, \
+ f"unsupported XSLT result type: {c_node.type}"
+ c_node = c_next
+ return results
+
+
+cdef _registerXSLTExtensions(xslt.xsltTransformContext* c_ctxt,
+ extension_dict):
+ for ns_utf, name_utf in extension_dict:
+ xslt.xsltRegisterExtElement(
+ c_ctxt, _xcstr(name_utf), _xcstr(ns_utf),
+ _callExtensionElement)
+
+cdef void _callExtensionElement(xslt.xsltTransformContext* c_ctxt,
+ xmlNode* c_context_node,
+ xmlNode* c_inst_node,
+ void* dummy) noexcept with gil:
+ cdef _XSLTContext context
+ cdef XSLTExtension extension
+ cdef python.PyObject* dict_result
+ cdef xmlNode* c_node
+ cdef _ReadOnlyProxy context_node = None, self_node = None
+ cdef object output_parent # not restricted to ro-nodes
+ c_uri = _getNs(c_inst_node)
+ if c_uri is NULL:
+ # not allowed, and should never happen
+ return
+ if c_ctxt.xpathCtxt.userData is NULL:
+ # just for safety, should never happen
+ return
+ context = <_XSLTContext>c_ctxt.xpathCtxt.userData
+ try:
+ try:
+ dict_result = python.PyDict_GetItem(
+ context._extension_elements, (c_uri, c_inst_node.name))
+ if dict_result is NULL:
+ raise KeyError, f"extension element {funicode(c_inst_node.name)} not found"
+ extension = <object>dict_result
+
+ try:
+ # build the context proxy nodes
+ self_node = _newReadOnlyProxy(None, c_inst_node)
+ if _isElement(c_ctxt.insert):
+ output_parent = _newAppendOnlyProxy(self_node, c_ctxt.insert)
+ else:
+ # may be the document node or other stuff
+ output_parent = _newOpaqueAppendOnlyNodeWrapper(c_ctxt.insert)
+ if c_context_node.type in (tree.XML_DOCUMENT_NODE,
+ tree.XML_HTML_DOCUMENT_NODE):
+ c_node = tree.xmlDocGetRootElement(c_context_node)
+ if c_node is not NULL:
+ context_node = _newReadOnlyProxy(self_node, c_node)
+ else:
+ context_node = None
+ elif c_context_node.type in (tree.XML_ATTRIBUTE_NODE,
+ tree.XML_TEXT_NODE,
+ tree.XML_CDATA_SECTION_NODE):
+ # this isn't easy to support using read-only
+ # nodes, as the smart-string factory must
+ # instantiate the parent proxy somehow...
+ raise TypeError(f"Unsupported element type: {c_context_node.type}")
+ else:
+ context_node = _newReadOnlyProxy(self_node, c_context_node)
+
+ # run the XSLT extension
+ context._extension_element_proxy = self_node
+ extension.execute(context, self_node, context_node, output_parent)
+ finally:
+ context._extension_element_proxy = None
+ if self_node is not None:
+ _freeReadOnlyProxies(self_node)
+ except Exception as e:
+ try:
+ e = unicode(e).encode("UTF-8")
+ except:
+ e = repr(e).encode("UTF-8")
+ message = python.PyBytes_FromFormat(
+ "Error executing extension element '%s': %s",
+ c_inst_node.name, _cstr(e))
+ xslt.xsltTransformError(c_ctxt, NULL, c_inst_node, "%s", message)
+ context._exc._store_raised()
+ except:
+ # just in case
+ message = python.PyBytes_FromFormat(
+ "Error executing extension element '%s'", c_inst_node.name)
+ xslt.xsltTransformError(c_ctxt, NULL, c_inst_node, "%s", message)
+ context._exc._store_raised()
+ except:
+ # no Python functions here - everything can fail...
+ xslt.xsltTransformError(c_ctxt, NULL, c_inst_node,
+ "Error during XSLT extension element evaluation")
+ context._exc._store_raised()
+ finally:
+ return # swallow any further exceptions
diff --git a/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..c9a5851e27e0dd1886429f94f9902df0a25c13d2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/METADATA
@@ -0,0 +1,282 @@
+Metadata-Version: 2.1
+Name: scikit-learn
+Version: 1.4.2
+Summary: A set of python modules for machine learning and data mining
+Home-page: https://scikit-learn.org
+Download-URL: https://pypi.org/project/scikit-learn/#files
+Maintainer: Andreas Mueller
+Maintainer-email: amueller@ais.uni-bonn.de
+License: new BSD
+Project-URL: Bug Tracker, https://github.com/scikit-learn/scikit-learn/issues
+Project-URL: Documentation, https://scikit-learn.org/stable/documentation.html
+Project-URL: Source Code, https://github.com/scikit-learn/scikit-learn
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development
+Classifier: Topic :: Scientific/Engineering
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Operating System :: MacOS
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.9
+License-File: COPYING
+Requires-Dist: numpy >=1.19.5
+Requires-Dist: scipy >=1.6.0
+Requires-Dist: joblib >=1.2.0
+Requires-Dist: threadpoolctl >=2.0.0
+Provides-Extra: benchmark
+Requires-Dist: matplotlib >=3.3.4 ; extra == 'benchmark'
+Requires-Dist: pandas >=1.1.5 ; extra == 'benchmark'
+Requires-Dist: memory-profiler >=0.57.0 ; extra == 'benchmark'
+Provides-Extra: docs
+Requires-Dist: matplotlib >=3.3.4 ; extra == 'docs'
+Requires-Dist: scikit-image >=0.17.2 ; extra == 'docs'
+Requires-Dist: pandas >=1.1.5 ; extra == 'docs'
+Requires-Dist: seaborn >=0.9.0 ; extra == 'docs'
+Requires-Dist: memory-profiler >=0.57.0 ; extra == 'docs'
+Requires-Dist: sphinx >=6.0.0 ; extra == 'docs'
+Requires-Dist: sphinx-copybutton >=0.5.2 ; extra == 'docs'
+Requires-Dist: sphinx-gallery >=0.15.0 ; extra == 'docs'
+Requires-Dist: numpydoc >=1.2.0 ; extra == 'docs'
+Requires-Dist: Pillow >=7.1.2 ; extra == 'docs'
+Requires-Dist: pooch >=1.6.0 ; extra == 'docs'
+Requires-Dist: sphinx-prompt >=1.3.0 ; extra == 'docs'
+Requires-Dist: sphinxext-opengraph >=0.4.2 ; extra == 'docs'
+Requires-Dist: plotly >=5.14.0 ; extra == 'docs'
+Provides-Extra: examples
+Requires-Dist: matplotlib >=3.3.4 ; extra == 'examples'
+Requires-Dist: scikit-image >=0.17.2 ; extra == 'examples'
+Requires-Dist: pandas >=1.1.5 ; extra == 'examples'
+Requires-Dist: seaborn >=0.9.0 ; extra == 'examples'
+Requires-Dist: pooch >=1.6.0 ; extra == 'examples'
+Requires-Dist: plotly >=5.14.0 ; extra == 'examples'
+Provides-Extra: tests
+Requires-Dist: matplotlib >=3.3.4 ; extra == 'tests'
+Requires-Dist: scikit-image >=0.17.2 ; extra == 'tests'
+Requires-Dist: pandas >=1.1.5 ; extra == 'tests'
+Requires-Dist: pytest >=7.1.2 ; extra == 'tests'
+Requires-Dist: pytest-cov >=2.9.0 ; extra == 'tests'
+Requires-Dist: ruff >=0.0.272 ; extra == 'tests'
+Requires-Dist: black >=23.3.0 ; extra == 'tests'
+Requires-Dist: mypy >=1.3 ; extra == 'tests'
+Requires-Dist: pyamg >=4.0.0 ; extra == 'tests'
+Requires-Dist: polars >=0.19.12 ; extra == 'tests'
+Requires-Dist: pyarrow >=12.0.0 ; extra == 'tests'
+Requires-Dist: numpydoc >=1.2.0 ; extra == 'tests'
+Requires-Dist: pooch >=1.6.0 ; extra == 'tests'
+
+.. -*- mode: rst -*-
+
+|Azure|_ |CirrusCI|_ |Codecov|_ |CircleCI|_ |Nightly wheels|_ |Black|_ |PythonVersion|_ |PyPi|_ |DOI|_ |Benchmark|_
+
+.. |Azure| image:: https://dev.azure.com/scikit-learn/scikit-learn/_apis/build/status/scikit-learn.scikit-learn?branchName=main
+.. _Azure: https://dev.azure.com/scikit-learn/scikit-learn/_build/latest?definitionId=1&branchName=main
+
+.. |CircleCI| image:: https://circleci.com/gh/scikit-learn/scikit-learn/tree/main.svg?style=shield
+.. _CircleCI: https://circleci.com/gh/scikit-learn/scikit-learn
+
+.. |CirrusCI| image:: https://img.shields.io/cirrus/github/scikit-learn/scikit-learn/main?label=Cirrus%20CI
+.. _CirrusCI: https://cirrus-ci.com/github/scikit-learn/scikit-learn/main
+
+.. |Codecov| image:: https://codecov.io/gh/scikit-learn/scikit-learn/branch/main/graph/badge.svg?token=Pk8G9gg3y9
+.. _Codecov: https://codecov.io/gh/scikit-learn/scikit-learn
+
+.. |Nightly wheels| image:: https://github.com/scikit-learn/scikit-learn/workflows/Wheel%20builder/badge.svg?event=schedule
+.. _`Nightly wheels`: https://github.com/scikit-learn/scikit-learn/actions?query=workflow%3A%22Wheel+builder%22+event%3Aschedule
+
+.. |PythonVersion| image:: https://img.shields.io/pypi/pyversions/scikit-learn.svg
+.. _PythonVersion: https://pypi.org/project/scikit-learn/
+
+.. |PyPi| image:: https://img.shields.io/pypi/v/scikit-learn
+.. _PyPi: https://pypi.org/project/scikit-learn
+
+.. |Black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
+.. _Black: https://github.com/psf/black
+
+.. |DOI| image:: https://zenodo.org/badge/21369/scikit-learn/scikit-learn.svg
+.. _DOI: https://zenodo.org/badge/latestdoi/21369/scikit-learn/scikit-learn
+
+.. |Benchmark| image:: https://img.shields.io/badge/Benchmarked%20by-asv-blue
+.. _`Benchmark`: https://scikit-learn.org/scikit-learn-benchmarks/
+
+.. |PythonMinVersion| replace:: 3.9
+.. |NumPyMinVersion| replace:: 1.19.5
+.. |SciPyMinVersion| replace:: 1.6.0
+.. |JoblibMinVersion| replace:: 1.2.0
+.. |ThreadpoolctlMinVersion| replace:: 2.0.0
+.. |MatplotlibMinVersion| replace:: 3.3.4
+.. |Scikit-ImageMinVersion| replace:: 0.17.2
+.. |PandasMinVersion| replace:: 1.1.5
+.. |SeabornMinVersion| replace:: 0.9.0
+.. |PytestMinVersion| replace:: 7.1.2
+.. |PlotlyMinVersion| replace:: 5.14.0
+
+.. image:: https://raw.githubusercontent.com/scikit-learn/scikit-learn/main/doc/logos/scikit-learn-logo.png
+ :target: https://scikit-learn.org/
+
+**scikit-learn** is a Python module for machine learning built on top of
+SciPy and is distributed under the 3-Clause BSD license.
+
+The project was started in 2007 by David Cournapeau as a Google Summer
+of Code project, and since then many volunteers have contributed. See
+the `About us <https://scikit-learn.org/dev/about.html#authors>`__ page
+for a list of core contributors.
+
+It is currently maintained by a team of volunteers.
+
+Website: https://scikit-learn.org
+
+Installation
+------------
+
+Dependencies
+~~~~~~~~~~~~
+
+scikit-learn requires:
+
+- Python (>= |PythonMinVersion|)
+- NumPy (>= |NumPyMinVersion|)
+- SciPy (>= |SciPyMinVersion|)
+- joblib (>= |JoblibMinVersion|)
+- threadpoolctl (>= |ThreadpoolctlMinVersion|)
+
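+One way to check whether an existing environment already meets these
+minimums is ``sklearn.show_versions()``, which prints the versions of
+Python and of the main dependencies; a minimal sketch::
+
+    # Prints Python, numpy, scipy, and other dependency versions,
+    # which can be compared against the minimums listed above.
+    import sklearn
+
+    sklearn.show_versions()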
+
+**Scikit-learn 0.20 was the last version to support Python 2.7 and Python 3.4.**
+scikit-learn 1.0 and later require Python 3.7 or newer.
+scikit-learn 1.1 and later require Python 3.8 or newer.
+
+Scikit-learn plotting capabilities (i.e., functions starting with ``plot_``
+and classes ending with ``Display``) require Matplotlib
+(>= |MatplotlibMinVersion|). Running the examples also requires
+Matplotlib >= |MatplotlibMinVersion|. A few examples require scikit-image >=
+|Scikit-ImageMinVersion|, a few require pandas >= |PandasMinVersion|, and some
+require seaborn >= |SeabornMinVersion| and plotly >= |PlotlyMinVersion|.
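+
+For example, the ``Display`` API can draw a confusion matrix directly from a
+fitted estimator; a minimal sketch (requires Matplotlib; the estimator and
+dataset choices here are arbitrary)::
+
+    import matplotlib.pyplot as plt
+
+    from sklearn.datasets import load_iris
+    from sklearn.linear_model import LogisticRegression
+    from sklearn.metrics import ConfusionMatrixDisplay
+
+    # Fit on the full dataset and plot the confusion matrix on that same
+    # data -- enough to demo the Display API, not to evaluate a model.
+    X, y = load_iris(return_X_y=True)
+    clf = LogisticRegression(max_iter=1000).fit(X, y)
+    ConfusionMatrixDisplay.from_estimator(clf, X, y)
+    plt.show()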
+
+User installation
+~~~~~~~~~~~~~~~~~
+
+If you already have a working installation of NumPy and SciPy,
+the easiest way to install scikit-learn is using ``pip``::
+
+ pip install -U scikit-learn
+
+or ``conda``::
+
+ conda install -c conda-forge scikit-learn
+
+The documentation includes more detailed `installation instructions <https://scikit-learn.org/stable/install.html>`_.
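+
+As a quick smoke test of a fresh install, the sketch below fits a small
+classifier on a bundled dataset (the estimator and dataset choices are
+illustrative only)::
+
+    from sklearn.datasets import load_iris
+    from sklearn.ensemble import RandomForestClassifier
+    from sklearn.model_selection import train_test_split
+
+    # Split the bundled iris data, fit a forest, and report held-out accuracy.
+    X, y = load_iris(return_X_y=True)
+    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+    clf = RandomForestClassifier(random_state=0).fit(X_train, y_train)
+    print(clf.score(X_test, y_test))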
+
+
+Changelog
+---------
+
+See the `changelog <https://scikit-learn.org/dev/whats_new.html>`__
+for a history of notable changes to scikit-learn.
+
+Development
+-----------
+
+We welcome new contributors of all experience levels. The scikit-learn
+community's goals are to be helpful, welcoming, and effective. The
+`Development Guide <https://scikit-learn.org/stable/developers/index.html>`_
+has detailed information about contributing code, documentation, tests, and
+more. We've included some basic information in this README.
+
+Important links
+~~~~~~~~~~~~~~~
+
+- Official source code repo: https://github.com/scikit-learn/scikit-learn
+- Download releases: https://pypi.org/project/scikit-learn/
+- Issue tracker: https://github.com/scikit-learn/scikit-learn/issues
+
+Source code
+~~~~~~~~~~~
+
+You can check the latest sources with the command::
+
+ git clone https://github.com/scikit-learn/scikit-learn.git
+
+Contributing
+~~~~~~~~~~~~
+
+To learn more about making a contribution to scikit-learn, please see our
+`Contributing guide
+<https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md>`_.
+
+Testing
+~~~~~~~
+
+After installation, you can launch the test suite from outside the source
+directory (you will need to have ``pytest`` >= |PytestMinVersion| installed)::
+
+ pytest sklearn
+
+See the web page https://scikit-learn.org/dev/developers/contributing.html#testing-and-improving-test-coverage
+for more information.
+
+ Random number generation can be controlled during testing by setting
+ the ``SKLEARN_SEED`` environment variable.
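+
+A minimal sketch of pinning that seed for a reproducible run (the value
+``42`` and the test target are arbitrary)::
+
+    import os
+    import subprocess
+
+    # Per the note above, SKLEARN_SEED controls random number
+    # generation during testing; the child process inherits it.
+    os.environ["SKLEARN_SEED"] = "42"
+    subprocess.run(["pytest", "sklearn"], check=True)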
+
+Submitting a Pull Request
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before opening a Pull Request, have a look at the
+full Contributing page to make sure your code complies
+with our guidelines: https://scikit-learn.org/stable/developers/index.html
+
+Project History
+---------------
+
+The project was started in 2007 by David Cournapeau as a Google Summer
+of Code project, and since then many volunteers have contributed. See
+the `About us <https://scikit-learn.org/dev/about.html#authors>`__ page
+for a list of core contributors.
+
+The project is currently maintained by a team of volunteers.
+
+**Note**: `scikit-learn` was previously referred to as `scikits.learn`.
+
+Help and Support
+----------------
+
+Documentation
+~~~~~~~~~~~~~
+
+- HTML documentation (stable release): https://scikit-learn.org
+- HTML documentation (development version): https://scikit-learn.org/dev/
+- FAQ: https://scikit-learn.org/stable/faq.html
+
+Communication
+~~~~~~~~~~~~~
+
+- Mailing list: https://mail.python.org/mailman/listinfo/scikit-learn
+- Gitter: https://gitter.im/scikit-learn/scikit-learn
+- Logos & Branding: https://github.com/scikit-learn/scikit-learn/tree/main/doc/logos
+- Blog: https://blog.scikit-learn.org
+- Calendar: https://blog.scikit-learn.org/calendar/
+- Twitter: https://twitter.com/scikit_learn
+- Stack Overflow: https://stackoverflow.com/questions/tagged/scikit-learn
+- GitHub Discussions: https://github.com/scikit-learn/scikit-learn/discussions
+- Website: https://scikit-learn.org
+- LinkedIn: https://www.linkedin.com/company/scikit-learn
+- YouTube: https://www.youtube.com/channel/UCJosFjYm0ZYVUARxuOZqnnw/playlists
+- Facebook: https://www.facebook.com/scikitlearnofficial/
+- Instagram: https://www.instagram.com/scikitlearnofficial/
+- TikTok: https://www.tiktok.com/@scikit.learn
+
+Citation
+~~~~~~~~
+
+If you use scikit-learn in a scientific publication, we would appreciate citations: https://scikit-learn.org/stable/about.html#citing-scikit-learn
diff --git a/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..bf26f7e9a8dd1c2a84ad78d749adeff86a58e0c4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/RECORD
@@ -0,0 +1,1320 @@
+scikit_learn-1.4.2.dist-info/COPYING,sha256=2B5zOc_vVX3r9tghTkoxrNAbMUTSs55CUTw8flwp5cI,1532
+scikit_learn-1.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+scikit_learn-1.4.2.dist-info/METADATA,sha256=LSlaPFxWsJ917qsSh9jKvdQuQFOxhWMKq6OOwxyXYkU,11206
+scikit_learn-1.4.2.dist-info/RECORD,,
+scikit_learn-1.4.2.dist-info/WHEEL,sha256=CzQQWV-lNyM92gr3iaBk8dvO35YDHRxgzkZ-dxumUIM,152
+scikit_learn-1.4.2.dist-info/top_level.txt,sha256=RED9Cd42eES2ITQsRYJc34r65tejDc9eVxnPLzvX9Qg,8
+scikit_learn.libs/libgomp-a34b3233.so.1.0.0,sha256=On6uznIxkRvi-7Gz58tMtcLg-E4MK7c3OUcrWh_uyME,168193
+sklearn/__check_build/__init__.py,sha256=JruJx_tWLpC-K3O0b8tNRXthdPV86qfy9SpbOvscsgM,1680
+sklearn/__check_build/__pycache__/__init__.cpython-310.pyc,,
+sklearn/__check_build/_check_build.cpython-310-x86_64-linux-gnu.so,sha256=cdFJiee-y59Nj5inW-9HZZ1kjghw6aSGkFErNZDOTco,51281
+sklearn/__init__.py,sha256=2TACobDuZbVMiTtXNcfnyrofI2TY0hgS8QJYZklaBT0,5015
+sklearn/__pycache__/__init__.cpython-310.pyc,,
+sklearn/__pycache__/_config.cpython-310.pyc,,
+sklearn/__pycache__/_distributor_init.cpython-310.pyc,,
+sklearn/__pycache__/_min_dependencies.cpython-310.pyc,,
+sklearn/__pycache__/base.cpython-310.pyc,,
+sklearn/__pycache__/calibration.cpython-310.pyc,,
+sklearn/__pycache__/conftest.cpython-310.pyc,,
+sklearn/__pycache__/discriminant_analysis.cpython-310.pyc,,
+sklearn/__pycache__/dummy.cpython-310.pyc,,
+sklearn/__pycache__/exceptions.cpython-310.pyc,,
+sklearn/__pycache__/isotonic.cpython-310.pyc,,
+sklearn/__pycache__/kernel_approximation.cpython-310.pyc,,
+sklearn/__pycache__/kernel_ridge.cpython-310.pyc,,
+sklearn/__pycache__/multiclass.cpython-310.pyc,,
+sklearn/__pycache__/multioutput.cpython-310.pyc,,
+sklearn/__pycache__/naive_bayes.cpython-310.pyc,,
+sklearn/__pycache__/pipeline.cpython-310.pyc,,
+sklearn/__pycache__/random_projection.cpython-310.pyc,,
+sklearn/_build_utils/__init__.py,sha256=9Ik8pfXFJRnME--_KzzrtIphncGUuYtXWQ3NFebLGs4,3610
+sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc,,
+sklearn/_build_utils/__pycache__/openmp_helpers.cpython-310.pyc,,
+sklearn/_build_utils/__pycache__/pre_build_helpers.cpython-310.pyc,,
+sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc,,
+sklearn/_build_utils/__pycache__/version.cpython-310.pyc,,
+sklearn/_build_utils/openmp_helpers.py,sha256=qQn0P1DaPeAVRJ9ex9S6R3bX9rQKPs3bilwbaVHFzzc,4531
+sklearn/_build_utils/pre_build_helpers.py,sha256=wD5ICjm_mnx5vu671Gji6TTQwnxnRtzwStbP4b8tHww,2175
+sklearn/_build_utils/tempita.py,sha256=tbdUwI2z8sZsILx8HW14wvvnZ3nPJu3WSnbrSKNHv44,1580
+sklearn/_build_utils/version.py,sha256=XBgTAN5lyGcAdjCdO7m2Q7CDb5uqdmYzn-5te2TjRwc,369
+sklearn/_config.py,sha256=FNcObaJ5G2Wt8I7RoVvg16XETLNfRAYq-32nc7LRm8c,13493
+sklearn/_distributor_init.py,sha256=WNbFpommZbSnO0E2dEGphWbiyDPYluRs6Zm3M6qVl3g,345
+sklearn/_isotonic.cpython-310-x86_64-linux-gnu.so,sha256=kctj3XZuNiWdwmWEXBPdOeLyE4_5r8yCyx0pY_bsxAg,307073
+sklearn/_loss/__init__.py,sha256=h6rcvQ8upIDgpKkknp4tiCpB4Tc_F64SF0mqwIqFOOo,607
+sklearn/_loss/__pycache__/__init__.cpython-310.pyc,,
+sklearn/_loss/__pycache__/link.cpython-310.pyc,,
+sklearn/_loss/__pycache__/loss.cpython-310.pyc,,
+sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so,sha256=e4IlZL0WW-i9jhOvGbYSOV-MrnDflK3iE64cYwYweOA,3043289
+sklearn/_loss/_loss.pxd,sha256=Bfj-t0M0JWMNEJPSztH7TC4VTgzAiH82VcjM16IzzrY,4315
+sklearn/_loss/link.py,sha256=lTNOfhB8uJKu_gksS0Xs3L41z_-OS4fhJjiGGacElnE,8101
+sklearn/_loss/loss.py,sha256=7loXQtpcKLBssIYsskSf9MvFk6_BrPMM8aPhaNOW5pg,41236
+sklearn/_loss/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc,,
+sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc,,
+sklearn/_loss/tests/test_link.py,sha256=XMHMLjmPA1RHLrAhexoMzVQRlokcCT9LMhajVVnneS0,3954
+sklearn/_loss/tests/test_loss.py,sha256=8XZkS-kHAxk_NLb8JtMQAbVe7umsrEpUAbFc9fXQAL8,48281
+sklearn/_min_dependencies.py,sha256=qia4qBZhJAkE0kswB7R1SZz3RthvyZCOcjZd360r1yc,2481
+sklearn/base.py,sha256=h_wcRlgLC8J94i0UP4OAt0emcPFWdT5apsyxzO3ZWyI,53077
+sklearn/calibration.py,sha256=sRaaATaWWO4ulouLs8GAu2TMoEN543MpjWcRoLd9FfI,49547
+sklearn/cluster/__init__.py,sha256=gmJNjlPlWd3BDjFysHYNjOVikk6Sya7sr_RdIliyHhs,1440
+sklearn/cluster/__pycache__/__init__.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_affinity_propagation.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_agglomerative.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_bicluster.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_birch.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_bisect_k_means.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_dbscan.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_feature_agglomeration.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_kmeans.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_mean_shift.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_optics.cpython-310.pyc,,
+sklearn/cluster/__pycache__/_spectral.cpython-310.pyc,,
+sklearn/cluster/_affinity_propagation.py,sha256=svVmof62bCXaxHNXiyYtdpC0vBGOBwZxDurQVHGP2Yg,20512
+sklearn/cluster/_agglomerative.py,sha256=qN-zIElSTWT8fV7z_FZD5wUFww5LxGyPAI8_9Yyt8ME,49039
+sklearn/cluster/_bicluster.py,sha256=4S3MjZWSfCnPCd0VQfB610PJxfp3-wpRtoT9yvpFjLI,22157
+sklearn/cluster/_birch.py,sha256=iXlHqWOouJNbMIHOi0-PsnNrxg5klhc1NID_dTV4KII,26249
+sklearn/cluster/_bisect_k_means.py,sha256=dn6AaJCv1XkNSZA4M8phB3T94chx10lk377cDn-f-A8,19040
+sklearn/cluster/_dbscan.py,sha256=1T2rDaQitn0iDDF0jef8W27XXw0Tvve6Qdy2BTDjDvw,18290
+sklearn/cluster/_dbscan_inner.cpython-310-x86_64-linux-gnu.so,sha256=LhSRAcFX-Gy_gjokUDO1n7AZzXb6g2nSLtDULjWcjc8,221449
+sklearn/cluster/_feature_agglomeration.py,sha256=9eyY-7HLVKBNVy9uYXHshCWRchkZLaRac1FU_JmPKxI,3347
+sklearn/cluster/_hdbscan/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/cluster/_hdbscan/__pycache__/__init__.cpython-310.pyc,,
+sklearn/cluster/_hdbscan/__pycache__/hdbscan.cpython-310.pyc,,
+sklearn/cluster/_hdbscan/_linkage.cpython-310-x86_64-linux-gnu.so,sha256=QPKua9nh1i0ZnQY2wZTr0EkPpUabBRa_D-yv5drum-I,257737
+sklearn/cluster/_hdbscan/_reachability.cpython-310-x86_64-linux-gnu.so,sha256=fohsD__saJsOWrcAGW5K22rWsWj5SJpQcrKX21JjRbY,364553
+sklearn/cluster/_hdbscan/_tree.cpython-310-x86_64-linux-gnu.so,sha256=786Y1WBySzFR-pjID3c3hjt-UEUKPS-rB9d7DDGbRps,385041
+sklearn/cluster/_hdbscan/_tree.pxd,sha256=Nm7ghFqifD2vLnyBoCQCn9eFsmoB8ITpEuCMItJZoM4,2150
+sklearn/cluster/_hdbscan/hdbscan.py,sha256=dC89rO5W8uMsTfnVTkZr7QDlkIFJnM_2BhozF5ompRk,41849
+sklearn/cluster/_hdbscan/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/cluster/_hdbscan/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/cluster/_hdbscan/tests/__pycache__/test_reachibility.cpython-310.pyc,,
+sklearn/cluster/_hdbscan/tests/test_reachibility.py,sha256=XTEaYbj7i48dD3ImCC4jiVVPF1chsCTJvL78A6kgwVI,2064
+sklearn/cluster/_hierarchical_fast.cpython-310-x86_64-linux-gnu.so,sha256=5IWsSZgy4g_ZW2Tkij1rEHrZ6KRFAeJnaTzT0hdwagM,332041
+sklearn/cluster/_hierarchical_fast.pxd,sha256=JlWtArNtEgc2RBeCJRADftNTPwNV_M-OAsAJz7lHqzY,245
+sklearn/cluster/_k_means_common.cpython-310-x86_64-linux-gnu.so,sha256=zDNJO2tPldG--L7w6oPPCjpYecxrnJ9yafiwRx8-Pzg,528553
+sklearn/cluster/_k_means_common.pxd,sha256=6QW18TtC1wGpyTd0cdG9PxSYTiP4ZN3hj6ltJWrdaic,887
+sklearn/cluster/_k_means_elkan.cpython-310-x86_64-linux-gnu.so,sha256=lZS6V4CUPoS_k15xEUVeBynMzNcMAV8rKMdFsdrCFJ4,525713
+sklearn/cluster/_k_means_lloyd.cpython-310-x86_64-linux-gnu.so,sha256=EAMizxqj6XUAtJljO10E1r5UYs9kF9ssOjHjKHd19rE,381249
+sklearn/cluster/_k_means_minibatch.cpython-310-x86_64-linux-gnu.so,sha256=2uA5WGiNe_-73bwbxPntDtvFqTwIQi0M__ja3EklQ6Y,323521
+sklearn/cluster/_kmeans.py,sha256=tQoOJiD6SkTge6863fMpP_pJN5qET5tqqj75UDbG0Io,82683
+sklearn/cluster/_mean_shift.py,sha256=HVOYLcRVbJagF0olj3atEx_7dl-93AxYrpLfzQtcR-Q,20156
+sklearn/cluster/_optics.py,sha256=WGairyeJcQgkc90bLiIEzV4jpTnTz0XGNsEI3Nmb_wQ,44710
+sklearn/cluster/_spectral.py,sha256=Ll9UD3VCzj9fjpzXjhfkmiCFh39PS6w1Nm4_xaK5Wvo,30496
+sklearn/cluster/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/cluster/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/common.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_affinity_propagation.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_birch.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_bisect_k_means.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_dbscan.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_feature_agglomeration.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_hdbscan.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_hierarchical.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_k_means.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_mean_shift.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_optics.cpython-310.pyc,,
+sklearn/cluster/tests/__pycache__/test_spectral.cpython-310.pyc,,
+sklearn/cluster/tests/common.py,sha256=1jmt9fXRXYt9TYCwJFcgDGV10slNNjJW7_2tRCSzJBY,880
+sklearn/cluster/tests/test_affinity_propagation.py,sha256=p-q92owXh0cG1oPD3d5VZOfQoZMwEeDfRlTAS25NTa0,11898
+sklearn/cluster/tests/test_bicluster.py,sha256=JJjahw-5rSvyNcKpz0ZtM1jl07jvLAB5D9zdzcqMXU4,9126
+sklearn/cluster/tests/test_birch.py,sha256=1vCTQlIByLia7dwJ8Lequ6OhDuuUjnIbayPgoxk0lD0,8606
+sklearn/cluster/tests/test_bisect_k_means.py,sha256=1hf2vfXJ_0aIncY-bZMgx5TXTzGI49YCfVxChYrsLno,5139
+sklearn/cluster/tests/test_dbscan.py,sha256=JSM4FNsCxmH0VyOFSqkdgyB85JdXUMQtd-8lwS7yXSQ,15704
+sklearn/cluster/tests/test_feature_agglomeration.py,sha256=2QkZ3T7Mvfg1XwGh_3-NoNnsnHpIgCxSg5yz4z3KVeo,2758
+sklearn/cluster/tests/test_hdbscan.py,sha256=0GqV7x2I03IjsH5IcDLK8IiwHnZzmYi_BOIG87FNpxY,19392
+sklearn/cluster/tests/test_hierarchical.py,sha256=UJCK18h_TwJ84wn02hOtT3Au8KjLFowRxjkxJ-gVgtg,32593
+sklearn/cluster/tests/test_k_means.py,sha256=CLs8bYbbOY32A0Xpy3kwejgXwcNkHO9P1iJ5nUiIYYA,48900
+sklearn/cluster/tests/test_mean_shift.py,sha256=5lHWOiId4Amtf2QKOYAhRvKREqOTRoQmYo-JLAYUBCc,6740
+sklearn/cluster/tests/test_optics.py,sha256=cEfXbSTjRIQH-sSyermCf_eS7isa40mfAdIqYSsTMY0,23214
+sklearn/cluster/tests/test_spectral.py,sha256=2Iz8eEYiS1d390L3KFpHffjfUXhq1FsGvysIIKXZt64,11903
+sklearn/compose/__init__.py,sha256=3oylapF_cdAjGu_O0dG5y_lVgysP_82YCgMw-dGvEwU,497
+sklearn/compose/__pycache__/__init__.cpython-310.pyc,,
+sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc,,
+sklearn/compose/__pycache__/_target.cpython-310.pyc,,
+sklearn/compose/_column_transformer.py,sha256=WwDIU3wRqL_3JGmaSM7NMr5Y8UXR-cxLqfbhst_urQM,57863
+sklearn/compose/_target.py,sha256=wMqlqp9siDJnPXLt6PZVw0vy23T2v5s4eP-Dl9l_sdU,11914
+sklearn/compose/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc,,
+sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc,,
+sklearn/compose/tests/test_column_transformer.py,sha256=4ImMmeQ-opmKw3h3ityPoXNuM726gUUYwvLjLhEpWSc,88405
+sklearn/compose/tests/test_target.py,sha256=QitfNBvImH78zJ6PZq2djXqN3TRIXUoEjUrk6zrMzM0,13153
+sklearn/conftest.py,sha256=mvWtOrXq6ckIvxYL-WJEeKA22Bu4t87aKaV7KeMw4-U,10497
+sklearn/covariance/__init__.py,sha256=QIZ6_Kv3C0mYMzIiUVrLV78CABMB5XgDjviFL3vLuvs,1116
+sklearn/covariance/__pycache__/__init__.cpython-310.pyc,,
+sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc,,
+sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc,,
+sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc,,
+sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc,,
+sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc,,
+sklearn/covariance/_elliptic_envelope.py,sha256=ceE6aGsrBFdEWkASDAWcJz0mk8Qv2meEu3hRBN5mAlw,9073
+sklearn/covariance/_empirical_covariance.py,sha256=2WJLKg6Rtf2sX4APZIU1AF0Ak2AiOCTP4rbWycf8lY8,12066
+sklearn/covariance/_graph_lasso.py,sha256=ZUBmJZwBFV8v1SeYJ--NRTPZMDH575IAaWMc0BIgDfE,39099
+sklearn/covariance/_robust_covariance.py,sha256=YxvsfJNRwjOJvbnU-9DdVBxRUcDQyZ2PD30pC6reN1Y,33901
+sklearn/covariance/_shrunk_covariance.py,sha256=Wh-9P54AOAYuEY1HHxBhc-Y0Kxlo7q8ryL3i3VwCmcg,27837
+sklearn/covariance/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc,,
+sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc,,
+sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc,,
+sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc,,
+sklearn/covariance/tests/test_covariance.py,sha256=68giftSkZ5lXHCVKZ6TYXdp0ukYSznhIbliJT83qOTE,14154
+sklearn/covariance/tests/test_elliptic_envelope.py,sha256=xCxtRYDNADB22KJURLGRqI2OoWh4LLfazpjgsIWOzH4,1587
+sklearn/covariance/tests/test_graphical_lasso.py,sha256=XOSg2PN_MACNcsnoh44juz7jozxrJlYr1-WqfC21tO0,10237
+sklearn/covariance/tests/test_robust_covariance.py,sha256=dM2CUrR9oPcVLhDjTHpvSHHU2GE2EBgtsFsiPNEPEOE,6384
+sklearn/cross_decomposition/__init__.py,sha256=Ga98Z9vAIoQO6RD5C2HMdRdQd-LcbcuyF0HmkfWYmFY,121
+sklearn/cross_decomposition/__pycache__/__init__.cpython-310.pyc,,
+sklearn/cross_decomposition/__pycache__/_pls.cpython-310.pyc,,
+sklearn/cross_decomposition/_pls.py,sha256=W19rbWEYhqrG5fH2LTd0-iaD6hHAIOnJf5RgdWzzrys,36773
+sklearn/cross_decomposition/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/cross_decomposition/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/cross_decomposition/tests/__pycache__/test_pls.cpython-310.pyc,,
+sklearn/cross_decomposition/tests/test_pls.py,sha256=gfAaqMbUIRfz_XKIFktcddiRoO8Li3y4_zcKYbIuEcI,22294
+sklearn/datasets/__init__.py,sha256=Pbx-Q71GbemvICwD8-ISxH3iKb7nyrB2SYsyIJdBZVk,5170
+sklearn/datasets/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_arff_parser.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_base.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_california_housing.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_covtype.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_kddcup99.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_lfw.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_olivetti_faces.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_openml.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_rcv1.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_samples_generator.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_species_distributions.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_svmlight_format_io.cpython-310.pyc,,
+sklearn/datasets/__pycache__/_twenty_newsgroups.cpython-310.pyc,,
+sklearn/datasets/_arff_parser.py,sha256=-4QnUA8iBRy3ke3rJAnBTSJcGvP-ESyzdzjWNnTe1yE,19046
+sklearn/datasets/_base.py,sha256=BJ1O3bH96OsLMrHMk-Gmdeo1xGO7dGCEbLEjIeeJk5w,46825
+sklearn/datasets/_california_housing.py,sha256=MgHNMr3nKsTBwzNJAnOuEIRRiJufO8umPIDuCBSLw4Y,6707
+sklearn/datasets/_covtype.py,sha256=W9gRIVPQtRIWlrkldUDeksIjXTDYx6c0e_OmEBI8MVc,7603
+sklearn/datasets/_kddcup99.py,sha256=R9CH2XoZLq9w9o6ENKuXfgb1BBxf-Nu65mGm_WFz7Aw,13168
+sklearn/datasets/_lfw.py,sha256=xexQxyAGh3DlPqoiy_rZhA4lVJXFD-8FodfQX2Zjzis,20477
+sklearn/datasets/_olivetti_faces.py,sha256=y7-SibQqxGfuSxD_Pp2ATjpSsq1rdieAGdYiyjQ-cG4,5322
+sklearn/datasets/_openml.py,sha256=XQ1CF6rqHqoI41LB9QL_iMxy10feNCkkmlG_mPW5kHA,41405
+sklearn/datasets/_rcv1.py,sha256=2dxAgHqOYIQpc4S8wHyyS_VTEa7ecYKrEDFB0MLkz58,11086
+sklearn/datasets/_samples_generator.py,sha256=DKy7kYCYpSdnZnJqScF0N6z7789g4VZOrIoMIHc4wdU,74553
+sklearn/datasets/_species_distributions.py,sha256=1wR3_fJrWCgIP-mYXp0GPRQicIBv7QYH9GfBFnMvyQ8,9189
+sklearn/datasets/_svmlight_format_fast.cpython-310-x86_64-linux-gnu.so,sha256=5dAqQdCGu4VaqcaW8tp6fAg-6RqJ1QHT9gZkmsZ7Gk0,590249
+sklearn/datasets/_svmlight_format_io.py,sha256=xj_LWToq8qMPjnPXtHaf_GGa8vZEP-IdRg5T2bTPTcE,20735
+sklearn/datasets/_twenty_newsgroups.py,sha256=voYQ_guybY3A77HnGYkJIc2LeJatPRirx3QqWNCvzUY,18913
+sklearn/datasets/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/data/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/data/boston_house_prices.csv,sha256=2YISY2AmVE_JucDzcuX4GYcub6b6dXqwJe_doiGA8tY,34742
+sklearn/datasets/data/breast_cancer.csv,sha256=_tPrctBXXvYZIpP1CTxugBsUdrV30Dhr9EVVBFIhcu0,119913
+sklearn/datasets/data/diabetes_data_raw.csv.gz,sha256=o-lMx86gD4qE-l9jRSA5E6aO-kLfGPh935vq1yG_1QM,7105
+sklearn/datasets/data/diabetes_target.csv.gz,sha256=jlP2XrgR30PCBvNTS7OvDl_tITvDfta6NjEBV9YCOAM,1050
+sklearn/datasets/data/digits.csv.gz,sha256=CfZubeve4s0rWuWeDWq7tz_CsOAYXS4ZV-nrtR4jqiI,57523
+sklearn/datasets/data/iris.csv,sha256=8T_6j91W_Y5sjRbUCBo_vTEUvNCq5CVsQyBRac2dFEk,2734
+sklearn/datasets/data/linnerud_exercise.csv,sha256=y42MJJN2Q_okWWgu-4bF5me81t2TEJ7vgZZNnp8Rv4w,212
+sklearn/datasets/data/linnerud_physiological.csv,sha256=K_fgXBzX0K3w7KHkVpQfYkvtCk_JZpTWDQ_3hT7F_Pc,219
+sklearn/datasets/data/wine_data.csv,sha256=EOioApCLNPhuXajOli88gGaUvJhFChj2GFGvWfMkvt4,11157
+sklearn/datasets/descr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/descr/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/descr/breast_cancer.rst,sha256=gQy8SJmgJRvjH4zMTmGFkeKK0jyrFyJ-08By1PFzJ7Q,4811
+sklearn/datasets/descr/california_housing.rst,sha256=HBKYZzFjy739ZXX7vWoaRR0yfbw9xkJh6T9u7hQ0znA,1727
+sklearn/datasets/descr/covtype.rst,sha256=C6DmczitjtnrO-XhCIi8WqNT0uPgYnPWNYtKwJTwcn4,1191
+sklearn/datasets/descr/diabetes.rst,sha256=B9z8E5V6gkhb385Ers_7py55d1lZZtEYuB8WLLgn44E,1455
+sklearn/datasets/descr/digits.rst,sha256=iSCr2_fkFa_kMqJQpXkTAImrVDRXBArpZyEkebwVGag,2024
+sklearn/datasets/descr/iris.rst,sha256=t_96OerSrGIHOcfcya-69T0f36nWy6f7TUw9Ex_GwP4,2665
+sklearn/datasets/descr/kddcup99.rst,sha256=lcQA9DcRf3q0Okx8CLUG9avp86rxu6XhsEvrI99FoOQ,3950
+sklearn/datasets/descr/lfw.rst,sha256=soc3irgBLfBmHhRV5reiNq_fdtYrt70p_8LvKTsKQpw,4305
+sklearn/datasets/descr/linnerud.rst,sha256=mwQCX1cbqAqI9YgyXTv_SpMFJaBRgLjTYM-J2Q_M_4U,735
+sklearn/datasets/descr/olivetti_faces.rst,sha256=i8Y7-g4fOPdLvupgJ8i_ze1pA0hGpfDgAoPCGvCPFxI,1834
+sklearn/datasets/descr/rcv1.rst,sha256=3YqlbAwTbg72GXQjlNDtVXJdUPYoRcnZ-bG5vv8o-GM,2466
+sklearn/datasets/descr/species_distributions.rst,sha256=tu6kc_gE8kpjmg60LCjvUMIFDUEy0JadbTrpkvug_Ew,1547
+sklearn/datasets/descr/twenty_newsgroups.rst,sha256=1ADsYCB1zlNfXGYe6dzmeItGNp7kUec68s32dMUjjUQ,10823
+sklearn/datasets/descr/wine_data.rst,sha256=B9qRn3lNE-_1K68Mbv0lX-Y0bffqNEZ6dn_PjMHo-6I,3332
+sklearn/datasets/images/README.txt,sha256=P39i_fcnXC9qTHhglwo57LiFnc-1BiWgFGjRlg_MwG8,712
+sklearn/datasets/images/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/images/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/images/china.jpg,sha256=g3gCWtJRnWSdAuMr2YmQ20q1cjV9nwmEHC-_u0_vrSk,196653
+sklearn/datasets/images/flower.jpg,sha256=p39uxB41Ov34vf8uqYGylVU12NgylPjPpJz05CPdVjg,142987
+sklearn/datasets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_arff_parser.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_base.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_lfw.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_olivetti_faces.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_openml.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc,,
+sklearn/datasets/tests/__pycache__/test_svmlight_format.cpython-310.pyc,,
+sklearn/datasets/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_1/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz,sha256=hi4IUgokM6SVo7066f2ebHxUCpxjLbKbuCUnhMva13k,1786
+sklearn/datasets/tests/data/openml/id_1/api-v1-jdf-1.json.gz,sha256=qWba1Yz1-8kUo3StVVbAQU9e2WIjftVaN5_pbjCNAN4,889
+sklearn/datasets/tests/data/openml/id_1/api-v1-jdq-1.json.gz,sha256=hKhybSw_i7ynnVTYsZEVh0SxmTFG-PCDsRGo6nhTYFc,145
+sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz,sha256=z-iUW5SXcLDaQtr1jOZ9HF_uJc97T9FFFhg3wqvAlCk,1841
+sklearn/datasets/tests/data/openml/id_1119/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_1119/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_1119/api-v1-jd-1119.json.gz,sha256=xB5fuz5ZzU3oge18j4j5sDp1DVN7pjWByv3mqv13rcE,711
+sklearn/datasets/tests/data/openml/id_1119/api-v1-jdf-1119.json.gz,sha256=gviZ7cWctB_dZxslaiKOXgbfxeJMknEudQBbJRsACGU,1108
+sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-dv-1.json.gz,sha256=Sl3DbKl1gxOXiyqdecznY8b4TV2V8VrFV7PXSC8i7iE,364
+sklearn/datasets/tests/data/openml/id_1119/api-v1-jdl-dn-adult-census-l-2-s-act-.json.gz,sha256=bsCVV4iRT6gfaY6XpNGv93PXoSXtbnacYnGgtI_EAR0,363
+sklearn/datasets/tests/data/openml/id_1119/api-v1-jdq-1119.json.gz,sha256=73y8tYwu3P6kXAWLdR-vd4PnEEYqkk6arK2NR6fp-Us,1549
+sklearn/datasets/tests/data/openml/id_1119/data-v1-dl-54002.arff.gz,sha256=aTGvJWGV_N0uR92LD57fFvvwOxmOd7cOPf2Yd83wlRU,1190
+sklearn/datasets/tests/data/openml/id_1590/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz,sha256=mxBa3-3GtrgvRpXKm_4jI5MDTN95gDUj85em3Fv4JNE,1544
+sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz,sha256=BG9eYFZGk_DzuOOCclyAEsPgWGRxOcJGhc7JhOQPzQA,1032
+sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz,sha256=RLmw0pCh4zlpWkMUOPhAgAccVjUWHDl33Rf0wnsAo0o,1507
+sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz,sha256=7h3N9Y8vEHL33RtDOIlpxRvGz-d24-lGWuanVuXdsQo,1152
+sklearn/datasets/tests/data/openml/id_2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_2/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_2/api-v1-jd-2.json.gz,sha256=pnLUNbl6YDPf0dKlyCPSN60YZRAb1eQDzZm1vguk4Ds,1363
+sklearn/datasets/tests/data/openml/id_2/api-v1-jdf-2.json.gz,sha256=wbg4en0IAUocCYB65FjKdmarijxXnL-xieCcbX3okqY,866
+sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-dv-1.json.gz,sha256=6QCxkHlSJP9I5GocArEAINTJhroUKIDALIbwtHLe08k,309
+sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-s-act-.json.gz,sha256=_2Ily5gmDKTr7AFaGidU8qew2_tNDxfc9nJ1QhVOKhA,346
+sklearn/datasets/tests/data/openml/id_2/api-v1-jdq-2.json.gz,sha256=xG9sXyIdh33mBLkGQDsgy99nTxIlvNuz4VvRiCpppHE,1501
+sklearn/datasets/tests/data/openml/id_2/data-v1-dl-1666876.arff.gz,sha256=z-iUW5SXcLDaQtr1jOZ9HF_uJc97T9FFFhg3wqvAlCk,1841
+sklearn/datasets/tests/data/openml/id_292/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_292/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz,sha256=Hmo4152PnlOizhG2i0FTBi1OluwLNo0CsuZPGzPFFpM,551
+sklearn/datasets/tests/data/openml/id_292/api-v1-jd-40981.json.gz,sha256=wm3L4wz7ORYfMFsrPUOptQrcizaNB0lWjEcQbL2yCJc,553
+sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-292.json.gz,sha256=JVwW8z7Sln_hAM2AEafmn3iWA3JLHsLs-R3-tyBnwZA,306
+sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz,sha256=JVwW8z7Sln_hAM2AEafmn3iWA3JLHsLs-R3-tyBnwZA,306
+sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1-s-dact.json.gz,sha256=jvYCVCX9_F9zZVXqOFJSr1vL9iODYV24JIk2bU-WoKc,327
+sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-dv-1.json.gz,sha256=naCemmAx0GDsQW9jmmvzSYnmyIzmQdEGIeuQa6HYwpM,99
+sklearn/datasets/tests/data/openml/id_292/api-v1-jdl-dn-australian-l-2-s-act-.json.gz,sha256=NYkNCBZcgEUmtIqtRi18zAnoCL15dbpgS9YSuWCHl6w,319
+sklearn/datasets/tests/data/openml/id_292/data-v1-dl-49822.arff.gz,sha256=t-4kravUqu1kGbQ_6dP4bVX89L7g8WmK4h2GwnATFOM,2532
+sklearn/datasets/tests/data/openml/id_3/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_3/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz,sha256=BmohZnmxl8xRlG4X7pouKCFUJZkbDOt_EJiMFPfz-Gk,2473
+sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz,sha256=7E8ta8TfOIKwi7oBVx4HkqVveeCpItmEiXdzrNKEtCY,535
+sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz,sha256=Ce8Zz60lxd5Ifduu88TQaMowY3d3MKKI39b1CWoMb0Y,1407
+sklearn/datasets/tests/data/openml/id_3/data-v1-dl-3.arff.gz,sha256=xj_fiGF2HxynBQn30tFpp8wFOYjHt8CcCabbYSTiCL4,19485
+sklearn/datasets/tests/data/openml/id_40589/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_40589/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz,sha256=WdGqawLSNYwW-p5Pvv9SOjvRDr04x8NxkR-oM1573L8,598
+sklearn/datasets/tests/data/openml/id_40589/api-v1-jdf-40589.json.gz,sha256=gmurBXo5KfQRibxRr6ChdSaV5jzPIOEoymEp6eMyH8I,856
+sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-dv-3.json.gz,sha256=Geayoqj-xUA8FGZCpNwuB31mo6Gsh-gjm9HdMckoq5w,315
+sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz,sha256=TaY6YBYzQLbhiSKr_n8fKnp9oj2mPCaTJJhdYf-qYHU,318
+sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz,sha256=0PeXMZPrNdGemdHYvKPH86i40EEFCK80rVca7o7FqwU,913
+sklearn/datasets/tests/data/openml/id_40589/data-v1-dl-4644182.arff.gz,sha256=LEImVQgnzv81CcZxecRz4UOFzuIGU2Ni5XxeDfx3Ub8,4344
+sklearn/datasets/tests/data/openml/id_40675/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_40675/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_40675/api-v1-jd-40675.json.gz,sha256=p4d3LWD7_MIaDpb9gZBvA1QuC5QtGdzJXa5HSYlTpP0,323
+sklearn/datasets/tests/data/openml/id_40675/api-v1-jdf-40675.json.gz,sha256=1I2WeXida699DTw0bjV211ibZjw2QJQvnB26duNV-qo,307
+sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1-s-dact.json.gz,sha256=Ie0ezF2HSVbpUak2HyUa-yFlrdqSeYyJyl4vl66A3Y8,317
+sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1.json.gz,sha256=rQpKVHdgU4D4gZzoQNu5KKPQhCZ8US9stQ1b4vfHa8I,85
+sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-s-act-.json.gz,sha256=FBumMOA56kS7rvkqKI4tlk_Dqi74BalyO0qsc4ompic,88
+sklearn/datasets/tests/data/openml/id_40675/api-v1-jdq-40675.json.gz,sha256=iPzcOm_tVpfzbcJi9pv_-4FHZ84zb_KKId7zqsk3sIw,886
+sklearn/datasets/tests/data/openml/id_40675/data-v1-dl-4965250.arff.gz,sha256=VD0IhzEvQ9n2Wn4dCL54okNjafYy1zgrQTTOu1JaSKM,3000
+sklearn/datasets/tests/data/openml/id_40945/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_40945/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_40945/api-v1-jd-40945.json.gz,sha256=AogsawLE4GjvKxbzfzOuPV6d0XyinQFmLGkk4WQn610,437
+sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz,sha256=lfCTjf3xuH0P_E1SbyyR4JfvdolIC2k5cBJtkI8pEDA,320
+sklearn/datasets/tests/data/openml/id_40945/api-v1-jdq-40945.json.gz,sha256=nH5aRlVKtqgSGDLcDNn3pg9QNM7xpafWE0a72RJRa1Q,1042
+sklearn/datasets/tests/data/openml/id_40945/data-v1-dl-16826755.arff.gz,sha256=UW6WH1GYduX4mzOaA2SgjdZBYKw6TXbV7GKVW_1tbOU,32243
+sklearn/datasets/tests/data/openml/id_40966/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_40966/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz,sha256=NsY8OsjJ21mRCsv0x3LNUwQMzQ6sCwRSYR3XrY2lBHQ,1660
+sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz,sha256=itrI4vjLy_qWd6zdSSepYUMEZdLJlAGDIWC-RVz6ztg,3690
+sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz,sha256=8MIDtGJxdc679SfYGRekmZEa-RX28vRu5ySEKKlI1gM,325
+sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz,sha256=MBOWtKQsgUsaFQON38vPXIWQUBIxdH0NwqUAuEsv0N8,328
+sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz,sha256=Pe6DmH__qOwg4js8q8ANQr63pGmva9gDkJmYwWh_pjQ,934
+sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz,sha256=HF_ZP_7H3rY6lA_WmFNN1-u32zSfwYOTAEHL8X5g4sw,6471
+sklearn/datasets/tests/data/openml/id_42074/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_42074/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz,sha256=9EOzrdc3XKkuzpKWuESaB4AwXTtSEMhJlL3qs2Jx1io,584
+sklearn/datasets/tests/data/openml/id_42074/api-v1-jdf-42074.json.gz,sha256=OLdOfwKmH_Vbz6xNhxA9W__EP-uwwBnZqqFi-PdpMGg,272
+sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz,sha256=h0KnS9W8EgrNkYbIqHN8tCDtmwCfreALJOfOUhd5fyw,722
+sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz,sha256=9iPnd8CjaubIL64Qp8IIjLODKY6iRFlb-NyVRJyb5MQ,2326
+sklearn/datasets/tests/data/openml/id_42585/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_42585/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_42585/api-v1-jd-42585.json.gz,sha256=fMvxOOBmOJX5z1ERNrxjlcFT9iOK8urLajZ-huFdGnE,1492
+sklearn/datasets/tests/data/openml/id_42585/api-v1-jdf-42585.json.gz,sha256=CYUEWkVMgYa05pDr77bOoe98EyksmNUKvaRwoP861CU,312
+sklearn/datasets/tests/data/openml/id_42585/api-v1-jdq-42585.json.gz,sha256=Nzbn_retMMaGdcLE5IqfsmLoAwjJCDsQDd0DOdofwoI,348
+sklearn/datasets/tests/data/openml/id_42585/data-v1-dl-21854866.arff.gz,sha256=yNAMZpBXap7Dnhy3cFThMpa-D966sPs1pkoOhie25vM,4519
+sklearn/datasets/tests/data/openml/id_561/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz,sha256=odOP3WAbZ7ucbRYVL1Pd8Wagz8_vT6hkOOiZv-RJImw,1798
+sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz,sha256=QHQk-3nMMLjp_5CQCzvykkSsfzeX8ni1vmAoQ_lZtO4,425
+sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz,sha256=BwOwriC5_3UIfcYBZA7ljxwq1naIWOohokUVHam6jkw,301
+sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz,sha256=cNRZath5VHhjEJ2oZ1wreJ0H32a1Jtfry86WFsTJuUw,347
+sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz,sha256=h0Oy2T0sYqgvtH4fvAArl-Ja3Ptb8fyya1itC-0VvUg,1074
+sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz,sha256=6WFCteAN_sJhewwi1xkrNAriwo7D_8OolMW-dGuXClk,3303
+sklearn/datasets/tests/data/openml/id_61/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_61/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_61/api-v1-jd-61.json.gz,sha256=pcfnmqQe9YCDj7n8GQYoDwdsR74XQf3dUATdtQDrV_4,898
+sklearn/datasets/tests/data/openml/id_61/api-v1-jdf-61.json.gz,sha256=M8vWrpRboElpNwqzVgTpNjyHJWOTSTOCtRGKidWThtY,268
+sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-dv-1.json.gz,sha256=C84gquf9kDeW2W1bOjZ3twWPvF8_4Jlu6dSR5O4j0TI,293
+sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-s-act-.json.gz,sha256=qfS5MXmX32PtjSuwc6OQY0TA4L4Bf9OE6uw2zti5S64,330
+sklearn/datasets/tests/data/openml/id_61/api-v1-jdq-61.json.gz,sha256=QkzUfBKlHHu42BafrID7VgHxUr14RoskHUsRW_fSLyA,1121
+sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz,sha256=r-RzaSRgZjiYTlcyNRkQJdQZxUXTHciHTJa3L17F23M,2342
+sklearn/datasets/tests/data/openml/id_62/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/datasets/tests/data/openml/id_62/__pycache__/__init__.cpython-310.pyc,,
+sklearn/datasets/tests/data/openml/id_62/api-v1-jd-62.json.gz,sha256=fvNVGtR9SAI8Wh8c8HcEeppLlVRLuR1Khgl_i1dPjQc,656
+sklearn/datasets/tests/data/openml/id_62/api-v1-jdf-62.json.gz,sha256=SJsXcSbLfzNcsiBwkjO5RtOgrXHTi7ptSLeRhxRuWFo,817
+sklearn/datasets/tests/data/openml/id_62/api-v1-jdq-62.json.gz,sha256=J4pSpS1WnwfRTGp4d7EEdix32qxCn7H9mBegN41uxjQ,805
+sklearn/datasets/tests/data/openml/id_62/data-v1-dl-52352.arff.gz,sha256=-1gwyCES9ipADIKsHxtethwpwKfMcrpW0q7_D66KYPk,1625
+sklearn/datasets/tests/data/svmlight_classification.txt,sha256=6u8QK0PeHOxvx7fOYdPsJZTgJfS6SD58WWPYgYz4B3U,254
+sklearn/datasets/tests/data/svmlight_invalid.txt,sha256=ueCvdPekdiYpH8FAH_AW9MHiyMd9SulhrkJ8FQm3ol8,54
+sklearn/datasets/tests/data/svmlight_invalid_order.txt,sha256=xSNKVNcM7TuWkTyTZnQSTTcoBdERxUKoM2yz_gFCaHA,23
+sklearn/datasets/tests/data/svmlight_multilabel.txt,sha256=Pvs1p_nQFKLOfjLJEXNjJeOadVqVulQ_AGVkj7Js5vA,105
+sklearn/datasets/tests/test_20news.py,sha256=Hym_e4P4GkhyBIZ6JfisnCxWEwp1M1qfsfp9shxqCzE,5339
+sklearn/datasets/tests/test_arff_parser.py,sha256=d-kOepobTzFS1w4U23hp0NAC4t8h7d6fP2mWiim89xo,8088
+sklearn/datasets/tests/test_base.py,sha256=3aqHTrwaZW9PVSHK1jeWwW32SqMAugkwEmhbVlobnpo,11996
+sklearn/datasets/tests/test_california_housing.py,sha256=pHO5B6_RrfyPp4bwUrrwHn6VUhJSqizOe5DPw7I9uGs,1368
+sklearn/datasets/tests/test_common.py,sha256=KC0OCuZCSsjzodQXY8aVkgFLLMylkeYK-VJyUJx7S1E,4379
+sklearn/datasets/tests/test_covtype.py,sha256=bjNddYKznLuc-GICOfZD_G0wOgEDiRawc7yg0W0TurM,1756
+sklearn/datasets/tests/test_kddcup99.py,sha256=RAP_s4uVrHYtkmDapHLjjl36heImoGa42VAvU9vZPV4,2606
+sklearn/datasets/tests/test_lfw.py,sha256=ymd_LfoU1WZKDOXgsQEjdMUpfgXIjIxV3lMb47KKujs,8229
+sklearn/datasets/tests/test_olivetti_faces.py,sha256=d2r43YseviKoA9OyX6JvDyXvY8lFRfV__j5hippkYY0,919
+sklearn/datasets/tests/test_openml.py,sha256=6XejfdYalo507PbvwdeinI1ubAOAr_QR-BBpv07ahpY,55329
+sklearn/datasets/tests/test_rcv1.py,sha256=_MI_VuGKrZIIV-WMVxOEKMh94DqzhCrxV7l1E3NGkNM,2343
+sklearn/datasets/tests/test_samples_generator.py,sha256=Nrov2s39olFK7S4fpmCwbdgdxrphJnkXCr3o6lQjVb4,23704
+sklearn/datasets/tests/test_svmlight_format.py,sha256=zvfWSYxjakXnTX6TfokviHxyaZcmjzglCHV1QnkyHAg,20269
+sklearn/decomposition/__init__.py,sha256=hDGJOjAgeeYsMsO5o8PSGLAYmfiS6WRjIGO8OdvSxN4,1296
+sklearn/decomposition/__pycache__/__init__.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_base.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_lda.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_pca.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc,,
+sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc,,
+sklearn/decomposition/_base.py,sha256=ffm1H9Yi3QG8z8pdM3GH4KHx0dUS_BPmqv1UMdvHiWo,6545
+sklearn/decomposition/_cdnmf_fast.cpython-310-x86_64-linux-gnu.so,sha256=E2g9knNUDZhCsUYdgJINy6ZdOgJ3htYOYzXK_SaZ0r4,245625
+sklearn/decomposition/_dict_learning.py,sha256=KHSSXRymhMbdCbFZYN5ZfuMReiDUW7h5n5_ikEOPfh0,76447
+sklearn/decomposition/_factor_analysis.py,sha256=dfC3QYDEyATPFRI8D_2SyhpSG1VJphX7l749PcT_8IU,15301
+sklearn/decomposition/_fastica.py,sha256=tL6X8rpkoH-taeWuHXHJEF0NAiJ3k_IwZaVPW0dv0v8,26439
+sklearn/decomposition/_incremental_pca.py,sha256=-xyUuHdG-0hE4ytUIc8dUXA-SzN0KMMU5ukE_Q1UsEo,15895
+sklearn/decomposition/_kernel_pca.py,sha256=XZmi51u5a314Ucx4leic0CJ-kmXzWCDi3ifZkzEtWqA,21922
+sklearn/decomposition/_lda.py,sha256=PxzDM2sKtBY5F5-6glXWj_XQwhu48nAE5gSzxvkwFcE,33064
+sklearn/decomposition/_nmf.py,sha256=cL5sFfYvdv4QrOyw2T0R8a5yf3l3Oxnw6aufG5iuOL4,82483
+sklearn/decomposition/_online_lda_fast.cpython-310-x86_64-linux-gnu.so,sha256=mV1lYjA0IxVAB84t5CAlHCcl3mS3mNneDyOwz1uW0i4,307137
+sklearn/decomposition/_pca.py,sha256=KV8m7CIfxJe6pKuV05b9srz0MKAdj35CnoqgJtDJalY,28402
+sklearn/decomposition/_sparse_pca.py,sha256=1QEwZkZ61wRWDnWmh5BBsI7NkubbYNOfpFyQtOGWlqA,18014
+sklearn/decomposition/_truncated_svd.py,sha256=WbIoXcppX5_MRfNH-XaNpnOR0hiQbS9Zn5Zx3pGHLjU,11489
+sklearn/decomposition/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc,,
+sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc,,
+sklearn/decomposition/tests/test_dict_learning.py,sha256=2DSscxDCuzl4aTJHIOyxwXIzf_-UFwNpS-N_eXhx420,30432
+sklearn/decomposition/tests/test_factor_analysis.py,sha256=vRdG7UgNwXVith_j7hi7NN-i7zEuDDVrmvrI2rWMDig,4172
+sklearn/decomposition/tests/test_fastica.py,sha256=rQjBsHH1Byt0JSHVLoC_iexG_RHZCS3vWaE6A9dvdfk,15503
+sklearn/decomposition/tests/test_incremental_pca.py,sha256=y8eL0TuPq3pozAlSnUa_IO_35ACQlUgqFceVKKLnQ1Q,15317
+sklearn/decomposition/tests/test_kernel_pca.py,sha256=pifqNdYRI8wYcGzn9aiidQDkauBhzy86vBCElzgTtl4,20772
+sklearn/decomposition/tests/test_nmf.py,sha256=VjLVciOIuLtcLRocB2Nd1GyMpgcaceFurTNEe00Jc_4,34178
+sklearn/decomposition/tests/test_online_lda.py,sha256=WOTsRNAdCr0yy2IjFa_XH7QQ0nTRQKMdpcgEb7Ru8tA,15825
+sklearn/decomposition/tests/test_pca.py,sha256=mUMr5Dss5UhpT-U1sFC1Cl9iH-D3g2G4gO4up_UBduA,35081
+sklearn/decomposition/tests/test_sparse_pca.py,sha256=ktJ8to_V8qYGOOrEt012xmVNWNsKLccjo252xwUdd6w,12866
+sklearn/decomposition/tests/test_truncated_svd.py,sha256=GEh38HYV9jjcbP0FCXzjTo4szDla6NqgLXgIxgW1yvA,7168
+sklearn/discriminant_analysis.py,sha256=GWA89yRds7Ry3ONy-1idzG4l39nsy3GnOgaC7sVTCK4,37501
+sklearn/dummy.py,sha256=xpThXdClt0lf-KFPdVnb2QAHSOAlTz35P2O3oNxyxDM,23817
+sklearn/ensemble/__init__.py,sha256=RSvD9tEa46dcyJkKZqTD2CZS1B7KAD2raG82TRMCuD4,1339
+sklearn/ensemble/__pycache__/__init__.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_base.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_forest.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_gb.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_voting.cpython-310.pyc,,
+sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc,,
+sklearn/ensemble/_bagging.py,sha256=qXIILoAWZQJHt0C3GOdGwdC0EQ_K-db6BZLZDK6faSg,43259
+sklearn/ensemble/_base.py,sha256=qc_d7GmyO8EY9DBw9sazg5j1BtvfisfwDtThY3tZ4ow,10069
+sklearn/ensemble/_forest.py,sha256=MhVW5h7tnQMJpUrv2yKbLCIPStU1z3L95MdduZ9aroo,114121
+sklearn/ensemble/_gb.py,sha256=ealNbODotELREJAk15F403zjLwUuulaP1hADiL5Mmcw,86549
+sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so,sha256=eBmh15i2ccMICZraHan_NJgJWteJ9WAScN0mrbRovMA,253745
+sklearn/ensemble/_hist_gradient_boosting/__init__.py,sha256=eQB-q0KuYskMBmetF1cg6AQnakxh9VaQsYfuALI2HNc,166
+sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so,sha256=yiY9l38txoZld5nU_ZDlo812Ofvx8pCsVZxbV7cCcP8,220953
+sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so,sha256=--g_QAaOBxxrj4AUAeTOJbhcxnlE6QuzYGxaTSiq-Nk,220841
+sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd,sha256=nzEGYyR63gAQmXbMoNwqu_erw51e3TveA9rTcfNCfuM,692
+sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so,sha256=B2BeViDAb7pGPMkHFwec8CSvwQeLLgImlyFqLcjaUBE,229137
+sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so,sha256=JZzVk9WBKZOC1ZKESWwOOMioHFcpU9rqxaWdMFJMFsA,249681
+sklearn/ensemble/_hist_gradient_boosting/binning.py,sha256=J1RrSjj3cDj7YfYBxDYtK_KI5AJlb_gcqgW0Szw8Gls,13385
+sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so,sha256=HvhZ6F8lDaigcvIRETe6CnPZd-Ossj2P3RhnI0bpbYk,146481
+sklearn/ensemble/_hist_gradient_boosting/common.pxd,sha256=x93vLeNO7RyWRLAG7LLXw8p2nxyfIb1CfOrsKkmlHqU,1295
+sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py,sha256=z4FtBxTvGg3xS34Xy22kBXD_m3tM5CuDJMJCxUgUsmo,92936
+sklearn/ensemble/_hist_gradient_boosting/grower.py,sha256=OWpVug6qGP51eJxXtB52cAHYK9d20XkcDtKihgYTTmY,31643
+sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so,sha256=fbSXsPQEjxtAK24yGPSGRaqk8R7q5KcLUCfm7ZtPPcg,327577
+sklearn/ensemble/_hist_gradient_boosting/predictor.py,sha256=J0Jo3GMj9oP3Qh9tpFF4aAtOnANGjgIlebOnFaVogLA,4971
+sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so,sha256=ic91kMoqWcsntsyuv1SiS6h01GUWdBgpIk1Jz-HYRC0,368601
+sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc,,
+sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py,sha256=aNXHw7u7IRAdEfHO2TWdjAmlj9y_SdhJir-w0yQ-fkc,16252
+sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py,sha256=5QHny5G3p9tyExBsdsUVV2vFKgPI-vYDt-zvLpMBHXQ,2100
+sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py,sha256=cdWR8t7G8T4py8jKkF-nKj7st5vbf7ZYEGW4PqbuJpQ,10112
+sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py,sha256=An1lUfNtW_wOdNI2JAck6vNA_rwcN87juB17UQtUerM,60892
+sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py,sha256=mDda3Xp-vF2Kgqdz3bj5UUtC4jUZR--dCesLwmDI50c,23152
+sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py,sha256=PBoacgv-6rOI5lTpzCyaafC9eDvyA6tb94RnDw_wLhs,8681
+sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_contraints.py,sha256=nL5zxXNASEW6pgDuif2SNoaGsV4O5llKmBSt0-8YQCM,16257
+sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py,sha256=wq5vXIMwh7Fr3wDeHGO2F-oNNXEH_hUdyOyS7SIGXpE,6345
+sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py,sha256=nkX5rAlTeO6tPR4_K4Gc9bvViPu1HUboA7-vRdiTETo,38639
+sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py,sha256=3Q_3ZhKf94uvmADlNMj0Vpyp7gqjDd1czBzFW8pUuAQ,7933
+sklearn/ensemble/_hist_gradient_boosting/utils.cpython-310-x86_64-linux-gnu.so,sha256=iVTRDHU7XGxC0IFJe6deOk57Y202CNWJvsrIWQMxH7g,249745
+sklearn/ensemble/_iforest.py,sha256=aL_W3uV-8E9Bp0S5HS7jNCQ4N1ABD8pCRtSDb4CWJeU,20427
+sklearn/ensemble/_stacking.py,sha256=u0v3-k2GXMXrdB0fV3UfhJ28CAyUuOyYinnmK0BUgBI,39027
+sklearn/ensemble/_voting.py,sha256=vy6WPhUfaz9aChF0W2bdebHN1wBrD98i3UIEpPV8Fng,23331
+sklearn/ensemble/_weight_boosting.py,sha256=ggLOV-Vr_KND_lYJiD_9EhoyBJT-8LKPzjhipCDlkyE,45369
+sklearn/ensemble/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc,,
+sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc,,
+sklearn/ensemble/tests/test_bagging.py,sha256=IK_ta2LyqKQVsA4VSNtYYTSo5-P9UIEKQOu3hcWdSOw,30204
+sklearn/ensemble/tests/test_base.py,sha256=7E-TUzBWj17l1AFOx_iXSFeVnpk-ySFsQxc--_WkfrQ,3637
+sklearn/ensemble/tests/test_common.py,sha256=xgl3R6ry0FhN-cbhYcuPKzPpPawqsizBDgdDzeB5Fog,9150
+sklearn/ensemble/tests/test_forest.py,sha256=d_cWIBIG_AVwvDdxcD4ezqM_BqROdqba31eLtbhC0_o,62547
+sklearn/ensemble/tests/test_gradient_boosting.py,sha256=VQ03n0WqmdAp38a9VtQXLw5xrQyui50aBv1LOBBI-vc,58784
+sklearn/ensemble/tests/test_iforest.py,sha256=d3St6sjEGD5d2OmlBZcMnMwXRWCWNrzvmqW9_-XU9aU,12484
+sklearn/ensemble/tests/test_stacking.py,sha256=fpbB1ap5QUddE6KkU3XHDnSZrr3T3hPYmYENBVa1dsg,29896
+sklearn/ensemble/tests/test_voting.py,sha256=VPnVqIr3-PXRS1pFyrBX1dGVsY8vH81uIb92AmUfuLc,24015
+sklearn/ensemble/tests/test_weight_boosting.py,sha256=YmLdvEYIEZML19TnRL7PVJX5PIM4bqdoViuOlZ6PwGY,25403
+sklearn/exceptions.py,sha256=h1GgvgS6iQrKSPYhJHP-XL4ucrWw3VWybIH-PBx_qlQ,6117
+sklearn/experimental/__init__.py,sha256=pWa_UcYBSxmQSZSajN60f97qpKLnE_2etPGLxv1aGsM,252
+sklearn/experimental/__pycache__/__init__.cpython-310.pyc,,
+sklearn/experimental/__pycache__/enable_halving_search_cv.cpython-310.pyc,,
+sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc,,
+sklearn/experimental/__pycache__/enable_iterative_imputer.cpython-310.pyc,,
+sklearn/experimental/enable_halving_search_cv.py,sha256=BkTrG-7xI1EBAamq4bLsSn_vwGyALDGRPxn3p0PcqHY,1210
+sklearn/experimental/enable_hist_gradient_boosting.py,sha256=bBYxZhuti1zFRJpSrkNSgbpSFd4E4gr7z-WKEA9rOQo,746
+sklearn/experimental/enable_iterative_imputer.py,sha256=4DpNhRtWoYgDHXVLsBL30zqAwupL5HRKz40TJWwv4qo,688
+sklearn/experimental/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/experimental/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/experimental/tests/__pycache__/test_enable_hist_gradient_boosting.cpython-310.pyc,,
+sklearn/experimental/tests/__pycache__/test_enable_iterative_imputer.cpython-310.pyc,,
+sklearn/experimental/tests/__pycache__/test_enable_successive_halving.cpython-310.pyc,,
+sklearn/experimental/tests/test_enable_hist_gradient_boosting.py,sha256=yK6u6Rw-18L3BvCXFuAoiYu0ejaRs2jo0U4trj8txPk,666
+sklearn/experimental/tests/test_enable_iterative_imputer.py,sha256=DhjyTu1pyNtknhA2seGOUH-Ygq07G6PFE-Y6gxf-1Fs,1683
+sklearn/experimental/tests/test_enable_successive_halving.py,sha256=uMIi0nVBvAOVGwx0hioe7G7YtsKB_z2ACpHCW2kpFMc,1890
+sklearn/externals/__init__.py,sha256=jo7XxwlsquXvHghwURnScmXn3XraDerjG1fNR_e11-U,42
+sklearn/externals/__pycache__/__init__.cpython-310.pyc,,
+sklearn/externals/__pycache__/_arff.cpython-310.pyc,,
+sklearn/externals/__pycache__/conftest.cpython-310.pyc,,
+sklearn/externals/_arff.py,sha256=YXR8xgF1IxyugQV70YHNjmza2yuz86zhVM1i6AI-RSA,38341
+sklearn/externals/_packaging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/externals/_packaging/__pycache__/__init__.cpython-310.pyc,,
+sklearn/externals/_packaging/__pycache__/_structures.cpython-310.pyc,,
+sklearn/externals/_packaging/__pycache__/version.cpython-310.pyc,,
+sklearn/externals/_packaging/_structures.py,sha256=Ofe3RryZqacr5auj4s7MsEylGigfeyf8sagFvK-rPv0,2922
+sklearn/externals/_packaging/version.py,sha256=IDbp4Q6S9OZ3mP57YCDerh4Xm0s6AUqSi6CbFJ3eQyI,16134
+sklearn/externals/_scipy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/externals/_scipy/__pycache__/__init__.cpython-310.pyc,,
+sklearn/externals/_scipy/sparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/externals/_scipy/sparse/__pycache__/__init__.cpython-310.pyc,,
+sklearn/externals/_scipy/sparse/csgraph/__init__.py,sha256=GMAcZXBWt9Dp0QEOeCsQglt8CWB6_stqr7Wf_LfH0tE,34
+sklearn/externals/_scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc,,
+sklearn/externals/_scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc,,
+sklearn/externals/_scipy/sparse/csgraph/_laplacian.py,sha256=-CiehMT6O8Fk0Yxo4K4BVXrkRTymisAsjv8ck3XSgfw,18150
+sklearn/externals/conftest.py,sha256=NDEaeaJzvW8UZUaFivG1pF9FnrwPR2CfVajgJqISkZY,301
+sklearn/feature_extraction/__init__.py,sha256=3fQtwgSfy-w5zrveCeVDj3CDmXll-ZVFTqPvNsGK-Ns,439
+sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc,,
+sklearn/feature_extraction/__pycache__/_dict_vectorizer.cpython-310.pyc,,
+sklearn/feature_extraction/__pycache__/_hash.cpython-310.pyc,,
+sklearn/feature_extraction/__pycache__/_stop_words.cpython-310.pyc,,
+sklearn/feature_extraction/__pycache__/image.cpython-310.pyc,,
+sklearn/feature_extraction/__pycache__/text.cpython-310.pyc,,
+sklearn/feature_extraction/_dict_vectorizer.py,sha256=YsI6v7I8x-u1-RvhbMHmzPSMSwuOe033yFi7DHzOziE,15718
+sklearn/feature_extraction/_hash.py,sha256=PCcvcBVR4Q28VmByIpiJBZkNs3tM-r7-r64oqKc__3s,7382
+sklearn/feature_extraction/_hashing_fast.cpython-310-x86_64-linux-gnu.so,sha256=zNz_tyaGipADzrYVFBYQ_DUCieWxc-iFpVXjXQsI92E,110289
+sklearn/feature_extraction/_stop_words.py,sha256=ErqFJABA33Wr92H4VSH7ZqrYJ2CbTioOMMASuoB7hrs,5645
+sklearn/feature_extraction/image.py,sha256=jRWj-655lyLhpEgGfG6DeKn3gpmpKMnS1tRmW-MZ5YE,23024
+sklearn/feature_extraction/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/feature_extraction/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/feature_extraction/tests/__pycache__/test_dict_vectorizer.cpython-310.pyc,,
+sklearn/feature_extraction/tests/__pycache__/test_feature_hasher.cpython-310.pyc,,
+sklearn/feature_extraction/tests/__pycache__/test_image.cpython-310.pyc,,
+sklearn/feature_extraction/tests/__pycache__/test_text.cpython-310.pyc,,
+sklearn/feature_extraction/tests/test_dict_vectorizer.py,sha256=FqXV_QEoKyHqgUGouZb3UWB9C-nTfS_wNOuo25o78a0,8272
+sklearn/feature_extraction/tests/test_feature_hasher.py,sha256=WT6h7r7k7gwS3-CvxO4F4ssw4jXSfptTGQKJL9i4D58,5046
+sklearn/feature_extraction/tests/test_image.py,sha256=tfkHiWMMuAeBViFDy3XK-XylnteOYk_cPtlJ1GBmxpk,12154
+sklearn/feature_extraction/tests/test_text.py,sha256=kyBV6n2o-7cTfd1eB011MxOvQucyOmMUOgI1gTLW5xU,53098
+sklearn/feature_extraction/text.py,sha256=wwLJa5ZDZktGwkFN0Ll99BXqX9wfNEeEIgJ91Hv-n_k,78015
+sklearn/feature_selection/__init__.py,sha256=Y3yLE0hnG6-DSRjVSpaFbJhm0SwXfrkkrGSnMu_l4sA,1111
+sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc,,
+sklearn/feature_selection/__pycache__/_base.cpython-310.pyc,,
+sklearn/feature_selection/__pycache__/_from_model.cpython-310.pyc,,
+sklearn/feature_selection/__pycache__/_mutual_info.cpython-310.pyc,,
+sklearn/feature_selection/__pycache__/_rfe.cpython-310.pyc,,
+sklearn/feature_selection/__pycache__/_sequential.cpython-310.pyc,,
+sklearn/feature_selection/__pycache__/_univariate_selection.cpython-310.pyc,,
+sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc,,
+sklearn/feature_selection/_base.py,sha256=r-ccLUsyjkUtX0uV5p1fOIxKLKI2LRKYMfiRnxkOFPg,9398
+sklearn/feature_selection/_from_model.py,sha256=1eKI8Pp2jTOg6PSvR646jll8EoIGVxnfvUBKFYFRrBo,18920
+sklearn/feature_selection/_mutual_info.py,sha256=B3d9U6rzWilnQZwbmqFiivV3jKTxYF33-3Q9tlxdJ1o,18323
+sklearn/feature_selection/_rfe.py,sha256=Rz0C7SBxBZL5FPmjZNO5CAGzvTpjqRX-Pw7cfOLFRfI,28067
+sklearn/feature_selection/_sequential.py,sha256=tEYDrx1EJ4dbDfP8mPq9iMqkCZJ7GJWBwlMNkzceDNQ,11461
+sklearn/feature_selection/_univariate_selection.py,sha256=D_9Wgi_oUS-P7u96yE8dGZJersILOkrngPDrdNl1c9I,40350
+sklearn/feature_selection/_variance_threshold.py,sha256=WP4plcHw5VoDPc8DXMnHCOgP_sXn8RVHK36KQWkUh2A,4467
+sklearn/feature_selection/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/feature_selection/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_base.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_chi2.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_feature_select.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_mutual_info.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_rfe.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_sequential.cpython-310.pyc,,
+sklearn/feature_selection/tests/__pycache__/test_variance_threshold.cpython-310.pyc,,
+sklearn/feature_selection/tests/test_base.py,sha256=jerTDsOutG3KjUUuBmGvt7GKKrTckCFYt1_49raVM1c,4781
+sklearn/feature_selection/tests/test_chi2.py,sha256=c6L3cs9DYulMNUTjnZJo7VURucjhUHLYzG2EaRE9N1c,3139
+sklearn/feature_selection/tests/test_feature_select.py,sha256=eve6wxRgLiNJCbAY1hA-zULouSMg4PciPDCU4uKUt08,32506
+sklearn/feature_selection/tests/test_from_model.py,sha256=nqWJPXgTEG6rlls6mpMN1wNKuFQrFs4ULd3-TfB_4BA,23051
+sklearn/feature_selection/tests/test_mutual_info.py,sha256=OK0btiS0OVknVErUhOMWaIiz8qvZlrG2KmqCZnl09wg,9182
+sklearn/feature_selection/tests/test_rfe.py,sha256=jNZdzozS_FfP9qZOjSNl01cq3D5WIXW5FDyubhXYP0Y,20881
+sklearn/feature_selection/tests/test_sequential.py,sha256=LcjXZDFL5WOgzvwgIWkQDC6EWXoA5LYc-VeWYy2SblM,10592
+sklearn/feature_selection/tests/test_variance_threshold.py,sha256=tKaSBkRgVBzo3xC0lT6nLNNzKW4M-5t_sAFJgUmr--g,2640
+sklearn/gaussian_process/__init__.py,sha256=FjnQR6y5UQeaO_EURpIMUHhAHRCvhKYke-i5NUcQipE,504
+sklearn/gaussian_process/__pycache__/__init__.cpython-310.pyc,,
+sklearn/gaussian_process/__pycache__/_gpc.cpython-310.pyc,,
+sklearn/gaussian_process/__pycache__/_gpr.cpython-310.pyc,,
+sklearn/gaussian_process/__pycache__/kernels.cpython-310.pyc,,
+sklearn/gaussian_process/_gpc.py,sha256=5sKWdevP-qiItuMkFdMNUcxiAr8eQ8al0cfraSYWLYk,36524
+sklearn/gaussian_process/_gpr.py,sha256=ExqrXEWWLocBB4Tb4FYLJ6eedPNtcYQDmjlb1tK8bcI,28140
+sklearn/gaussian_process/kernels.py,sha256=KOkGjxDi-tyuXHNnBWS45S9g_05IEGfP-q25wRhb_UE,85400
+sklearn/gaussian_process/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/gaussian_process/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/gaussian_process/tests/__pycache__/_mini_sequence_kernel.cpython-310.pyc,,
+sklearn/gaussian_process/tests/__pycache__/test_gpc.cpython-310.pyc,,
+sklearn/gaussian_process/tests/__pycache__/test_gpr.cpython-310.pyc,,
+sklearn/gaussian_process/tests/__pycache__/test_kernels.cpython-310.pyc,,
+sklearn/gaussian_process/tests/_mini_sequence_kernel.py,sha256=YpD-vtJFSVdzVmJxHDmEdFGl6cOQ4J98mLpjFCFThys,1571
+sklearn/gaussian_process/tests/test_gpc.py,sha256=wEmrJy4QXzLK9yMitspUDtbo7ah9iOe7oUlwKAJhvkM,10020
+sklearn/gaussian_process/tests/test_gpr.py,sha256=rL6gsDsK0Grlspgkuf8V6ziqE4M2EPIrRwvvnrZmtIs,29775
+sklearn/gaussian_process/tests/test_kernels.py,sha256=MVsPw2Ie4bdtRiAwkuXXix_fPkCK56lqYtW5JWsmJDs,13570
+sklearn/impute/__init__.py,sha256=Ph4HQbNzVak2mVqSkq82KnXGyum-l6GmmSCPPPWLsrY,943
+sklearn/impute/__pycache__/__init__.cpython-310.pyc,,
+sklearn/impute/__pycache__/_base.cpython-310.pyc,,
+sklearn/impute/__pycache__/_iterative.cpython-310.pyc,,
+sklearn/impute/__pycache__/_knn.cpython-310.pyc,,
+sklearn/impute/_base.py,sha256=eBDVk6ojF4dBVKEn4OybjpaN72iSPGGlfo2a8tkIEiw,40012
+sklearn/impute/_iterative.py,sha256=2oxREIeBvtuGNGUd89dUv65ZGt2b3naVOGB0IiKC8bs,35716
+sklearn/impute/_knn.py,sha256=d4mbTlYwQtpuSzTX8PyDsoNbU5LzjtSHalttCLvm8xw,14662
+sklearn/impute/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/impute/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/impute/tests/__pycache__/test_base.cpython-310.pyc,,
+sklearn/impute/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/impute/tests/__pycache__/test_impute.cpython-310.pyc,,
+sklearn/impute/tests/__pycache__/test_knn.cpython-310.pyc,,
+sklearn/impute/tests/test_base.py,sha256=L-RND6V8s4g40Uy65BIdQG1oEtHgOWBliBH4bUVdVQc,3367
+sklearn/impute/tests/test_common.py,sha256=fObDDBu87W8j6Rpc61GtAkNILvWe2s49Wskb7thMvSM,7610
+sklearn/impute/tests/test_impute.py,sha256=u4nZ8I6kP0oXoySNFsBRUAjLXqECkTnP_5Cg2dO5snU,59674
+sklearn/impute/tests/test_knn.py,sha256=kPLvYHuZlY0BgUTgf_bsenI0vkdtNNiF4vC7526QQXw,16638
+sklearn/inspection/__init__.py,sha256=3z35-RH859bsJGG-xoyx4ug0RAHl4JzipPJYRR29vzY,452
+sklearn/inspection/__pycache__/__init__.cpython-310.pyc,,
+sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc,,
+sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc,,
+sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc,,
+sklearn/inspection/_partial_dependence.py,sha256=GSVLBS4jsAFaVNtgGMhvkJPfCLl9-P6wE6JoLIgI_-0,31782
+sklearn/inspection/_pd_utils.py,sha256=ABl-9L-ISk5SgQbG9LntK5PqFz-DJN2A0-yZftEzD1A,2137
+sklearn/inspection/_permutation_importance.py,sha256=Xn_9XqRA7nmjaQmMy04YXPXt9DwZgDcisXTJGayc-Lg,11501
+sklearn/inspection/_plot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc,,
+sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc,,
+sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc,,
+sklearn/inspection/_plot/decision_boundary.py,sha256=UnZ6utsvYn32dn0mHR4YGFdjgmolIuoZudIvRfYRs0Q,15199
+sklearn/inspection/_plot/partial_dependence.py,sha256=LYPQOMrQ4XtOT9nEqZN8W6MKfcELoSovaHZBf49NsJ0,59995
+sklearn/inspection/_plot/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/inspection/_plot/tests/__pycache__/test_boundary_decision_display.cpython-310.pyc,,
+sklearn/inspection/_plot/tests/__pycache__/test_plot_partial_dependence.cpython-310.pyc,,
+sklearn/inspection/_plot/tests/test_boundary_decision_display.py,sha256=4HjTW_PMSGJ8fSD8Hoe2UOXFww4WUf9Hpgg-YKIE1e4,21278
+sklearn/inspection/_plot/tests/test_plot_partial_dependence.py,sha256=YwkCq8P24iylYexK-KQm2ZEF8zSWZ_HDNnENRwuD5-8,36426
+sklearn/inspection/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc,,
+sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc,,
+sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc,,
+sklearn/inspection/tests/test_partial_dependence.py,sha256=Bou6O0F0gybQQjHKWnwUBsMwM3_t8bq06fE1ilv3Cfs,33329
+sklearn/inspection/tests/test_pd_utils.py,sha256=t-8K4YbQAbVK4pcI1P9hr8-0iEgc72x_1-868HAhLBg,1640
+sklearn/inspection/tests/test_permutation_importance.py,sha256=O6G4XbuR34O8M-AcjgNX_AJtSt6NAiuFr-2h8HemaVE,19940
+sklearn/isotonic.py,sha256=-eugGsWN64AoM1UMVYMuvLdnL45_eht-EUU2_ZdKIdc,16637
+sklearn/kernel_approximation.py,sha256=nU45YzkZLn1HoRbPFvZO4_oIO_Pzbwf6qkcBddQvMSU,40838
+sklearn/kernel_ridge.py,sha256=TOKKFRzoTUD99s_uc1dqs8KoDYlT3OqQgIJ5CeqVE1M,9185
+sklearn/linear_model/__init__.py,sha256=6tSQhCEgg7AKO4qNZnWtySPRfzrKlprmeG4NehEPkBo,2529
+sklearn/linear_model/__pycache__/__init__.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_base.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_bayes.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_coordinate_descent.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_huber.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_least_angle.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_linear_loss.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_logistic.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_omp.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_passive_aggressive.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_perceptron.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_quantile.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_ransac.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_ridge.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_sag.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_stochastic_gradient.cpython-310.pyc,,
+sklearn/linear_model/__pycache__/_theil_sen.cpython-310.pyc,,
+sklearn/linear_model/_base.py,sha256=bZ9tuHHeI0J0Jk5_vzLpDO75mmZQT3nc3dFh69ZDxNY,27254
+sklearn/linear_model/_bayes.py,sha256=STnQ-r57Ll32FyDxIjNJK6VqlnygIuyREHa-HYc9Oxc,29747
+sklearn/linear_model/_cd_fast.cpython-310-x86_64-linux-gnu.so,sha256=bHWtW1OqO54G2p0ukhdI463Vk6wlTOkpyaNmTsYTqA8,495809
+sklearn/linear_model/_coordinate_descent.py,sha256=RGbCsBXn31g_l1VidU2GYayHJBgFKC-vMnLN0hyLol0,109065
+sklearn/linear_model/_glm/__init__.py,sha256=vaLhPXiACndKUaLvWZDBlUbipsuEb79-dbKqbMrIppc,263
+sklearn/linear_model/_glm/__pycache__/__init__.cpython-310.pyc,,
+sklearn/linear_model/_glm/__pycache__/_newton_solver.cpython-310.pyc,,
+sklearn/linear_model/_glm/__pycache__/glm.cpython-310.pyc,,
+sklearn/linear_model/_glm/_newton_solver.py,sha256=UHuEPm1ZYAQ4_aiW7yf5UEfojJilAWE_eVgwMfuVG8M,19275
+sklearn/linear_model/_glm/glm.py,sha256=jEhldFwsBWOXejoNSzsEnsB2w2QRYMtjh4s9XOwIG28,31930
+sklearn/linear_model/_glm/tests/__init__.py,sha256=-YHpqhr5PuflTU66U6sex61pDz1R8jl2sTr22hcbUL0,24
+sklearn/linear_model/_glm/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/linear_model/_glm/tests/__pycache__/test_glm.cpython-310.pyc,,
+sklearn/linear_model/_glm/tests/test_glm.py,sha256=z7U7x5RvXSa41dBfooROVKVooYnBUadgcxM0wHgPei8,40696
+sklearn/linear_model/_huber.py,sha256=9n8GzFCh1jmOnzcDExJFTGRUuiVkp8TNX8zuZfYR6_k,12346
+sklearn/linear_model/_least_angle.py,sha256=JV5HDFqHhWXi6HABad7sfOSaYyx8mB-lk7sZqa25pEs,81551
+sklearn/linear_model/_linear_loss.py,sha256=nGJM2OZJuEMB9-Hu1g6x7_t4ZRkPwRQiPNgCtWjueIs,26796
+sklearn/linear_model/_logistic.py,sha256=a3WNhzGlrQgPzNjSKBAOmeuOCtAGZmU6G7Oa8qSxOBs,84028
+sklearn/linear_model/_omp.py,sha256=KBvFm7SyXg4CKo5HG60Lbfz1FoIlpVcfe4H6De589eI,37378
+sklearn/linear_model/_passive_aggressive.py,sha256=b_W3ZwCFkl-sRI3QcUvuXaV9zJFASgx1s5aB7EYfBc0,19323
+sklearn/linear_model/_perceptron.py,sha256=p9ZHZbhx8Tg11zaU6sixaIbf0RE3_2v2g3-FgKJMhxM,7707
+sklearn/linear_model/_quantile.py,sha256=XPD5jYhfLSMJNH05LMD6G-XeebHejjZS9VmvI-wS050,10790
+sklearn/linear_model/_ransac.py,sha256=HjA6TdSe8cTngxekVrveldi9cKLjrnRzPYEp8EoQnNU,22163
+sklearn/linear_model/_ridge.py,sha256=alJBBnePRcxPm_ObfnrOi96wc9dBN25JSVIRDNKOh5Q,93020
+sklearn/linear_model/_sag.py,sha256=imvKwV9VFDYBS8dYvad7yLzH47VSqwqoGtERrsKyWOY,12320
+sklearn/linear_model/_sag_fast.cpython-310-x86_64-linux-gnu.so,sha256=ZcQjB29GZLkJtf_zF_pUY0A94GczJ7XAtk_-6ZrcQVQ,306897
+sklearn/linear_model/_sgd_fast.cpython-310-x86_64-linux-gnu.so,sha256=ryq0M3lgptV73gV53lIUormar9vpU5fKDD93--Jqwuo,385001
+sklearn/linear_model/_sgd_fast.pxd,sha256=vamOXNBtVxqTymLrbwjNp3NHfjWX0q701CrnocsX-0M,897
+sklearn/linear_model/_stochastic_gradient.py,sha256=1P2ddZnhCvm8MehuNi6bZZ6iKbFt7Acgt273DzZORD8,89642
+sklearn/linear_model/_theil_sen.py,sha256=WCBxOSHe_-5TkzCWo37vyVLutITbgcwv0V_W4jNs4C8,15814
+sklearn/linear_model/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/linear_model/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_base.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_bayes.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_coordinate_descent.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_huber.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_least_angle.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_linear_loss.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_logistic.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_omp.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_passive_aggressive.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_perceptron.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_quantile.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_ransac.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_ridge.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_sag.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_sgd.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_sparse_coordinate_descent.cpython-310.pyc,,
+sklearn/linear_model/tests/__pycache__/test_theil_sen.cpython-310.pyc,,
+sklearn/linear_model/tests/test_base.py,sha256=2NhWnz2FjsOF411kl1ESnTJ7ZD3r8Fr9vcCHklK15I8,27229
+sklearn/linear_model/tests/test_bayes.py,sha256=Uc6AdYjx0-5yrylL1hng-iWdEMzxgoRCxAmmYrkApnw,11584
+sklearn/linear_model/tests/test_common.py,sha256=SY7Dd966eZh_skRPCn_hxTfRVFvQkx-NvT8gQINk8hg,4687
+sklearn/linear_model/tests/test_coordinate_descent.py,sha256=FATr-ebv3iSAJwIW_9OD-FT8pBNgJvC4SjcLw0Q6djk,56926
+sklearn/linear_model/tests/test_huber.py,sha256=ExSNx9Xbze4i63cPj7JzsDbPLe6sh8E6CIz4ExEAHtg,7598
+sklearn/linear_model/tests/test_least_angle.py,sha256=9Lk2lhvQpjHfCijgAMUqUj1oNXsCFA0dk1b6AmMtK8k,29553
+sklearn/linear_model/tests/test_linear_loss.py,sha256=cb5WAL0Im_kHbfKJwXWc1wevR5BICpsloWIdDKWbQ3w,12851
+sklearn/linear_model/tests/test_logistic.py,sha256=KM9CDPRWSPilAwQHcTsmZ8FufcyL0YQwI-WIOO1spjc,75541
+sklearn/linear_model/tests/test_omp.py,sha256=bNwa-VyUMMAIUZWaT7gjJJAqq2YaeJMXxQHp8a4Vlac,8913
+sklearn/linear_model/tests/test_passive_aggressive.py,sha256=oylJ8F5LNg0br4zX2LXZRU8FMUECnhsaVL0r8mxmd1Q,8994
+sklearn/linear_model/tests/test_perceptron.py,sha256=rsNfXmS37bAZeZ04kRNhc2PXr4WjjTWDaxW_gNmMCkI,2608
+sklearn/linear_model/tests/test_quantile.py,sha256=nRCu1mou5LF5L9PBQsredTuikzzNwaCMxEFpEVN56T0,11425
+sklearn/linear_model/tests/test_ransac.py,sha256=Nwf_kbnOOyEQvrsfJ9sWHRRxp7BCnupAiX1qbFmsT1k,16778
+sklearn/linear_model/tests/test_ridge.py,sha256=r6xw8diYWugeZmEnZi92UcesKImu5_7dzWkAq_eFFFw,70145
+sklearn/linear_model/tests/test_sag.py,sha256=50W_5pcpA12znM0FGSQOoZP0OycRu6EarHv1LJBK9Go,31398
+sklearn/linear_model/tests/test_sgd.py,sha256=3Nw4IrdNDkgzu6EAqg9oByvZr4lLez75OqcRvoGkoRo,70696
+sklearn/linear_model/tests/test_sparse_coordinate_descent.py,sha256=2_IRPgEBCa6eWM_vtHfVHqX8-LDN7pj027WSpFHjWys,12654
+sklearn/linear_model/tests/test_theil_sen.py,sha256=JzsRgQy-uFE4cscZeRkWoO8fnNMbs4WbWrmcsictlQI,9881
+sklearn/manifold/__init__.py,sha256=yvhOlk50TCT-OMkivl5v6FeUGmmAgnC-C8o1IF0okcU,533
+sklearn/manifold/__pycache__/__init__.cpython-310.pyc,,
+sklearn/manifold/__pycache__/_isomap.cpython-310.pyc,,
+sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc,,
+sklearn/manifold/__pycache__/_mds.cpython-310.pyc,,
+sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc,,
+sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc,,
+sklearn/manifold/_barnes_hut_tsne.cpython-310-x86_64-linux-gnu.so,sha256=HbcOT8ngJhizKXTOpu8g2WTfwaUBAAgLnM0TT3cmA6s,249777
+sklearn/manifold/_isomap.py,sha256=ECrmC0y1lLVBLeHqorF0MypxTbMZY3IIK0mQDD-bHDM,15587
+sklearn/manifold/_locally_linear.py,sha256=9X65zCp6mms8QR-GNMjgbJUYl2ZDf9jfhlgHbzcC2kA,29408
+sklearn/manifold/_mds.py,sha256=BUrTzatb6oG-GG5J_Exf6aEsnhv_KoY5DacPU1kxBV8,23693
+sklearn/manifold/_spectral_embedding.py,sha256=_n6DsuOIuvCiZoeScRjfilf7dYBErqM6qU6-ZJUax6Q,29120
+sklearn/manifold/_t_sne.py,sha256=rBZ3HPZlyNMXWcBJrJO1w6wJ_Vha5KRWOz8W7OV2Kz0,43994
+sklearn/manifold/_utils.cpython-310-x86_64-linux-gnu.so,sha256=fnVMELLk-VXByqxRiWw6p9-37NhDNoU4I_R4PzFjPG0,224969
+sklearn/manifold/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc,,
+sklearn/manifold/tests/__pycache__/test_locally_linear.cpython-310.pyc,,
+sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc,,
+sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc,,
+sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc,,
+sklearn/manifold/tests/test_isomap.py,sha256=Wl4voE-7ZRpK6YS6JKyEpAhccA0ReBlGyLNU-p4hQWc,12074
+sklearn/manifold/tests/test_locally_linear.py,sha256=yxsUuJ7vzm2VxiLi1fuZjzICS_0mXrwIicJHLC79eDM,5772
+sklearn/manifold/tests/test_mds.py,sha256=x9fZ7tRHUoq4cN7JeW80pZARF-vouj1w4fZRBvjsMKc,3043
+sklearn/manifold/tests/test_spectral_embedding.py,sha256=maxnLdSJmSqTwgOxmOQ4wLVQ47KZ5hZEgU2an20GQfo,19398
+sklearn/manifold/tests/test_t_sne.py,sha256=-wY0kWKltNWS_p2XqA9HRXNpn-nuu2X_DbCEypxEb6s,38871
+sklearn/metrics/__init__.py,sha256=JUVKI03c4fpjAdNWDUjmxkCSlF61Y8mBD64yQ9Q44xc,4554
+sklearn/metrics/__pycache__/__init__.cpython-310.pyc,,
+sklearn/metrics/__pycache__/_base.cpython-310.pyc,,
+sklearn/metrics/__pycache__/_classification.cpython-310.pyc,,
+sklearn/metrics/__pycache__/_ranking.cpython-310.pyc,,
+sklearn/metrics/__pycache__/_regression.cpython-310.pyc,,
+sklearn/metrics/__pycache__/_scorer.cpython-310.pyc,,
+sklearn/metrics/__pycache__/pairwise.cpython-310.pyc,,
+sklearn/metrics/_base.py,sha256=WjC_Z-C5TGadsgstffA5dd25jfNoqnBf4750ZzVMw8c,7292
+sklearn/metrics/_classification.py,sha256=kMgwCC5p-wfLynHxsRSLX0Dba7YoDugk54jPc6oHHds,121687
+sklearn/metrics/_dist_metrics.cpython-310-x86_64-linux-gnu.so,sha256=hU6F2nIORqrT0NixeGPK2jAH9FUjWTIOZ3VeRWvpgvE,720825
+sklearn/metrics/_dist_metrics.pxd,sha256=uyKN7HH9w1cim9EvV3VOenBva6an14EAeKik5I6CLC4,7481
+sklearn/metrics/_pairwise_distances_reduction/__init__.py,sha256=L5bCYy276KPl6CLELT4oXlRr1irwb5GUz5PdlgEAxow,5122
+sklearn/metrics/_pairwise_distances_reduction/__pycache__/__init__.cpython-310.pyc,,
+sklearn/metrics/_pairwise_distances_reduction/__pycache__/_dispatcher.cpython-310.pyc,,
+sklearn/metrics/_pairwise_distances_reduction/_argkmin.cpython-310-x86_64-linux-gnu.so,sha256=o9TUMftV1dLCQUmJe2EBlIXNix0PeyFTDMzhXhAysT0,377049
+sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd,sha256=IKvJiVHOjDUl--qruBhGyeasyY9zwfWwH--PXRmbXeg,1752
+sklearn/metrics/_pairwise_distances_reduction/_argkmin_classmode.cpython-310-x86_64-linux-gnu.so,sha256=MMAmifIhM62zM4mn_vdgsp0xskSAFxflR5fkMuMNh-E,282617
+sklearn/metrics/_pairwise_distances_reduction/_base.cpython-310-x86_64-linux-gnu.so,sha256=FyhO3-NvE4VKiAw6ngTTagjzVUH_S0duaTNxR4oR57U,344609
+sklearn/metrics/_pairwise_distances_reduction/_base.pxd,sha256=MihUlUL1_7nH_dN_j17OfY8aZ7aqU34YbCRGZHdxVhg,6996
+sklearn/metrics/_pairwise_distances_reduction/_classmode.pxd,sha256=DndeCKL21LyIGbp42nlWI9CKoyErDByZyQawUagL1XE,151
+sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.cpython-310-x86_64-linux-gnu.so,sha256=GuNkOEcxSR1JiLXce2ajVjSc03At9OXuXC-p2UyBTLY,499553
+sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd,sha256=YIRh4JJd0PpYIaCmXkgIxadeS617pD9oaqaCRGDQ_wg,2911
+sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py,sha256=qeMkh0ViX29TRnZkEEMkBXxi0G0vwTnVKyrhwzyE3rE,29726
+sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.cpython-310-x86_64-linux-gnu.so,sha256=ui_jc74TKc-D5bHuLLDZEXcR0o4jSEpxFb5nbk7lxOA,509657
+sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd,sha256=aDdwzZrYwTCD1By1UZ81e13TerdXhGCHD6rdlo8Q17M,10110
+sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.cpython-310-x86_64-linux-gnu.so,sha256=JDOvMHxaIkR_D9Ep4jXObh1NFBLaEzVbMhwjuSxQEyg,392833
+sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd,sha256=igbfvppcbA4SXjHS_aqMpK33JOCPgIMh3MFjvbKfCSk,5662
+sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors_classmode.cpython-310-x86_64-linux-gnu.so,sha256=3Ya0WE39JlMZf2gBxc3nKMEI0LJ6ZhaI2aZnV5yBo1Q,307249
+sklearn/metrics/_pairwise_fast.cpython-310-x86_64-linux-gnu.so,sha256=7DXkxWTfHq2FKzP_2UbeA-tXGgtnB3sexm29UaaQ2uI,307209
+sklearn/metrics/_plot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/metrics/_plot/__pycache__/__init__.cpython-310.pyc,,
+sklearn/metrics/_plot/__pycache__/confusion_matrix.cpython-310.pyc,,
+sklearn/metrics/_plot/__pycache__/det_curve.cpython-310.pyc,,
+sklearn/metrics/_plot/__pycache__/precision_recall_curve.cpython-310.pyc,,
+sklearn/metrics/_plot/__pycache__/regression.cpython-310.pyc,,
+sklearn/metrics/_plot/__pycache__/roc_curve.cpython-310.pyc,,
+sklearn/metrics/_plot/confusion_matrix.py,sha256=dL2DklWIB38whTFvYrT7RcZiebpbRRlanZm7CekXuaQ,16355
+sklearn/metrics/_plot/det_curve.py,sha256=twcA1JQzSTLE8D4Z68xY6m-qq8P9MlWvfkrosZC-J0s,10770
+sklearn/metrics/_plot/precision_recall_curve.py,sha256=XJnO4-s3fSMv0y5LQ9ln-Ct4ADebDJ5B-bACiKX8VDo,17688
+sklearn/metrics/_plot/regression.py,sha256=eXOZiBoDCBomDxd80N1oNkem1SrgjQHTLEpslCIESco,14346
+sklearn/metrics/_plot/roc_curve.py,sha256=o01tY9ci7B0YRGhia-ToGEuk54t0n0xYjvDetcntTeA,13545
+sklearn/metrics/_plot/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/metrics/_plot/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/metrics/_plot/tests/__pycache__/test_common_curve_display.cpython-310.pyc,,
+sklearn/metrics/_plot/tests/__pycache__/test_confusion_matrix_display.cpython-310.pyc,,
+sklearn/metrics/_plot/tests/__pycache__/test_det_curve_display.cpython-310.pyc,,
+sklearn/metrics/_plot/tests/__pycache__/test_precision_recall_display.cpython-310.pyc,,
+sklearn/metrics/_plot/tests/__pycache__/test_predict_error_display.cpython-310.pyc,,
+sklearn/metrics/_plot/tests/__pycache__/test_roc_curve_display.cpython-310.pyc,,
+sklearn/metrics/_plot/tests/test_common_curve_display.py,sha256=A2r2-yd6syyfyamG1h_GOaL2W9S8wcv88LYfynHbw6M,8815
+sklearn/metrics/_plot/tests/test_confusion_matrix_display.py,sha256=OpUPXurw7DNM9kpp1ATus2RXjxWD7iHTAsqHo8t0JKc,13705
+sklearn/metrics/_plot/tests/test_det_curve_display.py,sha256=uNXmzZ3pIWvnzDiPHBVB0jmuLRsRrO3eK7P-btuyJvQ,3426
+sklearn/metrics/_plot/tests/test_precision_recall_display.py,sha256=TdDMh3ws9RjvLZlKGjkG-TzDRFTc-WNyRL7XGiIrSEE,13100
+sklearn/metrics/_plot/tests/test_predict_error_display.py,sha256=N2GkMVTVXT2miRmNJXTVqnD6JOJu76Om3p4qqBBpr_Y,5786
+sklearn/metrics/_plot/tests/test_roc_curve_display.py,sha256=zGCZrlRPWui4P159bMaVJUISQ4k8l_96vwx7Pw96XAQ,10135
+sklearn/metrics/_ranking.py,sha256=OdDOWILlnxCwnlV7Mgvc5I6vmr60mCMW5wHUvwSk_Jg,75994
+sklearn/metrics/_regression.py,sha256=AP-bMHBlv3Wvd6ED4DMKviUOeRjuFGlVBVH_rRvdgp8,61720
+sklearn/metrics/_scorer.py,sha256=gnfOJ_op78C5jj-N7bUwRHAUJ2j5jvazjXuX9ieIbh4,33389
+sklearn/metrics/cluster/__init__.py,sha256=6pZddbfb9ZXYQyX64s6Jjgv_m_4c9RUcIAHRM7VQcj0,1396
+sklearn/metrics/cluster/__pycache__/__init__.cpython-310.pyc,,
+sklearn/metrics/cluster/__pycache__/_bicluster.cpython-310.pyc,,
+sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc,,
+sklearn/metrics/cluster/__pycache__/_unsupervised.cpython-310.pyc,,
+sklearn/metrics/cluster/_bicluster.py,sha256=jTfT3BsLJgAHGiQXrYWAwY0kSczOCdgkA58Ob9Q1vus,3378
+sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so,sha256=eBAEpEW0R3wamiWAC0Y1Itnga8tBtyTQulsIDv2GOx0,249617
+sklearn/metrics/cluster/_supervised.py,sha256=icnAgeCbQVZwMwQBe23QpF6MaMZ1lSZR6cLqJ8dGrXQ,44498
+sklearn/metrics/cluster/_unsupervised.py,sha256=7ZGogwkpWOCQMm6JqyWfF6b6SV5hStsO_IXPC4Oil3A,17063
+sklearn/metrics/cluster/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/metrics/cluster/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc,,
+sklearn/metrics/cluster/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/metrics/cluster/tests/__pycache__/test_supervised.cpython-310.pyc,,
+sklearn/metrics/cluster/tests/__pycache__/test_unsupervised.cpython-310.pyc,,
+sklearn/metrics/cluster/tests/test_bicluster.py,sha256=KecSxviHfRfUMNVZ0g77Ykx96QAuKoax0YUY8paQjFg,1719
+sklearn/metrics/cluster/tests/test_common.py,sha256=OMkbcRz79VNnWsb2EDctzuD5jWHDpbPHT77jyDc_zWg,7755
+sklearn/metrics/cluster/tests/test_supervised.py,sha256=RROPHFeKWDeEtzuPsIn-CP8_i10m2f9txI0PR9_V8hE,17873
+sklearn/metrics/cluster/tests/test_unsupervised.py,sha256=eAic9M_89S8Xbk1hEX0xyIeBW2GrAwPOTpNuNob3TaU,12269
+sklearn/metrics/pairwise.py,sha256=4geFniY4-JPM5GWl1XPFKqaNQmMS6YY2khZe98nBvqo,85973
+sklearn/metrics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/metrics/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_classification.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_dist_metrics.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_pairwise.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_pairwise_distances_reduction.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_ranking.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_regression.cpython-310.pyc,,
+sklearn/metrics/tests/__pycache__/test_score_objects.cpython-310.pyc,,
+sklearn/metrics/tests/test_classification.py,sha256=BhDlDMA3xkk7M4pr3gq3OeazCOQGv5I0DYNahHl5mcg,101469
+sklearn/metrics/tests/test_common.py,sha256=v0hzqsDVldMoegkFPUmuz5IQX1s72ZX4kxQCi4Lhra4,60017
+sklearn/metrics/tests/test_dist_metrics.py,sha256=W-yVTM8LQ1C-VYgLu450dDy3mYvgvqjoRX476-bM0E0,14802
+sklearn/metrics/tests/test_pairwise.py,sha256=G7e4tg-YJCP09-FiWxx0CfpdudhO2SvhdL_bXiso_W4,56671
+sklearn/metrics/tests/test_pairwise_distances_reduction.py,sha256=-hMjyDA0OHHyfjYQLEk2EkwlRnCaB61iZgrF5cpHCDI,53017
+sklearn/metrics/tests/test_ranking.py,sha256=5bkKeiU6_sqFN-tfztL6nUYJUH1bTd1YVLxlCdUnsSY,82385
+sklearn/metrics/tests/test_regression.py,sha256=03afzfelew-lWexi9kwfGI8piVd2pefS-xcJAUbtav8,27231
+sklearn/metrics/tests/test_score_objects.py,sha256=nFN_XqApK5NFN05gZZOVRjo9h8ZSiRYCahaUTAmnOc8,53184
+sklearn/mixture/__init__.py,sha256=1c5Ss-UEnXZNlyEKN11jPw5QzPYNxn6n1YY3VtzWyXA,243
+sklearn/mixture/__pycache__/__init__.cpython-310.pyc,,
+sklearn/mixture/__pycache__/_base.cpython-310.pyc,,
+sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc,,
+sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc,,
+sklearn/mixture/_base.py,sha256=mI0zB_yhFCaog5XQqOKACQy0GztM-MhlSmXLYb0PK4g,18718
+sklearn/mixture/_bayesian_mixture.py,sha256=9_bWvxgoUrW6PJCM3CMNh4H27_a2ycMZVUGGD5-4LVU,33467
+sklearn/mixture/_gaussian_mixture.py,sha256=Xn4Oavq0PZOTJNX1bw32IMSTNmj_jewxNxTLiYV6jQs,31659
+sklearn/mixture/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc,,
+sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc,,
+sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc,,
+sklearn/mixture/tests/test_bayesian_mixture.py,sha256=rNDmWnACHub_f4CGl5GeRPt-l3MqhGcREMtZFszIgXk,17110
+sklearn/mixture/tests/test_gaussian_mixture.py,sha256=LBWfWSpeWBB0_nr3AiQwM0isQhv7Z32-655AsCstFHg,47721
+sklearn/mixture/tests/test_mixture.py,sha256=8TOVUJp9u9bi73KU2GaYhxPrB_s3U5vD0jLtIXDiAbM,992
+sklearn/model_selection/__init__.py,sha256=CUpYGOYvE8nZoMR6gdqc3rB3wsrOqRYIrg5-gouoOzI,2316
+sklearn/model_selection/__pycache__/__init__.cpython-310.pyc,,
+sklearn/model_selection/__pycache__/_plot.cpython-310.pyc,,
+sklearn/model_selection/__pycache__/_search.cpython-310.pyc,,
+sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc,,
+sklearn/model_selection/__pycache__/_split.cpython-310.pyc,,
+sklearn/model_selection/__pycache__/_validation.cpython-310.pyc,,
+sklearn/model_selection/_plot.py,sha256=h68rAhmsYQ7ubnxEtYpPA5WPxqy98kqxU-vcu615j5E,35291
+sklearn/model_selection/_search.py,sha256=M8HbvD99GlmA70fEnSe5m0x1jUkYnbDM-MavFLRAXTk,75511
+sklearn/model_selection/_search_successive_halving.py,sha256=4LzlzkYXpMVDscX-6OqiLC_auZ8nQq8y8mQBuDBxPaI,43900
+sklearn/model_selection/_split.py,sha256=lCjgz-MketLeqSW1djkS8KFpBS6AZQvmNEfSQyUM-Lk,99167
+sklearn/model_selection/_validation.py,sha256=f-bKWlKHHuaqmsLTfcgpzZAxL8lHNH5Xnsbox9uZOgA,88502
+sklearn/model_selection/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc,,
+sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc,,
+sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc,,
+sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc,,
+sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc,,
+sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc,,
+sklearn/model_selection/tests/common.py,sha256=PrR7WoVcn4MdG4DPrOvuZ1jrOIZPFPok20zannr4dwI,641
+sklearn/model_selection/tests/test_plot.py,sha256=SHEVhEgg8Ar4Aic-uLQxxHRpitKdkF50SxyosZXWeWY,19330
+sklearn/model_selection/tests/test_search.py,sha256=b2vco1mttHy5xlkZf6C2-oy0g8EKjN1zrA_DceZw8dc,84676
+sklearn/model_selection/tests/test_split.py,sha256=89bSigW200izAmnePEfGuOEf9kcEyuij0PG4SJ-jaX8,71585
+sklearn/model_selection/tests/test_successive_halving.py,sha256=XRpwkf-GLoqKyS7BGpPXJgiU4Hu9K4OOymMxRd2R3bM,28876
+sklearn/model_selection/tests/test_validation.py,sha256=SCutRhi7WgozJ2HiCZL_KaEBkFZZ4BP2s-mUBK1tpXQ,88874
+sklearn/multiclass.py,sha256=kDl_rhoa6x2KEK0PYyysQQHcmel4mOV2nFezGUG-u5c,43819
+sklearn/multioutput.py,sha256=eNg00fTjQJ52KrYQLe5szjZyP6xyHCE43r2n9tBuerw,40821
+sklearn/naive_bayes.py,sha256=hzSLegXMPpspWqxOagemcpIeTiTv9j4-F7trAqei29s,55670
+sklearn/neighbors/__init__.py,sha256=oYtShbfAlQmHKtukZN1cB4mANgk0XH5Je6LcBcc3KWU,1219
+sklearn/neighbors/__pycache__/__init__.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_base.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_classification.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_graph.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_kde.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_lof.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_nca.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_nearest_centroid.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_regression.cpython-310.pyc,,
+sklearn/neighbors/__pycache__/_unsupervised.cpython-310.pyc,,
+sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so,sha256=zr7PTp2pnXfCifgefJdUF8fOKQTMqhpGN-eEi8axna0,774265
+sklearn/neighbors/_base.py,sha256=heTIm4F5hLlWdS0K9YumnBn0_UAytJgoWXweYxaBtiY,51658
+sklearn/neighbors/_classification.py,sha256=KTg3Zaw3iBtrUJw1MqrCXzppVDen3NC3QfJbur7TKjk,31731
+sklearn/neighbors/_graph.py,sha256=8fxtFbnUAJK5a-KpvZ4L-JjrhwcF4xT-BsoevmvJybU,25037
+sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so,sha256=uLTbqpqXGyV6mW_kPznddB479YWg2AedIGdbOUcGGxg,774265
+sklearn/neighbors/_kde.py,sha256=Q33slUSLFOS0tX6sbpC1oLplc56xPETeJzE7vmWYTzQ,12461
+sklearn/neighbors/_lof.py,sha256=DiDUtX_2AD7N44vaU4Az7aMN_ZmgHe0LLnolm7-NjZQ,19708
+sklearn/neighbors/_nca.py,sha256=GUF6lhQh5Tl8xr725oxHOSeOGlVAg8ZRVST-T0U4CdI,19586
+sklearn/neighbors/_nearest_centroid.py,sha256=sZhThggQZbynbh7YwbyE98SzK6wQAgnwy3SD1yjyMDM,9645
+sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so,sha256=wnts5K9J-5wKXjrbxN6tbjYuc4eGv44fIx8tUiHjJP8,45289
+sklearn/neighbors/_partition_nodes.pxd,sha256=rngZZqkJWPnBW8BRvk0FgM817-lcHgCoBWEd91X0Dbc,288
+sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so,sha256=jSTqsOd4dfQA3LhsaGWzLkbHZrZR0q3Cn4_pOLZxGeo,315401
+sklearn/neighbors/_quad_tree.pxd,sha256=G4ohAXB3bNYtGCQR-2U-hiGYk4yD6iL9OcpXqE8Xxms,4259
+sklearn/neighbors/_regression.py,sha256=NTRA-FqsIXCGkamHQozTImdsxIZ9YeNx1fPwXrqKeBY,18123
+sklearn/neighbors/_unsupervised.py,sha256=-cZCICevUiAmYm8mcI5ysPjxHQLQ4slcZKQGhEqj2BU,6179
+sklearn/neighbors/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_graph.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_kde.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc,,
+sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc,,
+sklearn/neighbors/tests/test_ball_tree.py,sha256=hpoJiFpMrGxw0FEhd-KghO8zWtonaqSr1JgbKB7sdN0,7097
+sklearn/neighbors/tests/test_graph.py,sha256=QdJvyK2N138biDPhixx_Z9xbJ7R-aSxz5mhSSvh-HRg,3547
+sklearn/neighbors/tests/test_kd_tree.py,sha256=4cE2XJO0umuWnWPQluOMR9jfeJKDXmFETowsLElwKCI,3898
+sklearn/neighbors/tests/test_kde.py,sha256=kEZsv-8U0oWrkAVuzRidsqL5w1jQZ2b7tK9pFZYnm44,9745
+sklearn/neighbors/tests/test_lof.py,sha256=8ZOHkxKArqaVUHDn9yspJV3y-MzRYYTm-CykXxLx2yQ,12899
+sklearn/neighbors/tests/test_nca.py,sha256=dzdlTxsEUc7RBeVh12BCWlAcJRjCkfIQxOTh16kxkek,19052
+sklearn/neighbors/tests/test_nearest_centroid.py,sha256=uevLVoTjrbYAzKCokCjNnE2lTbzdOH67kgxBGBf7wio,5672
+sklearn/neighbors/tests/test_neighbors.py,sha256=E-sVzwTuE4ZGTMpLI2Mif0W9VsbV94lsjLO9scClQKs,81900
+sklearn/neighbors/tests/test_neighbors_pipeline.py,sha256=CZRj-Kvkeib7ljclLMW05ILcpk3jcgjmqXHaU4PWCE0,8137
+sklearn/neighbors/tests/test_neighbors_tree.py,sha256=8PMvvvHE8LkYT9oKeRvYvsi97HhmG1-_pTeFcTslpOM,9281
+sklearn/neighbors/tests/test_quad_tree.py,sha256=y_WE4jNxliYos_SiICl_miGIya2IJlu71rXzwvQw2qk,4856
+sklearn/neural_network/__init__.py,sha256=xEslFJWTVDhzQzsuBV2VCvfmSxUnxYkcuWu6SrlO-rU,273
+sklearn/neural_network/__pycache__/__init__.cpython-310.pyc,,
+sklearn/neural_network/__pycache__/_base.cpython-310.pyc,,
+sklearn/neural_network/__pycache__/_multilayer_perceptron.cpython-310.pyc,,
+sklearn/neural_network/__pycache__/_rbm.cpython-310.pyc,,
+sklearn/neural_network/__pycache__/_stochastic_optimizers.cpython-310.pyc,,
+sklearn/neural_network/_base.py,sha256=NfmnHxbXeY7PAtWwpcFxqQYGCpJyaStHVug44WGX0WE,6329
+sklearn/neural_network/_multilayer_perceptron.py,sha256=h0ShX9OEVERF5bq8kWBNNYOe4-ImA0aHjm0-G02ZNqo,60524
+sklearn/neural_network/_rbm.py,sha256=svNo2BJCW7Nz4jV44hyYGSH1xQfcGCJRv0J0OYFO07E,15121
+sklearn/neural_network/_stochastic_optimizers.py,sha256=ZJSXQyJzouUJjj0X2CK463EgI4wpQYtrrMptkCye-2c,8823
+sklearn/neural_network/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/neural_network/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/neural_network/tests/__pycache__/test_base.cpython-310.pyc,,
+sklearn/neural_network/tests/__pycache__/test_mlp.cpython-310.pyc,,
+sklearn/neural_network/tests/__pycache__/test_rbm.cpython-310.pyc,,
+sklearn/neural_network/tests/__pycache__/test_stochastic_optimizers.cpython-310.pyc,,
+sklearn/neural_network/tests/test_base.py,sha256=YwqN20qHbC_5ZsPRDJyducMZfu-D3V5b1zmpj0C3au8,796
+sklearn/neural_network/tests/test_mlp.py,sha256=VCtmb1cVy9nJrtFLdwQfYFRVz94vvHGyPSR8Hf6-5Ek,31862
+sklearn/neural_network/tests/test_rbm.py,sha256=Ucezw6y1X0HU9PEC9lniKrqXplVXjfX5yjWueHIPPkg,8048
+sklearn/neural_network/tests/test_stochastic_optimizers.py,sha256=9JhAPo1Qc0sA735qPORoKtS04bCTts9lQ65P9Qlhtyo,4137
+sklearn/pipeline.py,sha256=s0uOf3NQ1ZArYI3nwZ1DWejNLo55nBhkBl8nPU9aEhA,66341
+sklearn/preprocessing/__init__.py,sha256=FVVSh0K0CzwHL_zgm_cfrFrmTfEJ7dwh24RzDYyHwyo,1460
+sklearn/preprocessing/__pycache__/__init__.cpython-310.pyc,,
+sklearn/preprocessing/__pycache__/_data.cpython-310.pyc,,
+sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc,,
+sklearn/preprocessing/__pycache__/_encoders.cpython-310.pyc,,
+sklearn/preprocessing/__pycache__/_function_transformer.cpython-310.pyc,,
+sklearn/preprocessing/__pycache__/_label.cpython-310.pyc,,
+sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc,,
+sklearn/preprocessing/__pycache__/_target_encoder.cpython-310.pyc,,
+sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so,sha256=qLpJ53RN2aePBI1-H7IOczWKmEkoaKoSA1Dxyko4pFM,491585
+sklearn/preprocessing/_data.py,sha256=pkUi4XZuy_l7d9_NPMEhyEbht9ZGIwcvac8a6PwQGhM,125337
+sklearn/preprocessing/_discretization.py,sha256=CcQMmW4Cc4vL5OnhrBAcs8604IbirUISkUP9eQNDjQI,17376
+sklearn/preprocessing/_encoders.py,sha256=xUPLS-jhshgHPgeGQteIgMOwEoDRwwoemK9fi9FwT80,67754
+sklearn/preprocessing/_function_transformer.py,sha256=JR9vVxGCHFL17DlH1zM9StrCnUR84JplmpopLlsZUTY,16633
+sklearn/preprocessing/_label.py,sha256=Oud8OIGsfp4tuIcm-67rGgP9CxUomQOoG50EAsesICc,30800
+sklearn/preprocessing/_polynomial.py,sha256=EyM35Uk7krG65JXm1-jwBvFmS7Rfu-FLX1WYfokK_LY,47444
+sklearn/preprocessing/_target_encoder.py,sha256=Vr29eER6I3L-g4fsVyX4P_PKHs0R_qLFDfGfUbfg6vY,20476
+sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so,sha256=NxImRT6vYf08qn6Ech4UC8OgRj0yzhRBn7O8hz2TBVM,569905
+sklearn/preprocessing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/preprocessing/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_discretization.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_encoders.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_label.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_polynomial.cpython-310.pyc,,
+sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc,,
+sklearn/preprocessing/tests/test_common.py,sha256=1gLqwBEMTpCJOMsftAwACox0d8wbqfB9z-JtRLlx9NM,6793
+sklearn/preprocessing/tests/test_data.py,sha256=8FwhXLqwlBtvmdowUH1ta-oBR_9rzTH5TlLxUELon58,94473
+sklearn/preprocessing/tests/test_discretization.py,sha256=DONU9R99_U13m8X7ov0WhFXVaj0C0a0Fu3xDQBqioKo,18054
+sklearn/preprocessing/tests/test_encoders.py,sha256=Jiz_L7a9thKlffgFi97evLvSuzsjBKytvmrxgQrdt-8,78684
+sklearn/preprocessing/tests/test_function_transformer.py,sha256=vo9Fei9PXJ3a9zqYbqwbFkwPpmlpffxNUOJ0Pgt3VhA,19827
+sklearn/preprocessing/tests/test_label.py,sha256=ufVtUsrYCn30ovYRnokrZN7F1_I2CgP7E53J9xz0Q3w,23634
+sklearn/preprocessing/tests/test_polynomial.py,sha256=Irt2g5oMUJQkz9Px5MPxa6yAh77IK15XbuwF-Yui74c,42411
+sklearn/preprocessing/tests/test_target_encoder.py,sha256=Q-KoQ6CvyMbgqI9DsPJ7p06QjfANqFsgibwtgyad5L0,27915
+sklearn/random_projection.py,sha256=GgJM_r7GffPW12bMjsWG0AUFlXNSH6lhgLtD3Hm_oq4,28095
+sklearn/semi_supervised/__init__.py,sha256=7JKLmXpZsl1U-4PY8V9IwqjIGWxvngEQWaEqMokg1Rg,448
+sklearn/semi_supervised/__pycache__/__init__.cpython-310.pyc,,
+sklearn/semi_supervised/__pycache__/_label_propagation.cpython-310.pyc,,
+sklearn/semi_supervised/__pycache__/_self_training.cpython-310.pyc,,
+sklearn/semi_supervised/_label_propagation.py,sha256=_yfmtGcm_9KYy6W8CQpaOEc-CEKWbNTdFwCR_GRtUUc,21294
+sklearn/semi_supervised/_self_training.py,sha256=FiXc9mfrFh33C71N4enyTVxg4il7PHfCcU2lwS2uMhk,14342
+sklearn/semi_supervised/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/semi_supervised/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/semi_supervised/tests/__pycache__/test_label_propagation.cpython-310.pyc,,
+sklearn/semi_supervised/tests/__pycache__/test_self_training.cpython-310.pyc,,
+sklearn/semi_supervised/tests/test_label_propagation.py,sha256=R-Lkmzc5k5JFpcSUi0_EbUyTUBNTw4HrGAeOALICiK8,8803
+sklearn/semi_supervised/tests/test_self_training.py,sha256=QrLkQD4b2wLwZ2uvVk0AOGFwCaYobirgPUqBMZMmKgU,12543
+sklearn/svm/__init__.py,sha256=kxgE5R-_9xxQTPiwVXfSTkaWdaH2lfWzooXqpDzXZNQ,636
+sklearn/svm/__pycache__/__init__.cpython-310.pyc,,
+sklearn/svm/__pycache__/_base.cpython-310.pyc,,
+sklearn/svm/__pycache__/_bounds.cpython-310.pyc,,
+sklearn/svm/__pycache__/_classes.cpython-310.pyc,,
+sklearn/svm/_base.py,sha256=xtK0rgQe2XNBJfGRetK2nzPIRXUJdlH8gEh_b6trlGU,42474
+sklearn/svm/_bounds.py,sha256=8N53Wjx_F76pHCOP1EfTm8GRjjt9U-onTvb7IHmgFCs,3251
+sklearn/svm/_classes.py,sha256=rzmphrEcRUindolDnLgiClHonEYGTO57uV284sFYZ8E,66940
+sklearn/svm/_liblinear.cpython-310-x86_64-linux-gnu.so,sha256=3FM1IV0qEcOnEfw3VYgXBaiHD5r4Osm2GQH43dQy-4E,543113
+sklearn/svm/_libsvm.cpython-310-x86_64-linux-gnu.so,sha256=hyU5JuzpTMfH18KOGK6oXWblsVzyLgI-b_4UxMQOmBE,968673
+sklearn/svm/_libsvm_sparse.cpython-310-x86_64-linux-gnu.so,sha256=qH-Lgg4p0Cy1C55lxhwpCE4kZj_6txkU1i7mtIeJFCY,927689
+sklearn/svm/_newrand.cpython-310-x86_64-linux-gnu.so,sha256=srKE3DH6gtMwcQFLnHgWG05UPC2xaQCDpPVBxs-T7DE,68241
+sklearn/svm/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/svm/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/svm/tests/__pycache__/test_bounds.cpython-310.pyc,,
+sklearn/svm/tests/__pycache__/test_sparse.cpython-310.pyc,,
+sklearn/svm/tests/__pycache__/test_svm.cpython-310.pyc,,
+sklearn/svm/tests/test_bounds.py,sha256=PcJwSEhh_ChQfEASag3RU92Q_ASyNDeAYcQmlSmNrOI,5232
+sklearn/svm/tests/test_sparse.py,sha256=EPW5UE4LKBfN-Xh39vC55784Jb38W7466CWwfPUZJL8,15697
+sklearn/svm/tests/test_svm.py,sha256=0LyLMzJDto7zy5B_i4e4SXPUofzujLugkuokyfauZBo,49059
+sklearn/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc,,
+sklearn/tests/__pycache__/random_seed.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_base.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_build.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_calibration.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_check_build.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_common.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_config.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_dummy.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_init.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc,,
+sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc,,
+sklearn/tests/metadata_routing_common.py,sha256=zNcUP_zepvLMZWqAztIOnt7qy1zehlISMV5bui21xk0,15568
+sklearn/tests/random_seed.py,sha256=QfxcXUSQsP1MlJYJmBaNJX8UrdnIBZmR0EMIqDISMqM,3311
+sklearn/tests/test_base.py,sha256=CoGYp8RW9JlWKtUxlVkryv5dAHxHiVJBc49LSCmu9Kw,28614
+sklearn/tests/test_build.py,sha256=xvLlCpUSgJQzAFcPp4bo8je9a5gcvN4PIIXmTo19G1s,1167
+sklearn/tests/test_calibration.py,sha256=cF8PhN5kY9Hzv1mB3_cjxyBP9Ahfl_bUaGEzM6BfDS4,40422
+sklearn/tests/test_check_build.py,sha256=bHvDtqeNsptzCqNMSqfWa00eg1CqCubL8KqvxBbXm84,267
+sklearn/tests/test_common.py,sha256=yCkbySLNEM3KjDfU2qKf-Z50cYIHbYvOdmr7eeIstR0,19553
+sklearn/tests/test_config.py,sha256=eQNukGqspjvxLNZZReyoqCGBrqhQDwV1fh3mpbOKWzA,6807
+sklearn/tests/test_discriminant_analysis.py,sha256=L6_53Mk0Vuhv9zV_BQ87DdC_pyKEsSczr3tU_sfR1p8,23194
+sklearn/tests/test_docstring_parameters.py,sha256=wY29rvWXTAQe0csBPYGOYfKy8H_124KEBst7wQ_Z4_A,11813
+sklearn/tests/test_docstrings.py,sha256=s6EDWnfj4P9K826oMAQlOHYI5L8s1Dqg3WGAYUbdsoM,6841
+sklearn/tests/test_dummy.py,sha256=pzBeUN_-sqLVcY2jpyDkQA6oh8IIOmlxI9-587NCKjs,21319
+sklearn/tests/test_init.py,sha256=sK3-WZX96Zphoh3SGS6sxs0UxhjFzlXmXVK-mQNl2KU,470
+sklearn/tests/test_isotonic.py,sha256=MHOW5pF9-Gc9TT7Ew1GdKmaw3_gCc3J0iCxXaDu1qHk,22169
+sklearn/tests/test_kernel_approximation.py,sha256=m_Dchs6_fyZ7vGw68LIAGD-Mz0bPjX-EP79dHKuenx8,16878
+sklearn/tests/test_kernel_ridge.py,sha256=qkwUUjuY5O1uMiXi9gAS-wXOCHa62F5T7VJnNdZdGOE,2888
+sklearn/tests/test_metadata_routing.py,sha256=8Nof-JOSlyekPE_cwO_Snoh0_cS8HjaebhuWylDF96E,34758
+sklearn/tests/test_metaestimators.py,sha256=ok5sztXRlL5XQNgfj6tYUe09VNb7mNjqCfPVsdr9J4k,10298
+sklearn/tests/test_metaestimators_metadata_routing.py,sha256=aOr9knXDL7kQDfWqEhTHWzyjFkwfSaEBG7og6drXW90,22521
+sklearn/tests/test_min_dependencies_readme.py,sha256=tCsOzkLFf4ObXqLR6qvg9sCjT6uV2qaGoUwfqdqgxM8,3222
+sklearn/tests/test_multiclass.py,sha256=bwHLep1y4CadAjpnvBpXjARqOrTHiN_IEUb8Tdl13Uo,33480
+sklearn/tests/test_multioutput.py,sha256=hQmcZ6vLb3iHyDEysWY_LyNtzCC9gSueu24JBB0L3ZQ,29175
+sklearn/tests/test_naive_bayes.py,sha256=ZKqQGY0kqh_GHN48RAC6e7lcaQ_anFHTI3p50ni4-_s,35027
+sklearn/tests/test_pipeline.py,sha256=NJXdd6d_VYOMSX_riyEPdaHM41jOrB65MgOEIOgr5Bw,63837
+sklearn/tests/test_public_functions.py,sha256=0mrfIpgnzgZ97K3EQzFVgiHb9Qm_Dqe8YnkMFXzB5cE,16477
+sklearn/tests/test_random_projection.py,sha256=PHgMtjxt5qvy6IM0YY6eWmNLelxdT2H4kF8BQbUBeRc,19583
+sklearn/tree/__init__.py,sha256=Bycl-qSX3rkvyaJEP9zbQ0u1XaWIhf6YG_j-1vQi8qM,534
+sklearn/tree/__pycache__/__init__.cpython-310.pyc,,
+sklearn/tree/__pycache__/_classes.cpython-310.pyc,,
+sklearn/tree/__pycache__/_export.cpython-310.pyc,,
+sklearn/tree/__pycache__/_reingold_tilford.cpython-310.pyc,,
+sklearn/tree/_classes.py,sha256=hUopTFu7OY3pOG55ysoNF43EEj8wnnL1Fqg1At-8JjI,75272
+sklearn/tree/_criterion.cpython-310-x86_64-linux-gnu.so,sha256=tEu3Q9K5adp3zkGpb3i2rTsTby7zdjUdnCT4u32K9I0,364313
+sklearn/tree/_criterion.pxd,sha256=ZDm4GRd_Gv1Drqj3074zi9KRb_P6m2vVx8rsMGt0-AI,4760
+sklearn/tree/_export.py,sha256=l4L2TuKDltxl_swcSV0NRnd5znRVlY9-UuD5KG8WljQ,39293
+sklearn/tree/_reingold_tilford.py,sha256=bFtCLvNsGkYC7FrAA0106nSU2BsL7mV0EYluA9D9cv4,5142
+sklearn/tree/_splitter.cpython-310-x86_64-linux-gnu.so,sha256=Swpo6CQLirkAn0PJzBxEZIlvACM-XpfBy6slEVd4TU4,388873
+sklearn/tree/_splitter.pxd,sha256=TDEF6Mpn5kgwNFdrW5pZcHrsRPmKSaGKmnUXUoIGITA,4784
+sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so,sha256=TwlVjS7PY_h11EquRAZT8qrlFGHOTSsvlqCxPIvPYIE,674257
+sklearn/tree/_tree.pxd,sha256=nkqGcmtdlUpoJ2tc3kzuxrKyLMn13p3HV3VMXvaTFKs,4679
+sklearn/tree/_utils.cpython-310-x86_64-linux-gnu.so,sha256=u9hZX8dliOv-nQeQz9vRGyGRFD735g2dYmyV4nJpurc,294689
+sklearn/tree/_utils.pxd,sha256=-33LOA5M0UZY2vcp8JhkC_c_glaTueCAQ0GnzJwT5WE,3818
+sklearn/tree/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/tree/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/tree/tests/__pycache__/test_export.cpython-310.pyc,,
+sklearn/tree/tests/__pycache__/test_monotonic_tree.cpython-310.pyc,,
+sklearn/tree/tests/__pycache__/test_reingold_tilford.cpython-310.pyc,,
+sklearn/tree/tests/__pycache__/test_tree.cpython-310.pyc,,
+sklearn/tree/tests/test_export.py,sha256=Z2TX6RK1ltOBNlwI53BshtXRTd2yG0rkjD9_x1pBOE0,17471
+sklearn/tree/tests/test_monotonic_tree.py,sha256=MSmAMPPKeqyZ9EyD0G77PoRKPBPoWBaDd8WV3GHP8ZQ,18590
+sklearn/tree/tests/test_reingold_tilford.py,sha256=xRt_Hlm-fGJ2onva4L9eL5mNdcHwWhPEppwNjP4VEJs,1461
+sklearn/tree/tests/test_tree.py,sha256=w5B2AYCmUeB5A-NY0cPkbPEpe9_1yyZ57aEbVkWM5Mk,94675
+sklearn/utils/__init__.py,sha256=-QnEFTn4QK5RaFdxU1ckf-gKrQpvBPm-3j5HEu1mQzs,40703
+sklearn/utils/__pycache__/__init__.cpython-310.pyc,,
+sklearn/utils/__pycache__/_arpack.cpython-310.pyc,,
+sklearn/utils/__pycache__/_array_api.cpython-310.pyc,,
+sklearn/utils/__pycache__/_available_if.cpython-310.pyc,,
+sklearn/utils/__pycache__/_bunch.cpython-310.pyc,,
+sklearn/utils/__pycache__/_encode.cpython-310.pyc,,
+sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc,,
+sklearn/utils/__pycache__/_joblib.cpython-310.pyc,,
+sklearn/utils/__pycache__/_mask.cpython-310.pyc,,
+sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc,,
+sklearn/utils/__pycache__/_mocking.cpython-310.pyc,,
+sklearn/utils/__pycache__/_param_validation.cpython-310.pyc,,
+sklearn/utils/__pycache__/_plotting.cpython-310.pyc,,
+sklearn/utils/__pycache__/_pprint.cpython-310.pyc,,
+sklearn/utils/__pycache__/_response.cpython-310.pyc,,
+sklearn/utils/__pycache__/_set_output.cpython-310.pyc,,
+sklearn/utils/__pycache__/_show_versions.cpython-310.pyc,,
+sklearn/utils/__pycache__/_tags.cpython-310.pyc,,
+sklearn/utils/__pycache__/_testing.cpython-310.pyc,,
+sklearn/utils/__pycache__/class_weight.cpython-310.pyc,,
+sklearn/utils/__pycache__/deprecation.cpython-310.pyc,,
+sklearn/utils/__pycache__/discovery.cpython-310.pyc,,
+sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc,,
+sklearn/utils/__pycache__/extmath.cpython-310.pyc,,
+sklearn/utils/__pycache__/fixes.cpython-310.pyc,,
+sklearn/utils/__pycache__/graph.cpython-310.pyc,,
+sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc,,
+sklearn/utils/__pycache__/metaestimators.cpython-310.pyc,,
+sklearn/utils/__pycache__/multiclass.cpython-310.pyc,,
+sklearn/utils/__pycache__/optimize.cpython-310.pyc,,
+sklearn/utils/__pycache__/parallel.cpython-310.pyc,,
+sklearn/utils/__pycache__/random.cpython-310.pyc,,
+sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc,,
+sklearn/utils/__pycache__/stats.cpython-310.pyc,,
+sklearn/utils/__pycache__/validation.cpython-310.pyc,,
+sklearn/utils/_arpack.py,sha256=TxhOiluYxwPM3AV07RJGT0dosprJM6ga_f_Tno8yrJI,1129
+sklearn/utils/_array_api.py,sha256=kx0nSZa98mbXneuLAPNXzjd8RZ5iHhWV4SSduSBVpQU,18876
+sklearn/utils/_available_if.py,sha256=Kop8zZ3I4STHCPt8LyB8GzRT1utlHSuf329pADbxzbs,2873
+sklearn/utils/_bunch.py,sha256=YICcv-loEvJiJHHIzLu-Sia9VMxkdqfBuaRc_1bd474,2096
+sklearn/utils/_cython_blas.cpython-310-x86_64-linux-gnu.so,sha256=z0uJd46Fm8h33u1Fzqp5laHMinfovS60Gvd33NHqA0s,528377
+sklearn/utils/_cython_blas.pxd,sha256=Kx-TV-Wy3JD8JAROmcAB3623tmk01WnffCiFLResUZI,1565
+sklearn/utils/_encode.py,sha256=QmELuyvpp7KMe1n4PzPJd6VJOtjAPj9guqEUUQ6ikzE,11379
+sklearn/utils/_estimator_html_repr.css,sha256=S097GeV_Ajaspg_rNppjoEjbhPHKJNtIhp7pmKGyCck,11016
+sklearn/utils/_estimator_html_repr.py,sha256=8ShSN2xBBZMeEeV5hrYM8TKWsWnyvD5aGkFtu6oxZe0,18308
+sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so,sha256=yNU4CmyfXpPinSJqNi9Quy9d1cHf3rOrMqBqyEywQ4Q,287705
+sklearn/utils/_fast_dict.pxd,sha256=_EqkuVnVd7LJr72_Kg5l52da2ZlqXWK_EmE7ukAD5W0,476
+sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so,sha256=fiedcLQDP_mkSjhLmgPYNBZSS-d50RFGO0kadORd-G0,34521
+sklearn/utils/_heap.pxd,sha256=FXcpp-JAYxvFGZqLZ6IrJieDZ9_W2hP4sVOLY4fzJAQ,256
+sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so,sha256=wFaDrgLtZJcuVmJnHOPKGKb4bR2WIQ04nj9WQyQ_ZB8,286649
+sklearn/utils/_joblib.py,sha256=pUOY1HrRIB-U64V1J5-i4z7BJcY8TpUkpx17Z8yzdSw,710
+sklearn/utils/_mask.py,sha256=Q6-kG--ba8BFMRSOUPU9H38M9exXCdiz3KrNRcoHhzE,1798
+sklearn/utils/_metadata_requests.py,sha256=Q-QnTaxc2xWOFpFqV32YxShLdxmNMsqZDsqAT6KSxnE,54375
+sklearn/utils/_mocking.py,sha256=IhE9FooFgHNIvamTEZWLSn6aEKrGixQaykOjmPlCBus,13093
+sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so,sha256=E-nQlKjh7txdWkWG2Ectsk46KrHYBa1lTUbRGp09cQU,80553
+sklearn/utils/_openmp_helpers.pxd,sha256=ORtNXjPXDBOmoHW6--54wwrMEIZptCAZ6T8CJPCuJ-0,1069
+sklearn/utils/_param_validation.py,sha256=uirAI5SGQxD_4jG5GiDSscE-kFJLb2nIrh-pOyIn6pk,28445
+sklearn/utils/_plotting.py,sha256=WDBp1t4t5fZsb4USxiXfvsWbCTdXkVmJ1bZNx9aF8eU,3473
+sklearn/utils/_pprint.py,sha256=i1LEHJl34WYC1cjLmNJPNI01or5zfcPz3Y6Apfk0GME,18516
+sklearn/utils/_random.cpython-310-x86_64-linux-gnu.so,sha256=oNNaytrTHbpxF2zAAdglpG39WPOG0BoZZviMzD7BwIA,356297
+sklearn/utils/_random.pxd,sha256=rpNOrC7Y9gAdh8nYI-CX_6zXZF4fAvl66UUem6NR9pA,1241
+sklearn/utils/_response.py,sha256=50rVUNkrDO-X61AAI-B0PJpEYkjnGRbVpGB-DHaWdDQ,11563
+sklearn/utils/_seq_dataset.cpython-310-x86_64-linux-gnu.so,sha256=IZ9sZIlZ-8GU_r8PfC_3aTrv9R42RGPCiTRzN8yoQIo,331577
+sklearn/utils/_seq_dataset.pxd,sha256=QcjypOmJmox4q2AcisLcvuzk_UbQLDZk7wPgawvHGBE,3634
+sklearn/utils/_set_output.py,sha256=SFYPBHS2tnXneGVDkIuvP9-MOzLkHuToIqzsn3Yf820,14015
+sklearn/utils/_show_versions.py,sha256=VrBM2tc0SufP7THoTgBGRcLpdCTE9kBRG4C0gtARyRQ,2491
+sklearn/utils/_sorting.cpython-310-x86_64-linux-gnu.so,sha256=pPP7AKbyT5SIcY0Fu7iCElkphwsqBI4cFc4YS9e2JCs,34561
+sklearn/utils/_sorting.pxd,sha256=i8Bkh1j07pgP6pIvzFxFIZ7uAlR1fQOCbIHh7v04xw8,161
+sklearn/utils/_tags.py,sha256=4mEPJWv5QYBeV7UG5ySrggu5dygPx-zr3e-CmHb7JQY,2071
+sklearn/utils/_testing.py,sha256=_pKr4NsMRb4w8Ty_GDz6VqLFlW1z0JP-md1zLoIcVg4,39674
+sklearn/utils/_typedefs.cpython-310-x86_64-linux-gnu.so,sha256=0AexPwF7uB4S6N4kLlX4z8eYSNZIm1eR1dwpOy2_rMM,270217
+sklearn/utils/_typedefs.pxd,sha256=jAwP7opXLyBFpvi0mQ4OM0Z1x4jPT_Nna3I6wfJ9dsk,1403
+sklearn/utils/_vector_sentinel.cpython-310-x86_64-linux-gnu.so,sha256=ksgzgoxZbxFdkuKVhfmpFtctK47IhRHmmo7Tyy9Y6AU,176385
+sklearn/utils/_vector_sentinel.pxd,sha256=G_im5dT6DaREJgMAGu2MCd-tj5E-elc5mYX4sulSYW0,296
+sklearn/utils/_weight_vector.cpython-310-x86_64-linux-gnu.so,sha256=_lReRmlohKAQJ6SLdlJaZkBEnjLMf1dU7yQ_jEXrg6c,208457
+sklearn/utils/_weight_vector.pxd,sha256=UqpeZU6jrMxwqqKBPTqNXlBZNbNECTJVAvfOUz1Df84,1717
+sklearn/utils/arrayfuncs.cpython-310-x86_64-linux-gnu.so,sha256=ytyNE50WQkSCOLXJhVGX913OVv8ZwCNYQ1Ms6BE3SbI,307089
+sklearn/utils/class_weight.py,sha256=UEwjK6R1ud5PyQFKEdC8wlW7mdkZgAji5CN9MnezDFY,8245
+sklearn/utils/deprecation.py,sha256=PcrmDIaYaRzTOsvdjUlZ4lwYJKXmdA99Pq6vt-lZksc,3295
+sklearn/utils/discovery.py,sha256=mufDzgpLrGyfKP1HPP8fjXmygKAdbYB0o2DP2LwKWH4,9109
+sklearn/utils/estimator_checks.py,sha256=NJ3pxU0sRbr661IJq2Jp3AIIKF07dXx9Or4NWeeSQuM,167648
+sklearn/utils/extmath.py,sha256=qPcNcMhQkrfEPzaDKPBSPnJWDnhF_SLjaXwX6L5XqJ8,44378
+sklearn/utils/fixes.py,sha256=CQWtRmQO9XFjV5Fsdc8rX8fHrq1VNlOj70ybeLZjUY8,14146
+sklearn/utils/graph.py,sha256=r0fWiMj5YmugSi3XPcTig2ICtZZmfGHOxdrpYIzuPGc,5852
+sklearn/utils/metadata_routing.py,sha256=KzD7vN_MTm_PRNBr_f4qgPilWS8zZ1lL_nnpJJVLI4c,958
+sklearn/utils/metaestimators.py,sha256=vlI1JSQfkXMp4ciM12xWid7kJW9qh5ynpwvoVhXsRrw,5869
+sklearn/utils/multiclass.py,sha256=GheesGFRDFAam8j04ClHJBggPNl0dAie2q9J5Q06ibU,18935
+sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so,sha256=Ia5cUKQdmvUWcO-uhq4kD8KS53bCnv3IKPCBJbqTlao,254273
+sklearn/utils/murmurhash.pxd,sha256=ayAPsMu45zckS93jB0fie2lv8b1-FLfOh_uGDw-0Ev4,864
+sklearn/utils/optimize.py,sha256=V7A0ucjC_P7FosmgxRbPKntA4WEKBgvtM6-TCOt9hDs,9116
+sklearn/utils/parallel.py,sha256=UrPPun6NuBN87NkZ9LUKezV5VJOqBMC4xuuRZZUEvME,4256
+sklearn/utils/random.py,sha256=Tews9aAstPUksIxYjVIg-wP1R7_LA50woOeMZjWi060,3723
+sklearn/utils/sparsefuncs.py,sha256=TpyYWfgmBHXmGbNvZ95chDGtkFEjlHiJRfa0jH97LSs,22673
+sklearn/utils/sparsefuncs_fast.cpython-310-x86_64-linux-gnu.so,sha256=LZHO1XE7ASJ-fyGXQg092DQqsfE-I_aJwUG81dAAq-Y,839729
+sklearn/utils/stats.py,sha256=fdiYo9g8IkeUkw7TsVomxWqhQbsCqHLNmEphVRoLaDY,2357
+sklearn/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_bunch.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_estimator_checks.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_multiclass.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_optimize.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_param_validation.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_plotting.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_seq_dataset.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_show_versions.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_stats.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_testing.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_typedefs.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_utils.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_validation.cpython-310.pyc,,
+sklearn/utils/tests/__pycache__/test_weight_vector.cpython-310.pyc,,
+sklearn/utils/tests/test_arpack.py,sha256=EL3_6a1iDpl8Q-0A8iv6YrwycX0zBwWsL_6cEm3i6lo,490
+sklearn/utils/tests/test_array_api.py,sha256=FT31mXXZVAfe0GlqAs_Hd8cNfAx_ByopltXDoWElGl8,10842
+sklearn/utils/tests/test_arrayfuncs.py,sha256=YKV_XxKozQ5T2CKjovxlGp3orIq0GE5PzHzzbFWVpAY,1310
+sklearn/utils/tests/test_bunch.py,sha256=QZXKwtgneO2wcnnrbMVM_QNfVlVec8eLw0JYtL_ExMI,813
+sklearn/utils/tests/test_class_weight.py,sha256=ULBQ2kTnf1vXVq5piE7RykNtcBjQ4wztsVlmowRFxdc,12309
+sklearn/utils/tests/test_cython_blas.py,sha256=qLgkzvgCOhK5TB1OTGr3Y4ouSQ233srPFXPH8z9v4Y8,6459
+sklearn/utils/tests/test_cython_templating.py,sha256=9VKL_qffGetSHP5o63bhvP0P4OswuFvCsCFKnIABREc,834
+sklearn/utils/tests/test_deprecation.py,sha256=ZFC-uU7o1yJo-iWQ6dbJjyxdkv-YHXxvmNqaTzAjSZc,2023
+sklearn/utils/tests/test_encode.py,sha256=QiiG0ArBGF7ENYrvcgPGwjYgUdn3W6Ch_GE9VEF2DWI,9603
+sklearn/utils/tests/test_estimator_checks.py,sha256=Q0vX0NOsIfmRvoszyYjYedwSidtsxUoJMjijHgNy_zI,43757
+sklearn/utils/tests/test_estimator_html_repr.py,sha256=laE-N_y9W71guyDJto2DHnXoF8OIXh9szLCigO4chOs,18057
+sklearn/utils/tests/test_extmath.py,sha256=fxjeDqqD75B1tt-uDUK5vMVCB_ivMwi2gSasxKaIgOM,36993
+sklearn/utils/tests/test_fast_dict.py,sha256=68dA5PzUI7thW9NFY0oo1Me1aqJdx8z02xThJsUTgqo,1356
+sklearn/utils/tests/test_fixes.py,sha256=M5FbBx2l68m8Fg6ekuhovJ2Z8inzuB3eqUUe3whWPjQ,5722
+sklearn/utils/tests/test_graph.py,sha256=0FGOXawAnpEg2wYW5PEkJsLmIlz1zVTIgFP5IJqdXpc,3047
+sklearn/utils/tests/test_metaestimators.py,sha256=x_0agW4puaVCmqPwBrk3FrWIZeK3qgM9eNJWUxYD640,2107
+sklearn/utils/tests/test_mocking.py,sha256=bq8wqsRlN4duGu7zx0mMo99yY8KpKk6gLZC0SmfXLBE,6075
+sklearn/utils/tests/test_multiclass.py,sha256=ScMu6KDToCiGKnoIJG3HPI7vstvGOGe2eISE-nOGp2E,20238
+sklearn/utils/tests/test_murmurhash.py,sha256=u9nLrCI1mP7rFGj2OWUEpPIhC2Z8WWWSfwl-IaaOuXQ,2515
+sklearn/utils/tests/test_optimize.py,sha256=Nc3GQ-y5ppRfPoduTuvGv9DS-MmkeO-71ySJq8DOVuQ,768
+sklearn/utils/tests/test_parallel.py,sha256=mZUbOoo44Jfa54N0Bw2NL9zRLtpH4v39AXy-0_bWdGs,3650
+sklearn/utils/tests/test_param_validation.py,sha256=PjJ8-S7o5wBJ9Qmsz7MMnnk38iprXa-ejtmThZ4YMuk,24201
+sklearn/utils/tests/test_plotting.py,sha256=_qetb2NqEqQs-2sVLAydoW2VfJWnU6AixzlMzmUy0dw,2768
+sklearn/utils/tests/test_pprint.py,sha256=qm6MKEgzkfHBR-RQyI5S56Twkmzp-C4OsAPdzlJtjqE,27339
+sklearn/utils/tests/test_random.py,sha256=ItwX9BV-LvEPMuco4JoXsLPjtDh012t-PCfwFy2FPyM,7157
+sklearn/utils/tests/test_response.py,sha256=eU7uqsOKp9lVhxRpTzFvNpF2Lppnu9cVgZArtyvZx7M,12608
+sklearn/utils/tests/test_seq_dataset.py,sha256=9periHtRAYQ56vkfB7YWfAueXTEzpna51VRoctWNYHE,5890
+sklearn/utils/tests/test_set_output.py,sha256=UYJEjYeHZtY_Nn87Tz2jE3VEYf9zcpkJeABK9kYL2tM,15290
+sklearn/utils/tests/test_shortest_path.py,sha256=XN1SF7TfMo8tQCC-bUV2wK99jR32hEM7xZOl54NbIoQ,1846
+sklearn/utils/tests/test_show_versions.py,sha256=Yk726ydbUgfB9RSy_UYh9jPmriKHcI3JlWmREOTxO-8,1006
+sklearn/utils/tests/test_sparsefuncs.py,sha256=QeBQ0U-KodfZpR5N8-BZ1zzkMEyQxR7hqK6VBIesSHs,34923
+sklearn/utils/tests/test_stats.py,sha256=Phl42HdzIexmoBxQDvBh2erZo53xm9q7JTiGq_l3It8,2760
+sklearn/utils/tests/test_tags.py,sha256=1hqW8joq6t6Hr9AG00x-hp9ba9PtIM7r6az7WJ1_DCo,1396
+sklearn/utils/tests/test_testing.py,sha256=2X_0N8qp9Tg1Tfvt0GZsDEaVC3mvr1QpP6ty1WxbteA,27802
+sklearn/utils/tests/test_typedefs.py,sha256=eCKBm66loSHimj73jqVeYsOccxQQSmfq8rc-a8uYuGM,631
+sklearn/utils/tests/test_utils.py,sha256=gUwT8UREriHUFqDKGf1YD-ybOiuZZiLiLhuxF0bdiog,29562
+sklearn/utils/tests/test_validation.py,sha256=C45x183s9Tds2JZ6JLMGAduzyX3bJ4VWV0yVZw5_SR4,69302
+sklearn/utils/tests/test_weight_vector.py,sha256=eay4_mfrN7vg2ZGoXmZ06cU9CLQYBJKMR_dK6s2Wyic,665
+sklearn/utils/validation.py,sha256=GU-PcxpY7VhCY-Li4yLPvchKy8B8lTdjW-o9Re3cuts,88758
diff --git a/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..9bb86cf30c63df9170e9af3dd246ce6f41270402
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.43.0)
+Root-Is-Purelib: false
+Tag: cp310-cp310-manylinux_2_17_x86_64
+Tag: cp310-cp310-manylinux2014_x86_64
+
diff --git a/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0c62c11b422400f71fd75812134ef7b8e4891966
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scikit_learn-1.4.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+sklearn
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.py b/llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..efd574298f7733465966fdb8bd13f5a2d9844574
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.py
@@ -0,0 +1,100 @@
+from enum import Enum
+from typing import List, Tuple, Union
+
+
+Offsets = Tuple[int, int]
+
+TextInputSequence = str
+"""A :obj:`str` that represents an input sequence """
+
+PreTokenizedInputSequence = Union[List[str], Tuple[str]]
+"""A pre-tokenized input sequence. Can be one of:
+
+ - A :obj:`List` of :obj:`str`
+ - A :obj:`Tuple` of :obj:`str`
+"""
+
+TextEncodeInput = Union[
+ TextInputSequence,
+ Tuple[TextInputSequence, TextInputSequence],
+ List[TextInputSequence],
+]
+"""Represents a textual input for encoding. Can be either:
+
+ - A single sequence: :data:`~tokenizers.TextInputSequence`
+ - A pair of sequences:
+
+ - A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
+ - Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
+"""
+
+PreTokenizedEncodeInput = Union[
+ PreTokenizedInputSequence,
+ Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
+ List[PreTokenizedInputSequence],
+]
+"""Represents a pre-tokenized input for encoding. Can be either:
+
+ - A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
+ - A pair of sequences:
+
+ - A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
+ - Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
+"""
+
+InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
+"""Represents all the possible types of input sequences for encoding. Can be:
+
+ - When ``is_pretokenized=False``: :data:`~TextInputSequence`
+ - When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
+"""
+
+EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
+"""Represents all the possible types of input for encoding. Can be:
+
+ - When ``is_pretokenized=False``: :data:`~TextEncodeInput`
+ - When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
+"""
+
+
+class OffsetReferential(Enum):
+ ORIGINAL = "original"
+ NORMALIZED = "normalized"
+
+
+class OffsetType(Enum):
+ BYTE = "byte"
+ CHAR = "char"
+
+
+class SplitDelimiterBehavior(Enum):
+ REMOVED = "removed"
+ ISOLATED = "isolated"
+ MERGED_WITH_PREVIOUS = "merged_with_previous"
+ MERGED_WITH_NEXT = "merged_with_next"
+ CONTIGUOUS = "contiguous"
+
+
+from .tokenizers import (
+ AddedToken,
+ Encoding,
+ NormalizedString,
+ PreTokenizedString,
+ Regex,
+ Token,
+ Tokenizer,
+ decoders,
+ models,
+ normalizers,
+ pre_tokenizers,
+ processors,
+ trainers,
+ __version__,
+)
+from .implementations import (
+ BertWordPieceTokenizer,
+ ByteLevelBPETokenizer,
+ CharBPETokenizer,
+ SentencePieceBPETokenizer,
+ SentencePieceUnigramTokenizer,
+)
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..5dbc665dcf67fa37034de75619eedb9f346e955e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/__init__.pyi
@@ -0,0 +1,1200 @@
+# Generated content DO NOT EDIT
+class AddedToken:
+ """
+    Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
+    It can have special options that define the way it should behave.
+
+ Args:
+ content (:obj:`str`): The content of the token
+
+ single_word (:obj:`bool`, defaults to :obj:`False`):
+ Defines whether this token should only match single words. If :obj:`True`, this
+ token will never match inside of a word. For example the token ``ing`` would match
+ on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
+        The notion of "`inside of a word`" is defined by the word boundaries pattern in
+        regular expressions (i.e. the token should start and end with word boundaries).
+
+ lstrip (:obj:`bool`, defaults to :obj:`False`):
+ Defines whether this token should strip all potential whitespaces on its left side.
+ If :obj:`True`, this token will greedily match any whitespace on its left. For
+ example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
+ ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
+
+ rstrip (:obj:`bool`, defaults to :obj:`False`):
+ Defines whether this token should strip all potential whitespaces on its right
+ side. If :obj:`True`, this token will greedily match any whitespace on its right.
+ It works just like :obj:`lstrip` but on the right.
+
+ normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
+ Defines whether this token should match against the normalized version of the input
+ text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
+            lowercasing the text, the token could be extracted from the input ``"I saw a lion
+ Yesterday"``.
+ special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
+ Defines whether this token should be skipped when decoding.
+
+ """
+ def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
+ pass
+
+ @property
+ def content(self):
+ """
+ Get the content of this :obj:`AddedToken`
+ """
+ pass
+
+ @property
+ def lstrip(self):
+ """
+ Get the value of the :obj:`lstrip` option
+ """
+ pass
+
+ @property
+ def normalized(self):
+ """
+ Get the value of the :obj:`normalized` option
+ """
+ pass
+
+ @property
+ def rstrip(self):
+ """
+ Get the value of the :obj:`rstrip` option
+ """
+ pass
+
+ @property
+ def single_word(self):
+ """
+ Get the value of the :obj:`single_word` option
+ """
+ pass
+
+ @property
+ def special(self):
+ """
+ Get the value of the :obj:`special` option
+ """
+ pass
+
+class Encoding:
+ """
+ The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
+ """
+ @property
+ def attention_mask(self):
+ """
+ The attention mask
+
+ This indicates to the LM which tokens should be attended to, and which should not.
+        This is especially important when batching sequences, where we need to apply
+ padding.
+
+ Returns:
+ :obj:`List[int]`: The attention mask
+ """
+ pass
+
+ def char_to_token(self, char_pos, sequence_index=0):
+ """
+ Get the token that contains the char at the given position in the input sequence.
+
+ Args:
+ char_pos (:obj:`int`):
+ The position of a char in the input string
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
+ The index of the sequence that contains the target char
+
+ Returns:
+ :obj:`int`: The index of the token that contains this char in the encoded sequence
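+
+        Example (an illustrative sketch; assumes ``tokenizer`` is an existing
+        :class:`~tokenizers.Tokenizer`, and the exact index depends on its vocabulary)::
+
+            encoding = tokenizer.encode("Hello world")
+            # token covering the character at position 6 (the "w")
+            token_index = encoding.char_to_token(6)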
+ """
+ pass
+
+ def char_to_word(self, char_pos, sequence_index=0):
+ """
+ Get the word that contains the char at the given position in the input sequence.
+
+ Args:
+ char_pos (:obj:`int`):
+ The position of a char in the input string
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
+ The index of the sequence that contains the target char
+
+ Returns:
+ :obj:`int`: The index of the word that contains this char in the input sequence
+ """
+ pass
+
+ @property
+ def ids(self):
+ """
+ The generated IDs
+
+ The IDs are the main input to a Language Model. They are the token indices,
+ the numerical representations that a LM understands.
+
+ Returns:
+ :obj:`List[int]`: The list of IDs
+ """
+ pass
+
+ @staticmethod
+ def merge(encodings, growing_offsets=True):
+ """
+ Merge the list of encodings into one final :class:`~tokenizers.Encoding`
+
+ Args:
+ encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
+ The list of encodings that should be merged in one
+
+ growing_offsets (:obj:`bool`, defaults to :obj:`True`):
+ Whether the offsets should accumulate while merging
+
+ Returns:
+ :class:`~tokenizers.Encoding`: The resulting Encoding
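+
+        Example (illustrative; ``encoding_a`` and ``encoding_b`` are hypothetical
+        :class:`~tokenizers.Encoding` instances)::
+
+            merged = Encoding.merge([encoding_a, encoding_b], growing_offsets=True)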
+ """
+ pass
+
+ @property
+ def n_sequences(self):
+ """
+ The number of sequences represented
+
+ Returns:
+ :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
+ """
+ pass
+
+ @property
+ def offsets(self):
+ """
+ The offsets associated to each token
+
+        These offsets let you slice the input string, and thus retrieve the original
+ part that led to producing the corresponding token.
+
+ Returns:
+ A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
+ """
+ pass
+
+ @property
+ def overflowing(self):
+ """
+ A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
+
+ When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
+ the output into as many pieces as required to match the specified maximum length.
+ This field lets you retrieve all the subsequent pieces.
+
+ When you use pairs of sequences, the overflowing pieces will contain enough
+ variations to cover all the possible combinations, while respecting the provided
+ maximum length.
+ """
+ pass
+
+ def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
+ """
+ Pad the :class:`~tokenizers.Encoding` at the given length
+
+ Args:
+ length (:obj:`int`):
+ The desired length
+
+            direction (:obj:`str`, defaults to :obj:`right`):
+ The expected padding direction. Can be either :obj:`right` or :obj:`left`
+
+ pad_id (:obj:`int`, defaults to :obj:`0`):
+ The ID corresponding to the padding token
+
+ pad_type_id (:obj:`int`, defaults to :obj:`0`):
+ The type ID corresponding to the padding token
+
+ pad_token (:obj:`str`, defaults to `[PAD]`):
+ The pad token to use
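+
+        Example (illustrative; the pad values shown are assumptions, use the ones
+        matching your model)::
+
+            encoding.pad(128, direction="right", pad_id=0, pad_token="[PAD]")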
+ """
+ pass
+
+ @property
+ def sequence_ids(self):
+ """
+ The generated sequence indices.
+
+ They represent the index of the input sequence associated to each token.
+ The sequence id can be None if the token is not related to any input sequence,
+        for example in the case of special tokens.
+
+ Returns:
+            A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence indices.
+ """
+ pass
+
+ def set_sequence_id(self, sequence_id):
+ """
+ Set the given sequence index
+
+ Set the given sequence index for the whole range of tokens contained in this
+ :class:`~tokenizers.Encoding`.
+ """
+ pass
+
+ @property
+ def special_tokens_mask(self):
+ """
+ The special token mask
+
+ This indicates which tokens are special tokens, and which are not.
+
+ Returns:
+ :obj:`List[int]`: The special tokens mask
+ """
+ pass
+
+ def token_to_chars(self, token_index):
+ """
+ Get the offsets of the token at the given index.
+
+ The returned offsets are related to the input sequence that contains the
+ token. In order to determine in which input sequence it belongs, you
+ must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
+
+ Args:
+ token_index (:obj:`int`):
+ The index of a token in the encoded sequence.
+
+ Returns:
+ :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
+ """
+ pass
+
+ def token_to_sequence(self, token_index):
+ """
+ Get the index of the sequence represented by the given token.
+
+ In the general use case, this method returns :obj:`0` for a single sequence or
+ the first sequence of a pair, and :obj:`1` for the second sequence of a pair
+
+ Args:
+ token_index (:obj:`int`):
+ The index of a token in the encoded sequence.
+
+ Returns:
+ :obj:`int`: The sequence id of the given token
+ """
+ pass
+
+ def token_to_word(self, token_index):
+ """
+ Get the index of the word that contains the token in one of the input sequences.
+
+ The returned word index is related to the input sequence that contains
+ the token. In order to determine in which input sequence it belongs, you
+ must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
+
+ Args:
+ token_index (:obj:`int`):
+ The index of a token in the encoded sequence.
+
+ Returns:
+ :obj:`int`: The index of the word in the relevant input sequence.
+ """
+ pass
+
+ @property
+ def tokens(self):
+ """
+ The generated tokens
+
+ They are the string representation of the IDs.
+
+ Returns:
+ :obj:`List[str]`: The list of tokens
+ """
+ pass
+
+ def truncate(self, max_length, stride=0, direction="right"):
+ """
+ Truncate the :class:`~tokenizers.Encoding` at the given length
+
+        If this :class:`~tokenizers.Encoding` represents multiple sequences, this
+        information is lost when truncating. The result will be considered as
+        representing a single sequence.
+
+ Args:
+ max_length (:obj:`int`):
+ The desired length
+
+ stride (:obj:`int`, defaults to :obj:`0`):
+ The length of previous content to be included in each overflowing piece
+
+ direction (:obj:`str`, defaults to :obj:`right`):
+ Truncate direction
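+
+        Example (illustrative)::
+
+            # keep the first 512 tokens; overflowing pieces share a 32-token overlap
+            encoding.truncate(512, stride=32, direction="right")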
+ """
+ pass
+
+ @property
+ def type_ids(self):
+ """
+ The generated type IDs
+
+ Generally used for tasks like sequence classification or question answering,
+        these tokens let the LM know which input sequence corresponds to each token.
+
+ Returns:
+ :obj:`List[int]`: The list of type ids
+ """
+ pass
+
+ @property
+ def word_ids(self):
+ """
+ The generated word indices.
+
+ They represent the index of the word associated to each token.
+ When the input is pre-tokenized, they correspond to the ID of the given input label,
+ otherwise they correspond to the words indices as defined by the
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
+
+ For special tokens and such (any token that was generated from something that was
+ not part of the input), the output is :obj:`None`
+
+ Returns:
+            A :obj:`List` of :obj:`Optional[int]`: A list of optional word indices.
+ """
+ pass
+
+ def word_to_chars(self, word_index, sequence_index=0):
+ """
+ Get the offsets of the word at the given index in one of the input sequences.
+
+ Args:
+ word_index (:obj:`int`):
+ The index of a word in one of the input sequences.
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
+ The index of the sequence that contains the target word
+
+ Returns:
+ :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
+ """
+ pass
+
+ def word_to_tokens(self, word_index, sequence_index=0):
+ """
+ Get the encoded tokens corresponding to the word at the given index
+ in one of the input sequences.
+
+ Args:
+ word_index (:obj:`int`):
+ The index of a word in one of the input sequences.
+ sequence_index (:obj:`int`, defaults to :obj:`0`):
+ The index of the sequence that contains the target word
+
+ Returns:
+ :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
+ """
+ pass
+
+ @property
+ def words(self):
+ """
+ The generated word indices.
+
+ .. warning::
+ This is deprecated and will be removed in a future version.
+ Please use :obj:`~tokenizers.Encoding.word_ids` instead.
+
+ They represent the index of the word associated to each token.
+ When the input is pre-tokenized, they correspond to the ID of the given input label,
+ otherwise they correspond to the words indices as defined by the
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
+
+ For special tokens and such (any token that was generated from something that was
+ not part of the input), the output is :obj:`None`
+
+ Returns:
+            A :obj:`List` of :obj:`Optional[int]`: A list of optional word indices.
+ """
+ pass
+
+class NormalizedString:
+ """
+ NormalizedString
+
+ A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
+ While making all the requested modifications, it keeps track of the alignment information
+ between the two versions of the string.
+
+ Args:
+ sequence: str:
+ The string sequence used to initialize this NormalizedString
+ """
+ def append(self, s):
+ """
+ Append the given sequence to the string
+ """
+ pass
+
+ def clear(self):
+ """
+ Clears the string
+ """
+ pass
+
+ def filter(self, func):
+ """
+ Filter each character of the string using the given func
+ """
+ pass
+
+ def for_each(self, func):
+ """
+ Calls the given function for each character of the string
+ """
+ pass
+
+ def lowercase(self):
+ """
+ Lowercase the string
+ """
+ pass
+
+ def lstrip(self):
+ """
+ Strip the left of the string
+ """
+ pass
+
+ def map(self, func):
+ """
+ Calls the given function for each character of the string
+
+ Replaces each character of the string using the returned value. Each
+        returned value **must** be a str of length 1 (i.e. a character).
+ """
+ pass
+
+ def nfc(self):
+ """
+ Runs the NFC normalization
+ """
+ pass
+
+ def nfd(self):
+ """
+ Runs the NFD normalization
+ """
+ pass
+
+ def nfkc(self):
+ """
+ Runs the NFKC normalization
+ """
+ pass
+
+ def nfkd(self):
+ """
+ Runs the NFKD normalization
+ """
+ pass
+
+ @property
+ def normalized(self):
+ """
+ The normalized part of the string
+ """
+ pass
+
+ def prepend(self, s):
+ """
+ Prepend the given sequence to the string
+ """
+ pass
+
+ def replace(self, pattern, content):
+ """
+ Replace the content of the given pattern with the provided content
+
+ Args:
+ pattern: Pattern:
+ A pattern used to match the string. Usually a string or a Regex
+
+ content: str:
+ The content to be used as replacement
+ """
+ pass
+
+ def rstrip(self):
+ """
+ Strip the right of the string
+ """
+ pass
+
+ def slice(self, range):
+ """
+ Slice the string using the given range
+ """
+ pass
+
+ def split(self, pattern, behavior):
+ """
+ Split the NormalizedString using the given pattern and the specified behavior
+
+ Args:
+ pattern: Pattern:
+ A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
+
+ behavior: SplitDelimiterBehavior:
+ The behavior to use when splitting.
+ Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
+ "contiguous"
+
+ Returns:
+ A list of NormalizedString, representing each split
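+
+        Example (a minimal sketch, assuming string values are accepted for
+        ``behavior``)::
+
+            from tokenizers import NormalizedString, Regex
+            parts = NormalizedString("hello-world").split(Regex("-"), "removed")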
+ """
+ pass
+
+ def strip(self):
+ """
+ Strip both ends of the string
+ """
+ pass
+
+ def uppercase(self):
+ """
+ Uppercase the string
+ """
+ pass
+
+class PreTokenizedString:
+ """
+ PreTokenizedString
+
+    Wrapper over a string that provides a way to normalize, pre-tokenize, and tokenize the
+ underlying string, while keeping track of the alignment information (offsets).
+
+ The PreTokenizedString manages what we call `splits`. Each split represents a substring
+ which is a subpart of the original string, with the relevant offsets and tokens.
+
+ When calling one of the methods used to modify the PreTokenizedString (namely one of
+    `split`, `normalize` or `tokenize`), only the `splits` that don't have any associated
+ tokens will get modified.
+
+ Args:
+ sequence: str:
+ The string sequence used to initialize this PreTokenizedString
+ """
+ def __init__(self, sequence):
+ pass
+
+ def get_splits(self, offset_referential="original", offset_type="char"):
+ """
+ Get the splits currently managed by the PreTokenizedString
+
+ Args:
+            offset_referential: :obj:`str`
+                Whether the returned splits should have offsets expressed relative
+                to the original string, or the normalized one. Choices: "original", "normalized".
+
+            offset_type: :obj:`str`
+                Whether the returned splits should have offsets expressed in bytes or chars.
+                When slicing a str, we usually want to use chars, which is the default value.
+                In some cases it might be interesting to get these offsets expressed in bytes,
+                so it is possible to change this here.
+                Choices: "char", "byte"
+
+        Returns:
+ A list of splits
+ """
+ pass
+
+ def normalize(self, func):
+ """
+ Normalize each split of the `PreTokenizedString` using the given `func`
+
+ Args:
+ func: Callable[[NormalizedString], None]:
+ The function used to normalize each underlying split. This function
+                does not need to return anything; just calling the methods on the provided
+                NormalizedString allows its modification.
+ """
+ pass
+
+ def split(self, func):
+ """
+ Split the PreTokenizedString using the given `func`
+
+ Args:
+ func: Callable[[index, NormalizedString], List[NormalizedString]]:
+ The function used to split each underlying split.
+ It is expected to return a list of `NormalizedString`, that represent the new
+ splits. If the given `NormalizedString` does not need any splitting, we can
+ just return it directly.
+ In order for the offsets to be tracked accurately, any returned `NormalizedString`
+ should come from calling either `.split` or `.slice` on the received one.
+ """
+ pass
+
+ def to_encoding(self, type_id=0, word_idx=None):
+ """
+ Return an Encoding generated from this PreTokenizedString
+
+ Args:
+ type_id: int = 0:
+ The type_id to be used on the generated Encoding.
+
+ word_idx: Optional[int] = None:
+ An optional word index to be used for each token of this Encoding. If provided,
+ all the word indices in the generated Encoding will use this value, instead
+ of the one automatically tracked during pre-tokenization.
+
+ Returns:
+ An Encoding
+ """
+ pass
+
+ def tokenize(self, func):
+ """
+ Tokenize each split of the `PreTokenizedString` using the given `func`
+
+ Args:
+ func: Callable[[str], List[Token]]:
+ The function used to tokenize each underlying split. This function must return
+ a list of Token generated from the input str.
+ """
+ pass
+
+class Regex:
+ """
+ Instantiate a new Regex with the given pattern
+ """
+ def __init__(self, pattern):
+ pass
+
+class Token:
+ pass
+
+class Tokenizer:
+ """
+ A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
+ and outputs an :class:`~tokenizers.Encoding`.
+
+ Args:
+ model (:class:`~tokenizers.models.Model`):
+ The core algorithm that this :obj:`Tokenizer` should be using.
+
+ """
+ def __init__(self, model):
+ pass
+
+ def add_special_tokens(self, tokens):
+ """
+ Add the given special tokens to the Tokenizer.
+
+        If these tokens are already part of the vocabulary, it just lets the Tokenizer know about
+ them. If they don't exist, the Tokenizer creates them, giving them a new id.
+
+ These special tokens will never be processed by the model (ie won't be split into
+ multiple tokens), and they can be removed from the output when decoding.
+
+ Args:
+ tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
+ The list of special tokens we want to add to the vocabulary. Each token can either
+ be a string or an instance of :class:`~tokenizers.AddedToken` for more
+ customization.
+
+ Returns:
+ :obj:`int`: The number of tokens that were created in the vocabulary
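+
+        Example (illustrative; the tokens shown are assumptions for a BERT-style
+        vocabulary)::
+
+            from tokenizers import AddedToken
+            num_added = tokenizer.add_special_tokens(["[CLS]", AddedToken("[MASK]", lstrip=True)])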
+ """
+ pass
+
+ def add_tokens(self, tokens):
+ """
+ Add the given tokens to the vocabulary
+
+ The given tokens are added only if they don't already exist in the vocabulary.
+        Each token then gets a newly attributed id.
+
+ Args:
+ tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
+ The list of tokens we want to add to the vocabulary. Each token can be either a
+ string or an instance of :class:`~tokenizers.AddedToken` for more customization.
+
+ Returns:
+ :obj:`int`: The number of tokens that were created in the vocabulary
+ """
+ pass
+
+ def decode(self, ids, skip_special_tokens=True):
+ """
+ Decode the given list of ids back to a string
+
+ This is used to decode anything coming back from a Language Model
+
+ Args:
+ ids (A :obj:`List/Tuple` of :obj:`int`):
+ The list of ids that we want to decode
+
+ skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
+ Whether the special tokens should be removed from the decoded string
+
+ Returns:
+ :obj:`str`: The decoded string
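+
+        Example (illustrative; the ids shown are placeholders, real values depend
+        on the vocabulary)::
+
+            text = tokenizer.decode([1, 42, 87, 2], skip_special_tokens=True)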
+ """
+ pass
+
+ def decode_batch(self, sequences, skip_special_tokens=True):
+ """
+        Decode a batch of ids back to their corresponding strings
+
+ Args:
+ sequences (:obj:`List` of :obj:`List[int]`):
+ The batch of sequences we want to decode
+
+ skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
+ Whether the special tokens should be removed from the decoded strings
+
+ Returns:
+ :obj:`List[str]`: A list of decoded strings
+ """
+ pass
+
+ @property
+ def decoder(self):
+ """
+ The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
+ """
+ pass
+
+ def enable_padding(
+ self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
+ ):
+ """
+ Enable the padding
+
+ Args:
+ direction (:obj:`str`, `optional`, defaults to :obj:`right`):
+ The direction in which to pad. Can be either ``right`` or ``left``
+
+ pad_to_multiple_of (:obj:`int`, `optional`):
+ If specified, the padding length should always snap to the next multiple of the
+                given value. For example, if we were going to pad with a length of 250 but
+ ``pad_to_multiple_of=8`` then we will pad to 256.
+
+ pad_id (:obj:`int`, defaults to 0):
+ The id to be used when padding
+
+ pad_type_id (:obj:`int`, defaults to 0):
+ The type id to be used when padding
+
+ pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
+ The pad token to be used when padding
+
+ length (:obj:`int`, `optional`):
+ If specified, the length at which to pad. If not specified we pad using the size of
+ the longest sequence in a batch.
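+
+        Example (illustrative; the pad values shown are assumptions)::
+
+            tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", pad_to_multiple_of=8)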
+ """
+ pass
+
+ def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
+ """
+ Enable truncation
+
+ Args:
+ max_length (:obj:`int`):
+ The max length at which to truncate
+
+ stride (:obj:`int`, `optional`):
+ The length of the previous first sequence to be included in the overflowing
+ sequence
+
+ strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
+                The strategy to use for truncation. Can be one of ``longest_first``, ``only_first`` or
+ ``only_second``.
+
+ direction (:obj:`str`, defaults to :obj:`right`):
+ Truncate direction
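+
+        Example (illustrative)::
+
+            tokenizer.enable_truncation(max_length=512, stride=32, strategy="longest_first")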
+ """
+ pass
+
+ def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
+ """
+ Encode the given sequence and pair. This method can process raw text sequences
+ as well as already pre-tokenized sequences.
+
+ Example:
+ Here are some examples of the inputs that are accepted::
+
+ encode("A single sequence")`
+ encode("A sequence", "And its pair")`
+ encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
+ encode(
+ [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
+ is_pretokenized=True
+ )
+
+ Args:
+ sequence (:obj:`~tokenizers.InputSequence`):
+ The main input sequence we want to encode. This sequence can be either raw
+ text or pre-tokenized, according to the ``is_pretokenized`` argument:
+
+ - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
+ - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
+
+ pair (:obj:`~tokenizers.InputSequence`, `optional`):
+            An optional input sequence. The expected format is the same as for ``sequence``.
+
+ is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
+ Whether the input is already pre-tokenized
+
+ add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
+ Whether to add the special tokens
+
+ Returns:
+ :class:`~tokenizers.Encoding`: The encoded result
+
+ """
+ pass
+
+ def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
+ """
+        Encode the given batch of inputs. This method accepts raw text sequences
+        as well as already pre-tokenized sequences.
+
+ Example:
+ Here are some examples of the inputs that are accepted::
+
+ encode_batch([
+ "A single sequence",
+ ("A tuple with a sequence", "And its pair"),
+ [ "A", "pre", "tokenized", "sequence" ],
+ ([ "A", "pre", "tokenized", "sequence" ], "And its pair")
+ ])
+
+ Args:
+ input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
+ A list of single sequences or pair sequences to encode. Each sequence
+ can be either raw text or pre-tokenized, according to the ``is_pretokenized``
+ argument:
+
+ - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
+ - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
+
+ is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
+ Whether the input is already pre-tokenized
+
+ add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
+ Whether to add the special tokens
+
+ Returns:
+ A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
+
+ """
+ pass
+
+ @property
+ def encode_special_tokens(self):
+ """
+        Modifies the tokenizer to control whether or not the special tokens
+        are used during encoding.
+
+ Args:
+ value (:obj:`bool`):
+ Whether to use the special tokens or not
+
+ """
+ pass
+
+ @staticmethod
+ def from_buffer(buffer):
+ """
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
+
+ Args:
+ buffer (:obj:`bytes`):
+ A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
+
+ Returns:
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
+ """
+ pass
+
+ @staticmethod
+ def from_file(path):
+ """
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
+
+ Args:
+ path (:obj:`str`):
+ A path to a local JSON file representing a previously serialized
+ :class:`~tokenizers.Tokenizer`
+
+ Returns:
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
+ """
+ pass
+
+ @staticmethod
+ def from_pretrained(identifier, revision="main", auth_token=None):
+ """
+ Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
+ Hugging Face Hub.
+
+ Args:
+ identifier (:obj:`str`):
+ The identifier of a Model on the Hugging Face Hub, that contains
+ a tokenizer.json file
+ revision (:obj:`str`, defaults to `main`):
+ A branch or commit id
+ auth_token (:obj:`str`, `optional`, defaults to `None`):
+ An optional auth token used to access private repositories on the
+ Hugging Face Hub
+
+ Returns:
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
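+
+        Example (illustrative; assumes network access to the Hugging Face Hub)::
+
+            tokenizer = Tokenizer.from_pretrained("bert-base-uncased")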
+ """
+ pass
+
+ @staticmethod
+ def from_str(json):
+ """
+ Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
+
+ Args:
+ json (:obj:`str`):
+ A valid JSON string representing a previously serialized
+ :class:`~tokenizers.Tokenizer`
+
+ Returns:
+ :class:`~tokenizers.Tokenizer`: The new tokenizer
+ """
+ pass
+
+ def get_added_tokens_decoder(self):
+ """
+ Get the underlying vocabulary
+
+ Returns:
+ :obj:`Dict[int, AddedToken]`: The vocabulary
+ """
+ pass
+
+ def get_vocab(self, with_added_tokens=True):
+ """
+ Get the underlying vocabulary
+
+ Args:
+ with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
+ Whether to include the added tokens
+
+ Returns:
+ :obj:`Dict[str, int]`: The vocabulary
+ """
+ pass
+
+ def get_vocab_size(self, with_added_tokens=True):
+ """
+ Get the size of the underlying vocabulary
+
+ Args:
+ with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
+ Whether to include the added tokens
+
+ Returns:
+ :obj:`int`: The size of the vocabulary
+ """
+ pass
+
+ def id_to_token(self, id):
+ """
+ Convert the given id to its corresponding token if it exists
+
+ Args:
+ id (:obj:`int`):
+ The id to convert
+
+ Returns:
+ :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
+ """
+ pass
+
+ @property
+ def model(self):
+ """
+ The :class:`~tokenizers.models.Model` in use by the Tokenizer
+ """
+ pass
+
+ def no_padding(self):
+ """
+ Disable padding
+ """
+ pass
+
+ def no_truncation(self):
+ """
+ Disable truncation
+ """
+ pass
+
+ @property
+ def normalizer(self):
+ """
+ The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
+ """
+ pass
+
+ def num_special_tokens_to_add(self, is_pair):
+ """
+ Return the number of special tokens that would be added for single/pair sentences.
+        :param is_pair: Boolean indicating whether the input is a single sentence or a pair
+        :return: The number of special tokens that would be added
+ """
+ pass
+
+ @property
+ def padding(self):
+ """
+ Get the current padding parameters
+
+ `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
+
+ Returns:
+ (:obj:`dict`, `optional`):
+ A dict with the current padding parameters if padding is enabled
+ """
+ pass
+
+ def post_process(self, encoding, pair=None, add_special_tokens=True):
+ """
+ Apply all the post-processing steps to the given encodings.
+
+ The various steps are:
+
+ 1. Truncate according to the set truncation params (provided with
+ :meth:`~tokenizers.Tokenizer.enable_truncation`)
+ 2. Apply the :class:`~tokenizers.processors.PostProcessor`
+ 3. Pad according to the set padding params (provided with
+ :meth:`~tokenizers.Tokenizer.enable_padding`)
+
+ Args:
+ encoding (:class:`~tokenizers.Encoding`):
+ The :class:`~tokenizers.Encoding` corresponding to the main sequence.
+
+ pair (:class:`~tokenizers.Encoding`, `optional`):
+ An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
+
+ add_special_tokens (:obj:`bool`):
+ Whether to add the special tokens
+
+ Returns:
+ :class:`~tokenizers.Encoding`: The final post-processed encoding
+ """
+ pass
+
+ @property
+ def post_processor(self):
+ """
+ The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
+ """
+ pass
+
+ @property
+ def pre_tokenizer(self):
+ """
+ The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
+ """
+ pass
+
+ def save(self, path, pretty=True):
+ """
+ Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
+
+ Args:
+ path (:obj:`str`):
+ A path to a file in which to save the serialized tokenizer.
+
+ pretty (:obj:`bool`, defaults to :obj:`True`):
+ Whether the JSON file should be pretty formatted.
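+
+        Example (an illustrative round trip through the file at an assumed path)::
+
+            tokenizer.save("tokenizer.json")
+            reloaded = Tokenizer.from_file("tokenizer.json")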
+ """
+ pass
+
+ def to_str(self, pretty=False):
+ """
+ Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
+
+ Args:
+ pretty (:obj:`bool`, defaults to :obj:`False`):
+ Whether the JSON string should be pretty formatted.
+
+ Returns:
+ :obj:`str`: A string representing the serialized Tokenizer
+ """
+ pass
+
+ def token_to_id(self, token):
+ """
+ Convert the given token to its corresponding id if it exists
+
+ Args:
+ token (:obj:`str`):
+ The token to convert
+
+ Returns:
+ :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
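+
+        Example (illustrative; returns :obj:`None` unless the vocabulary actually
+        contains this token)::
+
+            maybe_id = tokenizer.token_to_id("[CLS]")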
+ """
+ pass
+
+ def train(self, files, trainer=None):
+ """
+ Train the Tokenizer using the given files.
+
+ Reads the files line by line, while keeping all the whitespace, even new lines.
+        If you want to train from data stored in memory, you can check
+ :meth:`~tokenizers.Tokenizer.train_from_iterator`
+
+ Args:
+ files (:obj:`List[str]`):
+                A list of paths to the files that we should use for training
+
+ trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
+ An optional trainer that should be used to train our Model
+ """
+ pass
+
+ def train_from_iterator(self, iterator, trainer=None, length=None):
+ """
+ Train the Tokenizer using the provided iterator.
+
+ You can provide anything that is a Python Iterator
+
+ * A list of sequences :obj:`List[str]`
+ * A generator that yields :obj:`str` or :obj:`List[str]`
+ * A Numpy array of strings
+ * ...
+
+ Args:
+ iterator (:obj:`Iterator`):
+ Any iterator over strings or list of strings
+
+ trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
+ An optional trainer that should be used to train our Model
+
+ length (:obj:`int`, `optional`):
+ The total number of sequences in the iterator. This is used to
+ provide meaningful progress tracking
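+
+        Example (a minimal sketch; ``corpus`` is a hypothetical list of strings)::
+
+            from tokenizers.trainers import BpeTrainer
+            trainer = BpeTrainer(vocab_size=30000)
+            tokenizer.train_from_iterator(corpus, trainer=trainer, length=len(corpus))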
+ """
+ pass
+
+ @property
+ def truncation(self):
+ """
+ Get the currently set truncation parameters
+
+ `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
+
+ Returns:
+ (:obj:`dict`, `optional`):
+ A dict with the current truncation parameters if truncation is enabled
+ """
+ pass
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f3b1142324bee4b411d8f3a4da540dd632e54d1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.py b/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a717379c5fcbb0b91b1661c03ef77234e96e64e5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.py
@@ -0,0 +1,14 @@
+from .. import decoders
+
+
+Decoder = decoders.Decoder
+ByteLevel = decoders.ByteLevel
+Replace = decoders.Replace
+WordPiece = decoders.WordPiece
+ByteFallback = decoders.ByteFallback
+Fuse = decoders.Fuse
+Strip = decoders.Strip
+Metaspace = decoders.Metaspace
+BPEDecoder = decoders.BPEDecoder
+CTC = decoders.CTC
+Sequence = decoders.Sequence
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..b967fbd141802c496e3d94985732c3649492ac49
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__init__.pyi
@@ -0,0 +1,271 @@
+# Generated content DO NOT EDIT
+class Decoder:
+ """
+ Base class for all decoders
+
+ This class is not supposed to be instantiated directly. Instead, any implementation of
+ a Decoder will return an instance of this class when instantiated.
+ """
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class BPEDecoder(Decoder):
+ """
+ BPEDecoder Decoder
+
+ Args:
+        suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
+            The suffix that was used to characterize an end-of-word. This suffix will
+            be replaced by whitespaces during the decoding
+    """
+    def __init__(self, suffix="</w>"):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class ByteFallback(Decoder):
+ """
+ ByteFallback Decoder
+ ByteFallback is a simple trick which converts tokens looking like `<0x61>`
+ to pure bytes, and attempts to make them into a string. If the tokens
+    cannot be decoded, you will get � instead for each inconvertible byte token
+
+ """
+ def __init__(self):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class ByteLevel(Decoder):
+ """
+ ByteLevel Decoder
+
+ This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
+ """
+ def __init__(self):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class CTC(Decoder):
+ """
+ CTC Decoder
+
+ Args:
+        pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
+            The pad token used by CTC to delimit a new token.
+        word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
+            The word delimiter token. It will be replaced by a <space>.
+        cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
+            Whether to cleanup some tokenization artifacts.
+            Mainly spaces before punctuation, and some abbreviated English forms.
+    """
+    def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class Fuse(Decoder):
+ """
+ Fuse Decoder
+ Fuse simply fuses every token into a single string.
+    This is the last step of decoding; this decoder exists only in case
+    there is a need to add other decoders *after* the fusion
+ """
+ def __init__(self):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class Metaspace(Decoder):
+ """
+ Metaspace Decoder
+
+ Args:
+ replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
+ The replacement character. Must be exactly one character. By default we
+            use the `▁` (U+2581) meta symbol (same as in SentencePiece).
+
+ prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
+ Whether to add a space to the first word if there isn't already one. This
+ lets us treat `hello` exactly like `say hello`.
+ Choices: "always", "never", "first". First means the space is only added on the first
+            token (relevant when special tokens are used or other pre-tokenizers are used).
+ """
+ def __init__(self, replacement="▁", prepend_scheme="always", split=True):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class Replace(Decoder):
+ """
+ Replace Decoder
+
+ This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
+ :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
+ """
+ def __init__(self, pattern, content):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class Sequence(Decoder):
+ """
+ Sequence Decoder
+
+ Args:
+        decoders (:obj:`List[Decoder]`):
+ The decoders that need to be chained
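+
+    Example (illustrative)::
+
+        from tokenizers import decoders
+        decoder = decoders.Sequence([decoders.ByteFallback(), decoders.Fuse()])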
+ """
+ def __init__(self, decoders):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class Strip(Decoder):
+ """
+    Strip Decoder
+ Strips n left characters of each token, or n right characters of each token
+ """
+ def __init__(self, content, left=0, right=0):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
+
+class WordPiece(Decoder):
+ """
+ WordPiece Decoder
+
+ Args:
+ prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
+ The prefix to use for subwords that are not a beginning-of-word
+
+ cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
+ Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
+            and some abbreviated English forms.
+ """
+ def __init__(self, prefix="##", cleanup=True):
+ pass
+
+ def decode(self, tokens):
+ """
+ Decode the given list of tokens to a final string
+
+ Args:
+ tokens (:obj:`List[str]`):
+ The list of tokens to decode
+
+ Returns:
+ :obj:`str`: The decoded string
+ """
+ pass
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..473444aef213e8ee525f4bb8667d476995f18610
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/decoders/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__init__.py b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e775892d04a91d645653ea9015954b7985d3147
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__init__.py
@@ -0,0 +1,6 @@
+from .base_tokenizer import BaseTokenizer
+from .bert_wordpiece import BertWordPieceTokenizer
+from .byte_level_bpe import ByteLevelBPETokenizer
+from .char_level_bpe import CharBPETokenizer
+from .sentencepiece_bpe import SentencePieceBPETokenizer
+from .sentencepiece_unigram import SentencePieceUnigramTokenizer
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8ac20a411a68c7205c90a8433e0d446289ac2c6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..814e21c66643e2777aa5d43719237ab525e43265
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..407170e3f9b07440ac53037c8811282dc34e6189
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73a5298067be47269cee004eb7e4548b76d01264
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f7d234de2d42b5d30a93b65695f02e8475ef968
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1866be2b0944839d969812138c9172308eba0993
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac05fdf61042ed8402ee70cc67597d84cb1fb22b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/base_tokenizer.py b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/base_tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4528dcebab9c2a72523316e1a85ddf04c64d3be3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/base_tokenizer.py
@@ -0,0 +1,418 @@
+from typing import Dict, List, Optional, Tuple, Union
+
+from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
+from tokenizers.decoders import Decoder
+from tokenizers.models import Model
+from tokenizers.normalizers import Normalizer
+from tokenizers.pre_tokenizers import PreTokenizer
+from tokenizers.processors import PostProcessor
+
+
+Offsets = Tuple[int, int]
+
+
+class BaseTokenizer:
+ def __init__(self, tokenizer: Tokenizer, parameters=None):
+ self._tokenizer = tokenizer
+ self._parameters = parameters if parameters is not None else {}
+
+ def __repr__(self):
+ return "Tokenizer(vocabulary_size={}, {})".format(
+ self._tokenizer.get_vocab_size(),
+ ", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
+ )
+
+ def num_special_tokens_to_add(self, is_pair: bool) -> int:
+ """
+ Return the number of special tokens that would be added for single/pair sentences.
+        :param is_pair: Boolean indicating whether the input is a pair of sequences (True) or a single sequence (False)
+        :return: The number of special tokens that would be added
+ """
+ return self._tokenizer.num_special_tokens_to_add(is_pair)
+
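+    # A sketch of typical values, assuming a BERT-style post-processor is
+    # attached: a single sequence gains [CLS] and [SEP], a pair gains [CLS]
+    # and two [SEP] tokens:
+    #
+    #     tokenizer.num_special_tokens_to_add(is_pair=False)  # -> 2
+    #     tokenizer.num_special_tokens_to_add(is_pair=True)   # -> 3
+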
+ def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
+ """Returns the vocabulary
+
+ Args:
+ with_added_tokens: boolean:
+ Whether to include the added tokens in the vocabulary
+
+ Returns:
+ The vocabulary
+ """
+ return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
+
+ def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
+ """Returns the added reverse vocabulary
+
+ Returns:
+ The added vocabulary mapping ints to AddedTokens
+ """
+ return self._tokenizer.get_added_tokens_decoder()
+
+ def get_vocab_size(self, with_added_tokens: bool = True) -> int:
+ """Return the size of vocabulary, with or without added tokens.
+
+ Args:
+ with_added_tokens: (`optional`) bool:
+ Whether to count in added special tokens or not
+
+ Returns:
+ Size of vocabulary
+ """
+ return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
+
+ def enable_padding(
+ self,
+ direction: Optional[str] = "right",
+ pad_to_multiple_of: Optional[int] = None,
+ pad_id: Optional[int] = 0,
+ pad_type_id: Optional[int] = 0,
+ pad_token: Optional[str] = "[PAD]",
+ length: Optional[int] = None,
+ ):
+ """Change the padding strategy
+
+ Args:
+ direction: (`optional`) str:
+ Can be one of: `right` or `left`
+
+ pad_to_multiple_of: (`optional`) unsigned int:
+                If specified, the padding length always snaps to the next multiple of the
+                given value. For example, if we were going to pad to a length of 250 but
+                `pad_to_multiple_of=8` is set, we pad to 256 instead.
+
+            pad_id: (`optional`) unsigned int:
+                The id to be used when padding
+
+            pad_type_id: (`optional`) unsigned int:
+                The type id to be used when padding
+
+ pad_token: (`optional`) str:
+ The pad token to be used when padding
+
+ length: (`optional`) unsigned int:
+                If specified, the length at which to pad. If not specified,
+                we pad using the size of the longest sequence in the batch
+ """
+ return self._tokenizer.enable_padding(
+ direction=direction,
+ pad_to_multiple_of=pad_to_multiple_of,
+ pad_id=pad_id,
+ pad_type_id=pad_type_id,
+ pad_token=pad_token,
+ length=length,
+ )
+
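+    # A minimal sketch (the pad id/token values are illustrative assumptions):
+    #
+    #     tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", pad_to_multiple_of=8)
+    #     # Each encoded batch is now padded to a shared length that is a
+    #     # multiple of 8, e.g. a longest sequence of 250 tokens pads to 256.
+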
+ def no_padding(self):
+ """Disable padding"""
+ return self._tokenizer.no_padding()
+
+ @property
+ def padding(self) -> Optional[dict]:
+ """Get the current padding parameters
+
+ Returns:
+ None if padding is disabled, a dict with the currently set parameters
+ if the padding is enabled.
+ """
+ return self._tokenizer.padding
+
+ def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
+ """Change the truncation options
+
+ Args:
+ max_length: unsigned int:
+ The maximum length at which to truncate
+
+ stride: (`optional`) unsigned int:
+                The number of tokens from the end of the previous chunk to repeat
+                at the start of each overflowing sequence (the overlap)
+
+ strategy: (`optional`) str:
+ Can be one of `longest_first`, `only_first` or `only_second`
+ """
+ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
+
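+    # A minimal sketch (the values are illustrative): truncate to 512 tokens,
+    # with each overflowing chunk repeating 128 tokens of the previous one:
+    #
+    #     tokenizer.enable_truncation(max_length=512, stride=128, strategy="longest_first")
+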
+ def no_truncation(self):
+ """Disable truncation"""
+ return self._tokenizer.no_truncation()
+
+ @property
+ def truncation(self) -> Optional[dict]:
+ """Get the current truncation parameters
+
+ Returns:
+ None if truncation is disabled, a dict with the current truncation parameters if
+ truncation is enabled
+ """
+ return self._tokenizer.truncation
+
+ def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
+ """Add the given tokens to the vocabulary
+
+ Args:
+ tokens: List[Union[str, AddedToken]]:
+ A list of tokens to add to the vocabulary. Each token can either be
+ a string, or an instance of AddedToken
+
+ Returns:
+ The number of tokens that were added to the vocabulary
+ """
+ return self._tokenizer.add_tokens(tokens)
+
+ def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
+ """Add the given special tokens to the vocabulary, and treat them as special tokens.
+
+ The special tokens will never be processed by the model, and will be
+ removed while decoding.
+
+ Args:
+            special_tokens: List[Union[str, AddedToken]]:
+ A list of special tokens to add to the vocabulary. Each token can either be
+ a string, or an instance of AddedToken
+
+ Returns:
+ The number of tokens that were added to the vocabulary
+ """
+ return self._tokenizer.add_special_tokens(special_tokens)
+
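+    # A minimal sketch (the token strings are illustrative assumptions):
+    #
+    #     from tokenizers import AddedToken
+    #
+    #     added = tokenizer.add_special_tokens(["[PAD]", AddedToken("[MASK]", lstrip=True)])
+    #     # `added` is the number of tokens newly added; tokens already present
+    #     # in the vocabulary are not counted again.
+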
+ def normalize(self, sequence: str) -> str:
+ """Normalize the given sequence
+
+ Args:
+ sequence: str:
+ The sequence to normalize
+
+ Returns:
+ The normalized string
+ """
+ return self._tokenizer.normalize(sequence)
+
+ def encode(
+ self,
+ sequence: InputSequence,
+ pair: Optional[InputSequence] = None,
+ is_pretokenized: bool = False,
+ add_special_tokens: bool = True,
+ ) -> Encoding:
+ """Encode the given sequence and pair. This method can process raw text sequences as well
+ as already pre-tokenized sequences.
+
+ Args:
+ sequence: InputSequence:
+ The sequence we want to encode. This sequence can be either raw text or
+ pre-tokenized, according to the `is_pretokenized` argument:
+
+ - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
+ - If `is_pretokenized=True`: `InputSequence` is expected to be
+ `Union[List[str], Tuple[str]]`
+
+ is_pretokenized: bool:
+ Whether the input is already pre-tokenized.
+
+ add_special_tokens: bool:
+ Whether to add the special tokens while encoding.
+
+ Returns:
+ An Encoding
+ """
+ if sequence is None:
+ raise ValueError("encode: `sequence` can't be `None`")
+
+ return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
+
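+    # A minimal sketch of both input modes (the text is illustrative):
+    #
+    #     # Raw text input
+    #     encoding = tokenizer.encode("Hello, world!")
+    #     # Pre-tokenized input: a list of words instead of a raw string
+    #     encoding = tokenizer.encode(["Hello", ",", "world", "!"], is_pretokenized=True)
+    #     encoding.ids, encoding.tokens  # ids and token strings of the result
+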
+ def encode_batch(
+ self,
+ inputs: List[EncodeInput],
+ is_pretokenized: bool = False,
+ add_special_tokens: bool = True,
+ ) -> List[Encoding]:
+ """Encode the given inputs. This method accept both raw text sequences as well as already
+ pre-tokenized sequences.
+
+ Args:
+ inputs: List[EncodeInput]:
+ A list of single sequences or pair sequences to encode. Each `EncodeInput` is
+ expected to be of the following form:
+ `Union[InputSequence, Tuple[InputSequence, InputSequence]]`
+
+ Each `InputSequence` can either be raw text or pre-tokenized,
+ according to the `is_pretokenized` argument:
+
+ - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
+ - If `is_pretokenized=True`: `InputSequence` is expected to be
+ `Union[List[str], Tuple[str]]`
+
+ is_pretokenized: bool:
+ Whether the input is already pre-tokenized.
+
+ add_special_tokens: bool:
+ Whether to add the special tokens while encoding.
+
+ Returns:
+ A list of Encoding
+ """
+
+ if inputs is None:
+ raise ValueError("encode_batch: `inputs` can't be `None`")
+
+ return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
+
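+    # A minimal sketch (the text is illustrative): single sequences and pairs
+    # can be mixed in one batch, matching the `EncodeInput` form above:
+    #
+    #     encodings = tokenizer.encode_batch(
+    #         ["A single sequence", ("A pair: first part", "second part")]
+    #     )
+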
+ def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
+ """Decode the given list of ids to a string sequence
+
+ Args:
+ ids: List[unsigned int]:
+ A list of ids to be decoded
+
+ skip_special_tokens: (`optional`) boolean:
+ Whether to remove all the special tokens from the output string
+
+ Returns:
+ The decoded string
+ """
+ if ids is None:
+ raise ValueError("None input is not valid. Should be a list of integers.")
+
+ return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
+
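+    # A minimal round-trip sketch (the exact output depends on the attached
+    # decoder and normalizer):
+    #
+    #     encoding = tokenizer.encode("Hello, world!")
+    #     tokenizer.decode(encoding.ids)  # -> e.g. "hello, world!"
+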
+    def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
+        """Decode the given list of id sequences to a list of strings
+
+ Args:
+ sequences: List[List[unsigned int]]:
+                A list of sequences of ids to be decoded
+
+ skip_special_tokens: (`optional`) boolean:
+ Whether to remove all the special tokens from the output strings
+
+ Returns:
+ A list of decoded strings
+ """
+ if sequences is None:
+ raise ValueError("None input is not valid. Should be list of list of integers.")
+
+ return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
+
+ def token_to_id(self, token: str) -> Optional[int]:
+ """Convert the given token to its corresponding id
+
+ Args:
+ token: str:
+ The token to convert
+
+ Returns:
+ The corresponding id if it exists, None otherwise
+ """
+ return self._tokenizer.token_to_id(token)
+
+ def id_to_token(self, id: int) -> Optional[str]:
+ """Convert the given token id to its corresponding string
+
+ Args:
+            id: int:
+ The token id to convert
+
+ Returns:
+ The corresponding string if it exists, None otherwise
+ """
+ return self._tokenizer.id_to_token(id)
+
+ def save_model(self, directory: str, prefix: Optional[str] = None):
+ """Save the current model to the given directory
+
+ Args:
+ directory: str:
+ A path to the destination directory
+
+            prefix: (`optional`) str:
+ An optional prefix, used to prefix each file name
+ """
+ return self._tokenizer.model.save(directory, prefix=prefix)
+
+ def save(self, path: str, pretty: bool = True):
+ """Save the current Tokenizer at the given path
+
+ Args:
+            path: str:
+                A path to the destination Tokenizer file
+
+            pretty: bool:
+                Whether the saved JSON file should be prettified
+        """
+ return self._tokenizer.save(path, pretty)
+
+ def to_str(self, pretty: bool = False):
+ """Get a serialized JSON version of the Tokenizer as a str
+
+ Args:
+ pretty: bool:
+ Whether the JSON string should be prettified
+
+ Returns:
+ str
+ """
+ return self._tokenizer.to_str(pretty)
+
+ def post_process(
+ self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
+ ) -> Encoding:
+ """Apply all the post-processing steps to the given encodings.
+
+ The various steps are:
+ 1. Truncate according to global params (provided to `enable_truncation`)
+ 2. Apply the PostProcessor
+        3. Pad according to global params (provided to `enable_padding`)
+
+ Args:
+ encoding: Encoding:
+ The main Encoding to post process
+
+ pair: Optional[Encoding]:
+ An optional pair Encoding
+
+ add_special_tokens: bool:
+ Whether to add special tokens
+
+ Returns:
+ The resulting Encoding
+ """
+ return self._tokenizer.post_process(encoding, pair, add_special_tokens)
+
+ @property
+ def model(self) -> Model:
+ return self._tokenizer.model
+
+ @model.setter
+ def model(self, model: Model):
+ self._tokenizer.model = model
+
+ @property
+ def normalizer(self) -> Normalizer:
+ return self._tokenizer.normalizer
+
+ @normalizer.setter
+ def normalizer(self, normalizer: Normalizer):
+ self._tokenizer.normalizer = normalizer
+
+ @property
+ def pre_tokenizer(self) -> PreTokenizer:
+ return self._tokenizer.pre_tokenizer
+
+ @pre_tokenizer.setter
+ def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
+ self._tokenizer.pre_tokenizer = pre_tokenizer
+
+ @property
+ def post_processor(self) -> PostProcessor:
+ return self._tokenizer.post_processor
+
+ @post_processor.setter
+ def post_processor(self, post_processor: PostProcessor):
+ self._tokenizer.post_processor = post_processor
+
+ @property
+ def decoder(self) -> Decoder:
+ return self._tokenizer.decoder
+
+ @decoder.setter
+ def decoder(self, decoder: Decoder):
+ self._tokenizer.decoder = decoder
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/bert_wordpiece.py b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/bert_wordpiece.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f34e3ca8a4f8b3ed454e09d828918881232ef90
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/bert_wordpiece.py
@@ -0,0 +1,151 @@
+from typing import Dict, Iterator, List, Optional, Union
+
+from tokenizers import AddedToken, Tokenizer, decoders, trainers
+from tokenizers.models import WordPiece
+from tokenizers.normalizers import BertNormalizer
+from tokenizers.pre_tokenizers import BertPreTokenizer
+from tokenizers.processors import BertProcessing
+
+from .base_tokenizer import BaseTokenizer
+
+
+class BertWordPieceTokenizer(BaseTokenizer):
+ """Bert WordPiece Tokenizer"""
+
+ def __init__(
+ self,
+ vocab: Optional[Union[str, Dict[str, int]]] = None,
+ unk_token: Union[str, AddedToken] = "[UNK]",
+ sep_token: Union[str, AddedToken] = "[SEP]",
+ cls_token: Union[str, AddedToken] = "[CLS]",
+ pad_token: Union[str, AddedToken] = "[PAD]",
+ mask_token: Union[str, AddedToken] = "[MASK]",
+ clean_text: bool = True,
+ handle_chinese_chars: bool = True,
+ strip_accents: Optional[bool] = None,
+ lowercase: bool = True,
+ wordpieces_prefix: str = "##",
+ ):
+ if vocab is not None:
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
+ else:
+ tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
+
+ # Let the tokenizer know about special tokens if they are part of the vocab
+ if tokenizer.token_to_id(str(unk_token)) is not None:
+ tokenizer.add_special_tokens([str(unk_token)])
+ if tokenizer.token_to_id(str(sep_token)) is not None:
+ tokenizer.add_special_tokens([str(sep_token)])
+ if tokenizer.token_to_id(str(cls_token)) is not None:
+ tokenizer.add_special_tokens([str(cls_token)])
+ if tokenizer.token_to_id(str(pad_token)) is not None:
+ tokenizer.add_special_tokens([str(pad_token)])
+ if tokenizer.token_to_id(str(mask_token)) is not None:
+ tokenizer.add_special_tokens([str(mask_token)])
+
+ tokenizer.normalizer = BertNormalizer(
+ clean_text=clean_text,
+ handle_chinese_chars=handle_chinese_chars,
+ strip_accents=strip_accents,
+ lowercase=lowercase,
+ )
+ tokenizer.pre_tokenizer = BertPreTokenizer()
+
+ if vocab is not None:
+ sep_token_id = tokenizer.token_to_id(str(sep_token))
+ if sep_token_id is None:
+ raise TypeError("sep_token not found in the vocabulary")
+ cls_token_id = tokenizer.token_to_id(str(cls_token))
+ if cls_token_id is None:
+ raise TypeError("cls_token not found in the vocabulary")
+
+ tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
+ tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
+
+ parameters = {
+ "model": "BertWordPiece",
+ "unk_token": unk_token,
+ "sep_token": sep_token,
+ "cls_token": cls_token,
+ "pad_token": pad_token,
+ "mask_token": mask_token,
+ "clean_text": clean_text,
+ "handle_chinese_chars": handle_chinese_chars,
+ "strip_accents": strip_accents,
+ "lowercase": lowercase,
+ "wordpieces_prefix": wordpieces_prefix,
+ }
+
+ super().__init__(tokenizer, parameters)
+
+ @staticmethod
+ def from_file(vocab: str, **kwargs):
+ vocab = WordPiece.read_file(vocab)
+ return BertWordPieceTokenizer(vocab, **kwargs)
+
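+    # A minimal sketch ("vocab.txt" is an assumed local BERT vocabulary file):
+    #
+    #     tokenizer = BertWordPieceTokenizer.from_file("vocab.txt", lowercase=True)
+    #     tokenizer.encode("Hello, world!").tokens
+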
+ def train(
+ self,
+ files: Union[str, List[str]],
+ vocab_size: int = 30000,
+ min_frequency: int = 2,
+ limit_alphabet: int = 1000,
+ initial_alphabet: List[str] = [],
+ special_tokens: List[Union[str, AddedToken]] = [
+ "[PAD]",
+ "[UNK]",
+ "[CLS]",
+ "[SEP]",
+ "[MASK]",
+ ],
+ show_progress: bool = True,
+ wordpieces_prefix: str = "##",
+ ):
+ """Train the model using the given files"""
+
+ trainer = trainers.WordPieceTrainer(
+ vocab_size=vocab_size,
+ min_frequency=min_frequency,
+ limit_alphabet=limit_alphabet,
+ initial_alphabet=initial_alphabet,
+ special_tokens=special_tokens,
+ show_progress=show_progress,
+ continuing_subword_prefix=wordpieces_prefix,
+ )
+ if isinstance(files, str):
+ files = [files]
+ self._tokenizer.train(files, trainer=trainer)
+
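+    # A minimal sketch ("corpus.txt" is an assumed local text file):
+    #
+    #     tokenizer = BertWordPieceTokenizer()
+    #     tokenizer.train(["corpus.txt"], vocab_size=30000, min_frequency=2)
+    #     tokenizer.save_model("output_dir")  # persist the learned vocabulary
+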
+ def train_from_iterator(
+ self,
+ iterator: Union[Iterator[str], Iterator[Iterator[str]]],
+ vocab_size: int = 30000,
+ min_frequency: int = 2,
+ limit_alphabet: int = 1000,
+ initial_alphabet: List[str] = [],
+ special_tokens: List[Union[str, AddedToken]] = [
+ "[PAD]",
+ "[UNK]",
+ "[CLS]",
+ "[SEP]",
+ "[MASK]",
+ ],
+ show_progress: bool = True,
+ wordpieces_prefix: str = "##",
+ length: Optional[int] = None,
+ ):
+ """Train the model using the given iterator"""
+
+ trainer = trainers.WordPieceTrainer(
+ vocab_size=vocab_size,
+ min_frequency=min_frequency,
+ limit_alphabet=limit_alphabet,
+ initial_alphabet=initial_alphabet,
+ special_tokens=special_tokens,
+ show_progress=show_progress,
+ continuing_subword_prefix=wordpieces_prefix,
+ )
+ self._tokenizer.train_from_iterator(
+ iterator,
+ trainer=trainer,
+ length=length,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7e3dbc466259795ed9d168f57d8fcabe947e96e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/byte_level_bpe.py
@@ -0,0 +1,122 @@
+from typing import Dict, Iterator, List, Optional, Tuple, Union
+
+from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
+from tokenizers.models import BPE
+from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
+
+from .base_tokenizer import BaseTokenizer
+
+
+class ByteLevelBPETokenizer(BaseTokenizer):
+ """ByteLevelBPETokenizer
+
+ Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
+ """
+
+ def __init__(
+ self,
+ vocab: Optional[Union[str, Dict[str, int]]] = None,
+ merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
+ add_prefix_space: bool = False,
+ lowercase: bool = False,
+ dropout: Optional[float] = None,
+ unicode_normalizer: Optional[str] = None,
+ continuing_subword_prefix: Optional[str] = None,
+ end_of_word_suffix: Optional[str] = None,
+ trim_offsets: bool = False,
+ ):
+ if vocab is not None and merges is not None:
+ tokenizer = Tokenizer(
+ BPE(
+ vocab,
+ merges,
+ dropout=dropout,
+ continuing_subword_prefix=continuing_subword_prefix or "",
+ end_of_word_suffix=end_of_word_suffix or "",
+ )
+ )
+ else:
+ tokenizer = Tokenizer(BPE())
+
+ # Check for Unicode normalization first (before everything else)
+ normalizers = []
+
+ if unicode_normalizer:
+ normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
+
+ if lowercase:
+ normalizers += [Lowercase()]
+
+ # Create the normalizer structure
+ if len(normalizers) > 0:
+ if len(normalizers) > 1:
+ tokenizer.normalizer = Sequence(normalizers)
+ else:
+ tokenizer.normalizer = normalizers[0]
+
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
+ tokenizer.decoder = decoders.ByteLevel()
+ tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
+
+ parameters = {
+ "model": "ByteLevelBPE",
+ "add_prefix_space": add_prefix_space,
+ "lowercase": lowercase,
+ "dropout": dropout,
+ "unicode_normalizer": unicode_normalizer,
+ "continuing_subword_prefix": continuing_subword_prefix,
+ "end_of_word_suffix": end_of_word_suffix,
+ "trim_offsets": trim_offsets,
+ }
+
+ super().__init__(tokenizer, parameters)
+
+ @staticmethod
+ def from_file(vocab_filename: str, merges_filename: str, **kwargs):
+ vocab, merges = BPE.read_file(vocab_filename, merges_filename)
+ return ByteLevelBPETokenizer(vocab, merges, **kwargs)
+
+ def train(
+ self,
+ files: Union[str, List[str]],
+ vocab_size: int = 30000,
+ min_frequency: int = 2,
+ show_progress: bool = True,
+ special_tokens: List[Union[str, AddedToken]] = [],
+ ):
+ """Train the model using the given files"""
+
+ trainer = trainers.BpeTrainer(
+ vocab_size=vocab_size,
+ min_frequency=min_frequency,
+ show_progress=show_progress,
+ special_tokens=special_tokens,
+ initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
+ )
+ if isinstance(files, str):
+ files = [files]
+ self._tokenizer.train(files, trainer=trainer)
+
+ def train_from_iterator(
+ self,
+ iterator: Union[Iterator[str], Iterator[Iterator[str]]],
+ vocab_size: int = 30000,
+ min_frequency: int = 2,
+ show_progress: bool = True,
+ special_tokens: List[Union[str, AddedToken]] = [],
+ length: Optional[int] = None,
+ ):
+ """Train the model using the given iterator"""
+
+ trainer = trainers.BpeTrainer(
+ vocab_size=vocab_size,
+ min_frequency=min_frequency,
+ show_progress=show_progress,
+ special_tokens=special_tokens,
+ initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
+ )
+ self._tokenizer.train_from_iterator(
+ iterator,
+ trainer=trainer,
+ length=length,
+ )
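+
+# A minimal in-memory training sketch (the corpus lines are illustrative):
+#
+#     tokenizer = ByteLevelBPETokenizer()
+#     tokenizer.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=500)
+#     tokenizer.encode("hello world").tokens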
diff --git a/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py
new file mode 100644
index 0000000000000000000000000000000000000000..29ca5977d389d6ff4788fe263d65957e9c4e55fa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tokenizers/implementations/char_level_bpe.py
@@ -0,0 +1,150 @@
+from typing import Dict, Iterator, List, Optional, Tuple, Union
+
+from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
+from ..models import BPE
+from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
+from .base_tokenizer import BaseTokenizer
+
+
+class CharBPETokenizer(BaseTokenizer):
+ """Original BPE Tokenizer
+
+ Represents the BPE algorithm, as introduced by Rico Sennrich
+ (https://arxiv.org/abs/1508.07909)
+
+    The default settings correspond to the OpenAI GPT BPE tokenizer and differ from the original
+    Sennrich subword-nmt implementation in the following options, which you can deactivate:
+      - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) that:
+        * removes any control characters and replaces all whitespace with the classic space.
+        * handles Chinese characters by putting spaces around them.
+        * strips all accents.
+      - splitting on punctuation in addition to whitespace (deactivate with
+        `split_on_whitespace_only=True`)
+ """
+
+ def __init__(
+ self,
+ vocab: Optional[Union[str, Dict[str, int]]] = None,
+ merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
+ unk_token: Union[str, AddedToken] = "