diff --git a/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..27315e9080974509da7d6be41d0173ff2cd72dca --- /dev/null +++ b/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c14966b0f04921ae94219d62004ec1c62f2d523c898d261b7dfd320c04b7a3a +size 33555612 diff --git a/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..e2fec8500be5cf48f37242ff06f913983fa0c226 --- /dev/null +++ b/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea24a8ff97ad7b5ffa04fe7f9936715d6ffe0f86852910e638ea7081cf6f58dc +size 33555627 diff --git a/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..5e7f4665b018223476e733518ead2899a0f10005 --- /dev/null +++ b/ckpts/universal/global_step80/zero/17.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77ff49d590025dabe163e59f659355c91bdd03a973214b4554577f2fcbbd4df6 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33226616d74496acfcaa3328f62043d2bb197565 Binary files /dev/null and b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86a8da0ae4da491a2582b04f961abd3b8db13739 Binary files /dev/null and b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/md.cpython-310.pyc b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/md.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aa7ced216f702deaae61cefac6aaeb6be060fc6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/md.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88261ed866dcb6263fd547d6fb4ab3c22e21c8dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/charset_normalizer/__pycache__/models.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/api.py b/venv/lib/python3.10/site-packages/charset_normalizer/api.py new file mode 100644 index 0000000000000000000000000000000000000000..0ba08e3a50ba6d61e75f3f31772eb4dfdd3f8f05 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/charset_normalizer/api.py @@ -0,0 +1,626 @@ +import logging +from os import PathLike +from typing import BinaryIO, List, Optional, Set, Union + +from .cd import ( + coherence_ratio, + encoding_languages, + mb_encoding_languages, + merge_coherence_ratios, +) +from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE +from .md import mess_ratio +from .models import CharsetMatch, CharsetMatches +from .utils import ( + any_specified_encoding, + cut_sequence_chunks, + iana_name, + identify_sig_or_bom, + is_cp_similar, + is_multi_byte_encoding, + should_strip_sig_or_bom, +) + +# Will most likely be controversial +# logging.addLevelName(TRACE, "TRACE") +logger = logging.getLogger("charset_normalizer") +explain_handler = logging.StreamHandler() +explain_handler.setFormatter( + logging.Formatter("%(asctime)s | %(levelname)s | %(message)s") +) + + +def from_bytes( + sequences: Union[bytes, bytearray], + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.2, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Given a raw bytes sequence, return the best possibles charset usable to render str objects. + If there is no results, it is a strong indicator that the source is binary/not text. + By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence. + And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will. + + The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page + but never take it for granted. Can improve the performance. + + You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that + purpose. + + This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32. + By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain' + toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging. + Custom logging format and handler can be set manually. + """ + + if not isinstance(sequences, (bytearray, bytes)): + raise TypeError( + "Expected object of type bytes or bytearray, got: {0}".format( + type(sequences) + ) + ) + + if explain: + previous_logger_level: int = logger.level + logger.addHandler(explain_handler) + logger.setLevel(TRACE) + + length: int = len(sequences) + + if length == 0: + logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.") + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level or logging.WARNING) + return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")]) + + if cp_isolation is not None: + logger.log( + TRACE, + "cp_isolation is set. use this flag for debugging purpose. " + "limited list of encoding allowed : %s.", + ", ".join(cp_isolation), + ) + cp_isolation = [iana_name(cp, False) for cp in cp_isolation] + else: + cp_isolation = [] + + if cp_exclusion is not None: + logger.log( + TRACE, + "cp_exclusion is set. use this flag for debugging purpose. 
" + "limited list of encoding excluded : %s.", + ", ".join(cp_exclusion), + ) + cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion] + else: + cp_exclusion = [] + + if length <= (chunk_size * steps): + logger.log( + TRACE, + "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.", + steps, + chunk_size, + length, + ) + steps = 1 + chunk_size = length + + if steps > 1 and length / steps < chunk_size: + chunk_size = int(length / steps) + + is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE + is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE + + if is_too_small_sequence: + logger.log( + TRACE, + "Trying to detect encoding from a tiny portion of ({}) byte(s).".format( + length + ), + ) + elif is_too_large_sequence: + logger.log( + TRACE, + "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format( + length + ), + ) + + prioritized_encodings: List[str] = [] + + specified_encoding: Optional[str] = ( + any_specified_encoding(sequences) if preemptive_behaviour else None + ) + + if specified_encoding is not None: + prioritized_encodings.append(specified_encoding) + logger.log( + TRACE, + "Detected declarative mark in sequence. Priority +1 given for %s.", + specified_encoding, + ) + + tested: Set[str] = set() + tested_but_hard_failure: List[str] = [] + tested_but_soft_failure: List[str] = [] + + fallback_ascii: Optional[CharsetMatch] = None + fallback_u8: Optional[CharsetMatch] = None + fallback_specified: Optional[CharsetMatch] = None + + results: CharsetMatches = CharsetMatches() + + sig_encoding, sig_payload = identify_sig_or_bom(sequences) + + if sig_encoding is not None: + prioritized_encodings.append(sig_encoding) + logger.log( + TRACE, + "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.", + len(sig_payload), + sig_encoding, + ) + + prioritized_encodings.append("ascii") + + if "utf_8" not in prioritized_encodings: + prioritized_encodings.append("utf_8") + + for encoding_iana in prioritized_encodings + IANA_SUPPORTED: + if cp_isolation and encoding_iana not in cp_isolation: + continue + + if cp_exclusion and encoding_iana in cp_exclusion: + continue + + if encoding_iana in tested: + continue + + tested.add(encoding_iana) + + decoded_payload: Optional[str] = None + bom_or_sig_available: bool = sig_encoding == encoding_iana + strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom( + encoding_iana + ) + + if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because it require a BOM. 
Will try some sub-encoder LE/BE.", + encoding_iana, + ) + continue + if encoding_iana in {"utf_7"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.", + encoding_iana, + ) + continue + + try: + is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana) + except (ModuleNotFoundError, ImportError): + logger.log( + TRACE, + "Encoding %s does not provide an IncrementalDecoder", + encoding_iana, + ) + continue + + try: + if is_too_large_sequence and is_multi_byte_decoder is False: + str( + sequences[: int(50e4)] + if strip_sig_or_bom is False + else sequences[len(sig_payload) : int(50e4)], + encoding=encoding_iana, + ) + else: + decoded_payload = str( + sequences + if strip_sig_or_bom is False + else sequences[len(sig_payload) :], + encoding=encoding_iana, + ) + except (UnicodeDecodeError, LookupError) as e: + if not isinstance(e, LookupError): + logger.log( + TRACE, + "Code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + similar_soft_failure_test: bool = False + + for encoding_soft_failed in tested_but_soft_failure: + if is_cp_similar(encoding_iana, encoding_soft_failed): + similar_soft_failure_test = True + break + + if similar_soft_failure_test: + logger.log( + TRACE, + "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!", + encoding_iana, + encoding_soft_failed, + ) + continue + + r_ = range( + 0 if not bom_or_sig_available else len(sig_payload), + length, + int(length / steps), + ) + + multi_byte_bonus: bool = ( + is_multi_byte_decoder + and decoded_payload is not None + and len(decoded_payload) < length + ) + + if multi_byte_bonus: + logger.log( + TRACE, + "Code page %s is a multi byte encoding table and it appear that at least one character " + "was encoded using n-bytes.", + encoding_iana, + ) + + max_chunk_gave_up: int = int(len(r_) / 4) + + max_chunk_gave_up = max(max_chunk_gave_up, 2) + early_stop_count: int = 0 + lazy_str_hard_failure = False + + md_chunks: List[str] = [] + md_ratios = [] + + try: + for chunk in cut_sequence_chunks( + sequences, + encoding_iana, + r_, + chunk_size, + bom_or_sig_available, + strip_sig_or_bom, + sig_payload, + is_multi_byte_decoder, + decoded_payload, + ): + md_chunks.append(chunk) + + md_ratios.append( + mess_ratio( + chunk, + threshold, + explain is True and 1 <= len(cp_isolation) <= 2, + ) + ) + + if md_ratios[-1] >= threshold: + early_stop_count += 1 + + if (early_stop_count >= max_chunk_gave_up) or ( + bom_or_sig_available and strip_sig_or_bom is False + ): + break + except ( + UnicodeDecodeError + ) as e: # Lazy str loading may have missed something there + logger.log( + TRACE, + "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + early_stop_count = max_chunk_gave_up + lazy_str_hard_failure = True + + # We might want to check the sequence again with the whole content + # Only if initial MD tests passes + if ( + not lazy_str_hard_failure + and is_too_large_sequence + and not is_multi_byte_decoder + ): + try: + sequences[int(50e3) :].decode(encoding_iana, errors="strict") + except UnicodeDecodeError as e: + logger.log( + TRACE, + "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. 
%s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0 + if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up: + tested_but_soft_failure.append(encoding_iana) + logger.log( + TRACE, + "%s was excluded because of initial chaos probing. Gave up %i time(s). " + "Computed mean chaos is %f %%.", + encoding_iana, + early_stop_count, + round(mean_mess_ratio * 100, ndigits=3), + ) + # Preparing those fallbacks in case we got nothing. + if ( + enable_fallback + and encoding_iana in ["ascii", "utf_8", specified_encoding] + and not lazy_str_hard_failure + ): + fallback_entry = CharsetMatch( + sequences, encoding_iana, threshold, False, [], decoded_payload + ) + if encoding_iana == specified_encoding: + fallback_specified = fallback_entry + elif encoding_iana == "ascii": + fallback_ascii = fallback_entry + else: + fallback_u8 = fallback_entry + continue + + logger.log( + TRACE, + "%s passed initial chaos probing. Mean measured chaos is %f %%", + encoding_iana, + round(mean_mess_ratio * 100, ndigits=3), + ) + + if not is_multi_byte_decoder: + target_languages: List[str] = encoding_languages(encoding_iana) + else: + target_languages = mb_encoding_languages(encoding_iana) + + if target_languages: + logger.log( + TRACE, + "{} should target any language(s) of {}".format( + encoding_iana, str(target_languages) + ), + ) + + cd_ratios = [] + + # We shall skip the CD when its about ASCII + # Most of the time its not relevant to run "language-detection" on it. + if encoding_iana != "ascii": + for chunk in md_chunks: + chunk_languages = coherence_ratio( + chunk, + language_threshold, + ",".join(target_languages) if target_languages else None, + ) + + cd_ratios.append(chunk_languages) + + cd_ratios_merged = merge_coherence_ratios(cd_ratios) + + if cd_ratios_merged: + logger.log( + TRACE, + "We detected language {} using {}".format( + cd_ratios_merged, encoding_iana + ), + ) + + results.append( + CharsetMatch( + sequences, + encoding_iana, + mean_mess_ratio, + bom_or_sig_available, + cd_ratios_merged, + decoded_payload, + ) + ) + + if ( + encoding_iana in [specified_encoding, "ascii", "utf_8"] + and mean_mess_ratio < 0.1 + ): + logger.debug( + "Encoding detection: %s is most likely the one.", encoding_iana + ) + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([results[encoding_iana]]) + + if encoding_iana == sig_encoding: + logger.debug( + "Encoding detection: %s is most likely the one as we detected a BOM or SIG within " + "the beginning of the sequence.", + encoding_iana, + ) + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([results[encoding_iana]]) + + if len(results) == 0: + if fallback_u8 or fallback_ascii or fallback_specified: + logger.log( + TRACE, + "Nothing got out of the detection process. 
Using ASCII/UTF-8/Specified fallback.", + ) + + if fallback_specified: + logger.debug( + "Encoding detection: %s will be used as a fallback match", + fallback_specified.encoding, + ) + results.append(fallback_specified) + elif ( + (fallback_u8 and fallback_ascii is None) + or ( + fallback_u8 + and fallback_ascii + and fallback_u8.fingerprint != fallback_ascii.fingerprint + ) + or (fallback_u8 is not None) + ): + logger.debug("Encoding detection: utf_8 will be used as a fallback match") + results.append(fallback_u8) + elif fallback_ascii: + logger.debug("Encoding detection: ascii will be used as a fallback match") + results.append(fallback_ascii) + + if results: + logger.debug( + "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.", + results.best().encoding, # type: ignore + len(results) - 1, + ) + else: + logger.debug("Encoding detection: Unable to determine any suitable charset.") + + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + + return results + + +def from_fp( + fp: BinaryIO, + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but using a file pointer that is already ready. + Will not close the file pointer. + """ + return from_bytes( + fp.read(), + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def from_path( + path: Union[str, bytes, PathLike], # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode. + Can raise IOError. + """ + with open(path, "rb") as fp: + return from_fp( + fp, + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def is_binary( + fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = False, +) -> bool: + """ + Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string. + Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match + are disabled to be stricter around ASCII-compatible but unlikely to be a string. 
+ """ + if isinstance(fp_or_path_or_payload, (str, PathLike)): + guesses = from_path( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + elif isinstance( + fp_or_path_or_payload, + ( + bytes, + bytearray, + ), + ): + guesses = from_bytes( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + else: + guesses = from_fp( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + + return not guesses diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/cli/__init__.py b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d95fedfe5723713337f1a94ec8f0a00b6ca7816a --- /dev/null +++ b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__init__.py @@ -0,0 +1,6 @@ +from .__main__ import cli_detect, query_yes_no + +__all__ = ( + "cli_detect", + "query_yes_no", +) diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/cli/__main__.py b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..f4bcbaac049b542a004918a0b1499122fcca9cc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__main__.py @@ -0,0 +1,296 @@ +import argparse +import sys +from json import dumps +from os.path import abspath, basename, dirname, join, realpath +from platform import python_version +from typing import List, Optional +from unicodedata import unidata_version + +import charset_normalizer.md as md_module +from charset_normalizer import from_fp +from charset_normalizer.models import CliDetectionResult +from charset_normalizer.version import __version__ + + +def query_yes_no(question: str, default: str = "yes") -> bool: + """Ask a yes/no question via input() and return their answer. + + "question" is a string that is presented to the user. + "default" is the presumed answer if the user just hits . + It must be "yes" (the default), "no" or None (meaning + an answer is required of the user). + + The "answer" return value is True for "yes" or False for "no". 
+ + Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input + """ + valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} + if default is None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while True: + sys.stdout.write(question + prompt) + choice = input().lower() + if default is not None and choice == "": + return valid[default] + elif choice in valid: + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") + + +def cli_detect(argv: Optional[List[str]] = None) -> int: + """ + CLI assistant using ARGV and ArgumentParser + :param argv: + :return: 0 if everything is fine, anything else equal trouble + """ + parser = argparse.ArgumentParser( + description="The Real First Universal Charset Detector. " + "Discover originating encoding used on text file. " + "Normalize text to unicode." + ) + + parser.add_argument( + "files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed" + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + default=False, + dest="verbose", + help="Display complementary information about file if any. " + "Stdout will contain logs about the detection process.", + ) + parser.add_argument( + "-a", + "--with-alternative", + action="store_true", + default=False, + dest="alternatives", + help="Output complementary possibilities if any. Top-level JSON WILL be a list.", + ) + parser.add_argument( + "-n", + "--normalize", + action="store_true", + default=False, + dest="normalize", + help="Permit to normalize input file. If not set, program does not write anything.", + ) + parser.add_argument( + "-m", + "--minimal", + action="store_true", + default=False, + dest="minimal", + help="Only output the charset detected to STDOUT. Disabling JSON output.", + ) + parser.add_argument( + "-r", + "--replace", + action="store_true", + default=False, + dest="replace", + help="Replace file when trying to normalize it instead of creating a new one.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + dest="force", + help="Replace file without asking if you are sure, use this flag with caution.", + ) + parser.add_argument( + "-t", + "--threshold", + action="store", + default=0.2, + type=float, + dest="threshold", + help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.", + ) + parser.add_argument( + "--version", + action="version", + version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format( + __version__, + python_version(), + unidata_version, + "OFF" if md_module.__file__.lower().endswith(".py") else "ON", + ), + help="Show version information and exit.", + ) + + args = parser.parse_args(argv) + + if args.replace is True and args.normalize is False: + print("Use --replace in addition of --normalize only.", file=sys.stderr) + return 1 + + if args.force is True and args.replace is False: + print("Use --force in addition of --replace only.", file=sys.stderr) + return 1 + + if args.threshold < 0.0 or args.threshold > 1.0: + print("--threshold VALUE should be between 0. 
AND 1.", file=sys.stderr) + return 1 + + x_ = [] + + for my_file in args.files: + matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose) + + best_guess = matches.best() + + if best_guess is None: + print( + 'Unable to identify originating encoding for "{}". {}'.format( + my_file.name, + "Maybe try increasing maximum amount of chaos." + if args.threshold < 1.0 + else "", + ), + file=sys.stderr, + ) + x_.append( + CliDetectionResult( + abspath(my_file.name), + None, + [], + [], + "Unknown", + [], + False, + 1.0, + 0.0, + None, + True, + ) + ) + else: + x_.append( + CliDetectionResult( + abspath(my_file.name), + best_guess.encoding, + best_guess.encoding_aliases, + [ + cp + for cp in best_guess.could_be_from_charset + if cp != best_guess.encoding + ], + best_guess.language, + best_guess.alphabets, + best_guess.bom, + best_guess.percent_chaos, + best_guess.percent_coherence, + None, + True, + ) + ) + + if len(matches) > 1 and args.alternatives: + for el in matches: + if el != best_guess: + x_.append( + CliDetectionResult( + abspath(my_file.name), + el.encoding, + el.encoding_aliases, + [ + cp + for cp in el.could_be_from_charset + if cp != el.encoding + ], + el.language, + el.alphabets, + el.bom, + el.percent_chaos, + el.percent_coherence, + None, + False, + ) + ) + + if args.normalize is True: + if best_guess.encoding.startswith("utf") is True: + print( + '"{}" file does not need to be normalized, as it already came from unicode.'.format( + my_file.name + ), + file=sys.stderr, + ) + if my_file.closed is False: + my_file.close() + continue + + dir_path = dirname(realpath(my_file.name)) + file_name = basename(realpath(my_file.name)) + + o_: List[str] = file_name.split(".") + + if args.replace is False: + o_.insert(-1, best_guess.encoding) + if my_file.closed is False: + my_file.close() + elif ( + args.force is False + and query_yes_no( + 'Are you sure to normalize "{}" by replacing it ?'.format( + my_file.name + ), + "no", + ) + is False + ): + if my_file.closed is False: + my_file.close() + continue + + try: + x_[0].unicode_path = join(dir_path, ".".join(o_)) + + with open(x_[0].unicode_path, "w", encoding="utf-8") as fp: + fp.write(str(best_guess)) + except IOError as e: + print(str(e), file=sys.stderr) + if my_file.closed is False: + my_file.close() + return 2 + + if my_file.closed is False: + my_file.close() + + if args.minimal is False: + print( + dumps( + [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__, + ensure_ascii=True, + indent=4, + ) + ) + else: + for my_file in args.files: + print( + ", ".join( + [ + el.encoding or "undefined" + for el in x_ + if el.path == abspath(my_file.name) + ] + ) + ) + + return 0 + + +if __name__ == "__main__": + cli_detect() diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e5e295be99349c7dee5f8bff25d5341163ad319 Binary files /dev/null and b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24ad806fd252a060fe1f24258f19c15391f845b7 Binary files /dev/null and 
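Aside (editor's note, not part of the diff itself): the api.py and cli/__main__.py files added by this change expose the detection entry points from_bytes/from_fp/from_path/is_binary plus the command-line front end. Below is a minimal usage sketch against that API; the sample bytes and file names are illustrative only, and it assumes the vendored charset_normalizer package in this venv is importable.

from charset_normalizer import from_bytes, from_path, is_binary

# Illustrative payload: accented text encoded as cp1252, which is not valid UTF-8.
payload = "Bonjour, voilà un petit test de détection d'encodage.".encode("cp1252")

matches = from_bytes(payload)      # CharsetMatches, as returned by api.py above
best = matches.best()              # CharsetMatch, or None when nothing fits
if best is not None:
    print(best.encoding)           # detected code page name
    print(str(best))               # decoded text (the CLI writes str(best_guess) when normalizing)

# from_path("some_file.txt") opens the file in binary mode and delegates to from_bytes();
# is_binary(b"\x00\x01\x02") runs the same heuristics with fallbacks disabled and returns a bool.
# The CLI added in cli/__main__.py is typically reachable as `python -m charset_normalizer <file>`;
# only the compiled package-level __main__ appears in this diff, so that invocation is assumed here.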
b/venv/lib/python3.10/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/charset_normalizer/md.py b/venv/lib/python3.10/site-packages/charset_normalizer/md.py new file mode 100644 index 0000000000000000000000000000000000000000..77897aae4f44d084d6a59d7f7f1665035ff0047d --- /dev/null +++ b/venv/lib/python3.10/site-packages/charset_normalizer/md.py @@ -0,0 +1,615 @@ +from functools import lru_cache +from logging import getLogger +from typing import List, Optional + +from .constant import ( + COMMON_SAFE_ASCII_CHARACTERS, + TRACE, + UNICODE_SECONDARY_RANGE_KEYWORD, +) +from .utils import ( + is_accentuated, + is_arabic, + is_arabic_isolated_form, + is_case_variable, + is_cjk, + is_emoticon, + is_hangul, + is_hiragana, + is_katakana, + is_latin, + is_punctuation, + is_separator, + is_symbol, + is_thai, + is_unprintable, + remove_accent, + unicode_range, +) + + +class MessDetectorPlugin: + """ + Base abstract class used for mess detection plugins. + All detectors MUST extend and implement given methods. + """ + + def eligible(self, character: str) -> bool: + """ + Determine if given character should be fed in. + """ + raise NotImplementedError # pragma: nocover + + def feed(self, character: str) -> None: + """ + The main routine to be executed upon character. + Insert the logic in witch the text would be considered chaotic. + """ + raise NotImplementedError # pragma: nocover + + def reset(self) -> None: # pragma: no cover + """ + Permit to reset the plugin to the initial state. + """ + raise NotImplementedError + + @property + def ratio(self) -> float: + """ + Compute the chaos ratio based on what your feed() has seen. + Must NOT be lower than 0.; No restriction gt 0. + """ + raise NotImplementedError # pragma: nocover + + +class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._punctuation_count: int = 0 + self._symbol_count: int = 0 + self._character_count: int = 0 + + self._last_printable_char: Optional[str] = None + self._frenzy_symbol_in_word: bool = False + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character != self._last_printable_char + and character not in COMMON_SAFE_ASCII_CHARACTERS + ): + if is_punctuation(character): + self._punctuation_count += 1 + elif ( + character.isdigit() is False + and is_symbol(character) + and is_emoticon(character) is False + ): + self._symbol_count += 2 + + self._last_printable_char = character + + def reset(self) -> None: # pragma: no cover + self._punctuation_count = 0 + self._character_count = 0 + self._symbol_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + ratio_of_punctuation: float = ( + self._punctuation_count + self._symbol_count + ) / self._character_count + + return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0 + + +class TooManyAccentuatedPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._accentuated_count: int = 0 + + def eligible(self, character: str) -> bool: + return character.isalpha() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_accentuated(character): + self._accentuated_count += 1 + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._accentuated_count = 0 + + @property + def ratio(self) -> float: + if 
self._character_count < 8: + return 0.0 + + ratio_of_accentuation: float = self._accentuated_count / self._character_count + return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0 + + +class UnprintablePlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._unprintable_count: int = 0 + self._character_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if is_unprintable(character): + self._unprintable_count += 1 + self._character_count += 1 + + def reset(self) -> None: # pragma: no cover + self._unprintable_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._unprintable_count * 8) / self._character_count + + +class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._successive_count: int = 0 + self._character_count: int = 0 + + self._last_latin_character: Optional[str] = None + + def eligible(self, character: str) -> bool: + return character.isalpha() and is_latin(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + if ( + self._last_latin_character is not None + and is_accentuated(character) + and is_accentuated(self._last_latin_character) + ): + if character.isupper() and self._last_latin_character.isupper(): + self._successive_count += 1 + # Worse if its the same char duplicated with different accent. + if remove_accent(character) == remove_accent(self._last_latin_character): + self._successive_count += 1 + self._last_latin_character = character + + def reset(self) -> None: # pragma: no cover + self._successive_count = 0 + self._character_count = 0 + self._last_latin_character = None + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._successive_count * 2) / self._character_count + + +class SuspiciousRange(MessDetectorPlugin): + def __init__(self) -> None: + self._suspicious_successive_range_count: int = 0 + self._character_count: int = 0 + self._last_printable_seen: Optional[str] = None + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character.isspace() + or is_punctuation(character) + or character in COMMON_SAFE_ASCII_CHARACTERS + ): + self._last_printable_seen = None + return + + if self._last_printable_seen is None: + self._last_printable_seen = character + return + + unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen) + unicode_range_b: Optional[str] = unicode_range(character) + + if is_suspiciously_successive_range(unicode_range_a, unicode_range_b): + self._suspicious_successive_range_count += 1 + + self._last_printable_seen = character + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._suspicious_successive_range_count = 0 + self._last_printable_seen = None + + @property + def ratio(self) -> float: + if self._character_count <= 24: + return 0.0 + + ratio_of_suspicious_range_usage: float = ( + self._suspicious_successive_range_count * 2 + ) / self._character_count + + return ratio_of_suspicious_range_usage + + +class SuperWeirdWordPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._word_count: int = 0 + self._bad_word_count: int = 0 + self._foreign_long_count: int = 0 + + self._is_current_word_bad: bool = False + self._foreign_long_watch: bool = False + + self._character_count: int = 0 + self._bad_character_count: 
int = 0 + + self._buffer: str = "" + self._buffer_accent_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if character.isalpha(): + self._buffer += character + if is_accentuated(character): + self._buffer_accent_count += 1 + if ( + self._foreign_long_watch is False + and (is_latin(character) is False or is_accentuated(character)) + and is_cjk(character) is False + and is_hangul(character) is False + and is_katakana(character) is False + and is_hiragana(character) is False + and is_thai(character) is False + ): + self._foreign_long_watch = True + return + if not self._buffer: + return + if ( + character.isspace() or is_punctuation(character) or is_separator(character) + ) and self._buffer: + self._word_count += 1 + buffer_length: int = len(self._buffer) + + self._character_count += buffer_length + + if buffer_length >= 4: + if self._buffer_accent_count / buffer_length > 0.34: + self._is_current_word_bad = True + # Word/Buffer ending with an upper case accentuated letter are so rare, + # that we will consider them all as suspicious. Same weight as foreign_long suspicious. + if ( + is_accentuated(self._buffer[-1]) + and self._buffer[-1].isupper() + and all(_.isupper() for _ in self._buffer) is False + ): + self._foreign_long_count += 1 + self._is_current_word_bad = True + if buffer_length >= 24 and self._foreign_long_watch: + camel_case_dst = [ + i + for c, i in zip(self._buffer, range(0, buffer_length)) + if c.isupper() + ] + probable_camel_cased: bool = False + + if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3): + probable_camel_cased = True + + if not probable_camel_cased: + self._foreign_long_count += 1 + self._is_current_word_bad = True + + if self._is_current_word_bad: + self._bad_word_count += 1 + self._bad_character_count += len(self._buffer) + self._is_current_word_bad = False + + self._foreign_long_watch = False + self._buffer = "" + self._buffer_accent_count = 0 + elif ( + character not in {"<", ">", "-", "=", "~", "|", "_"} + and character.isdigit() is False + and is_symbol(character) + ): + self._is_current_word_bad = True + self._buffer += character + + def reset(self) -> None: # pragma: no cover + self._buffer = "" + self._is_current_word_bad = False + self._foreign_long_watch = False + self._bad_word_count = 0 + self._word_count = 0 + self._character_count = 0 + self._bad_character_count = 0 + self._foreign_long_count = 0 + + @property + def ratio(self) -> float: + if self._word_count <= 10 and self._foreign_long_count == 0: + return 0.0 + + return self._bad_character_count / self._character_count + + +class CjkInvalidStopPlugin(MessDetectorPlugin): + """ + GB(Chinese) based encoding often render the stop incorrectly when the content does not fit and + can be easily detected. Searching for the overuse of '丅' and '丄'. 
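Aside (editor's note): the MessDetectorPlugin base class near the top of this md.py diff defines the contract every detector implements (eligible/feed/reset/ratio), and mess_ratio() further down instantiates every MessDetectorPlugin.__subclasses__() it can see. The sketch below is a hypothetical plugin written against that contract; the class name and the 0.3 cutoff are invented for illustration and are not part of the library.

from charset_normalizer.md import MessDetectorPlugin


class TooManyReplacementCharsPlugin(MessDetectorPlugin):
    """Hypothetical detector: flags text saturated with U+FFFD replacement characters."""

    def __init__(self) -> None:
        self._replacement_count: int = 0
        self._character_count: int = 0

    def eligible(self, character: str) -> bool:
        return character.isprintable()

    def feed(self, character: str) -> None:
        self._character_count += 1
        if character == "\ufffd":  # U+FFFD REPLACEMENT CHARACTER
            self._replacement_count += 1

    def reset(self) -> None:
        self._replacement_count = 0
        self._character_count = 0

    @property
    def ratio(self) -> float:
        if self._character_count == 0:
            return 0.0
        ratio = self._replacement_count / self._character_count
        return ratio if ratio >= 0.3 else 0.0


# Once the module defining this class has been imported, mess_ratio() (shown later in
# this file) would pick it up automatically, because it enumerates
# MessDetectorPlugin.__subclasses__() at call time.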
+ """ + + def __init__(self) -> None: + self._wrong_stop_count: int = 0 + self._cjk_character_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if character in {"丅", "丄"}: + self._wrong_stop_count += 1 + return + if is_cjk(character): + self._cjk_character_count += 1 + + def reset(self) -> None: # pragma: no cover + self._wrong_stop_count = 0 + self._cjk_character_count = 0 + + @property + def ratio(self) -> float: + if self._cjk_character_count < 16: + return 0.0 + return self._wrong_stop_count / self._cjk_character_count + + +class ArchaicUpperLowerPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._buf: bool = False + + self._character_count_since_last_sep: int = 0 + + self._successive_upper_lower_count: int = 0 + self._successive_upper_lower_count_final: int = 0 + + self._character_count: int = 0 + + self._last_alpha_seen: Optional[str] = None + self._current_ascii_only: bool = True + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + is_concerned = character.isalpha() and is_case_variable(character) + chunk_sep = is_concerned is False + + if chunk_sep and self._character_count_since_last_sep > 0: + if ( + self._character_count_since_last_sep <= 64 + and character.isdigit() is False + and self._current_ascii_only is False + ): + self._successive_upper_lower_count_final += ( + self._successive_upper_lower_count + ) + + self._successive_upper_lower_count = 0 + self._character_count_since_last_sep = 0 + self._last_alpha_seen = None + self._buf = False + self._character_count += 1 + self._current_ascii_only = True + + return + + if self._current_ascii_only is True and character.isascii() is False: + self._current_ascii_only = False + + if self._last_alpha_seen is not None: + if (character.isupper() and self._last_alpha_seen.islower()) or ( + character.islower() and self._last_alpha_seen.isupper() + ): + if self._buf is True: + self._successive_upper_lower_count += 2 + self._buf = False + else: + self._buf = True + else: + self._buf = False + + self._character_count += 1 + self._character_count_since_last_sep += 1 + self._last_alpha_seen = character + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._character_count_since_last_sep = 0 + self._successive_upper_lower_count = 0 + self._successive_upper_lower_count_final = 0 + self._last_alpha_seen = None + self._buf = False + self._current_ascii_only = True + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return self._successive_upper_lower_count_final / self._character_count + + +class ArabicIsolatedFormPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._isolated_form_count: int = 0 + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._isolated_form_count = 0 + + def eligible(self, character: str) -> bool: + return is_arabic(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_arabic_isolated_form(character): + self._isolated_form_count += 1 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + isolated_form_usage: float = self._isolated_form_count / self._character_count + + return isolated_form_usage + + +@lru_cache(maxsize=1024) +def is_suspiciously_successive_range( + unicode_range_a: Optional[str], unicode_range_b: Optional[str] +) -> bool: + """ + Determine if two Unicode 
range seen next to each other can be considered as suspicious. + """ + if unicode_range_a is None or unicode_range_b is None: + return True + + if unicode_range_a == unicode_range_b: + return False + + if "Latin" in unicode_range_a and "Latin" in unicode_range_b: + return False + + if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b: + return False + + # Latin characters can be accompanied with a combining diacritical mark + # eg. Vietnamese. + if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and ( + "Combining" in unicode_range_a or "Combining" in unicode_range_b + ): + return False + + keywords_range_a, keywords_range_b = unicode_range_a.split( + " " + ), unicode_range_b.split(" ") + + for el in keywords_range_a: + if el in UNICODE_SECONDARY_RANGE_KEYWORD: + continue + if el in keywords_range_b: + return False + + # Japanese Exception + range_a_jp_chars, range_b_jp_chars = ( + unicode_range_a + in ( + "Hiragana", + "Katakana", + ), + unicode_range_b in ("Hiragana", "Katakana"), + ) + if (range_a_jp_chars or range_b_jp_chars) and ( + "CJK" in unicode_range_a or "CJK" in unicode_range_b + ): + return False + if range_a_jp_chars and range_b_jp_chars: + return False + + if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b: + if "CJK" in unicode_range_a or "CJK" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + # Chinese/Japanese use dedicated range for punctuation and/or separators. + if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or ( + unicode_range_a in ["Katakana", "Hiragana"] + and unicode_range_b in ["Katakana", "Hiragana"] + ): + if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b: + return False + if "Forms" in unicode_range_a or "Forms" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + return True + + +@lru_cache(maxsize=2048) +def mess_ratio( + decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False +) -> float: + """ + Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier. + """ + + detectors: List[MessDetectorPlugin] = [ + md_class() for md_class in MessDetectorPlugin.__subclasses__() + ] + + length: int = len(decoded_sequence) + 1 + + mean_mess_ratio: float = 0.0 + + if length < 512: + intermediary_mean_mess_ratio_calc: int = 32 + elif length <= 1024: + intermediary_mean_mess_ratio_calc = 64 + else: + intermediary_mean_mess_ratio_calc = 128 + + for character, index in zip(decoded_sequence + "\n", range(length)): + for detector in detectors: + if detector.eligible(character): + detector.feed(character) + + if ( + index > 0 and index % intermediary_mean_mess_ratio_calc == 0 + ) or index == length - 1: + mean_mess_ratio = sum(dt.ratio for dt in detectors) + + if mean_mess_ratio >= maximum_threshold: + break + + if debug: + logger = getLogger("charset_normalizer") + + logger.log( + TRACE, + "Mess-detector extended-analysis start. 
" + f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} " + f"maximum_threshold={maximum_threshold}", + ) + + if len(decoded_sequence) > 16: + logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}") + logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}") + + for dt in detectors: # pragma: nocover + logger.log(TRACE, f"{dt.__class__}: {dt.ratio}") + + return round(mean_mess_ratio, 3) diff --git a/venv/lib/python3.10/site-packages/lxml/ElementInclude.py b/venv/lib/python3.10/site-packages/lxml/ElementInclude.py new file mode 100644 index 0000000000000000000000000000000000000000..21884336f534cd2013165934111146684c9909cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/ElementInclude.py @@ -0,0 +1,244 @@ +# +# ElementTree +# $Id: ElementInclude.py 1862 2004-06-18 07:31:02Z Fredrik $ +# +# limited xinclude support for element trees +# +# history: +# 2003-08-15 fl created +# 2003-11-14 fl fixed default loader +# +# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved. +# +# fredrik@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2004 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +""" +Limited XInclude support for the ElementTree package. + +While lxml.etree has full support for XInclude (see +`etree.ElementTree.xinclude()`), this module provides a simpler, pure +Python, ElementTree compatible implementation that supports a simple +form of custom URL resolvers. +""" + +from lxml import etree +try: + from urlparse import urljoin + from urllib2 import urlopen +except ImportError: + # Python 3 + from urllib.parse import urljoin + from urllib.request import urlopen + +XINCLUDE = "{http://www.w3.org/2001/XInclude}" + +XINCLUDE_INCLUDE = XINCLUDE + "include" +XINCLUDE_FALLBACK = XINCLUDE + "fallback" +XINCLUDE_ITER_TAG = XINCLUDE + "*" + +# For security reasons, the inclusion depth is limited to this read-only value by default. +DEFAULT_MAX_INCLUSION_DEPTH = 6 + + +## +# Fatal include error. + +class FatalIncludeError(etree.LxmlSyntaxError): + pass + + +class LimitedRecursiveIncludeError(FatalIncludeError): + pass + + +## +# ET compatible default loader. 
+# This loader reads an included resource from disk. +# +# @param href Resource reference. +# @param parse Parse mode. Either "xml" or "text". +# @param encoding Optional text encoding. +# @return The expanded resource. If the parse mode is "xml", this +# is an ElementTree instance. If the parse mode is "text", this +# is a Unicode string. If the loader fails, it can return None +# or raise an IOError exception. +# @throws IOError If the loader fails to load the resource. + +def default_loader(href, parse, encoding=None): + file = open(href, 'rb') + if parse == "xml": + data = etree.parse(file).getroot() + else: + data = file.read() + if not encoding: + encoding = 'utf-8' + data = data.decode(encoding) + file.close() + return data + + +## +# Default loader used by lxml.etree - handles custom resolvers properly +# + +def _lxml_default_loader(href, parse, encoding=None, parser=None): + if parse == "xml": + data = etree.parse(href, parser).getroot() + else: + if "://" in href: + f = urlopen(href) + else: + f = open(href, 'rb') + data = f.read() + f.close() + if not encoding: + encoding = 'utf-8' + data = data.decode(encoding) + return data + + +## +# Wrapper for ET compatibility - drops the parser + +def _wrap_et_loader(loader): + def load(href, parse, encoding=None, parser=None): + return loader(href, parse, encoding) + return load + + +## +# Expand XInclude directives. +# +# @param elem Root element. +# @param loader Optional resource loader. If omitted, it defaults +# to {@link default_loader}. If given, it should be a callable +# that implements the same interface as default_loader. +# @param base_url The base URL of the original file, to resolve +# relative include file references. +# @param max_depth The maximum number of recursive inclusions. +# Limited to reduce the risk of malicious content explosion. +# Pass None to disable the limitation. +# @throws LimitedRecursiveIncludeError If the {@link max_depth} was exceeded. +# @throws FatalIncludeError If the function fails to include a given +# resource, or if the tree contains malformed XInclude elements. +# @throws IOError If the function fails to load a given resource. 
+# @returns the node or its replacement if it was an XInclude node + +def include(elem, loader=None, base_url=None, + max_depth=DEFAULT_MAX_INCLUSION_DEPTH): + if max_depth is None: + max_depth = -1 + elif max_depth < 0: + raise ValueError("expected non-negative depth or None for 'max_depth', got %r" % max_depth) + + if base_url is None: + if hasattr(elem, 'getroot'): + tree = elem + elem = elem.getroot() + else: + tree = elem.getroottree() + if hasattr(tree, 'docinfo'): + base_url = tree.docinfo.URL + elif hasattr(elem, 'getroot'): + elem = elem.getroot() + _include(elem, loader, base_url, max_depth) + + +def _include(elem, loader=None, base_url=None, + max_depth=DEFAULT_MAX_INCLUSION_DEPTH, _parent_hrefs=None): + if loader is not None: + load_include = _wrap_et_loader(loader) + else: + load_include = _lxml_default_loader + + if _parent_hrefs is None: + _parent_hrefs = set() + + parser = elem.getroottree().parser + + include_elements = list( + elem.iter(XINCLUDE_ITER_TAG)) + + for e in include_elements: + if e.tag == XINCLUDE_INCLUDE: + # process xinclude directive + href = urljoin(base_url, e.get("href")) + parse = e.get("parse", "xml") + parent = e.getparent() + if parse == "xml": + if href in _parent_hrefs: + raise FatalIncludeError( + "recursive include of %r detected" % href + ) + if max_depth == 0: + raise LimitedRecursiveIncludeError( + "maximum xinclude depth reached when including file %s" % href) + node = load_include(href, parse, parser=parser) + if node is None: + raise FatalIncludeError( + "cannot load %r as %r" % (href, parse) + ) + node = _include(node, loader, href, max_depth - 1, {href} | _parent_hrefs) + if e.tail: + node.tail = (node.tail or "") + e.tail + if parent is None: + return node # replaced the root node! + parent.replace(e, node) + elif parse == "text": + text = load_include(href, parse, encoding=e.get("encoding")) + if text is None: + raise FatalIncludeError( + "cannot load %r as %r" % (href, parse) + ) + predecessor = e.getprevious() + if predecessor is not None: + predecessor.tail = (predecessor.tail or "") + text + elif parent is None: + return text # replaced the root node! + else: + parent.text = (parent.text or "") + text + (e.tail or "") + parent.remove(e) + else: + raise FatalIncludeError( + "unknown parse type in xi:include tag (%r)" % parse + ) + elif e.tag == XINCLUDE_FALLBACK: + parent = e.getparent() + if parent is not None and parent.tag != XINCLUDE_INCLUDE: + raise FatalIncludeError( + "xi:fallback tag must be child of xi:include (%r)" % e.tag + ) + else: + raise FatalIncludeError( + "Invalid element found in XInclude namespace (%r)" % e.tag + ) + return elem diff --git a/venv/lib/python3.10/site-packages/lxml/__init__.py b/venv/lib/python3.10/site-packages/lxml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..65a92c2faef38686ee59bfd6f6a08bdae73eece0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/__init__.py @@ -0,0 +1,22 @@ +# this is a package + +__version__ = "5.2.1" + + +def get_include(): + """ + Returns a list of header include paths (for lxml itself, libxml2 + and libxslt) needed to compile C code against lxml if it was built + with statically linked libraries. 
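Aside (editor's note): the ElementInclude.py module added above is the pure-Python XInclude expansion described in its docstring. A minimal usage sketch follows; the file name "document.xml" is illustrative, and a custom resolver with the default_loader signature could be supplied through the loader argument.

from lxml import etree
from lxml import ElementInclude

# Parse a document that contains <xi:include> elements (path is illustrative).
tree = etree.parse("document.xml")
root = tree.getroot()

# Expand the xi:include directives in place, honouring the default max_depth of 6.
ElementInclude.include(root, base_url="document.xml")

print(etree.tostring(root, pretty_print=True).decode("utf-8"))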
+ """ + import os + lxml_path = __path__[0] + include_path = os.path.join(lxml_path, 'includes') + includes = [include_path, lxml_path] + + for name in os.listdir(include_path): + path = os.path.join(include_path, name) + if os.path.isdir(path): + includes.append(path) + + return includes diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/ElementInclude.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/ElementInclude.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bf3c375055ea009877b10b23036e0173e985ba7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/ElementInclude.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f140a99eeeb888e936d12edd121fcfd12abba492 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/_elementpath.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/_elementpath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f80c25d8576335608ed7e3971e9994abf0958774 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/_elementpath.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fd2ec7c37b71db79750fa862cf1651cdf248fe2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/cssselect.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/cssselect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f77e864c3c0f9f14e198c608970ad8d9d0722ffd Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/cssselect.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/doctestcompare.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/doctestcompare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a62d0db8e5266ef0195a41ac7e675b4263fc208a Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/doctestcompare.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/pyclasslookup.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/pyclasslookup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b6324348f0983e8e5b3b76568119526e3f13c24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/pyclasslookup.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/sax.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/__pycache__/sax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0315ba37961610ff8451512924e4920188d9db51 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/sax.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/__pycache__/usedoctest.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/lxml/__pycache__/usedoctest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba9e8b4c57399541242f24889fbd0731c3ae33e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/__pycache__/usedoctest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b67203995fc65fd74b7a63e09e726cb04d2e07c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/_elementpath.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/lxml/_elementpath.py b/venv/lib/python3.10/site-packages/lxml/_elementpath.py new file mode 100644 index 0000000000000000000000000000000000000000..6233a63502600e0d7165ee51b7e3164792089a16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/_elementpath.py @@ -0,0 +1,341 @@ +# cython: language_level=2 + +# +# ElementTree +# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $ +# +# limited xpath support for element trees +# +# history: +# 2003-05-23 fl created +# 2003-05-28 fl added support for // etc +# 2003-08-27 fl fixed parsing of periods in element names +# 2007-09-10 fl new selection engine +# 2007-09-12 fl fixed parent selector +# 2007-09-13 fl added iterfind; changed findall to return a list +# 2007-11-30 fl added namespaces support +# 2009-10-30 fl added child element value filter +# +# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved. +# +# fredrik@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2009 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +## +# Implementation module for XPath support. There's usually no reason +# to import this module directly; the ElementTree does this for +# you, if needed. 
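+#
+# Illustrative calls against the helpers defined below (editor's sketch,
+# hypothetical element names and namespace URIs):
+#
+#     find(root, "child")              # first matching child element
+#     findall(root, ".//{urn:x}tag")   # all matching descendants
+#     findall(root, "child[@id='a']")  # attribute-value predicate
+#     findall(root, "child[1]")        # 1-based position predicate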
+## + + +import re + +xpath_tokenizer_re = re.compile( + "(" + "'[^']*'|\"[^\"]*\"|" + "::|" + "//?|" + r"\.\.|" + r"\(\)|" + r"[/.*:\[\]\(\)@=])|" + r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|" + r"\s+" + ) + +def xpath_tokenizer(pattern, namespaces=None, with_prefixes=True): + # ElementTree uses '', lxml used None originally. + default_namespace = (namespaces.get(None) or namespaces.get('')) if namespaces else None + parsing_attribute = False + for token in xpath_tokenizer_re.findall(pattern): + ttype, tag = token + if tag and tag[0] != "{": + if ":" in tag and with_prefixes: + prefix, uri = tag.split(":", 1) + try: + if not namespaces: + raise KeyError + yield ttype, "{%s}%s" % (namespaces[prefix], uri) + except KeyError: + raise SyntaxError("prefix %r not found in prefix map" % prefix) + elif default_namespace and not parsing_attribute: + yield ttype, "{%s}%s" % (default_namespace, tag) + else: + yield token + parsing_attribute = False + else: + yield token + parsing_attribute = ttype == '@' + + +def prepare_child(next, token): + tag = token[1] + def select(result): + for elem in result: + yield from elem.iterchildren(tag) + return select + +def prepare_star(next, token): + def select(result): + for elem in result: + yield from elem.iterchildren('*') + return select + +def prepare_self(next, token): + def select(result): + return result + return select + +def prepare_descendant(next, token): + token = next() + if token[0] == "*": + tag = "*" + elif not token[0]: + tag = token[1] + else: + raise SyntaxError("invalid descendant") + def select(result): + for elem in result: + yield from elem.iterdescendants(tag) + return select + +def prepare_parent(next, token): + def select(result): + for elem in result: + parent = elem.getparent() + if parent is not None: + yield parent + return select + +def prepare_predicate(next, token): + # FIXME: replace with real parser!!! 
refs: + # http://effbot.org/zone/simple-iterator-parser.htm + # http://javascript.crockford.com/tdop/tdop.html + signature = '' + predicate = [] + while 1: + token = next() + if token[0] == "]": + break + if token == ('', ''): + # ignore whitespace + continue + if token[0] and token[0][:1] in "'\"": + token = "'", token[0][1:-1] + signature += token[0] or "-" + predicate.append(token[1]) + + # use signature to determine predicate type + if signature == "@-": + # [@attribute] predicate + key = predicate[1] + def select(result): + for elem in result: + if elem.get(key) is not None: + yield elem + return select + if signature == "@-='": + # [@attribute='value'] + key = predicate[1] + value = predicate[-1] + def select(result): + for elem in result: + if elem.get(key) == value: + yield elem + return select + if signature == "-" and not re.match(r"-?\d+$", predicate[0]): + # [tag] + tag = predicate[0] + def select(result): + for elem in result: + for _ in elem.iterchildren(tag): + yield elem + break + return select + if signature == ".='" or (signature == "-='" and not re.match(r"-?\d+$", predicate[0])): + # [.='value'] or [tag='value'] + tag = predicate[0] + value = predicate[-1] + if tag: + def select(result): + for elem in result: + for e in elem.iterchildren(tag): + if "".join(e.itertext()) == value: + yield elem + break + else: + def select(result): + for elem in result: + if "".join(elem.itertext()) == value: + yield elem + return select + if signature == "-" or signature == "-()" or signature == "-()-": + # [index] or [last()] or [last()-index] + if signature == "-": + # [index] + index = int(predicate[0]) - 1 + if index < 0: + if index == -1: + raise SyntaxError( + "indices in path predicates are 1-based, not 0-based") + else: + raise SyntaxError("path index >= 1 expected") + else: + if predicate[0] != "last": + raise SyntaxError("unsupported function") + if signature == "-()-": + try: + index = int(predicate[2]) - 1 + except ValueError: + raise SyntaxError("unsupported expression") + else: + index = -1 + def select(result): + for elem in result: + parent = elem.getparent() + if parent is None: + continue + try: + # FIXME: what if the selector is "*" ? + elems = list(parent.iterchildren(elem.tag)) + if elems[index] is elem: + yield elem + except IndexError: + pass + return select + raise SyntaxError("invalid predicate") + +ops = { + "": prepare_child, + "*": prepare_star, + ".": prepare_self, + "..": prepare_parent, + "//": prepare_descendant, + "[": prepare_predicate, +} + + +# -------------------------------------------------------------------- + +_cache = {} + + +def _build_path_iterator(path, namespaces, with_prefixes=True): + """compile selector pattern""" + if path[-1:] == "/": + path += "*" # implicit all (FIXME: keep this?) + + cache_key = (path,) + if namespaces: + # lxml originally used None for the default namespace but ElementTree uses the + # more convenient (all-strings-dict) empty string, so we support both here, + # preferring the more convenient '', as long as they aren't ambiguous. 
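+        # For illustration (hypothetical URIs): {None: 'urn:a', 'p': 'urn:b'} and
+        # {'': 'urn:a', 'p': 'urn:b'} behave identically here, whereas mapping
+        # None and '' to *different* URIs is rejected below as ambiguous.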
+ if None in namespaces: + if '' in namespaces and namespaces[None] != namespaces['']: + raise ValueError("Ambiguous default namespace provided: %r versus %r" % ( + namespaces[None], namespaces[''])) + cache_key += (namespaces[None],) + tuple(sorted( + item for item in namespaces.items() if item[0] is not None)) + else: + cache_key += tuple(sorted(namespaces.items())) + + try: + return _cache[cache_key] + except KeyError: + pass + if len(_cache) > 100: + _cache.clear() + + if path[:1] == "/": + raise SyntaxError("cannot use absolute path on element") + stream = iter(xpath_tokenizer(path, namespaces, with_prefixes=with_prefixes)) + try: + _next = stream.next + except AttributeError: + # Python 3 + _next = stream.__next__ + try: + token = _next() + except StopIteration: + raise SyntaxError("empty path expression") + selector = [] + while 1: + try: + selector.append(ops[token[0]](_next, token)) + except StopIteration: + raise SyntaxError("invalid path") + try: + token = _next() + if token[0] == "/": + token = _next() + except StopIteration: + break + _cache[cache_key] = selector + return selector + + +## +# Iterate over the matching nodes + +def iterfind(elem, path, namespaces=None, with_prefixes=True): + selector = _build_path_iterator(path, namespaces, with_prefixes=with_prefixes) + result = iter((elem,)) + for select in selector: + result = select(result) + return result + + +## +# Find first matching object. + +def find(elem, path, namespaces=None, with_prefixes=True): + it = iterfind(elem, path, namespaces, with_prefixes=with_prefixes) + try: + return next(it) + except StopIteration: + return None + + +## +# Find all matching objects. + +def findall(elem, path, namespaces=None, with_prefixes=True): + return list(iterfind(elem, path, namespaces)) + + +## +# Find text for first matching object. + +def findtext(elem, path, default=None, namespaces=None, with_prefixes=True): + el = find(elem, path, namespaces, with_prefixes=with_prefixes) + if el is None: + return default + else: + return el.text or '' diff --git a/venv/lib/python3.10/site-packages/lxml/apihelpers.pxi b/venv/lib/python3.10/site-packages/lxml/apihelpers.pxi new file mode 100644 index 0000000000000000000000000000000000000000..fb60af7d23766c9b5bd8a10f14a02ad099a8d249 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/apihelpers.pxi @@ -0,0 +1,1793 @@ +# Private/public helper functions for API functions + +from lxml.includes cimport uri + + +cdef void displayNode(xmlNode* c_node, indent) noexcept: + # to help with debugging + cdef xmlNode* c_child + try: + print(indent * ' ', c_node) + c_child = c_node.children + while c_child is not NULL: + displayNode(c_child, indent + 1) + c_child = c_child.next + finally: + return # swallow any exceptions + +cdef inline bint _isHtmlDocument(_Element element) except -1: + cdef xmlNode* c_node = element._c_node + return ( + c_node is not NULL and c_node.doc is not NULL and + c_node.doc.properties & tree.XML_DOC_HTML != 0 + ) + +cdef inline int _assertValidNode(_Element element) except -1: + assert element._c_node is not NULL, "invalid Element proxy at %s" % id(element) + +cdef inline int _assertValidDoc(_Document doc) except -1: + assert doc._c_doc is not NULL, "invalid Document proxy at %s" % id(doc) + +cdef _Document _documentOrRaise(object input): + """Call this to get the document of a _Document, _ElementTree or _Element + object, or to raise an exception if it can't be determined. + + Should be used in all API functions for consistency. 
+ """ + cdef _Document doc + if isinstance(input, _ElementTree): + if (<_ElementTree>input)._context_node is not None: + doc = (<_ElementTree>input)._context_node._doc + else: + doc = None + elif isinstance(input, _Element): + doc = (<_Element>input)._doc + elif isinstance(input, _Document): + doc = <_Document>input + else: + raise TypeError, f"Invalid input object: {python._fqtypename(input).decode('utf8')}" + if doc is None: + raise ValueError, f"Input object has no document: {python._fqtypename(input).decode('utf8')}" + _assertValidDoc(doc) + return doc + +cdef _Element _rootNodeOrRaise(object input): + """Call this to get the root node of a _Document, _ElementTree or + _Element object, or to raise an exception if it can't be determined. + + Should be used in all API functions for consistency. + """ + cdef _Element node + if isinstance(input, _ElementTree): + node = (<_ElementTree>input)._context_node + elif isinstance(input, _Element): + node = <_Element>input + elif isinstance(input, _Document): + node = (<_Document>input).getroot() + else: + raise TypeError, f"Invalid input object: {python._fqtypename(input).decode('utf8')}" + if (node is None or not node._c_node or + node._c_node.type != tree.XML_ELEMENT_NODE): + raise ValueError, f"Input object is not an XML element: {python._fqtypename(input).decode('utf8')}" + _assertValidNode(node) + return node + +cdef bint _isAncestorOrSame(xmlNode* c_ancestor, xmlNode* c_node) noexcept: + while c_node: + if c_node is c_ancestor: + return True + c_node = c_node.parent + return False + +cdef _Element _makeElement(tag, xmlDoc* c_doc, _Document doc, + _BaseParser parser, text, tail, attrib, nsmap, + dict extra_attrs): + """Create a new element and initialize text content, namespaces and + attributes. + + This helper function will reuse as much of the existing document as + possible: + + If 'parser' is None, the parser will be inherited from 'doc' or the + default parser will be used. + + If 'doc' is None, 'c_doc' is used to create a new _Document and the new + element is made its root node. + + If 'c_doc' is also NULL, a new xmlDoc will be created. + """ + cdef xmlNode* c_node + if doc is not None: + c_doc = doc._c_doc + ns_utf, name_utf = _getNsTag(tag) + if parser is not None and parser._for_html: + _htmlTagValidOrRaise(name_utf) + if c_doc is NULL: + c_doc = _newHTMLDoc() + else: + _tagValidOrRaise(name_utf) + if c_doc is NULL: + c_doc = _newXMLDoc() + c_node = _createElement(c_doc, name_utf) + if c_node is NULL: + if doc is None and c_doc is not NULL: + tree.xmlFreeDoc(c_doc) + raise MemoryError() + try: + if doc is None: + tree.xmlDocSetRootElement(c_doc, c_node) + doc = _documentFactory(c_doc, parser) + if text is not None: + _setNodeText(c_node, text) + if tail is not None: + _setTailText(c_node, tail) + # add namespaces to node if necessary + _setNodeNamespaces(c_node, doc, ns_utf, nsmap) + _initNodeAttributes(c_node, doc, attrib, extra_attrs) + return _elementFactory(doc, c_node) + except: + # free allocated c_node/c_doc unless Python does it for us + if c_node.doc is not c_doc: + # node not yet in document => will not be freed by document + if tail is not None: + _removeText(c_node.next) # tail + tree.xmlFreeNode(c_node) + if doc is None: + # c_doc will not be freed by doc + tree.xmlFreeDoc(c_doc) + raise + +cdef int _initNewElement(_Element element, bint is_html, name_utf, ns_utf, + _BaseParser parser, attrib, nsmap, dict extra_attrs) except -1: + """Initialise a new Element object. 
+ + This is used when users instantiate a Python Element subclass + directly, without it being mapped to an existing XML node. + """ + cdef xmlDoc* c_doc + cdef xmlNode* c_node + cdef _Document doc + if is_html: + _htmlTagValidOrRaise(name_utf) + c_doc = _newHTMLDoc() + else: + _tagValidOrRaise(name_utf) + c_doc = _newXMLDoc() + c_node = _createElement(c_doc, name_utf) + if c_node is NULL: + if c_doc is not NULL: + tree.xmlFreeDoc(c_doc) + raise MemoryError() + tree.xmlDocSetRootElement(c_doc, c_node) + doc = _documentFactory(c_doc, parser) + # add namespaces to node if necessary + _setNodeNamespaces(c_node, doc, ns_utf, nsmap) + _initNodeAttributes(c_node, doc, attrib, extra_attrs) + _registerProxy(element, doc, c_node) + element._init() + return 0 + +cdef _Element _makeSubElement(_Element parent, tag, text, tail, + attrib, nsmap, dict extra_attrs): + """Create a new child element and initialize text content, namespaces and + attributes. + """ + cdef xmlNode* c_node + cdef xmlDoc* c_doc + if parent is None or parent._doc is None: + return None + _assertValidNode(parent) + ns_utf, name_utf = _getNsTag(tag) + c_doc = parent._doc._c_doc + + if parent._doc._parser is not None and parent._doc._parser._for_html: + _htmlTagValidOrRaise(name_utf) + else: + _tagValidOrRaise(name_utf) + + c_node = _createElement(c_doc, name_utf) + if c_node is NULL: + raise MemoryError() + tree.xmlAddChild(parent._c_node, c_node) + + try: + if text is not None: + _setNodeText(c_node, text) + if tail is not None: + _setTailText(c_node, tail) + + # add namespaces to node if necessary + _setNodeNamespaces(c_node, parent._doc, ns_utf, nsmap) + _initNodeAttributes(c_node, parent._doc, attrib, extra_attrs) + return _elementFactory(parent._doc, c_node) + except: + # make sure we clean up in case of an error + _removeNode(parent._doc, c_node) + raise + + +cdef int _setNodeNamespaces(xmlNode* c_node, _Document doc, + object node_ns_utf, object nsmap) except -1: + """Lookup current namespace prefixes, then set namespace structure for + node (if 'node_ns_utf' was provided) and register new ns-prefix mappings. + + 'node_ns_utf' should only be passed for a newly created node. + """ + cdef xmlNs* c_ns + cdef list nsdefs + + if nsmap: + for prefix, href in _iter_nsmap(nsmap): + href_utf = _utf8(href) + _uriValidOrRaise(href_utf) + c_href = _xcstr(href_utf) + if prefix is not None: + prefix_utf = _utf8(prefix) + _prefixValidOrRaise(prefix_utf) + c_prefix = _xcstr(prefix_utf) + else: + c_prefix = NULL + # add namespace with prefix if it is not already known + c_ns = tree.xmlSearchNs(doc._c_doc, c_node, c_prefix) + if c_ns is NULL or \ + c_ns.href is NULL or \ + tree.xmlStrcmp(c_ns.href, c_href) != 0: + c_ns = tree.xmlNewNs(c_node, c_href, c_prefix) + if href_utf == node_ns_utf: + tree.xmlSetNs(c_node, c_ns) + node_ns_utf = None + + if node_ns_utf is not None: + _uriValidOrRaise(node_ns_utf) + doc._setNodeNs(c_node, _xcstr(node_ns_utf)) + return 0 + + +cdef dict _build_nsmap(xmlNode* c_node): + """ + Namespace prefix->URI mapping known in the context of this Element. + This includes all namespace declarations of the parents. 
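+
+    Illustrative result shape (editor's sketch, hypothetical URIs): for an
+    element inside <root xmlns="urn:default" xmlns:p="urn:p">, the returned
+    mapping is {None: 'urn:default', 'p': 'urn:p'}.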
+ """ + cdef xmlNs* c_ns + nsmap = {} + while c_node is not NULL and c_node.type == tree.XML_ELEMENT_NODE: + c_ns = c_node.nsDef + while c_ns is not NULL: + if c_ns.prefix or c_ns.href: + prefix = funicodeOrNone(c_ns.prefix) + if prefix not in nsmap: + nsmap[prefix] = funicodeOrNone(c_ns.href) + c_ns = c_ns.next + c_node = c_node.parent + return nsmap + + +cdef _iter_nsmap(nsmap): + """ + Create a reproducibly ordered iterable from an nsmap mapping. + Tries to preserve an existing order and sorts if it assumes no order. + + The difference to _iter_attrib() is that None doesn't sort with strings + in Py3.x. + """ + if isinstance(nsmap, dict): + # dicts are insertion-ordered in Py3.6+ => keep the user provided order. + return nsmap.items() + if len(nsmap) <= 1: + return nsmap.items() + # nsmap will usually be a plain unordered dict => avoid type checking overhead + if type(nsmap) is not dict and isinstance(nsmap, OrderedDict): + return nsmap.items() # keep existing order + if None not in nsmap: + return sorted(nsmap.items()) + + # Move the default namespace to the end. This makes sure libxml2 + # prefers a prefix if the ns is defined redundantly on the same + # element. That way, users can work around a problem themselves + # where default namespace attributes on non-default namespaced + # elements serialise without prefix (i.e. into the non-default + # namespace). + default_ns = nsmap[None] + nsdefs = [(k, v) for k, v in nsmap.items() if k is not None] + nsdefs.sort() + nsdefs.append((None, default_ns)) + return nsdefs + + +cdef _iter_attrib(attrib): + """ + Create a reproducibly ordered iterable from an attrib mapping. + Tries to preserve an existing order and sorts if it assumes no order. + """ + # dicts are insertion-ordered in Py3.6+ => keep the user provided order. + if isinstance(attrib, (dict, _Attrib, OrderedDict)): + return attrib.items() + # assume it's an unordered mapping of some kind + return sorted(attrib.items()) + + +cdef _initNodeAttributes(xmlNode* c_node, _Document doc, attrib, dict extra): + """Initialise the attributes of an element node. 
+ """ + cdef bint is_html + cdef xmlNs* c_ns + if attrib is not None and not hasattr(attrib, 'items'): + raise TypeError, f"Invalid attribute dictionary: {python._fqtypename(attrib).decode('utf8')}" + if not attrib and not extra: + return # nothing to do + is_html = doc._parser._for_html + seen = set() + if extra: + for name, value in extra.items(): + _addAttributeToNode(c_node, doc, is_html, name, value, seen) + if attrib: + for name, value in _iter_attrib(attrib): + _addAttributeToNode(c_node, doc, is_html, name, value, seen) + + +cdef int _addAttributeToNode(xmlNode* c_node, _Document doc, bint is_html, + name, value, set seen_tags) except -1: + ns_utf, name_utf = tag = _getNsTag(name) + if tag in seen_tags: + return 0 + seen_tags.add(tag) + if not is_html: + _attributeValidOrRaise(name_utf) + value_utf = _utf8(value) + if ns_utf is None: + tree.xmlNewProp(c_node, _xcstr(name_utf), _xcstr(value_utf)) + else: + _uriValidOrRaise(ns_utf) + c_ns = doc._findOrBuildNodeNs(c_node, _xcstr(ns_utf), NULL, 1) + tree.xmlNewNsProp(c_node, c_ns, + _xcstr(name_utf), _xcstr(value_utf)) + return 0 + + +ctypedef struct _ns_node_ref: + xmlNs* ns + xmlNode* node + + +cdef int _collectNsDefs(xmlNode* c_element, _ns_node_ref **_c_ns_list, + size_t *_c_ns_list_len, size_t *_c_ns_list_size) except -1: + c_ns_list = _c_ns_list[0] + cdef size_t c_ns_list_len = _c_ns_list_len[0] + cdef size_t c_ns_list_size = _c_ns_list_size[0] + + c_nsdef = c_element.nsDef + while c_nsdef is not NULL: + if c_ns_list_len >= c_ns_list_size: + if c_ns_list is NULL: + c_ns_list_size = 20 + else: + c_ns_list_size *= 2 + c_nsref_ptr = <_ns_node_ref*> python.lxml_realloc( + c_ns_list, c_ns_list_size, sizeof(_ns_node_ref)) + if c_nsref_ptr is NULL: + if c_ns_list is not NULL: + python.lxml_free(c_ns_list) + _c_ns_list[0] = NULL + raise MemoryError() + c_ns_list = c_nsref_ptr + + c_ns_list[c_ns_list_len] = _ns_node_ref(c_nsdef, c_element) + c_ns_list_len += 1 + c_nsdef = c_nsdef.next + + _c_ns_list_size[0] = c_ns_list_size + _c_ns_list_len[0] = c_ns_list_len + _c_ns_list[0] = c_ns_list + + +cdef int _removeUnusedNamespaceDeclarations(xmlNode* c_element, set prefixes_to_keep) except -1: + """Remove any namespace declarations from a subtree that are not used by + any of its elements (or attributes). + + If a 'prefixes_to_keep' is provided, it must be a set of prefixes. + Any corresponding namespace mappings will not be removed as part of the cleanup. 
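+
+    For illustration (editor's sketch): prefixes_to_keep={'xsi'} preserves an
+    otherwise unused xmlns:xsi declaration during the cleanup.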
+ """ + cdef xmlNode* c_node + cdef _ns_node_ref* c_ns_list = NULL + cdef size_t c_ns_list_size = 0 + cdef size_t c_ns_list_len = 0 + cdef size_t i + + if c_element.parent and c_element.parent.type == tree.XML_DOCUMENT_NODE: + # include declarations on the document node + _collectNsDefs(c_element.parent, &c_ns_list, &c_ns_list_len, &c_ns_list_size) + + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_element, c_element, 1) + # collect all new namespace declarations into the ns list + if c_element.nsDef: + _collectNsDefs(c_element, &c_ns_list, &c_ns_list_len, &c_ns_list_size) + + # remove all namespace declarations from the list that are referenced + if c_ns_list_len and c_element.type == tree.XML_ELEMENT_NODE: + c_node = c_element + while c_node and c_ns_list_len: + if c_node.ns: + for i in range(c_ns_list_len): + if c_node.ns is c_ns_list[i].ns: + c_ns_list_len -= 1 + c_ns_list[i] = c_ns_list[c_ns_list_len] + #c_ns_list[c_ns_list_len] = _ns_node_ref(NULL, NULL) + break + if c_node is c_element: + # continue with attributes + c_node = c_element.properties + else: + c_node = c_node.next + tree.END_FOR_EACH_ELEMENT_FROM(c_element) + + if c_ns_list is NULL: + return 0 + + # free all namespace declarations that remained in the list, + # except for those we should keep explicitly + cdef xmlNs* c_nsdef + for i in range(c_ns_list_len): + if prefixes_to_keep is not None: + if c_ns_list[i].ns.prefix and c_ns_list[i].ns.prefix in prefixes_to_keep: + continue + c_node = c_ns_list[i].node + c_nsdef = c_node.nsDef + if c_nsdef is c_ns_list[i].ns: + c_node.nsDef = c_node.nsDef.next + else: + while c_nsdef.next is not c_ns_list[i].ns: + c_nsdef = c_nsdef.next + c_nsdef.next = c_nsdef.next.next + tree.xmlFreeNs(c_ns_list[i].ns) + + if c_ns_list is not NULL: + python.lxml_free(c_ns_list) + return 0 + +cdef xmlNs* _searchNsByHref(xmlNode* c_node, const_xmlChar* c_href, bint is_attribute) noexcept: + """Search a namespace declaration that covers a node (element or + attribute). + + For attributes, try to find a prefixed namespace declaration + instead of the default namespaces. This helps in supporting + round-trips for attributes on elements with a different namespace. + """ + cdef xmlNs* c_ns + cdef xmlNs* c_default_ns = NULL + cdef xmlNode* c_element + if c_href is NULL or c_node is NULL or c_node.type == tree.XML_ENTITY_REF_NODE: + return NULL + if tree.xmlStrcmp(c_href, tree.XML_XML_NAMESPACE) == 0: + # no special cases here, let libxml2 handle this + return tree.xmlSearchNsByHref(c_node.doc, c_node, c_href) + if c_node.type == tree.XML_ATTRIBUTE_NODE: + is_attribute = 1 + while c_node is not NULL and c_node.type != tree.XML_ELEMENT_NODE: + c_node = c_node.parent + c_element = c_node + while c_node is not NULL: + if c_node.type == tree.XML_ELEMENT_NODE: + c_ns = c_node.nsDef + while c_ns is not NULL: + if c_ns.href is not NULL and tree.xmlStrcmp(c_href, c_ns.href) == 0: + if c_ns.prefix is NULL and is_attribute: + # for attributes, continue searching a named + # prefix, but keep the first default namespace + # declaration that we found + if c_default_ns is NULL: + c_default_ns = c_ns + elif tree.xmlSearchNs( + c_element.doc, c_element, c_ns.prefix) is c_ns: + # start node is in namespace scope => found! 
+ return c_ns + c_ns = c_ns.next + if c_node is not c_element and c_node.ns is not NULL: + # optimise: the node may have the namespace itself + c_ns = c_node.ns + if c_ns.href is not NULL and tree.xmlStrcmp(c_href, c_ns.href) == 0: + if c_ns.prefix is NULL and is_attribute: + # for attributes, continue searching a named + # prefix, but keep the first default namespace + # declaration that we found + if c_default_ns is NULL: + c_default_ns = c_ns + elif tree.xmlSearchNs( + c_element.doc, c_element, c_ns.prefix) is c_ns: + # start node is in namespace scope => found! + return c_ns + c_node = c_node.parent + # nothing found => use a matching default namespace or fail + if c_default_ns is not NULL: + if tree.xmlSearchNs(c_element.doc, c_element, NULL) is c_default_ns: + return c_default_ns + return NULL + +cdef int _replaceNodeByChildren(_Document doc, xmlNode* c_node) except -1: + # NOTE: this does not deallocate the node, just unlink it! + cdef xmlNode* c_parent + cdef xmlNode* c_child + if c_node.children is NULL: + tree.xmlUnlinkNode(c_node) + return 0 + + c_parent = c_node.parent + # fix parent links of children + c_child = c_node.children + while c_child is not NULL: + c_child.parent = c_parent + c_child = c_child.next + + # fix namespace references of children if their parent's namespace + # declarations get lost + if c_node.nsDef is not NULL: + c_child = c_node.children + while c_child is not NULL: + moveNodeToDocument(doc, doc._c_doc, c_child) + c_child = c_child.next + + # fix sibling links to/from child slice + if c_node.prev is NULL: + c_parent.children = c_node.children + else: + c_node.prev.next = c_node.children + c_node.children.prev = c_node.prev + if c_node.next is NULL: + c_parent.last = c_node.last + else: + c_node.next.prev = c_node.last + c_node.last.next = c_node.next + + # unlink c_node + c_node.children = c_node.last = NULL + c_node.parent = c_node.next = c_node.prev = NULL + return 0 + +cdef unicode _attributeValue(xmlNode* c_element, xmlAttr* c_attrib_node): + c_href = _getNs(c_attrib_node) + value = tree.xmlGetNsProp(c_element, c_attrib_node.name, c_href) + try: + result = funicode(value) + finally: + tree.xmlFree(value) + return result + +cdef unicode _attributeValueFromNsName(xmlNode* c_element, + const_xmlChar* c_href, const_xmlChar* c_name): + c_result = tree.xmlGetNsProp(c_element, c_name, c_href) + if c_result is NULL: + return None + try: + result = funicode(c_result) + finally: + tree.xmlFree(c_result) + return result + +cdef object _getNodeAttributeValue(xmlNode* c_node, key, default): + ns, tag = _getNsTag(key) + c_href = NULL if ns is None else _xcstr(ns) + c_result = tree.xmlGetNsProp(c_node, _xcstr(tag), c_href) + if c_result is NULL: + # XXX free namespace that is not in use..? 
+ return default + try: + result = funicode(c_result) + finally: + tree.xmlFree(c_result) + return result + +cdef inline object _getAttributeValue(_Element element, key, default): + return _getNodeAttributeValue(element._c_node, key, default) + +cdef int _setAttributeValue(_Element element, key, value) except -1: + cdef const_xmlChar* c_value + cdef xmlNs* c_ns + ns, tag = _getNsTag(key) + is_html = element._doc._parser._for_html + if not is_html: + _attributeValidOrRaise(tag) + c_tag = _xcstr(tag) + if value is None and is_html: + c_value = NULL + else: + if isinstance(value, QName): + value = _resolveQNameText(element, value) + else: + value = _utf8(value) + c_value = _xcstr(value) + if ns is None: + c_ns = NULL + else: + c_ns = element._doc._findOrBuildNodeNs(element._c_node, _xcstr(ns), NULL, 1) + tree.xmlSetNsProp(element._c_node, c_ns, c_tag, c_value) + return 0 + +cdef int _delAttribute(_Element element, key) except -1: + ns, tag = _getNsTag(key) + c_href = NULL if ns is None else _xcstr(ns) + if _delAttributeFromNsName(element._c_node, c_href, _xcstr(tag)): + raise KeyError, key + return 0 + +cdef int _delAttributeFromNsName(xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name) noexcept: + c_attr = tree.xmlHasNsProp(c_node, c_name, c_href) + if c_attr is NULL: + # XXX free namespace that is not in use..? + return -1 + tree.xmlRemoveProp(c_attr) + return 0 + +cdef list _collectAttributes(xmlNode* c_node, int collecttype): + """Collect all attributes of a node in a list. Depending on collecttype, + it collects either the name (1), the value (2) or the name-value tuples. + """ + cdef Py_ssize_t count + c_attr = c_node.properties + count = 0 + while c_attr is not NULL: + if c_attr.type == tree.XML_ATTRIBUTE_NODE: + count += 1 + c_attr = c_attr.next + + if not count: + return [] + + attributes = [None] * count + c_attr = c_node.properties + count = 0 + while c_attr is not NULL: + if c_attr.type == tree.XML_ATTRIBUTE_NODE: + if collecttype == 1: + item = _namespacedName(c_attr) + elif collecttype == 2: + item = _attributeValue(c_node, c_attr) + else: + item = (_namespacedName(c_attr), + _attributeValue(c_node, c_attr)) + attributes[count] = item + count += 1 + c_attr = c_attr.next + return attributes + +cdef object __RE_XML_ENCODING = re.compile( + r'^(<\?xml[^>]+)\s+encoding\s*=\s*["\'][^"\']*["\'](\s*\?>|)', re.U) + +cdef object __REPLACE_XML_ENCODING = __RE_XML_ENCODING.sub +cdef object __HAS_XML_ENCODING = __RE_XML_ENCODING.match + +cdef object _stripEncodingDeclaration(object xml_string): + # this is a hack to remove the XML encoding declaration from unicode + return __REPLACE_XML_ENCODING(r'\g<1>\g<2>', xml_string) + +cdef bint _hasEncodingDeclaration(object xml_string) except -1: + # check if a (unicode) string has an XML encoding declaration + return __HAS_XML_ENCODING(xml_string) is not None + +cdef inline bint _hasText(xmlNode* c_node) noexcept: + return c_node is not NULL and _textNodeOrSkip(c_node.children) is not NULL + +cdef inline bint _hasTail(xmlNode* c_node) noexcept: + return c_node is not NULL and _textNodeOrSkip(c_node.next) is not NULL + +cdef inline bint _hasNonWhitespaceTail(xmlNode* c_node) except -1: + return _hasNonWhitespaceText(c_node, tail=True) + +cdef bint _hasNonWhitespaceText(xmlNode* c_node, bint tail=False) except -1: + c_text_node = c_node and _textNodeOrSkip(c_node.next if tail else c_node.children) + if c_text_node is NULL: + return False + while c_text_node is not NULL: + if c_text_node.content[0] != c'\0' and not 
_collectText(c_text_node).isspace(): + return True + c_text_node = _textNodeOrSkip(c_text_node.next) + return False + +cdef unicode _collectText(xmlNode* c_node): + """Collect all text nodes and return them as a unicode string. + + Start collecting at c_node. + + If there was no text to collect, return None + """ + cdef Py_ssize_t scount + cdef xmlChar* c_text + cdef xmlNode* c_node_cur + # check for multiple text nodes + scount = 0 + c_text = NULL + c_node_cur = c_node = _textNodeOrSkip(c_node) + while c_node_cur is not NULL: + if c_node_cur.content[0] != c'\0': + c_text = c_node_cur.content + scount += 1 + c_node_cur = _textNodeOrSkip(c_node_cur.next) + + # handle two most common cases first + if c_text is NULL: + return '' if scount > 0 else None + if scount == 1: + return funicode(c_text) + + # the rest is not performance critical anymore + result = b'' + while c_node is not NULL: + result += c_node.content + c_node = _textNodeOrSkip(c_node.next) + return funicode(result) + +cdef void _removeText(xmlNode* c_node) noexcept: + """Remove all text nodes. + + Start removing at c_node. + """ + cdef xmlNode* c_next + c_node = _textNodeOrSkip(c_node) + while c_node is not NULL: + c_next = _textNodeOrSkip(c_node.next) + tree.xmlUnlinkNode(c_node) + tree.xmlFreeNode(c_node) + c_node = c_next + +cdef xmlNode* _createTextNode(xmlDoc* doc, value) except NULL: + cdef xmlNode* c_text_node + if isinstance(value, CDATA): + c_text_node = tree.xmlNewCDataBlock( + doc, _xcstr((value)._utf8_data), + python.PyBytes_GET_SIZE((value)._utf8_data)) + else: + text = _utf8(value) + c_text_node = tree.xmlNewDocText(doc, _xcstr(text)) + if not c_text_node: + raise MemoryError() + return c_text_node + +cdef int _setNodeText(xmlNode* c_node, value) except -1: + # remove all text nodes at the start first + _removeText(c_node.children) + if value is None: + return 0 + # now add new text node with value at start + c_text_node = _createTextNode(c_node.doc, value) + if c_node.children is NULL: + tree.xmlAddChild(c_node, c_text_node) + else: + tree.xmlAddPrevSibling(c_node.children, c_text_node) + return 0 + +cdef int _setTailText(xmlNode* c_node, value) except -1: + # remove all text nodes at the start first + _removeText(c_node.next) + if value is None: + return 0 + # now append new text node with value + c_text_node = _createTextNode(c_node.doc, value) + tree.xmlAddNextSibling(c_node, c_text_node) + return 0 + +cdef bytes _resolveQNameText(_Element element, value): + cdef xmlNs* c_ns + ns, tag = _getNsTag(value) + if ns is None: + return tag + else: + c_ns = element._doc._findOrBuildNodeNs( + element._c_node, _xcstr(ns), NULL, 0) + return python.PyBytes_FromFormat('%s:%s', c_ns.prefix, _cstr(tag)) + +cdef inline bint _hasChild(xmlNode* c_node) noexcept: + return c_node is not NULL and _findChildForwards(c_node, 0) is not NULL + +cdef inline Py_ssize_t _countElements(xmlNode* c_node) noexcept: + "Counts the elements within the following siblings and the node itself." + cdef Py_ssize_t count + count = 0 + while c_node is not NULL: + if _isElement(c_node): + count += 1 + c_node = c_node.next + return count + +cdef int _findChildSlice( + slice sliceobject, xmlNode* c_parent, + xmlNode** c_start_node, Py_ssize_t* c_step, Py_ssize_t* c_length) except -1: + """Resolve a children slice. + + Returns the start node, step size and the slice length in the + pointer arguments. 
+ """ + cdef Py_ssize_t start = 0, stop = 0, childcount + childcount = _countElements(c_parent.children) + if childcount == 0: + c_start_node[0] = NULL + c_length[0] = 0 + if sliceobject.step is None: + c_step[0] = 1 + else: + python._PyEval_SliceIndex(sliceobject.step, c_step) + return 0 + python.PySlice_GetIndicesEx( + sliceobject, childcount, &start, &stop, c_step, c_length) + if start > childcount // 2: + c_start_node[0] = _findChildBackwards(c_parent, childcount - start - 1) + else: + c_start_node[0] = _findChild(c_parent, start) + return 0 + +cdef bint _isFullSlice(slice sliceobject) except -1: + """Conservative guess if this slice is a full slice as in ``s[:]``. + """ + cdef Py_ssize_t step = 0 + if sliceobject is None: + return 0 + if sliceobject.start is None and \ + sliceobject.stop is None: + if sliceobject.step is None: + return 1 + python._PyEval_SliceIndex(sliceobject.step, &step) + if step == 1: + return 1 + return 0 + return 0 + +cdef _collectChildren(_Element element): + cdef xmlNode* c_node + cdef list result = [] + c_node = element._c_node.children + if c_node is not NULL: + if not _isElement(c_node): + c_node = _nextElement(c_node) + while c_node is not NULL: + result.append(_elementFactory(element._doc, c_node)) + c_node = _nextElement(c_node) + return result + +cdef inline xmlNode* _findChild(xmlNode* c_node, Py_ssize_t index) noexcept: + if index < 0: + return _findChildBackwards(c_node, -index - 1) + else: + return _findChildForwards(c_node, index) + +cdef inline xmlNode* _findChildForwards(xmlNode* c_node, Py_ssize_t index) noexcept: + """Return child element of c_node with index, or return NULL if not found. + """ + cdef xmlNode* c_child + cdef Py_ssize_t c + c_child = c_node.children + c = 0 + while c_child is not NULL: + if _isElement(c_child): + if c == index: + return c_child + c += 1 + c_child = c_child.next + return NULL + +cdef inline xmlNode* _findChildBackwards(xmlNode* c_node, Py_ssize_t index) noexcept: + """Return child element of c_node with index, or return NULL if not found. + Search from the end. + """ + cdef xmlNode* c_child + cdef Py_ssize_t c + c_child = c_node.last + c = 0 + while c_child is not NULL: + if _isElement(c_child): + if c == index: + return c_child + c += 1 + c_child = c_child.prev + return NULL + +cdef inline xmlNode* _textNodeOrSkip(xmlNode* c_node) noexcept nogil: + """Return the node if it's a text node. Skip over ignorable nodes in a + series of text nodes. Return NULL if a non-ignorable node is found. + + This is used to skip over XInclude nodes when collecting adjacent text + nodes. + """ + while c_node is not NULL: + if c_node.type == tree.XML_TEXT_NODE or \ + c_node.type == tree.XML_CDATA_SECTION_NODE: + return c_node + elif c_node.type == tree.XML_XINCLUDE_START or \ + c_node.type == tree.XML_XINCLUDE_END: + c_node = c_node.next + else: + return NULL + return NULL + +cdef inline xmlNode* _nextElement(xmlNode* c_node) noexcept: + """Given a node, find the next sibling that is an element. + """ + if c_node is NULL: + return NULL + c_node = c_node.next + while c_node is not NULL: + if _isElement(c_node): + return c_node + c_node = c_node.next + return NULL + +cdef inline xmlNode* _previousElement(xmlNode* c_node) noexcept: + """Given a node, find the next sibling that is an element. 
+ """ + if c_node is NULL: + return NULL + c_node = c_node.prev + while c_node is not NULL: + if _isElement(c_node): + return c_node + c_node = c_node.prev + return NULL + +cdef inline xmlNode* _parentElement(xmlNode* c_node) noexcept: + "Given a node, find the parent element." + if c_node is NULL or not _isElement(c_node): + return NULL + c_node = c_node.parent + if c_node is NULL or not _isElement(c_node): + return NULL + return c_node + +cdef inline bint _tagMatches(xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name) noexcept: + """Tests if the node matches namespace URI and tag name. + + A node matches if it matches both c_href and c_name. + + A node matches c_href if any of the following is true: + * c_href is NULL + * its namespace is NULL and c_href is the empty string + * its namespace string equals the c_href string + + A node matches c_name if any of the following is true: + * c_name is NULL + * its name string equals the c_name string + """ + if c_node is NULL: + return 0 + if c_node.type != tree.XML_ELEMENT_NODE: + # not an element, only succeed if we match everything + return c_name is NULL and c_href is NULL + if c_name is NULL: + if c_href is NULL: + # always match + return 1 + else: + c_node_href = _getNs(c_node) + if c_node_href is NULL: + return c_href[0] == c'\0' + else: + return tree.xmlStrcmp(c_node_href, c_href) == 0 + elif c_href is NULL: + if _getNs(c_node) is not NULL: + return 0 + return c_node.name == c_name or tree.xmlStrcmp(c_node.name, c_name) == 0 + elif c_node.name == c_name or tree.xmlStrcmp(c_node.name, c_name) == 0: + c_node_href = _getNs(c_node) + if c_node_href is NULL: + return c_href[0] == c'\0' + else: + return tree.xmlStrcmp(c_node_href, c_href) == 0 + else: + return 0 + +cdef inline bint _tagMatchesExactly(xmlNode* c_node, qname* c_qname) noexcept: + """Tests if the node matches namespace URI and tag name. + + This differs from _tagMatches() in that it does not consider a + NULL value in qname.href a wildcard, and that it expects the c_name + to be taken from the doc dict, i.e. it only compares the names by + address. + + A node matches if it matches both href and c_name of the qname. + + A node matches c_href if any of the following is true: + * its namespace is NULL and c_href is the empty string + * its namespace string equals the c_href string + + A node matches c_name if any of the following is true: + * c_name is NULL + * its name string points to the same address (!) as c_name + """ + return _nsTagMatchesExactly(_getNs(c_node), c_node.name, c_qname) + +cdef inline bint _nsTagMatchesExactly(const_xmlChar* c_node_href, + const_xmlChar* c_node_name, + qname* c_qname) noexcept: + """Tests if name and namespace URI match those of c_qname. + + This differs from _tagMatches() in that it does not consider a + NULL value in qname.href a wildcard, and that it expects the c_name + to be taken from the doc dict, i.e. it only compares the names by + address. + + A node matches if it matches both href and c_name of the qname. + + A node matches c_href if any of the following is true: + * its namespace is NULL and c_href is the empty string + * its namespace string equals the c_href string + + A node matches c_name if any of the following is true: + * c_name is NULL + * its name string points to the same address (!) 
as c_name + """ + cdef char* c_href + if c_qname.c_name is not NULL and c_qname.c_name is not c_node_name: + return 0 + if c_qname.href is NULL: + return 1 + c_href = python.__cstr(c_qname.href) + if c_href[0] == b'\0': + return c_node_href is NULL or c_node_href[0] == b'\0' + elif c_node_href is NULL: + return 0 + else: + return tree.xmlStrcmp(c_href, c_node_href) == 0 + +cdef Py_ssize_t _mapTagsToQnameMatchArray(xmlDoc* c_doc, list ns_tags, + qname* c_ns_tags, bint force_into_dict) except -1: + """Map a sequence of (name, namespace) pairs to a qname array for efficient + matching with _tagMatchesExactly() above. + + Note that each qname struct in the array owns its href byte string object + if it is not NULL. + """ + cdef Py_ssize_t count = 0, i + cdef bytes ns, tag + for ns, tag in ns_tags: + if tag is None: + c_tag = NULL + elif force_into_dict: + c_tag = tree.xmlDictLookup(c_doc.dict, _xcstr(tag), len(tag)) + if c_tag is NULL: + # clean up before raising the error + for i in xrange(count): + cpython.ref.Py_XDECREF(c_ns_tags[i].href) + raise MemoryError() + else: + c_tag = tree.xmlDictExists(c_doc.dict, _xcstr(tag), len(tag)) + if c_tag is NULL: + # not in the dict => not in the document + continue + c_ns_tags[count].c_name = c_tag + if ns is None: + c_ns_tags[count].href = NULL + else: + cpython.ref.Py_INCREF(ns) # keep an owned reference! + c_ns_tags[count].href = ns + count += 1 + return count + +cdef int _removeNode(_Document doc, xmlNode* c_node) except -1: + """Unlink and free a node and subnodes if possible. Otherwise, make sure + it's self-contained. + """ + cdef xmlNode* c_next + c_next = c_node.next + tree.xmlUnlinkNode(c_node) + _moveTail(c_next, c_node) + if not attemptDeallocation(c_node): + # make namespaces absolute + moveNodeToDocument(doc, c_node.doc, c_node) + return 0 + +cdef int _removeSiblings(xmlNode* c_element, tree.xmlElementType node_type, bint with_tail) except -1: + cdef xmlNode* c_node + cdef xmlNode* c_next + c_node = c_element.next + while c_node is not NULL: + c_next = _nextElement(c_node) + if c_node.type == node_type: + if with_tail: + _removeText(c_node.next) + tree.xmlUnlinkNode(c_node) + attemptDeallocation(c_node) + c_node = c_next + c_node = c_element.prev + while c_node is not NULL: + c_next = _previousElement(c_node) + if c_node.type == node_type: + if with_tail: + _removeText(c_node.next) + tree.xmlUnlinkNode(c_node) + attemptDeallocation(c_node) + c_node = c_next + return 0 + +cdef void _moveTail(xmlNode* c_tail, xmlNode* c_target) noexcept: + cdef xmlNode* c_next + # tail support: look for any text nodes trailing this node and + # move them too + c_tail = _textNodeOrSkip(c_tail) + while c_tail is not NULL: + c_next = _textNodeOrSkip(c_tail.next) + c_target = tree.xmlAddNextSibling(c_target, c_tail) + c_tail = c_next + +cdef int _copyTail(xmlNode* c_tail, xmlNode* c_target) except -1: + cdef xmlNode* c_new_tail + # tail copying support: look for any text nodes trailing this node and + # copy it to the target node + c_tail = _textNodeOrSkip(c_tail) + while c_tail is not NULL: + if c_target.doc is not c_tail.doc: + c_new_tail = tree.xmlDocCopyNode(c_tail, c_target.doc, 0) + else: + c_new_tail = tree.xmlCopyNode(c_tail, 0) + if c_new_tail is NULL: + raise MemoryError() + c_target = tree.xmlAddNextSibling(c_target, c_new_tail) + c_tail = _textNodeOrSkip(c_tail.next) + return 0 + +cdef int _copyNonElementSiblings(xmlNode* c_node, xmlNode* c_target) except -1: + cdef xmlNode* c_copy + cdef xmlNode* c_sibling = c_node + while c_sibling.prev != NULL 
and \ + (c_sibling.prev.type == tree.XML_PI_NODE or + c_sibling.prev.type == tree.XML_COMMENT_NODE or + c_sibling.prev.type == tree.XML_DTD_NODE): + c_sibling = c_sibling.prev + while c_sibling != c_node: + if c_sibling.type == tree.XML_DTD_NODE: + c_copy = _copyDtd(c_sibling) + if c_sibling == c_node.doc.intSubset: + c_target.doc.intSubset = c_copy + else: # c_sibling == c_node.doc.extSubset + c_target.doc.extSubset = c_copy + else: + c_copy = tree.xmlDocCopyNode(c_sibling, c_target.doc, 1) + if c_copy is NULL: + raise MemoryError() + tree.xmlAddPrevSibling(c_target, c_copy) + c_sibling = c_sibling.next + while c_sibling.next != NULL and \ + (c_sibling.next.type == tree.XML_PI_NODE or + c_sibling.next.type == tree.XML_COMMENT_NODE): + c_sibling = c_sibling.next + c_copy = tree.xmlDocCopyNode(c_sibling, c_target.doc, 1) + if c_copy is NULL: + raise MemoryError() + tree.xmlAddNextSibling(c_target, c_copy) + +cdef int _deleteSlice(_Document doc, xmlNode* c_node, + Py_ssize_t count, Py_ssize_t step) except -1: + """Delete slice, ``count`` items starting with ``c_node`` with a step + width of ``step``. + """ + cdef xmlNode* c_next + cdef Py_ssize_t c, i + cdef _node_to_node_function next_element + if c_node is NULL: + return 0 + if step > 0: + next_element = _nextElement + else: + step = -step + next_element = _previousElement + # now start deleting nodes + c = 0 + c_next = c_node + while c_node is not NULL and c < count: + for i in range(step): + c_next = next_element(c_next) + if c_next is NULL: + break + _removeNode(doc, c_node) + c += 1 + c_node = c_next + return 0 + +cdef int _replaceSlice(_Element parent, xmlNode* c_node, + Py_ssize_t slicelength, Py_ssize_t step, + bint left_to_right, elements) except -1: + """Replace the slice of ``count`` elements starting at ``c_node`` with + positive step width ``step`` by the Elements in ``elements``. The + direction is given by the boolean argument ``left_to_right``. + + ``c_node`` may be NULL to indicate the end of the children list. + """ + cdef xmlNode* c_orig_neighbour + cdef xmlNode* c_next + cdef xmlDoc* c_source_doc + cdef _Element element + cdef Py_ssize_t seqlength, i, c + cdef _node_to_node_function next_element + assert step > 0 + if left_to_right: + next_element = _nextElement + else: + next_element = _previousElement + + if not isinstance(elements, (list, tuple)): + elements = list(elements) + + if step != 1 or not left_to_right: + # *replacing* children stepwise with list => check size! + seqlength = len(elements) + if seqlength != slicelength: + raise ValueError, f"attempt to assign sequence of size {seqlength} " \ + f"to extended slice of size {slicelength}" + + if c_node is NULL: + # no children yet => add all elements straight away + if left_to_right: + for element in elements: + assert element is not None, "Node must not be None" + _appendChild(parent, element) + else: + for element in elements: + assert element is not None, "Node must not be None" + _prependChild(parent, element) + return 0 + + # remove the elements first as some might be re-added + if left_to_right: + # L->R, remember left neighbour + c_orig_neighbour = _previousElement(c_node) + else: + # R->L, remember right neighbour + c_orig_neighbour = _nextElement(c_node) + + # We remove the original slice elements one by one. Since we hold + # a Python reference to all elements that we will insert, it is + # safe to let _removeNode() try (and fail) to free them even if + # the element itself or one of its descendents will be reinserted. 
+ c = 0 + c_next = c_node + while c_node is not NULL and c < slicelength: + for i in range(step): + c_next = next_element(c_next) + if c_next is NULL: + break + _removeNode(parent._doc, c_node) + c += 1 + c_node = c_next + + # make sure each element is inserted only once + elements = iter(elements) + + # find the first node right of the new insertion point + if left_to_right: + if c_orig_neighbour is not NULL: + c_node = next_element(c_orig_neighbour) + else: + # before the first element + c_node = _findChildForwards(parent._c_node, 0) + elif c_orig_neighbour is NULL: + # at the end, but reversed stepping + # append one element and go to the next insertion point + for element in elements: + assert element is not None, "Node must not be None" + _appendChild(parent, element) + c_node = element._c_node + if slicelength > 0: + slicelength -= 1 + for i in range(1, step): + c_node = next_element(c_node) + if c_node is NULL: + break + break + else: + c_node = c_orig_neighbour + + if left_to_right: + # adjust step size after removing slice as we are not stepping + # over the newly inserted elements + step -= 1 + + # now insert elements where we removed them + if c_node is not NULL: + for element in elements: + assert element is not None, "Node must not be None" + _assertValidNode(element) + # move element and tail over + c_source_doc = element._c_node.doc + c_next = element._c_node.next + tree.xmlAddPrevSibling(c_node, element._c_node) + _moveTail(c_next, element._c_node) + + # integrate element into new document + moveNodeToDocument(parent._doc, c_source_doc, element._c_node) + + # stop at the end of the slice + if slicelength > 0: + slicelength -= 1 + for i in range(step): + c_node = next_element(c_node) + if c_node is NULL: + break + if c_node is NULL: + break + else: + # everything inserted + return 0 + + # append the remaining elements at the respective end + if left_to_right: + for element in elements: + assert element is not None, "Node must not be None" + _assertValidNode(element) + _appendChild(parent, element) + else: + for element in elements: + assert element is not None, "Node must not be None" + _assertValidNode(element) + _prependChild(parent, element) + + return 0 + + +cdef int _linkChild(xmlNode* c_parent, xmlNode* c_node) except -1: + """Adaptation of 'xmlAddChild()' that deep-fix the document links iteratively. + """ + assert _isElement(c_node) + c_node.parent = c_parent + if c_parent.children is NULL: + c_parent.children = c_parent.last = c_node + else: + c_node.prev = c_parent.last + c_parent.last.next = c_node + c_parent.last = c_node + + _setTreeDoc(c_node, c_parent.doc) + return 0 + + +cdef int _appendChild(_Element parent, _Element child) except -1: + """Append a new child to a parent element. + """ + c_node = child._c_node + c_source_doc = c_node.doc + # prevent cycles + if _isAncestorOrSame(c_node, parent._c_node): + raise ValueError("cannot append parent to itself") + # store possible text node + c_next = c_node.next + # move node itself + tree.xmlUnlinkNode(c_node) + # do not call xmlAddChild() here since it would deep-traverse the tree + _linkChild(parent._c_node, c_node) + _moveTail(c_next, c_node) + # uh oh, elements may be pointing to different doc when + # parent element has moved; change them too.. + moveNodeToDocument(parent._doc, c_source_doc, c_node) + return 0 + +cdef int _prependChild(_Element parent, _Element child) except -1: + """Prepend a new child to a parent element. 
+ """ + c_node = child._c_node + c_source_doc = c_node.doc + # prevent cycles + if _isAncestorOrSame(c_node, parent._c_node): + raise ValueError("cannot append parent to itself") + # store possible text node + c_next = c_node.next + # move node itself + c_child = _findChildForwards(parent._c_node, 0) + if c_child is NULL: + tree.xmlUnlinkNode(c_node) + # do not call xmlAddChild() here since it would deep-traverse the tree + _linkChild(parent._c_node, c_node) + else: + tree.xmlAddPrevSibling(c_child, c_node) + _moveTail(c_next, c_node) + # uh oh, elements may be pointing to different doc when + # parent element has moved; change them too.. + moveNodeToDocument(parent._doc, c_source_doc, c_node) + return 0 + +cdef int _appendSibling(_Element element, _Element sibling) except -1: + """Add a new sibling behind an element. + """ + return _addSibling(element, sibling, as_next=True) + +cdef int _prependSibling(_Element element, _Element sibling) except -1: + """Add a new sibling before an element. + """ + return _addSibling(element, sibling, as_next=False) + +cdef int _addSibling(_Element element, _Element sibling, bint as_next) except -1: + c_node = sibling._c_node + c_source_doc = c_node.doc + # prevent cycles + if _isAncestorOrSame(c_node, element._c_node): + if element._c_node is c_node: + return 0 # nothing to do + raise ValueError("cannot add ancestor as sibling, please break cycle first") + # store possible text node + c_next = c_node.next + # move node itself + if as_next: + # must insert after any tail text + c_next_node = _nextElement(element._c_node) + if c_next_node is NULL: + c_next_node = element._c_node + while c_next_node.next: + c_next_node = c_next_node.next + tree.xmlAddNextSibling(c_next_node, c_node) + else: + tree.xmlAddPrevSibling(c_next_node, c_node) + else: + tree.xmlAddPrevSibling(element._c_node, c_node) + _moveTail(c_next, c_node) + # uh oh, elements may be pointing to different doc when + # parent element has moved; change them too.. + moveNodeToDocument(element._doc, c_source_doc, c_node) + return 0 + +cdef inline bint isutf8(const_xmlChar* s) noexcept: + cdef xmlChar c = s[0] + while c != c'\0': + if c & 0x80: + return True + s += 1 + c = s[0] + return False + +cdef bint isutf8l(const_xmlChar* s, size_t length) noexcept: + """ + Search for non-ASCII characters in the string, knowing its length in advance. + """ + cdef unsigned int i + cdef unsigned long non_ascii_mask + cdef const unsigned long *lptr = s + + cdef const unsigned long *end = lptr + length // sizeof(unsigned long) + if length >= sizeof(non_ascii_mask): + # Build constant 0x80808080... mask (and let the C compiler fold it). + non_ascii_mask = 0 + for i in range(sizeof(non_ascii_mask) // 2): + non_ascii_mask = (non_ascii_mask << 16) | 0x8080 + + # Advance to long-aligned character before we start reading longs. + while (s) % sizeof(unsigned long) and s < end: + if s[0] & 0x80: + return True + s += 1 + + # Read one long at a time + lptr = s + while lptr < end: + if lptr[0] & non_ascii_mask: + return True + lptr += 1 + s = lptr + + while s < (end + length % sizeof(unsigned long)): + if s[0] & 0x80: + return True + s += 1 + + return False + +cdef int _is_valid_xml_ascii(bytes pystring) except -1: + """Check if a string is XML ascii content.""" + cdef signed char ch + # When ch is a *signed* char, non-ascii characters are negative integers + # and xmlIsChar_ch does not accept them. 
+ for ch in pystring: + if not tree.xmlIsChar_ch(ch): + return 0 + return 1 + +cdef bint _is_valid_xml_utf8(bytes pystring) except -1: + """Check if a string is like valid UTF-8 XML content.""" + cdef const_xmlChar* s = _xcstr(pystring) + cdef const_xmlChar* c_end = s + len(pystring) + cdef unsigned long next3 = 0 + if s < c_end - 2: + next3 = (s[0] << 8) | (s[1]) + + while s < c_end - 2: + next3 = 0x00ffffff & ((next3 << 8) | s[2]) + if s[0] & 0x80: + # 0xefbfbe and 0xefbfbf are utf-8 encodings of + # forbidden characters \ufffe and \uffff + if next3 == 0x00efbfbe or next3 == 0x00efbfbf: + return 0 + # 0xeda080 and 0xedbfbf are utf-8 encodings of + # \ud800 and \udfff. Anything between them (inclusive) + # is forbidden, because they are surrogate blocks in utf-16. + if 0x00eda080 <= next3 <= 0x00edbfbf: + return 0 + elif not tree.xmlIsChar_ch(s[0]): + return 0 # invalid ascii char + s += 1 + + while s < c_end: + if not s[0] & 0x80 and not tree.xmlIsChar_ch(s[0]): + return 0 # invalid ascii char + s += 1 + + return 1 + +cdef inline unicode funicodeOrNone(const_xmlChar* s): + return funicode(s) if s is not NULL else None + +cdef inline unicode funicodeOrEmpty(const_xmlChar* s): + return funicode(s) if s is not NULL else '' + +cdef unicode funicode(const_xmlChar* s): + return s.decode('UTF-8') + +cdef bytes _utf8(object s): + """Test if a string is valid user input and encode it to UTF-8. + Reject all bytes/unicode input that contains non-XML characters. + Reject all bytes input that contains non-ASCII characters. + """ + cdef int valid + cdef bytes utf8_string + if isinstance(s, unicode): + utf8_string = (s).encode('utf8') + valid = _is_valid_xml_utf8(utf8_string) + elif isinstance(s, (bytes, bytearray)): + utf8_string = s if type(s) is bytes else bytes(s) + valid = _is_valid_xml_ascii(utf8_string) + else: + raise TypeError("Argument must be bytes or unicode, got '%.200s'" % type(s).__name__) + if not valid: + raise ValueError( + "All strings must be XML compatible: Unicode or ASCII, no NULL bytes or control characters") + return utf8_string + + +cdef bytes _utf8orNone(object s): + return _utf8(s) if s is not None else None + + +cdef enum: + NO_FILE_PATH = 0 + ABS_UNIX_FILE_PATH = 1 + ABS_WIN_FILE_PATH = 2 + REL_FILE_PATH = 3 + + +cdef bint _isFilePath(const_xmlChar* c_path) noexcept: + "simple heuristic to see if a path is a filename" + cdef xmlChar c + # test if it looks like an absolute Unix path or a Windows network path + if c_path[0] == c'/': + return ABS_UNIX_FILE_PATH + + # test if it looks like an absolute Windows path or URL + if c'a' <= c_path[0] <= c'z' or c'A' <= c_path[0] <= c'Z': + c_path += 1 + if c_path[0] == c':' and c_path[1] in b'\0\\': + return ABS_WIN_FILE_PATH # C: or C:\... + + # test if it looks like a URL with scheme:// + while c'a' <= c_path[0] <= c'z' or c'A' <= c_path[0] <= c'Z': + c_path += 1 + if c_path[0] == c':' and c_path[1] == c'/' and c_path[2] == c'/': + return NO_FILE_PATH + + # assume it's a relative path + return REL_FILE_PATH + + +cdef object _getFSPathOrObject(object obj): + """ + Get the __fspath__ attribute of an object if it exists. + Otherwise, the original object is returned. + """ + if _isString(obj): + return obj + try: + return python.PyOS_FSPath(obj) + except TypeError: + return obj + + +cdef object _encodeFilename(object filename): + """Make sure a filename is 8-bit encoded (or None). 
+ """ + if filename is None: + return None + elif isinstance(filename, bytes): + return filename + elif isinstance(filename, unicode): + filename8 = (filename).encode('utf8') + if _isFilePath(filename8): + try: + return python.PyUnicode_AsEncodedString( + filename, _C_FILENAME_ENCODING, NULL) + except UnicodeEncodeError: + pass + return filename8 + else: + raise TypeError("Argument must be string or unicode.") + +cdef object _decodeFilename(const_xmlChar* c_path): + """Make the filename a unicode string if we are in Py3. + """ + return _decodeFilenameWithLength(c_path, tree.xmlStrlen(c_path)) + +cdef object _decodeFilenameWithLength(const_xmlChar* c_path, size_t c_len): + """Make the filename a unicode string if we are in Py3. + """ + if _isFilePath(c_path): + try: + return python.PyUnicode_Decode( + c_path, c_len, _C_FILENAME_ENCODING, NULL) + except UnicodeDecodeError: + pass + try: + return (c_path)[:c_len].decode('UTF-8') + except UnicodeDecodeError: + # this is a stupid fallback, but it might still work... + return (c_path)[:c_len].decode('latin-1', 'replace') + +cdef object _encodeFilenameUTF8(object filename): + """Recode filename as UTF-8. Tries ASCII, local filesystem encoding and + UTF-8 as source encoding. + """ + cdef char* c_filename + if filename is None: + return None + elif isinstance(filename, bytes): + if not isutf8l(filename, len(filename)): + # plain ASCII! + return filename + c_filename = _cstr(filename) + try: + # try to decode with default encoding + filename = python.PyUnicode_Decode( + c_filename, len(filename), + _C_FILENAME_ENCODING, NULL) + except UnicodeDecodeError as decode_exc: + try: + # try if it's proper UTF-8 + (filename).decode('utf8') + return filename + except UnicodeDecodeError: + raise decode_exc # otherwise re-raise original exception + if isinstance(filename, unicode): + return (filename).encode('utf8') + else: + raise TypeError("Argument must be string or unicode.") + +cdef tuple _getNsTag(tag): + """Given a tag, find namespace URI and tag name. + Return None for NS uri if no namespace URI provided. + """ + return __getNsTag(tag, 0) + +cdef tuple _getNsTagWithEmptyNs(tag): + """Given a tag, find namespace URI and tag name. Return None for NS uri + if no namespace URI provided, or the empty string if namespace + part is '{}'. 
+ """ + return __getNsTag(tag, 1) + +cdef tuple __getNsTag(tag, bint empty_ns): + cdef char* c_tag + cdef char* c_ns_end + cdef Py_ssize_t taglen + cdef Py_ssize_t nslen + cdef bytes ns = None + # _isString() is much faster than isinstance() + if not _isString(tag) and isinstance(tag, QName): + tag = (tag).text + tag = _utf8(tag) + c_tag = _cstr(tag) + if c_tag[0] == c'{': + c_tag += 1 + c_ns_end = cstring_h.strchr(c_tag, c'}') + if c_ns_end is NULL: + raise ValueError, "Invalid tag name" + nslen = c_ns_end - c_tag + taglen = python.PyBytes_GET_SIZE(tag) - nslen - 2 + if taglen == 0: + raise ValueError, "Empty tag name" + if nslen > 0: + ns = c_tag[:nslen] + elif empty_ns: + ns = b'' + tag = c_ns_end[1:taglen+1] + elif python.PyBytes_GET_SIZE(tag) == 0: + raise ValueError, "Empty tag name" + return ns, tag + +cdef inline int _pyXmlNameIsValid(name_utf8): + return _xmlNameIsValid(_xcstr(name_utf8)) and b':' not in name_utf8 + +cdef inline int _pyHtmlNameIsValid(name_utf8): + return _htmlNameIsValid(_xcstr(name_utf8)) + +cdef inline int _xmlNameIsValid(const_xmlChar* c_name) noexcept: + return tree.xmlValidateNameValue(c_name) + +cdef int _htmlNameIsValid(const_xmlChar* c_name) noexcept: + if c_name is NULL or c_name[0] == c'\0': + return 0 + while c_name[0] != c'\0': + if c_name[0] in b'&<>/"\'\t\n\x0B\x0C\r ': + return 0 + c_name += 1 + return 1 + +cdef bint _characterReferenceIsValid(const_xmlChar* c_name) noexcept: + cdef bint is_hex + if c_name[0] == c'x': + c_name += 1 + is_hex = 1 + else: + is_hex = 0 + if c_name[0] == c'\0': + return 0 + while c_name[0] != c'\0': + if c_name[0] < c'0' or c_name[0] > c'9': + if not is_hex: + return 0 + if not (c'a' <= c_name[0] <= c'f'): + if not (c'A' <= c_name[0] <= c'F'): + return 0 + c_name += 1 + return 1 + +cdef int _tagValidOrRaise(tag_utf) except -1: + if not _pyXmlNameIsValid(tag_utf): + raise ValueError(f"Invalid tag name {(tag_utf).decode('utf8')!r}") + return 0 + +cdef int _htmlTagValidOrRaise(tag_utf) except -1: + if not _pyHtmlNameIsValid(tag_utf): + raise ValueError(f"Invalid HTML tag name {(tag_utf).decode('utf8')!r}") + return 0 + +cdef int _attributeValidOrRaise(name_utf) except -1: + if not _pyXmlNameIsValid(name_utf): + raise ValueError(f"Invalid attribute name {(name_utf).decode('utf8')!r}") + return 0 + +cdef int _prefixValidOrRaise(tag_utf) except -1: + if not _pyXmlNameIsValid(tag_utf): + raise ValueError(f"Invalid namespace prefix {(tag_utf).decode('utf8')!r}") + return 0 + +cdef int _uriValidOrRaise(uri_utf) except -1: + cdef uri.xmlURI* c_uri = uri.xmlParseURI(_cstr(uri_utf)) + if c_uri is NULL: + raise ValueError(f"Invalid namespace URI {(uri_utf).decode('utf8')!r}") + uri.xmlFreeURI(c_uri) + return 0 + +cdef inline unicode _namespacedName(xmlNode* c_node): + return _namespacedNameFromNsName(_getNs(c_node), c_node.name) + + +cdef unicode _namespacedNameFromNsName(const_xmlChar* c_href, const_xmlChar* c_name): + name = funicode(c_name) + if c_href is NULL: + return name + href = funicode(c_href) + return f"{{{href}}}{name}" + + +cdef _getFilenameForFile(source): + """Given a Python File or Gzip object, give filename back. + + Returns None if not a file object. 
+ """ + # urllib2 provides a geturl() method + try: + return source.geturl() + except: + pass + # file instances have a name attribute + try: + filename = source.name + if _isString(filename): + return os_path_abspath(filename) + except: + pass + # gzip file instances have a filename attribute (before Py3k) + try: + filename = source.filename + if _isString(filename): + return os_path_abspath(filename) + except: + pass + # can't determine filename + return None diff --git a/venv/lib/python3.10/site-packages/lxml/builder.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/lxml/builder.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e6a43a3fa8f2b6b43e51c8d51e1af9668d13018f Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/builder.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/lxml/builder.py b/venv/lib/python3.10/site-packages/lxml/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..cff67b0bc006f7f7c184d74f76913c3e2c1a557b --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/builder.py @@ -0,0 +1,232 @@ +# cython: language_level=2 + +# +# Element generator factory by Fredrik Lundh. +# +# Source: +# http://online.effbot.org/2006_11_01_archive.htm#et-builder +# http://effbot.python-hosting.com/file/stuff/sandbox/elementlib/builder.py +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2004 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +""" +The ``E`` Element factory for generating XML documents. +""" + + +import lxml.etree as ET +_QName = ET.QName + +from functools import partial + +try: + basestring +except NameError: + basestring = str + +try: + unicode +except NameError: + unicode = str + + +class ElementMaker: + """Element generator factory. + + Unlike the ordinary Element factory, the E factory allows you to pass in + more than just a tag and some optional attributes; you can also pass in + text and other elements. The text is added as either text or tail + attributes, and elements are inserted at the right spot. 
Some small + examples:: + + >>> from lxml import etree as ET + >>> from lxml.builder import E + + >>> ET.tostring(E("tag")) + '' + >>> ET.tostring(E("tag", "text")) + 'text' + >>> ET.tostring(E("tag", "text", key="value")) + 'text' + >>> ET.tostring(E("tag", E("subtag", "text"), "tail")) + 'texttail' + + For simple tags, the factory also allows you to write ``E.tag(...)`` instead + of ``E('tag', ...)``:: + + >>> ET.tostring(E.tag()) + '' + >>> ET.tostring(E.tag("text")) + 'text' + >>> ET.tostring(E.tag(E.subtag("text"), "tail")) + 'texttail' + + Here's a somewhat larger example; this shows how to generate HTML + documents, using a mix of prepared factory functions for inline elements, + nested ``E.tag`` calls, and embedded XHTML fragments:: + + # some common inline elements + A = E.a + I = E.i + B = E.b + + def CLASS(v): + # helper function, 'class' is a reserved word + return {'class': v} + + page = ( + E.html( + E.head( + E.title("This is a sample document") + ), + E.body( + E.h1("Hello!", CLASS("title")), + E.p("This is a paragraph with ", B("bold"), " text in it!"), + E.p("This is another paragraph, with a ", + A("link", href="http://www.python.org"), "."), + E.p("Here are some reserved characters: ."), + ET.XML("

<p>And finally, here is an embedded XHTML fragment.</p>
"), + ) + ) + ) + + print ET.tostring(page) + + Here's a prettyprinted version of the output from the above script:: + + + + This is a sample document + + +

+        <h1 class="title">Hello!</h1>
+        <p>This is a paragraph with <b>bold</b> text in it!</p>
+        <p>This is another paragraph, with a
+          <a href="http://www.python.org">link</a>.</p>
+        <p>Here are some reserved characters: &lt;spam&amp;egg&gt;.</p>
+        <p>And finally, here is an embedded XHTML fragment.</p>
+ + + + For namespace support, you can pass a namespace map (``nsmap``) + and/or a specific target ``namespace`` to the ElementMaker class:: + + >>> E = ElementMaker(namespace="http://my.ns/") + >>> print(ET.tostring( E.test )) + + + >>> E = ElementMaker(namespace="http://my.ns/", nsmap={'p':'http://my.ns/'}) + >>> print(ET.tostring( E.test )) + + """ + + def __init__(self, typemap=None, + namespace=None, nsmap=None, makeelement=None): + self._namespace = '{' + namespace + '}' if namespace is not None else None + self._nsmap = dict(nsmap) if nsmap else None + + assert makeelement is None or callable(makeelement) + self._makeelement = makeelement if makeelement is not None else ET.Element + + # initialize the default type map functions for this element factory + typemap = dict(typemap) if typemap else {} + + def add_text(elem, item): + try: + last_child = elem[-1] + except IndexError: + elem.text = (elem.text or "") + item + else: + last_child.tail = (last_child.tail or "") + item + + def add_cdata(elem, cdata): + if elem.text: + raise ValueError("Can't add a CDATA section. Element already has some text: %r" % elem.text) + elem.text = cdata + + if str not in typemap: + typemap[str] = add_text + if unicode not in typemap: + typemap[unicode] = add_text + if ET.CDATA not in typemap: + typemap[ET.CDATA] = add_cdata + + def add_dict(elem, item): + attrib = elem.attrib + for k, v in item.items(): + if isinstance(v, basestring): + attrib[k] = v + else: + attrib[k] = typemap[type(v)](None, v) + + if dict not in typemap: + typemap[dict] = add_dict + + self._typemap = typemap + + def __call__(self, tag, *children, **attrib): + typemap = self._typemap + + # We'll usually get a 'str', and the compiled type check is very fast. + if not isinstance(tag, str) and isinstance(tag, _QName): + # A QName is explicitly qualified, do not look at self._namespace. + tag = tag.text + elif self._namespace is not None and tag[0] != '{': + tag = self._namespace + tag + elem = self._makeelement(tag, nsmap=self._nsmap) + if attrib: + typemap[dict](elem, attrib) + + for item in children: + if callable(item): + item = item() + t = typemap.get(type(item)) + if t is None: + if ET.iselement(item): + elem.append(item) + continue + for basetype in type(item).__mro__: + # See if the typemap knows of any of this type's bases. + t = typemap.get(basetype) + if t is not None: + break + else: + raise TypeError("bad argument type: %s(%r)" % + (type(item).__name__, item)) + v = t(elem, item) + if v: + typemap.get(type(v))(elem, v) + + return elem + + def __getattr__(self, tag): + return partial(self, tag) + + +# create factory object +E = ElementMaker() diff --git a/venv/lib/python3.10/site-packages/lxml/classlookup.pxi b/venv/lib/python3.10/site-packages/lxml/classlookup.pxi new file mode 100644 index 0000000000000000000000000000000000000000..92d1d47a58657a7741d20f48cfe3525a66dbc722 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/classlookup.pxi @@ -0,0 +1,580 @@ +# Configurable Element class lookup + +################################################################################ +# Custom Element classes + +cdef public class ElementBase(_Element) [ type LxmlElementBaseType, + object LxmlElementBase ]: + """ElementBase(*children, attrib=None, nsmap=None, **_extra) + + The public Element class. All custom Element classes must inherit + from this one. To create an Element, use the `Element()` factory. 
+ + BIG FAT WARNING: Subclasses *must not* override __init__ or + __new__ as it is absolutely undefined when these objects will be + created or destroyed. All persistent state of Elements must be + stored in the underlying XML. If you really need to initialize + the object after creation, you can implement an ``_init(self)`` + method that will be called directly after object creation. + + Subclasses of this class can be instantiated to create a new + Element. By default, the tag name will be the class name and the + namespace will be empty. You can modify this with the following + class attributes: + + * TAG - the tag name, possibly containing a namespace in Clark + notation + + * NAMESPACE - the default namespace URI, unless provided as part + of the TAG attribute. + + * HTML - flag if the class is an HTML tag, as opposed to an XML + tag. This only applies to un-namespaced tags and defaults to + false (i.e. XML). + + * PARSER - the parser that provides the configuration for the + newly created document. Providing an HTML parser here will + default to creating an HTML element. + + In user code, the latter three are commonly inherited in class + hierarchies that implement a common namespace. + """ + def __init__(self, *children, attrib=None, nsmap=None, **_extra): + """ElementBase(*children, attrib=None, nsmap=None, **_extra) + """ + cdef bint is_html = 0 + cdef _BaseParser parser + cdef _Element last_child + # don't use normal attribute access as it might be overridden + _getattr = object.__getattribute__ + try: + namespace = _utf8(_getattr(self, 'NAMESPACE')) + except AttributeError: + namespace = None + try: + ns, tag = _getNsTag(_getattr(self, 'TAG')) + if ns is not None: + namespace = ns + except AttributeError: + tag = _utf8(_getattr(_getattr(self, '__class__'), '__name__')) + if b'.' in tag: + tag = tag.split(b'.')[-1] + try: + parser = _getattr(self, 'PARSER') + except AttributeError: + parser = None + for child in children: + if isinstance(child, _Element): + parser = (<_Element>child)._doc._parser + break + if isinstance(parser, HTMLParser): + is_html = 1 + if namespace is None: + try: + is_html = _getattr(self, 'HTML') + except AttributeError: + pass + _initNewElement(self, is_html, tag, namespace, parser, + attrib, nsmap, _extra) + last_child = None + for child in children: + if _isString(child): + if last_child is None: + _setNodeText(self._c_node, + (_collectText(self._c_node.children) or '') + child) + else: + _setTailText(last_child._c_node, + (_collectText(last_child._c_node.next) or '') + child) + elif isinstance(child, _Element): + last_child = child + _appendChild(self, last_child) + elif isinstance(child, type) and issubclass(child, ElementBase): + last_child = child() + _appendChild(self, last_child) + else: + raise TypeError, f"Invalid child type: {type(child)!r}" + +cdef class CommentBase(_Comment): + """All custom Comment classes must inherit from this one. + + To create an XML Comment instance, use the ``Comment()`` factory. + + Subclasses *must not* override __init__ or __new__ as it is + absolutely undefined when these objects will be created or + destroyed. All persistent state of Comments must be stored in the + underlying XML. If you really need to initialize the object after + creation, you can implement an ``_init(self)`` method that will be + called after object creation. 
+ """ + def __init__(self, text): + # copied from Comment() factory + cdef _Document doc + cdef xmlDoc* c_doc + if text is None: + text = b'' + else: + text = _utf8(text) + c_doc = _newXMLDoc() + doc = _documentFactory(c_doc, None) + self._c_node = _createComment(c_doc, _xcstr(text)) + if self._c_node is NULL: + raise MemoryError() + tree.xmlAddChild(c_doc, self._c_node) + _registerProxy(self, doc, self._c_node) + self._init() + +cdef class PIBase(_ProcessingInstruction): + """All custom Processing Instruction classes must inherit from this one. + + To create an XML ProcessingInstruction instance, use the ``PI()`` + factory. + + Subclasses *must not* override __init__ or __new__ as it is + absolutely undefined when these objects will be created or + destroyed. All persistent state of PIs must be stored in the + underlying XML. If you really need to initialize the object after + creation, you can implement an ``_init(self)`` method that will be + called after object creation. + """ + def __init__(self, target, text=None): + # copied from PI() factory + cdef _Document doc + cdef xmlDoc* c_doc + target = _utf8(target) + if text is None: + text = b'' + else: + text = _utf8(text) + c_doc = _newXMLDoc() + doc = _documentFactory(c_doc, None) + self._c_node = _createPI(c_doc, _xcstr(target), _xcstr(text)) + if self._c_node is NULL: + raise MemoryError() + tree.xmlAddChild(c_doc, self._c_node) + _registerProxy(self, doc, self._c_node) + self._init() + +cdef class EntityBase(_Entity): + """All custom Entity classes must inherit from this one. + + To create an XML Entity instance, use the ``Entity()`` factory. + + Subclasses *must not* override __init__ or __new__ as it is + absolutely undefined when these objects will be created or + destroyed. All persistent state of Entities must be stored in the + underlying XML. If you really need to initialize the object after + creation, you can implement an ``_init(self)`` method that will be + called after object creation. 
+ """ + def __init__(self, name): + cdef _Document doc + cdef xmlDoc* c_doc + name_utf = _utf8(name) + c_name = _xcstr(name_utf) + if c_name[0] == c'#': + if not _characterReferenceIsValid(c_name + 1): + raise ValueError, f"Invalid character reference: '{name}'" + elif not _xmlNameIsValid(c_name): + raise ValueError, f"Invalid entity reference: '{name}'" + c_doc = _newXMLDoc() + doc = _documentFactory(c_doc, None) + self._c_node = _createEntity(c_doc, c_name) + if self._c_node is NULL: + raise MemoryError() + tree.xmlAddChild(c_doc, self._c_node) + _registerProxy(self, doc, self._c_node) + self._init() + + +cdef int _validateNodeClass(xmlNode* c_node, cls) except -1: + if c_node.type == tree.XML_ELEMENT_NODE: + expected = ElementBase + elif c_node.type == tree.XML_COMMENT_NODE: + expected = CommentBase + elif c_node.type == tree.XML_ENTITY_REF_NODE: + expected = EntityBase + elif c_node.type == tree.XML_PI_NODE: + expected = PIBase + else: + assert False, f"Unknown node type: {c_node.type}" + + if not (isinstance(cls, type) and issubclass(cls, expected)): + raise TypeError( + f"result of class lookup must be subclass of {type(expected)}, got {type(cls)}") + return 0 + + +################################################################################ +# Element class lookup + +ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*) + +# class to store element class lookup functions +cdef public class ElementClassLookup [ type LxmlElementClassLookupType, + object LxmlElementClassLookup ]: + """ElementClassLookup(self) + Superclass of Element class lookups. + """ + cdef _element_class_lookup_function _lookup_function + + +cdef public class FallbackElementClassLookup(ElementClassLookup) \ + [ type LxmlFallbackElementClassLookupType, + object LxmlFallbackElementClassLookup ]: + """FallbackElementClassLookup(self, fallback=None) + + Superclass of Element class lookups with additional fallback. + """ + cdef readonly ElementClassLookup fallback + cdef _element_class_lookup_function _fallback_function + def __cinit__(self): + # fall back to default lookup + self._fallback_function = _lookupDefaultElementClass + + def __init__(self, ElementClassLookup fallback=None): + if fallback is not None: + self._setFallback(fallback) + else: + self._fallback_function = _lookupDefaultElementClass + + cdef void _setFallback(self, ElementClassLookup lookup): + """Sets the fallback scheme for this lookup method. + """ + self.fallback = lookup + self._fallback_function = lookup._lookup_function + if self._fallback_function is NULL: + self._fallback_function = _lookupDefaultElementClass + + def set_fallback(self, ElementClassLookup lookup not None): + """set_fallback(self, lookup) + + Sets the fallback scheme for this lookup method. + """ + self._setFallback(lookup) + +cdef inline object _callLookupFallback(FallbackElementClassLookup lookup, + _Document doc, xmlNode* c_node): + return lookup._fallback_function(lookup.fallback, doc, c_node) + + +################################################################################ +# default lookup scheme + +cdef class ElementDefaultClassLookup(ElementClassLookup): + """ElementDefaultClassLookup(self, element=None, comment=None, pi=None, entity=None) + Element class lookup scheme that always returns the default Element + class. + + The keyword arguments ``element``, ``comment``, ``pi`` and ``entity`` + accept the respective Element classes. 
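+
+    For illustration, a minimal setup that swaps in a custom default
+    Element class might look like this (``MyElement`` is a placeholder
+    for any ``ElementBase`` subclass)::
+
+        lookup = ElementDefaultClassLookup(element=MyElement)
+        parser = XMLParser()
+        parser.set_element_class_lookup(lookup)
+        # elements parsed with this parser are now MyElement instances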
+ """ + cdef readonly object element_class + cdef readonly object comment_class + cdef readonly object pi_class + cdef readonly object entity_class + def __cinit__(self): + self._lookup_function = _lookupDefaultElementClass + + def __init__(self, element=None, comment=None, pi=None, entity=None): + if element is None: + self.element_class = _Element + elif issubclass(element, ElementBase): + self.element_class = element + else: + raise TypeError, "element class must be subclass of ElementBase" + + if comment is None: + self.comment_class = _Comment + elif issubclass(comment, CommentBase): + self.comment_class = comment + else: + raise TypeError, "comment class must be subclass of CommentBase" + + if entity is None: + self.entity_class = _Entity + elif issubclass(entity, EntityBase): + self.entity_class = entity + else: + raise TypeError, "Entity class must be subclass of EntityBase" + + if pi is None: + self.pi_class = None # special case, see below + elif issubclass(pi, PIBase): + self.pi_class = pi + else: + raise TypeError, "PI class must be subclass of PIBase" + +cdef object _lookupDefaultElementClass(state, _Document _doc, xmlNode* c_node): + "Trivial class lookup function that always returns the default class." + if c_node.type == tree.XML_ELEMENT_NODE: + if state is not None: + return (state).element_class + else: + return _Element + elif c_node.type == tree.XML_COMMENT_NODE: + if state is not None: + return (state).comment_class + else: + return _Comment + elif c_node.type == tree.XML_ENTITY_REF_NODE: + if state is not None: + return (state).entity_class + else: + return _Entity + elif c_node.type == tree.XML_PI_NODE: + if state is None or (state).pi_class is None: + # special case XSLT-PI + if c_node.name is not NULL and c_node.content is not NULL: + if tree.xmlStrcmp(c_node.name, "xml-stylesheet") == 0: + if tree.xmlStrstr(c_node.content, "text/xsl") is not NULL or \ + tree.xmlStrstr(c_node.content, "text/xml") is not NULL: + return _XSLTProcessingInstruction + return _ProcessingInstruction + else: + return (state).pi_class + else: + assert False, f"Unknown node type: {c_node.type}" + + +################################################################################ +# attribute based lookup scheme + +cdef class AttributeBasedElementClassLookup(FallbackElementClassLookup): + """AttributeBasedElementClassLookup(self, attribute_name, class_mapping, fallback=None) + Checks an attribute of an Element and looks up the value in a + class dictionary. + + Arguments: + - attribute name - '{ns}name' style string + - class mapping - Python dict mapping attribute values to Element classes + - fallback - optional fallback lookup mechanism + + A None key in the class mapping will be checked if the attribute is + missing. 
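+
+    For illustration (``HonkElement`` is a placeholder for an
+    ``ElementBase`` subclass)::
+
+        lookup = AttributeBasedElementClassLookup(
+            'honking', {'true': HonkElement})
+        parser = XMLParser()
+        parser.set_element_class_lookup(lookup)
+        # <doc honking="true"/> now maps to HonkElement,
+        # anything else falls back to the default lookup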
+ """ + cdef object _class_mapping + cdef tuple _pytag + cdef const_xmlChar* _c_ns + cdef const_xmlChar* _c_name + def __cinit__(self): + self._lookup_function = _attribute_class_lookup + + def __init__(self, attribute_name, class_mapping, + ElementClassLookup fallback=None): + self._pytag = _getNsTag(attribute_name) + ns, name = self._pytag + if ns is None: + self._c_ns = NULL + else: + self._c_ns = _xcstr(ns) + self._c_name = _xcstr(name) + self._class_mapping = dict(class_mapping) + + FallbackElementClassLookup.__init__(self, fallback) + +cdef object _attribute_class_lookup(state, _Document doc, xmlNode* c_node): + cdef AttributeBasedElementClassLookup lookup + cdef python.PyObject* dict_result + + lookup = state + if c_node.type == tree.XML_ELEMENT_NODE: + value = _attributeValueFromNsName( + c_node, lookup._c_ns, lookup._c_name) + dict_result = python.PyDict_GetItem(lookup._class_mapping, value) + if dict_result is not NULL: + cls = dict_result + _validateNodeClass(c_node, cls) + return cls + return _callLookupFallback(lookup, doc, c_node) + + +################################################################################ +# per-parser lookup scheme + +cdef class ParserBasedElementClassLookup(FallbackElementClassLookup): + """ParserBasedElementClassLookup(self, fallback=None) + Element class lookup based on the XML parser. + """ + def __cinit__(self): + self._lookup_function = _parser_class_lookup + +cdef object _parser_class_lookup(state, _Document doc, xmlNode* c_node): + if doc._parser._class_lookup is not None: + return doc._parser._class_lookup._lookup_function( + doc._parser._class_lookup, doc, c_node) + return _callLookupFallback(state, doc, c_node) + + +################################################################################ +# custom class lookup based on node type, namespace, name + +cdef class CustomElementClassLookup(FallbackElementClassLookup): + """CustomElementClassLookup(self, fallback=None) + Element class lookup based on a subclass method. + + You can inherit from this class and override the method:: + + lookup(self, type, doc, namespace, name) + + to lookup the element class for a node. Arguments of the method: + * type: one of 'element', 'comment', 'PI', 'entity' + * doc: document that the node is in + * namespace: namespace URI of the node (or None for comments/PIs/entities) + * name: name of the element/entity, None for comments, target for PIs + + If you return None from this method, the fallback will be called. 
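+
+    For illustration, a minimal subclass might look like this
+    (``HonkElement`` stands in for any ``ElementBase`` subclass)::
+
+        class MyLookup(CustomElementClassLookup):
+            def lookup(self, node_type, document, namespace, name):
+                if node_type == 'element' and name == 'honk':
+                    return HonkElement   # assumed ElementBase subclass
+                return None   # delegate to the fallback lookup
+
+        parser.set_element_class_lookup(MyLookup())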
+ """ + def __cinit__(self): + self._lookup_function = _custom_class_lookup + + def lookup(self, type, doc, namespace, name): + "lookup(self, type, doc, namespace, name)" + return None + +cdef object _custom_class_lookup(state, _Document doc, xmlNode* c_node): + cdef CustomElementClassLookup lookup + + lookup = state + + if c_node.type == tree.XML_ELEMENT_NODE: + element_type = "element" + elif c_node.type == tree.XML_COMMENT_NODE: + element_type = "comment" + elif c_node.type == tree.XML_PI_NODE: + element_type = "PI" + elif c_node.type == tree.XML_ENTITY_REF_NODE: + element_type = "entity" + else: + element_type = "element" + if c_node.name is NULL: + name = None + else: + name = funicode(c_node.name) + c_str = tree._getNs(c_node) + ns = funicode(c_str) if c_str is not NULL else None + + cls = lookup.lookup(element_type, doc, ns, name) + if cls is not None: + _validateNodeClass(c_node, cls) + return cls + return _callLookupFallback(lookup, doc, c_node) + + +################################################################################ +# read-only tree based class lookup + +cdef class PythonElementClassLookup(FallbackElementClassLookup): + """PythonElementClassLookup(self, fallback=None) + Element class lookup based on a subclass method. + + This class lookup scheme allows access to the entire XML tree in + read-only mode. To use it, re-implement the ``lookup(self, doc, + root)`` method in a subclass:: + + from lxml import etree, pyclasslookup + + class MyElementClass(etree.ElementBase): + honkey = True + + class MyLookup(pyclasslookup.PythonElementClassLookup): + def lookup(self, doc, root): + if root.tag == "sometag": + return MyElementClass + else: + for child in root: + if child.tag == "someothertag": + return MyElementClass + # delegate to default + return None + + If you return None from this method, the fallback will be called. + + The first argument is the opaque document instance that contains + the Element. The second argument is a lightweight Element proxy + implementation that is only valid during the lookup. Do not try + to keep a reference to it. Once the lookup is done, the proxy + will be invalid. + + Also, you cannot wrap such a read-only Element in an ElementTree, + and you must take care not to keep a reference to them outside of + the `lookup()` method. + + Note that the API of the Element objects is not complete. It is + purely read-only and does not support all features of the normal + `lxml.etree` API (such as XPath, extended slicing or some + iteration methods). + + See https://lxml.de/element_classes.html + """ + def __cinit__(self): + self._lookup_function = _python_class_lookup + + def lookup(self, doc, element): + """lookup(self, doc, element) + + Override this method to implement your own lookup scheme. 
+ """ + return None + +cdef object _python_class_lookup(state, _Document doc, tree.xmlNode* c_node): + cdef PythonElementClassLookup lookup + cdef _ReadOnlyProxy proxy + lookup = state + + proxy = _newReadOnlyProxy(None, c_node) + cls = lookup.lookup(doc, proxy) + _freeReadOnlyProxies(proxy) + + if cls is not None: + _validateNodeClass(c_node, cls) + return cls + return _callLookupFallback(lookup, doc, c_node) + +################################################################################ +# Global setup + +cdef _element_class_lookup_function LOOKUP_ELEMENT_CLASS +cdef object ELEMENT_CLASS_LOOKUP_STATE + +cdef void _setElementClassLookupFunction( + _element_class_lookup_function function, object state): + global LOOKUP_ELEMENT_CLASS, ELEMENT_CLASS_LOOKUP_STATE + if function is NULL: + state = DEFAULT_ELEMENT_CLASS_LOOKUP + function = DEFAULT_ELEMENT_CLASS_LOOKUP._lookup_function + + ELEMENT_CLASS_LOOKUP_STATE = state + LOOKUP_ELEMENT_CLASS = function + +def set_element_class_lookup(ElementClassLookup lookup = None): + """set_element_class_lookup(lookup = None) + + Set the global element class lookup method. + + This defines the main entry point for looking up element implementations. + The standard implementation uses the :class:`ParserBasedElementClassLookup` + to delegate to different lookup schemes for each parser. + + .. warning:: + + This should only be changed by applications, not by library packages. + In most cases, parser specific lookups should be preferred, + which can be configured via + :meth:`~lxml.etree.XMLParser.set_element_class_lookup` + (and the same for HTML parsers). + + Globally replacing the element class lookup by something other than a + :class:`ParserBasedElementClassLookup` will prevent parser specific lookup + schemes from working. Several tools rely on parser specific lookups, + including :mod:`lxml.html` and :mod:`lxml.objectify`. + """ + if lookup is None or lookup._lookup_function is NULL: + _setElementClassLookupFunction(NULL, None) + else: + _setElementClassLookupFunction(lookup._lookup_function, lookup) + +# default setup: parser delegation +cdef ParserBasedElementClassLookup DEFAULT_ELEMENT_CLASS_LOOKUP +DEFAULT_ELEMENT_CLASS_LOOKUP = ParserBasedElementClassLookup() + +set_element_class_lookup(DEFAULT_ELEMENT_CLASS_LOOKUP) diff --git a/venv/lib/python3.10/site-packages/lxml/cleanup.pxi b/venv/lib/python3.10/site-packages/lxml/cleanup.pxi new file mode 100644 index 0000000000000000000000000000000000000000..8e266b33f0f3aef34f3448276abfb2cb8b1e4772 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/cleanup.pxi @@ -0,0 +1,215 @@ +# functions for tree cleanup and removing elements from subtrees + +def cleanup_namespaces(tree_or_element, top_nsmap=None, keep_ns_prefixes=None): + """cleanup_namespaces(tree_or_element, top_nsmap=None, keep_ns_prefixes=None) + + Remove all namespace declarations from a subtree that are not used + by any of the elements or attributes in that tree. + + If a 'top_nsmap' is provided, it must be a mapping from prefixes + to namespace URIs. These namespaces will be declared on the top + element of the subtree before running the cleanup, which allows + moving namespace declarations to the top of the tree. + + If a 'keep_ns_prefixes' is provided, it must be a list of prefixes. + These prefixes will not be removed as part of the cleanup. 
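+
+    For illustration::
+
+        root = fromstring(
+            '<root xmlns:unused="http://unused/" xmlns:n="http://used/">'
+            '<n:child/></root>')
+        cleanup_namespaces(root)
+        # the declaration of the 'unused' prefix is removed from 'root',
+        # the 'n' prefix is kept because <n:child/> still uses it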
+ """ + element = _rootNodeOrRaise(tree_or_element) + c_element = element._c_node + + if top_nsmap: + doc = element._doc + # declare namespaces from nsmap, then apply them to the subtree + _setNodeNamespaces(c_element, doc, None, top_nsmap) + moveNodeToDocument(doc, c_element.doc, c_element) + + keep_ns_prefixes = ( + set([_utf8(prefix) for prefix in keep_ns_prefixes]) + if keep_ns_prefixes else None) + + _removeUnusedNamespaceDeclarations(c_element, keep_ns_prefixes) + + +def strip_attributes(tree_or_element, *attribute_names): + """strip_attributes(tree_or_element, *attribute_names) + + Delete all attributes with the provided attribute names from an + Element (or ElementTree) and its descendants. + + Attribute names can contain wildcards as in `_Element.iter`. + + Example usage:: + + strip_attributes(root_element, + 'simpleattr', + '{http://some/ns}attrname', + '{http://other/ns}*') + """ + cdef _MultiTagMatcher matcher + element = _rootNodeOrRaise(tree_or_element) + if not attribute_names: + return + + matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, attribute_names) + matcher.cacheTags(element._doc) + if matcher.rejectsAllAttributes(): + return + _strip_attributes(element._c_node, matcher) + + +cdef _strip_attributes(xmlNode* c_node, _MultiTagMatcher matcher): + cdef xmlAttr* c_attr + cdef xmlAttr* c_next_attr + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_node, c_node, 1) + if c_node.type == tree.XML_ELEMENT_NODE: + c_attr = c_node.properties + while c_attr is not NULL: + c_next_attr = c_attr.next + if matcher.matchesAttribute(c_attr): + tree.xmlRemoveProp(c_attr) + c_attr = c_next_attr + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + + +def strip_elements(tree_or_element, *tag_names, bint with_tail=True): + """strip_elements(tree_or_element, *tag_names, with_tail=True) + + Delete all elements with the provided tag names from a tree or + subtree. This will remove the elements and their entire subtree, + including all their attributes, text content and descendants. It + will also remove the tail text of the element unless you + explicitly set the ``with_tail`` keyword argument option to False. + + Tag names can contain wildcards as in `_Element.iter`. + + Note that this will not delete the element (or ElementTree root + element) that you passed even if it matches. It will only treat + its descendants. If you want to include the root element, check + its tag name directly before even calling this function. 
+ + Example usage:: + + strip_elements(some_element, + 'simpletagname', # non-namespaced tag + '{http://some/ns}tagname', # namespaced tag + '{http://some/other/ns}*' # any tag from a namespace + lxml.etree.Comment # comments + ) + """ + cdef _MultiTagMatcher matcher + doc = _documentOrRaise(tree_or_element) + element = _rootNodeOrRaise(tree_or_element) + if not tag_names: + return + + matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag_names) + matcher.cacheTags(doc) + if matcher.rejectsAll(): + return + + if isinstance(tree_or_element, _ElementTree): + # include PIs and comments next to the root node + if matcher.matchesType(tree.XML_COMMENT_NODE): + _removeSiblings(element._c_node, tree.XML_COMMENT_NODE, with_tail) + if matcher.matchesType(tree.XML_PI_NODE): + _removeSiblings(element._c_node, tree.XML_PI_NODE, with_tail) + _strip_elements(doc, element._c_node, matcher, with_tail) + +cdef _strip_elements(_Document doc, xmlNode* c_node, _MultiTagMatcher matcher, + bint with_tail): + cdef xmlNode* c_child + cdef xmlNode* c_next + + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_node, c_node, 1) + if c_node.type == tree.XML_ELEMENT_NODE: + # we run through the children here to prevent any problems + # with the tree iteration which would occur if we unlinked the + # c_node itself + c_child = _findChildForwards(c_node, 0) + while c_child is not NULL: + c_next = _nextElement(c_child) + if matcher.matches(c_child): + if c_child.type == tree.XML_ELEMENT_NODE: + if not with_tail: + tree.xmlUnlinkNode(c_child) + _removeNode(doc, c_child) + else: + if with_tail: + _removeText(c_child.next) + tree.xmlUnlinkNode(c_child) + attemptDeallocation(c_child) + c_child = c_next + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + + +def strip_tags(tree_or_element, *tag_names): + """strip_tags(tree_or_element, *tag_names) + + Delete all elements with the provided tag names from a tree or + subtree. This will remove the elements and their attributes, but + *not* their text/tail content or descendants. Instead, it will + merge the text content and children of the element into its + parent. + + Tag names can contain wildcards as in `_Element.iter`. + + Note that this will not delete the element (or ElementTree root + element) that you passed even if it matches. It will only treat + its descendants. + + Example usage:: + + strip_tags(some_element, + 'simpletagname', # non-namespaced tag + '{http://some/ns}tagname', # namespaced tag + '{http://some/other/ns}*' # any tag from a namespace + Comment # comments (including their text!) 
+ ) + """ + cdef _MultiTagMatcher matcher + doc = _documentOrRaise(tree_or_element) + element = _rootNodeOrRaise(tree_or_element) + if not tag_names: + return + + matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag_names) + matcher.cacheTags(doc) + if matcher.rejectsAll(): + return + + if isinstance(tree_or_element, _ElementTree): + # include PIs and comments next to the root node + if matcher.matchesType(tree.XML_COMMENT_NODE): + _removeSiblings(element._c_node, tree.XML_COMMENT_NODE, 0) + if matcher.matchesType(tree.XML_PI_NODE): + _removeSiblings(element._c_node, tree.XML_PI_NODE, 0) + _strip_tags(doc, element._c_node, matcher) + +cdef _strip_tags(_Document doc, xmlNode* c_node, _MultiTagMatcher matcher): + cdef xmlNode* c_child + cdef xmlNode* c_next + + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_node, c_node, 1) + if c_node.type == tree.XML_ELEMENT_NODE: + # we run through the children here to prevent any problems + # with the tree iteration which would occur if we unlinked the + # c_node itself + c_child = _findChildForwards(c_node, 0) + while c_child is not NULL: + if not matcher.matches(c_child): + c_child = _nextElement(c_child) + continue + if c_child.type == tree.XML_ELEMENT_NODE: + c_next = _findChildForwards(c_child, 0) or _nextElement(c_child) + _replaceNodeByChildren(doc, c_child) + if not attemptDeallocation(c_child): + if c_child.nsDef is not NULL: + # make namespaces absolute + moveNodeToDocument(doc, doc._c_doc, c_child) + c_child = c_next + else: + c_next = _nextElement(c_child) + tree.xmlUnlinkNode(c_child) + attemptDeallocation(c_child) + c_child = c_next + tree.END_FOR_EACH_ELEMENT_FROM(c_node) diff --git a/venv/lib/python3.10/site-packages/lxml/cssselect.py b/venv/lib/python3.10/site-packages/lxml/cssselect.py new file mode 100644 index 0000000000000000000000000000000000000000..54cd75ac9bfecdec7ea81e91b0840c6edd401515 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/cssselect.py @@ -0,0 +1,101 @@ +"""CSS Selectors based on XPath. + +This module supports selecting XML/HTML tags based on CSS selectors. +See the `CSSSelector` class for details. + +This is a thin wrapper around cssselect 0.7 or later. +""" + + +from . import etree +try: + import cssselect as external_cssselect +except ImportError: + raise ImportError( + 'cssselect does not seem to be installed. ' + 'See https://pypi.org/project/cssselect/') + + +SelectorSyntaxError = external_cssselect.SelectorSyntaxError +ExpressionError = external_cssselect.ExpressionError +SelectorError = external_cssselect.SelectorError + + +__all__ = ['SelectorSyntaxError', 'ExpressionError', 'SelectorError', + 'CSSSelector'] + + +class LxmlTranslator(external_cssselect.GenericTranslator): + """ + A custom CSS selector to XPath translator with lxml-specific extensions. + """ + def xpath_contains_function(self, xpath, function): + # Defined there, removed in later drafts: + # http://www.w3.org/TR/2001/CR-css3-selectors-20011113/#content-selectors + if function.argument_types() not in (['STRING'], ['IDENT']): + raise ExpressionError( + "Expected a single string or ident for :contains(), got %r" + % function.arguments) + value = function.arguments[0].value + return xpath.add_condition( + 'contains(__lxml_internal_css:lower-case(string(.)), %s)' + % self.xpath_literal(value.lower())) + + +class LxmlHTMLTranslator(LxmlTranslator, external_cssselect.HTMLTranslator): + """ + lxml extensions + HTML support. 
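+
+    The lxml-specific ``:contains()`` extension defined above is
+    available through ``CSSSelector``; for illustration::
+
+        from lxml import etree
+        from lxml.cssselect import CSSSelector
+
+        select = CSSSelector('p:contains("hello")')
+        root = etree.XML('<div><p>Hello world</p><p>bye</p></div>')
+        # matches case-insensitively via the lower-case() helper
+        texts = [el.text for el in select(root)]   # ['Hello world']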
+ """ + + +def _make_lower_case(context, s): + return s.lower() + +ns = etree.FunctionNamespace('http://codespeak.net/lxml/css/') +ns.prefix = '__lxml_internal_css' +ns['lower-case'] = _make_lower_case + + +class CSSSelector(etree.XPath): + """A CSS selector. + + Usage:: + + >>> from lxml import etree, cssselect + >>> select = cssselect.CSSSelector("a tag > child") + + >>> root = etree.XML("TEXT") + >>> [ el.tag for el in select(root) ] + ['child'] + + To use CSS namespaces, you need to pass a prefix-to-namespace + mapping as ``namespaces`` keyword argument:: + + >>> rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#' + >>> select_ns = cssselect.CSSSelector('root > rdf|Description', + ... namespaces={'rdf': rdfns}) + + >>> rdf = etree.XML(( + ... '' + ... 'blah' + ... '') % rdfns) + >>> [(el.tag, el.text) for el in select_ns(rdf)] + [('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description', 'blah')] + + """ + def __init__(self, css, namespaces=None, translator='xml'): + if translator == 'xml': + translator = LxmlTranslator() + elif translator == 'html': + translator = LxmlHTMLTranslator() + elif translator == 'xhtml': + translator = LxmlHTMLTranslator(xhtml=True) + path = translator.css_to_xpath(css) + super().__init__(path, namespaces=namespaces) + self.css = css + + def __repr__(self): + return '<%s %x for %r>' % ( + self.__class__.__name__, + abs(id(self)), + self.css) diff --git a/venv/lib/python3.10/site-packages/lxml/debug.pxi b/venv/lib/python3.10/site-packages/lxml/debug.pxi new file mode 100644 index 0000000000000000000000000000000000000000..e5bb061958f29a875e7df043f3b2aec9f550233f --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/debug.pxi @@ -0,0 +1,90 @@ +@cython.final +@cython.internal +cdef class _MemDebug: + """Debugging support for the memory allocation in libxml2. + """ + def bytes_used(self): + """bytes_used(self) + + Returns the total amount of memory (in bytes) currently used by libxml2. + Note that libxml2 constrains this value to a C int, which limits + the accuracy on 64 bit systems. + """ + return tree.xmlMemUsed() + + def blocks_used(self): + """blocks_used(self) + + Returns the total number of memory blocks currently allocated by libxml2. + Note that libxml2 constrains this value to a C int, which limits + the accuracy on 64 bit systems. + """ + return tree.xmlMemBlocks() + + def dict_size(self): + """dict_size(self) + + Returns the current size of the global name dictionary used by libxml2 + for the current thread. Each thread has its own dictionary. + """ + c_dict = __GLOBAL_PARSER_CONTEXT._getThreadDict(NULL) + if c_dict is NULL: + raise MemoryError() + return tree.xmlDictSize(c_dict) + + def dump(self, output_file=None, byte_count=None): + """dump(self, output_file=None, byte_count=None) + + Dumps the current memory blocks allocated by libxml2 to a file. + + The optional parameter 'output_file' specifies the file path. It defaults + to the file ".memorylist" in the current directory. + + The optional parameter 'byte_count' limits the number of bytes in the dump. + Note that this parameter is ignored when lxml is compiled against a libxml2 + version before 2.7.0. 
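+
+        For illustration, using the module-level ``memory_debugger``
+        instance defined at the end of this file::
+
+            from lxml import etree
+            etree.memory_debugger.dump()   # writes ".memorylist" in the cwd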
+ """ + cdef Py_ssize_t c_count + if output_file is None: + output_file = b'.memorylist' + elif isinstance(output_file, unicode): + output_file.encode(sys.getfilesystemencoding()) + + f = stdio.fopen(output_file, "w") + if f is NULL: + raise IOError(f"Failed to create file {output_file.decode(sys.getfilesystemencoding())}") + try: + if byte_count is None: + tree.xmlMemDisplay(f) + else: + c_count = byte_count + tree.xmlMemDisplayLast(f, c_count) + finally: + stdio.fclose(f) + + def show(self, output_file=None, block_count=None): + """show(self, output_file=None, block_count=None) + + Dumps the current memory blocks allocated by libxml2 to a file. + The output file format is suitable for line diffing. + + The optional parameter 'output_file' specifies the file path. It defaults + to the file ".memorydump" in the current directory. + + The optional parameter 'block_count' limits the number of blocks + in the dump. + """ + if output_file is None: + output_file = b'.memorydump' + elif isinstance(output_file, unicode): + output_file.encode(sys.getfilesystemencoding()) + + f = stdio.fopen(output_file, "w") + if f is NULL: + raise IOError(f"Failed to create file {output_file.decode(sys.getfilesystemencoding())}") + try: + tree.xmlMemShow(f, block_count if block_count is not None else tree.xmlMemBlocks()) + finally: + stdio.fclose(f) + +memory_debugger = _MemDebug() diff --git a/venv/lib/python3.10/site-packages/lxml/docloader.pxi b/venv/lib/python3.10/site-packages/lxml/docloader.pxi new file mode 100644 index 0000000000000000000000000000000000000000..7b38f43838592445d2618440b178bd9c8557073c --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/docloader.pxi @@ -0,0 +1,178 @@ +# Custom resolver API + +ctypedef enum _InputDocumentDataType: + PARSER_DATA_INVALID + PARSER_DATA_EMPTY + PARSER_DATA_STRING + PARSER_DATA_FILENAME + PARSER_DATA_FILE + +@cython.final +@cython.internal +cdef class _InputDocument: + cdef _InputDocumentDataType _type + cdef bytes _data_bytes + cdef object _filename + cdef object _file + cdef bint _close_file + + def __cinit__(self): + self._type = PARSER_DATA_INVALID + + +cdef class Resolver: + "This is the base class of all resolvers." + def resolve(self, system_url, public_id, context): + """resolve(self, system_url, public_id, context) + + Override this method to resolve an external source by + ``system_url`` and ``public_id``. The third argument is an + opaque context object. + + Return the result of one of the ``resolve_*()`` methods. + """ + return None + + def resolve_empty(self, context): + """resolve_empty(self, context) + + Return an empty input document. + + Pass context as parameter. + """ + cdef _InputDocument doc_ref + doc_ref = _InputDocument() + doc_ref._type = PARSER_DATA_EMPTY + return doc_ref + + def resolve_string(self, string, context, *, base_url=None): + """resolve_string(self, string, context, base_url=None) + + Return a parsable string as input document. + + Pass data string and context as parameters. You can pass the + source URL or filename through the ``base_url`` keyword + argument. 
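+
+        For illustration, a custom resolver might use it like this
+        (the URL and DTD content are placeholders)::
+
+            class MyResolver(Resolver):
+                def resolve(self, system_url, public_id, context):
+                    if system_url == 'internal:example.dtd':
+                        return self.resolve_string(
+                            '<!ELEMENT doc (#PCDATA)>', context,
+                            base_url=system_url)
+                    return None   # let other resolvers handle it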
+ """ + cdef _InputDocument doc_ref + if isinstance(string, unicode): + string = (string).encode('utf8') + elif not isinstance(string, bytes): + raise TypeError, "argument must be a byte string or unicode string" + doc_ref = _InputDocument() + doc_ref._type = PARSER_DATA_STRING + doc_ref._data_bytes = string + if base_url is not None: + doc_ref._filename = _encodeFilename(base_url) + return doc_ref + + def resolve_filename(self, filename, context): + """resolve_filename(self, filename, context) + + Return the name of a parsable file as input document. + + Pass filename and context as parameters. You can also pass a + URL with an HTTP, FTP or file target. + """ + cdef _InputDocument doc_ref + doc_ref = _InputDocument() + doc_ref._type = PARSER_DATA_FILENAME + doc_ref._filename = _encodeFilename(filename) + return doc_ref + + def resolve_file(self, f, context, *, base_url=None, bint close=True): + """resolve_file(self, f, context, base_url=None, close=True) + + Return an open file-like object as input document. + + Pass open file and context as parameters. You can pass the + base URL or filename of the file through the ``base_url`` + keyword argument. If the ``close`` flag is True (the + default), the file will be closed after reading. + + Note that using ``.resolve_filename()`` is more efficient, + especially in threaded environments. + """ + cdef _InputDocument doc_ref + try: + f.read + except AttributeError: + raise TypeError, "Argument is not a file-like object" + doc_ref = _InputDocument() + doc_ref._type = PARSER_DATA_FILE + if base_url is not None: + doc_ref._filename = _encodeFilename(base_url) + else: + doc_ref._filename = _getFilenameForFile(f) + doc_ref._close_file = close + doc_ref._file = f + return doc_ref + +@cython.final +@cython.internal +cdef class _ResolverRegistry: + cdef object _resolvers + cdef Resolver _default_resolver + def __cinit__(self, Resolver default_resolver=None): + self._resolvers = set() + self._default_resolver = default_resolver + + def add(self, Resolver resolver not None): + """add(self, resolver) + + Register a resolver. + + For each requested entity, the 'resolve' method of the resolver will + be called and the result will be passed to the parser. If this method + returns None, the request will be delegated to other resolvers or the + default resolver. The resolvers will be tested in an arbitrary order + until the first match is found. 
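+
+        Registries are usually reached through the ``resolvers``
+        property of a parser; for illustration (``MyResolver`` being a
+        custom ``Resolver`` subclass)::
+
+            parser = XMLParser(load_dtd=True)
+            parser.resolvers.add(MyResolver())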
+ """ + self._resolvers.add(resolver) + + def remove(self, resolver): + "remove(self, resolver)" + self._resolvers.discard(resolver) + + cdef _ResolverRegistry _copy(self): + cdef _ResolverRegistry registry + registry = _ResolverRegistry(self._default_resolver) + registry._resolvers = self._resolvers.copy() + return registry + + def copy(self): + "copy(self)" + return self._copy() + + def resolve(self, system_url, public_id, context): + "resolve(self, system_url, public_id, context)" + for resolver in self._resolvers: + result = resolver.resolve(system_url, public_id, context) + if result is not None: + return result + if self._default_resolver is None: + return None + return self._default_resolver.resolve(system_url, public_id, context) + + def __repr__(self): + return repr(self._resolvers) + + +@cython.internal +cdef class _ResolverContext(_ExceptionContext): + cdef _ResolverRegistry _resolvers + cdef _TempStore _storage + + cdef int clear(self) except -1: + _ExceptionContext.clear(self) + self._storage.clear() + return 0 + + +cdef _initResolverContext(_ResolverContext context, + _ResolverRegistry resolvers): + if resolvers is None: + context._resolvers = _ResolverRegistry() + else: + context._resolvers = resolvers + context._storage = _TempStore() diff --git a/venv/lib/python3.10/site-packages/lxml/doctestcompare.py b/venv/lib/python3.10/site-packages/lxml/doctestcompare.py new file mode 100644 index 0000000000000000000000000000000000000000..8099771de906a37ed007c779f152fe96f182060d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/doctestcompare.py @@ -0,0 +1,488 @@ +""" +lxml-based doctest output comparison. + +Note: normally, you should just import the `lxml.usedoctest` and +`lxml.html.usedoctest` modules from within a doctest, instead of this +one:: + + >>> import lxml.usedoctest # for XML output + + >>> import lxml.html.usedoctest # for HTML output + +To use this module directly, you must call ``lxmldoctest.install()``, +which will cause doctest to use this in all subsequent calls. + +This changes the way output is checked and comparisons are made for +XML or HTML-like content. + +XML or HTML content is noticed because the example starts with ``<`` +(it's HTML if it starts with ```` or include an ``any`` +attribute in the tag. An ``any`` tag matches any tag, while the +attribute matches any and all attributes. + +When a match fails, the reformatted example and gotten text is +displayed (indented), and a rough diff-like output is given. Anything +marked with ``+`` is in the output but wasn't supposed to be, and +similarly ``-`` means its in the example but wasn't in the output. 
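+
+For example, with the checker installed, a doctest along these lines
+would pass even though whitespace and attribute order differ
+(illustrative only)::
+
+    >>> print('<root><child a="1" b="2"/></root>')
+    <root>
+      <child b="2" a="1"/>
+    </root>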
+ +You can disable parsing on one line with ``# doctest:+NOPARSE_MARKUP`` +""" + +from lxml import etree +import sys +import re +import doctest +try: + from html import escape as html_escape +except ImportError: + from cgi import escape as html_escape + +__all__ = ['PARSE_HTML', 'PARSE_XML', 'NOPARSE_MARKUP', 'LXMLOutputChecker', + 'LHTMLOutputChecker', 'install', 'temp_install'] + +PARSE_HTML = doctest.register_optionflag('PARSE_HTML') +PARSE_XML = doctest.register_optionflag('PARSE_XML') +NOPARSE_MARKUP = doctest.register_optionflag('NOPARSE_MARKUP') + +OutputChecker = doctest.OutputChecker + +def strip(v): + if v is None: + return None + else: + return v.strip() + +def norm_whitespace(v): + return _norm_whitespace_re.sub(' ', v) + +_html_parser = etree.HTMLParser(recover=False, remove_blank_text=True) + +def html_fromstring(html): + return etree.fromstring(html, _html_parser) + +# We use this to distinguish repr()s from elements: +_repr_re = re.compile(r'^<[^>]+ (at|object) ') +_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+') + +class LXMLOutputChecker(OutputChecker): + + empty_tags = ( + 'param', 'img', 'area', 'br', 'basefont', 'input', + 'base', 'meta', 'link', 'col') + + def get_default_parser(self): + return etree.XML + + def check_output(self, want, got, optionflags): + alt_self = getattr(self, '_temp_override_self', None) + if alt_self is not None: + super_method = self._temp_call_super_check_output + self = alt_self + else: + super_method = OutputChecker.check_output + parser = self.get_parser(want, got, optionflags) + if not parser: + return super_method( + self, want, got, optionflags) + try: + want_doc = parser(want) + except etree.XMLSyntaxError: + return False + try: + got_doc = parser(got) + except etree.XMLSyntaxError: + return False + return self.compare_docs(want_doc, got_doc) + + def get_parser(self, want, got, optionflags): + parser = None + if NOPARSE_MARKUP & optionflags: + return None + if PARSE_HTML & optionflags: + parser = html_fromstring + elif PARSE_XML & optionflags: + parser = etree.XML + elif (want.strip().lower().startswith('' % el.tag + return '<%s %s>' % (el.tag, ' '.join(attrs)) + + def format_end_tag(self, el): + if isinstance(el, etree.CommentBase): + # FIXME: probably PIs should be handled specially too? 
+ return '-->' + return '' % el.tag + + def collect_diff(self, want, got, html, indent): + parts = [] + if not len(want) and not len(got): + parts.append(' '*indent) + parts.append(self.collect_diff_tag(want, got)) + if not self.html_empty_tag(got, html): + parts.append(self.collect_diff_text(want.text, got.text)) + parts.append(self.collect_diff_end_tag(want, got)) + parts.append(self.collect_diff_text(want.tail, got.tail)) + parts.append('\n') + return ''.join(parts) + parts.append(' '*indent) + parts.append(self.collect_diff_tag(want, got)) + parts.append('\n') + if strip(want.text) or strip(got.text): + parts.append(' '*indent) + parts.append(self.collect_diff_text(want.text, got.text)) + parts.append('\n') + want_children = list(want) + got_children = list(got) + while want_children or got_children: + if not want_children: + parts.append(self.format_doc(got_children.pop(0), html, indent+2, '+')) + continue + if not got_children: + parts.append(self.format_doc(want_children.pop(0), html, indent+2, '-')) + continue + parts.append(self.collect_diff( + want_children.pop(0), got_children.pop(0), html, indent+2)) + parts.append(' '*indent) + parts.append(self.collect_diff_end_tag(want, got)) + parts.append('\n') + if strip(want.tail) or strip(got.tail): + parts.append(' '*indent) + parts.append(self.collect_diff_text(want.tail, got.tail)) + parts.append('\n') + return ''.join(parts) + + def collect_diff_tag(self, want, got): + if not self.tag_compare(want.tag, got.tag): + tag = '%s (got: %s)' % (want.tag, got.tag) + else: + tag = got.tag + attrs = [] + any = want.tag == 'any' or 'any' in want.attrib + for name, value in sorted(got.attrib.items()): + if name not in want.attrib and not any: + attrs.append('+%s="%s"' % (name, self.format_text(value, False))) + else: + if name in want.attrib: + text = self.collect_diff_text(want.attrib[name], value, False) + else: + text = self.format_text(value, False) + attrs.append('%s="%s"' % (name, text)) + if not any: + for name, value in sorted(want.attrib.items()): + if name in got.attrib: + continue + attrs.append('-%s="%s"' % (name, self.format_text(value, False))) + if attrs: + tag = '<%s %s>' % (tag, ' '.join(attrs)) + else: + tag = '<%s>' % tag + return tag + + def collect_diff_end_tag(self, want, got): + if want.tag != got.tag: + tag = '%s (got: %s)' % (want.tag, got.tag) + else: + tag = got.tag + return '' % tag + + def collect_diff_text(self, want, got, strip=True): + if self.text_compare(want, got, strip): + if not got: + return '' + return self.format_text(got, strip) + text = '%s (got: %s)' % (want, got) + return self.format_text(text, strip) + +class LHTMLOutputChecker(LXMLOutputChecker): + def get_default_parser(self): + return html_fromstring + +def install(html=False): + """ + Install doctestcompare for all future doctests. + + If html is true, then by default the HTML parser will be used; + otherwise the XML parser is used. + """ + if html: + doctest.OutputChecker = LHTMLOutputChecker + else: + doctest.OutputChecker = LXMLOutputChecker + +def temp_install(html=False, del_module=None): + """ + Use this *inside* a doctest to enable this checker for this + doctest only. + + If html is true, then by default the HTML parser will be used; + otherwise the XML parser is used. 
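+
+    For illustration, from within a doctest::
+
+        >>> from lxml.doctestcompare import temp_install
+        >>> temp_install()
+        >>> print('<foo>text</foo>')
+        <foo>
+            text
+        </foo>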
+ """ + if html: + Checker = LHTMLOutputChecker + else: + Checker = LXMLOutputChecker + frame = _find_doctest_frame() + dt_self = frame.f_locals['self'] + checker = Checker() + old_checker = dt_self._checker + dt_self._checker = checker + # The unfortunate thing is that there is a local variable 'check' + # in the function that runs the doctests, that is a bound method + # into the output checker. We have to update that. We can't + # modify the frame, so we have to modify the object in place. The + # only way to do this is to actually change the func_code + # attribute of the method. We change it, and then wait for + # __record_outcome to be run, which signals the end of the __run + # method, at which point we restore the previous check_output + # implementation. + check_func = frame.f_locals['check'].__func__ + checker_check_func = checker.check_output.__func__ + # Because we can't patch up func_globals, this is the only global + # in check_output that we care about: + doctest.etree = etree + _RestoreChecker(dt_self, old_checker, checker, + check_func, checker_check_func, + del_module) + +class _RestoreChecker: + def __init__(self, dt_self, old_checker, new_checker, check_func, clone_func, + del_module): + self.dt_self = dt_self + self.checker = old_checker + self.checker._temp_call_super_check_output = self.call_super + self.checker._temp_override_self = new_checker + self.check_func = check_func + self.clone_func = clone_func + self.del_module = del_module + self.install_clone() + self.install_dt_self() + def install_clone(self): + self.func_code = self.check_func.__code__ + self.func_globals = self.check_func.__globals__ + self.check_func.__code__ = self.clone_func.__code__ + def uninstall_clone(self): + self.check_func.__code__ = self.func_code + def install_dt_self(self): + self.prev_func = self.dt_self._DocTestRunner__record_outcome + self.dt_self._DocTestRunner__record_outcome = self + def uninstall_dt_self(self): + self.dt_self._DocTestRunner__record_outcome = self.prev_func + def uninstall_module(self): + if self.del_module: + import sys + del sys.modules[self.del_module] + if '.' in self.del_module: + package, module = self.del_module.rsplit('.', 1) + package_mod = sys.modules[package] + delattr(package_mod, module) + def __call__(self, *args, **kw): + self.uninstall_clone() + self.uninstall_dt_self() + del self.checker._temp_override_self + del self.checker._temp_call_super_check_output + result = self.prev_func(*args, **kw) + self.uninstall_module() + return result + def call_super(self, *args, **kw): + self.uninstall_clone() + try: + return self.check_func(*args, **kw) + finally: + self.install_clone() + +def _find_doctest_frame(): + import sys + frame = sys._getframe(1) + while frame: + l = frame.f_locals + if 'BOOM' in l: + # Sign of doctest + return frame + frame = frame.f_back + raise LookupError( + "Could not find doctest (only use this function *inside* a doctest)") + +__test__ = { + 'basic': ''' + >>> temp_install() + >>> print """stuff""" + ... 
+ >>> print """""" + + + + >>> print """blahblahblah""" # doctest: +NOPARSE_MARKUP, +ELLIPSIS + ...foo /> + '''} + +if __name__ == '__main__': + import doctest + doctest.testmod() + + diff --git a/venv/lib/python3.10/site-packages/lxml/dtd.pxi b/venv/lib/python3.10/site-packages/lxml/dtd.pxi new file mode 100644 index 0000000000000000000000000000000000000000..348212c3dccaa4c70f57b572ebec9229a16981d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/dtd.pxi @@ -0,0 +1,478 @@ +# support for DTD validation +from lxml.includes cimport dtdvalid + +cdef class DTDError(LxmlError): + """Base class for DTD errors. + """ + +cdef class DTDParseError(DTDError): + """Error while parsing a DTD. + """ + +cdef class DTDValidateError(DTDError): + """Error while validating an XML document with a DTD. + """ + + +cdef inline int _assertValidDTDNode(node, void *c_node) except -1: + assert c_node is not NULL, "invalid DTD proxy at %s" % id(node) + + +@cython.final +@cython.internal +@cython.freelist(8) +cdef class _DTDElementContentDecl: + cdef DTD _dtd + cdef tree.xmlElementContent* _c_node + + def __repr__(self): + return "<%s.%s object name=%r type=%r occur=%r at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, self.type, self.occur, id(self)) + + @property + def name(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.name) + + @property + def type(self): + _assertValidDTDNode(self, self._c_node) + cdef int type = self._c_node.type + if type == tree.XML_ELEMENT_CONTENT_PCDATA: + return "pcdata" + elif type == tree.XML_ELEMENT_CONTENT_ELEMENT: + return "element" + elif type == tree.XML_ELEMENT_CONTENT_SEQ: + return "seq" + elif type == tree.XML_ELEMENT_CONTENT_OR: + return "or" + else: + return None + + @property + def occur(self): + _assertValidDTDNode(self, self._c_node) + cdef int occur = self._c_node.ocur + if occur == tree.XML_ELEMENT_CONTENT_ONCE: + return "once" + elif occur == tree.XML_ELEMENT_CONTENT_OPT: + return "opt" + elif occur == tree.XML_ELEMENT_CONTENT_MULT: + return "mult" + elif occur == tree.XML_ELEMENT_CONTENT_PLUS: + return "plus" + else: + return None + + @property + def left(self): + _assertValidDTDNode(self, self._c_node) + c1 = self._c_node.c1 + if c1: + node = <_DTDElementContentDecl>_DTDElementContentDecl.__new__(_DTDElementContentDecl) + node._dtd = self._dtd + node._c_node = c1 + return node + else: + return None + + @property + def right(self): + _assertValidDTDNode(self, self._c_node) + c2 = self._c_node.c2 + if c2: + node = <_DTDElementContentDecl>_DTDElementContentDecl.__new__(_DTDElementContentDecl) + node._dtd = self._dtd + node._c_node = c2 + return node + else: + return None + + +@cython.final +@cython.internal +@cython.freelist(8) +cdef class _DTDAttributeDecl: + cdef DTD _dtd + cdef tree.xmlAttribute* _c_node + + def __repr__(self): + return "<%s.%s object name=%r elemname=%r prefix=%r type=%r default=%r default_value=%r at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, self.elemname, self.prefix, self.type, self.default, self.default_value, id(self)) + + @property + def name(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.name) + + @property + def elemname(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.elem) + + @property + def prefix(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.prefix) + + @property + def type(self): + _assertValidDTDNode(self, self._c_node) + 
cdef int type = self._c_node.atype + if type == tree.XML_ATTRIBUTE_CDATA: + return "cdata" + elif type == tree.XML_ATTRIBUTE_ID: + return "id" + elif type == tree.XML_ATTRIBUTE_IDREF: + return "idref" + elif type == tree.XML_ATTRIBUTE_IDREFS: + return "idrefs" + elif type == tree.XML_ATTRIBUTE_ENTITY: + return "entity" + elif type == tree.XML_ATTRIBUTE_ENTITIES: + return "entities" + elif type == tree.XML_ATTRIBUTE_NMTOKEN: + return "nmtoken" + elif type == tree.XML_ATTRIBUTE_NMTOKENS: + return "nmtokens" + elif type == tree.XML_ATTRIBUTE_ENUMERATION: + return "enumeration" + elif type == tree.XML_ATTRIBUTE_NOTATION: + return "notation" + else: + return None + + @property + def default(self): + _assertValidDTDNode(self, self._c_node) + cdef int default = self._c_node.def_ + if default == tree.XML_ATTRIBUTE_NONE: + return "none" + elif default == tree.XML_ATTRIBUTE_REQUIRED: + return "required" + elif default == tree.XML_ATTRIBUTE_IMPLIED: + return "implied" + elif default == tree.XML_ATTRIBUTE_FIXED: + return "fixed" + else: + return None + + @property + def default_value(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.defaultValue) + + def itervalues(self): + _assertValidDTDNode(self, self._c_node) + cdef tree.xmlEnumeration *c_node = self._c_node.tree + while c_node is not NULL: + yield funicode(c_node.name) + c_node = c_node.next + + def values(self): + return list(self.itervalues()) + + +@cython.final +@cython.internal +@cython.freelist(8) +cdef class _DTDElementDecl: + cdef DTD _dtd + cdef tree.xmlElement* _c_node + + def __repr__(self): + return "<%s.%s object name=%r prefix=%r type=%r at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, self.prefix, self.type, id(self)) + + @property + def name(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.name) + + @property + def prefix(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.prefix) + + @property + def type(self): + _assertValidDTDNode(self, self._c_node) + cdef int type = self._c_node.etype + if type == tree.XML_ELEMENT_TYPE_UNDEFINED: + return "undefined" + elif type == tree.XML_ELEMENT_TYPE_EMPTY: + return "empty" + elif type == tree.XML_ELEMENT_TYPE_ANY: + return "any" + elif type == tree.XML_ELEMENT_TYPE_MIXED: + return "mixed" + elif type == tree.XML_ELEMENT_TYPE_ELEMENT: + return "element" + else: + return None + + @property + def content(self): + _assertValidDTDNode(self, self._c_node) + cdef tree.xmlElementContent *content = self._c_node.content + if content: + node = <_DTDElementContentDecl>_DTDElementContentDecl.__new__(_DTDElementContentDecl) + node._dtd = self._dtd + node._c_node = content + return node + else: + return None + + def iterattributes(self): + _assertValidDTDNode(self, self._c_node) + cdef tree.xmlAttribute *c_node = self._c_node.attributes + while c_node: + node = <_DTDAttributeDecl>_DTDAttributeDecl.__new__(_DTDAttributeDecl) + node._dtd = self._dtd + node._c_node = c_node + yield node + c_node = c_node.nexth + + def attributes(self): + return list(self.iterattributes()) + + +@cython.final +@cython.internal +@cython.freelist(8) +cdef class _DTDEntityDecl: + cdef DTD _dtd + cdef tree.xmlEntity* _c_node + def __repr__(self): + return "<%s.%s object name=%r at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self)) + + @property + def name(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.name) + + @property + def 
orig(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.orig) + + @property + def content(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.content) + + @property + def system_url(self): + _assertValidDTDNode(self, self._c_node) + return funicodeOrNone(self._c_node.SystemID) + + +################################################################################ +# DTD + +cdef class DTD(_Validator): + """DTD(self, file=None, external_id=None) + A DTD validator. + + Can load from filesystem directly given a filename or file-like object. + Alternatively, pass the keyword parameter ``external_id`` to load from a + catalog. + """ + cdef tree.xmlDtd* _c_dtd + def __init__(self, file=None, *, external_id=None): + _Validator.__init__(self) + if file is not None: + file = _getFSPathOrObject(file) + if _isString(file): + file = _encodeFilename(file) + with self._error_log: + orig_loader = _register_document_loader() + self._c_dtd = xmlparser.xmlParseDTD(NULL, _xcstr(file)) + _reset_document_loader(orig_loader) + elif hasattr(file, 'read'): + orig_loader = _register_document_loader() + self._c_dtd = _parseDtdFromFilelike(file) + _reset_document_loader(orig_loader) + else: + raise DTDParseError, "file must be a filename, file-like or path-like object" + elif external_id is not None: + with self._error_log: + orig_loader = _register_document_loader() + self._c_dtd = xmlparser.xmlParseDTD(external_id, NULL) + _reset_document_loader(orig_loader) + else: + raise DTDParseError, "either filename or external ID required" + + if self._c_dtd is NULL: + raise DTDParseError( + self._error_log._buildExceptionMessage("error parsing DTD"), + self._error_log) + + @property + def name(self): + if self._c_dtd is NULL: + return None + return funicodeOrNone(self._c_dtd.name) + + @property + def external_id(self): + if self._c_dtd is NULL: + return None + return funicodeOrNone(self._c_dtd.ExternalID) + + @property + def system_url(self): + if self._c_dtd is NULL: + return None + return funicodeOrNone(self._c_dtd.SystemID) + + def iterelements(self): + cdef tree.xmlNode *c_node = self._c_dtd.children if self._c_dtd is not NULL else NULL + while c_node is not NULL: + if c_node.type == tree.XML_ELEMENT_DECL: + node = _DTDElementDecl() + node._dtd = self + node._c_node = c_node + yield node + c_node = c_node.next + + def elements(self): + return list(self.iterelements()) + + def iterentities(self): + cdef tree.xmlNode *c_node = self._c_dtd.children if self._c_dtd is not NULL else NULL + while c_node is not NULL: + if c_node.type == tree.XML_ENTITY_DECL: + node = _DTDEntityDecl() + node._dtd = self + node._c_node = c_node + yield node + c_node = c_node.next + + def entities(self): + return list(self.iterentities()) + + def __dealloc__(self): + tree.xmlFreeDtd(self._c_dtd) + + def __call__(self, etree): + """__call__(self, etree) + + Validate doc using the DTD. + + Returns true if the document is valid, false if not. + """ + cdef _Document doc + cdef _Element root_node + cdef xmlDoc* c_doc + cdef dtdvalid.xmlValidCtxt* valid_ctxt + cdef int ret = -1 + + assert self._c_dtd is not NULL, "DTD not initialised" + doc = _documentOrRaise(etree) + root_node = _rootNodeOrRaise(etree) + + valid_ctxt = dtdvalid.xmlNewValidCtxt() + if valid_ctxt is NULL: + raise DTDError("Failed to create validation context") + + # work around error reporting bug in libxml2 <= 2.9.1 (and later?) 
+ # https://bugzilla.gnome.org/show_bug.cgi?id=724903 + valid_ctxt.error = _nullGenericErrorFunc + valid_ctxt.userData = NULL + + try: + with self._error_log: + c_doc = _fakeRootDoc(doc._c_doc, root_node._c_node) + ret = dtdvalid.xmlValidateDtd(valid_ctxt, c_doc, self._c_dtd) + _destroyFakeDoc(doc._c_doc, c_doc) + finally: + dtdvalid.xmlFreeValidCtxt(valid_ctxt) + + if ret == -1: + raise DTDValidateError("Internal error in DTD validation", + self._error_log) + return ret == 1 + + +cdef tree.xmlDtd* _parseDtdFromFilelike(file) except NULL: + cdef _ExceptionContext exc_context + cdef _FileReaderContext dtd_parser + cdef _ErrorLog error_log + cdef tree.xmlDtd* c_dtd = NULL + exc_context = _ExceptionContext() + dtd_parser = _FileReaderContext(file, exc_context, None) + error_log = _ErrorLog() + + with error_log: + c_dtd = dtd_parser._readDtd() + + exc_context._raise_if_stored() + if c_dtd is NULL: + raise DTDParseError("error parsing DTD", error_log) + return c_dtd + +cdef DTD _dtdFactory(tree.xmlDtd* c_dtd): + # do not run through DTD.__init__()! + cdef DTD dtd + if c_dtd is NULL: + return None + dtd = DTD.__new__(DTD) + dtd._c_dtd = _copyDtd(c_dtd) + _Validator.__init__(dtd) + return dtd + + +cdef tree.xmlDtd* _copyDtd(tree.xmlDtd* c_orig_dtd) except NULL: + """ + Copy a DTD. libxml2 (currently) fails to set up the element->attributes + links when copying DTDs, so we have to rebuild them here. + """ + c_dtd = tree.xmlCopyDtd(c_orig_dtd) + if not c_dtd: + raise MemoryError + cdef tree.xmlNode* c_node = c_dtd.children + while c_node: + if c_node.type == tree.XML_ATTRIBUTE_DECL: + _linkDtdAttribute(c_dtd, c_node) + c_node = c_node.next + return c_dtd + + +cdef void _linkDtdAttribute(tree.xmlDtd* c_dtd, tree.xmlAttribute* c_attr) noexcept: + """ + Create the link to the DTD attribute declaration from the corresponding + element declaration. + """ + c_elem = dtdvalid.xmlGetDtdElementDesc(c_dtd, c_attr.elem) + if not c_elem: + # no such element? something is wrong with the DTD ... 
+ return + c_pos = c_elem.attributes + if not c_pos: + c_elem.attributes = c_attr + c_attr.nexth = NULL + return + # libxml2 keeps namespace declarations first, and we need to make + # sure we don't re-insert attributes that are already there + if _isDtdNsDecl(c_attr): + if not _isDtdNsDecl(c_pos): + c_elem.attributes = c_attr + c_attr.nexth = c_pos + return + while c_pos != c_attr and c_pos.nexth and _isDtdNsDecl(c_pos.nexth): + c_pos = c_pos.nexth + else: + # append at end + while c_pos != c_attr and c_pos.nexth: + c_pos = c_pos.nexth + if c_pos == c_attr: + return + c_attr.nexth = c_pos.nexth + c_pos.nexth = c_attr + + +cdef bint _isDtdNsDecl(tree.xmlAttribute* c_attr) noexcept: + if cstring_h.strcmp(c_attr.name, "xmlns") == 0: + return True + if (c_attr.prefix is not NULL and + cstring_h.strcmp(c_attr.prefix, "xmlns") == 0): + return True + return False diff --git a/venv/lib/python3.10/site-packages/lxml/etree.h b/venv/lib/python3.10/site-packages/lxml/etree.h new file mode 100644 index 0000000000000000000000000000000000000000..5ffc7ba32670f056a6415ab60ffb8240fb6d4a28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/etree.h @@ -0,0 +1,248 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE__lxml__etree +#define __PYX_HAVE__lxml__etree + +#include "Python.h" +struct LxmlDocument; +struct LxmlElement; +struct LxmlElementTree; +struct LxmlElementTagMatcher; +struct LxmlElementIterator; +struct LxmlElementBase; +struct LxmlElementClassLookup; +struct LxmlFallbackElementClassLookup; + +/* "lxml/etree.pyx":333 + * + * # type of a function that steps from node to node + * ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*) # <<<<<<<<<<<<<< + * + * + */ +typedef xmlNode *(*_node_to_node_function)(xmlNode *); + +/* "lxml/etree.pyx":349 + * @cython.final + * @cython.freelist(8) + * cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]: # <<<<<<<<<<<<<< + * """Internal base class to reference a libxml document. + * + */ +struct LxmlDocument { + PyObject_HEAD + struct __pyx_vtabstruct_4lxml_5etree__Document *__pyx_vtab; + int _ns_counter; + PyObject *_prefix_tail; + xmlDoc *_c_doc; + struct __pyx_obj_4lxml_5etree__BaseParser *_parser; +}; + +/* "lxml/etree.pyx":698 + * + * @cython.no_gc_clear + * cdef public class _Element [ type LxmlElementType, object LxmlElement ]: # <<<<<<<<<<<<<< + * """Element class. 
+ * + */ +struct LxmlElement { + PyObject_HEAD + struct LxmlDocument *_doc; + xmlNode *_c_node; + PyObject *_tag; +}; + +/* "lxml/etree.pyx":1872 + * + * + * cdef public class _ElementTree [ type LxmlElementTreeType, # <<<<<<<<<<<<<< + * object LxmlElementTree ]: + * cdef _Document _doc + */ +struct LxmlElementTree { + PyObject_HEAD + struct __pyx_vtabstruct_4lxml_5etree__ElementTree *__pyx_vtab; + struct LxmlDocument *_doc; + struct LxmlElement *_context_node; +}; + +/* "lxml/etree.pyx":2646 + * + * + * cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher, # <<<<<<<<<<<<<< + * type LxmlElementTagMatcherType ]: + * """ + */ +struct LxmlElementTagMatcher { + PyObject_HEAD + struct __pyx_vtabstruct_4lxml_5etree__ElementTagMatcher *__pyx_vtab; + PyObject *_pystrings; + int _node_type; + char *_href; + char *_name; +}; + +/* "lxml/etree.pyx":2677 + * self._name = NULL + * + * cdef public class _ElementIterator(_ElementTagMatcher) [ # <<<<<<<<<<<<<< + * object LxmlElementIterator, type LxmlElementIteratorType ]: + * """ + */ +struct LxmlElementIterator { + struct LxmlElementTagMatcher __pyx_base; + struct LxmlElement *_node; + _node_to_node_function _next_element; +}; + +/* "src/lxml/classlookup.pxi":6 + * # Custom Element classes + * + * cdef public class ElementBase(_Element) [ type LxmlElementBaseType, # <<<<<<<<<<<<<< + * object LxmlElementBase ]: + * """ElementBase(*children, attrib=None, nsmap=None, **_extra) + */ +struct LxmlElementBase { + struct LxmlElement __pyx_base; +}; + +/* "src/lxml/classlookup.pxi":210 + * # Element class lookup + * + * ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*) # <<<<<<<<<<<<<< + * + * # class to store element class lookup functions + */ +typedef PyObject *(*_element_class_lookup_function)(PyObject *, struct LxmlDocument *, xmlNode *); + +/* "src/lxml/classlookup.pxi":213 + * + * # class to store element class lookup functions + * cdef public class ElementClassLookup [ type LxmlElementClassLookupType, # <<<<<<<<<<<<<< + * object LxmlElementClassLookup ]: + * """ElementClassLookup(self) + */ +struct LxmlElementClassLookup { + PyObject_HEAD + _element_class_lookup_function _lookup_function; +}; + +/* "src/lxml/classlookup.pxi":221 + * + * + * cdef public class FallbackElementClassLookup(ElementClassLookup) \ # <<<<<<<<<<<<<< + * [ type LxmlFallbackElementClassLookupType, + * object LxmlFallbackElementClassLookup ]: + */ +struct LxmlFallbackElementClassLookup { + struct LxmlElementClassLookup __pyx_base; + struct __pyx_vtabstruct_4lxml_5etree_FallbackElementClassLookup *__pyx_vtab; + struct LxmlElementClassLookup *fallback; + _element_class_lookup_function _fallback_function; +}; + +#ifndef __PYX_HAVE_API__lxml__etree + +#ifdef CYTHON_EXTERN_C + #undef __PYX_EXTERN_C + #define __PYX_EXTERN_C CYTHON_EXTERN_C +#elif defined(__PYX_EXTERN_C) + #ifdef _MSC_VER + #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") + #else + #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. 
+ #endif +#else + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#ifndef DL_IMPORT + #define DL_IMPORT(_T) _T +#endif + +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlDocumentType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTreeType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTagMatcherType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementIteratorType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementBaseType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementClassLookupType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlFallbackElementClassLookupType; + +__PYX_EXTERN_C struct LxmlElement *deepcopyNodeToDocument(struct LxmlDocument *, xmlNode *); +__PYX_EXTERN_C struct LxmlElementTree *elementTreeFactory(struct LxmlElement *); +__PYX_EXTERN_C struct LxmlElementTree *newElementTree(struct LxmlElement *, PyObject *); +__PYX_EXTERN_C struct LxmlElementTree *adoptExternalDocument(xmlDoc *, PyObject *, int); +__PYX_EXTERN_C struct LxmlElement *elementFactory(struct LxmlDocument *, xmlNode *); +__PYX_EXTERN_C struct LxmlElement *makeElement(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); +__PYX_EXTERN_C struct LxmlElement *makeSubElement(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); +__PYX_EXTERN_C void setElementClassLookupFunction(_element_class_lookup_function, PyObject *); +__PYX_EXTERN_C PyObject *lookupDefaultElementClass(PyObject *, PyObject *, xmlNode *); +__PYX_EXTERN_C PyObject *lookupNamespaceElementClass(PyObject *, PyObject *, xmlNode *); +__PYX_EXTERN_C PyObject *callLookupFallback(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *); +__PYX_EXTERN_C int tagMatches(xmlNode *, const xmlChar *, const xmlChar *); +__PYX_EXTERN_C struct LxmlDocument *documentOrRaise(PyObject *); +__PYX_EXTERN_C struct LxmlElement *rootNodeOrRaise(PyObject *); +__PYX_EXTERN_C int hasText(xmlNode *); +__PYX_EXTERN_C int hasTail(xmlNode *); +__PYX_EXTERN_C PyObject *textOf(xmlNode *); +__PYX_EXTERN_C PyObject *tailOf(xmlNode *); +__PYX_EXTERN_C int setNodeText(xmlNode *, PyObject *); +__PYX_EXTERN_C int setTailText(xmlNode *, PyObject *); +__PYX_EXTERN_C PyObject *attributeValue(xmlNode *, xmlAttr *); +__PYX_EXTERN_C PyObject *attributeValueFromNsName(xmlNode *, const xmlChar *, const xmlChar *); +__PYX_EXTERN_C PyObject *getAttributeValue(struct LxmlElement *, PyObject *, PyObject *); +__PYX_EXTERN_C PyObject *iterattributes(struct LxmlElement *, int); +__PYX_EXTERN_C PyObject *collectAttributes(xmlNode *, int); +__PYX_EXTERN_C int setAttributeValue(struct LxmlElement *, PyObject *, PyObject *); +__PYX_EXTERN_C int delAttribute(struct LxmlElement *, PyObject *); +__PYX_EXTERN_C int delAttributeFromNsName(xmlNode *, const xmlChar *, const xmlChar *); +__PYX_EXTERN_C int hasChild(xmlNode *); +__PYX_EXTERN_C xmlNode *findChild(xmlNode *, Py_ssize_t); +__PYX_EXTERN_C xmlNode *findChildForwards(xmlNode *, Py_ssize_t); +__PYX_EXTERN_C xmlNode *findChildBackwards(xmlNode *, Py_ssize_t); +__PYX_EXTERN_C xmlNode *nextElement(xmlNode *); +__PYX_EXTERN_C xmlNode *previousElement(xmlNode *); +__PYX_EXTERN_C void appendChild(struct LxmlElement *, struct LxmlElement *); +__PYX_EXTERN_C int appendChildToElement(struct LxmlElement *, struct LxmlElement *); +__PYX_EXTERN_C PyObject *pyunicode(const xmlChar *); +__PYX_EXTERN_C PyObject *utf8(PyObject *); +__PYX_EXTERN_C 
PyObject *getNsTag(PyObject *); +__PYX_EXTERN_C PyObject *getNsTagWithEmptyNs(PyObject *); +__PYX_EXTERN_C PyObject *namespacedName(xmlNode *); +__PYX_EXTERN_C PyObject *namespacedNameFromNsName(const xmlChar *, const xmlChar *); +__PYX_EXTERN_C void iteratorStoreNext(struct LxmlElementIterator *, struct LxmlElement *); +__PYX_EXTERN_C void initTagMatch(struct LxmlElementTagMatcher *, PyObject *); +__PYX_EXTERN_C xmlNs *findOrBuildNodeNsPrefix(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *); + +#endif /* !__PYX_HAVE_API__lxml__etree */ + +/* WARNING: the interface of the module init function changed in CPython 3.5. */ +/* It now returns a PyModuleDef instance instead of a PyModule instance. */ + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initetree(void); +#else +/* WARNING: Use PyImport_AppendInittab("etree", PyInit_etree) instead of calling PyInit_etree directly from Python 3.5 */ +PyMODINIT_FUNC PyInit_etree(void); + +#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L)) +#if defined(__cplusplus) && __cplusplus >= 201402L +[[deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")]] inline +#elif defined(__GNUC__) || defined(__clang__) +__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly."), __unused__)) __inline__ +#elif defined(_MSC_VER) +__declspec(deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")) __inline +#endif +static PyObject* __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyObject* res) { + return res; +} +#define PyInit_etree() __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyInit_etree()) +#endif +#endif + +#endif /* !__PYX_HAVE__lxml__etree */ diff --git a/venv/lib/python3.10/site-packages/lxml/etree.pyx b/venv/lib/python3.10/site-packages/lxml/etree.pyx new file mode 100644 index 0000000000000000000000000000000000000000..31b2c52da145b249d4f2804ee125d5355eb1055a --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/etree.pyx @@ -0,0 +1,3712 @@ +# cython: binding=True +# cython: auto_pickle=False +# cython: language_level=3 + +""" +The ``lxml.etree`` module implements the extended ElementTree API for XML. 
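+
+For a first impression of the API (an illustrative sketch only; the XML
+snippet is made up)::
+
+    from lxml import etree
+    root = etree.fromstring('<root><child name="x"/></root>')
+    print(root[0].get('name'))            # -> x
+    print(etree.tostring(root).decode())  # serialises the tree back to XML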
+""" + +__docformat__ = "restructuredtext en" + +__all__ = [ + 'AttributeBasedElementClassLookup', 'C14NError', 'C14NWriterTarget', 'CDATA', + 'Comment', 'CommentBase', 'CustomElementClassLookup', 'DEBUG', + 'DTD', 'DTDError', 'DTDParseError', 'DTDValidateError', + 'DocumentInvalid', 'ETCompatXMLParser', 'ETXPath', 'Element', + 'ElementBase', 'ElementClassLookup', 'ElementDefaultClassLookup', + 'ElementNamespaceClassLookup', 'ElementTree', 'Entity', 'EntityBase', + 'Error', 'ErrorDomains', 'ErrorLevels', 'ErrorTypes', 'Extension', + 'FallbackElementClassLookup', 'FunctionNamespace', 'HTML', + 'HTMLParser', 'LIBXML_COMPILED_VERSION', 'LIBXML_VERSION', + 'LIBXSLT_COMPILED_VERSION', 'LIBXSLT_VERSION', 'LXML_VERSION', + 'LxmlError', 'LxmlRegistryError', 'LxmlSyntaxError', + 'NamespaceRegistryError', 'PI', 'PIBase', 'ParseError', + 'ParserBasedElementClassLookup', 'ParserError', 'ProcessingInstruction', + 'PyErrorLog', 'PythonElementClassLookup', 'QName', 'RelaxNG', + 'RelaxNGError', 'RelaxNGErrorTypes', 'RelaxNGParseError', + 'RelaxNGValidateError', 'Resolver', 'Schematron', 'SchematronError', + 'SchematronParseError', 'SchematronValidateError', 'SerialisationError', + 'SubElement', 'TreeBuilder', 'XInclude', 'XIncludeError', 'XML', + 'XMLDTDID', 'XMLID', 'XMLParser', 'XMLSchema', 'XMLSchemaError', + 'XMLSchemaParseError', 'XMLSchemaValidateError', 'XMLSyntaxError', + 'XMLTreeBuilder', 'XPath', 'XPathDocumentEvaluator', 'XPathError', + 'XPathEvalError', 'XPathEvaluator', 'XPathFunctionError', 'XPathResultError', + 'XPathSyntaxError', 'XSLT', 'XSLTAccessControl', 'XSLTApplyError', + 'XSLTError', 'XSLTExtension', 'XSLTExtensionError', 'XSLTParseError', + 'XSLTSaveError', 'canonicalize', + 'cleanup_namespaces', 'clear_error_log', 'dump', + 'fromstring', 'fromstringlist', 'get_default_parser', 'iselement', + 'iterparse', 'iterwalk', 'parse', 'parseid', 'register_namespace', + 'set_default_parser', 'set_element_class_lookup', 'strip_attributes', + 'strip_elements', 'strip_tags', 'tostring', 'tostringlist', 'tounicode', + 'use_global_python_log' + ] + +cimport cython + +from lxml cimport python +from lxml.includes cimport tree, config +from lxml.includes.tree cimport xmlDoc, xmlNode, xmlAttr, xmlNs, _isElement, _getNs +from lxml.includes.tree cimport const_xmlChar, xmlChar, _xcstr +from lxml.python cimport _cstr, _isString +from lxml.includes cimport xpath +from lxml.includes cimport c14n + +# Cython's standard declarations +cimport cpython.mem +cimport cpython.ref +from libc cimport limits, stdio, stdlib +from libc cimport string as cstring_h # not to be confused with stdlib 'string' +from libc.string cimport const_char + +cdef object os_path_abspath +from os.path import abspath as os_path_abspath + +cdef object BytesIO, StringIO +from io import BytesIO, StringIO + +cdef object OrderedDict +from collections import OrderedDict + +cdef object _elementpath +from lxml import _elementpath + +cdef object sys +import sys + +cdef object re +import re + +cdef object partial +from functools import partial + +cdef object islice +from itertools import islice + +cdef object ITER_EMPTY = iter(()) + +cdef object MutableMapping +from collections.abc import MutableMapping + +class _ImmutableMapping(MutableMapping): + def __getitem__(self, key): + raise KeyError, key + + def __setitem__(self, key, value): + raise KeyError, key + + def __delitem__(self, key): + raise KeyError, key + + def __contains__(self, key): + return False + + def __len__(self): + return 0 + + def __iter__(self): + return ITER_EMPTY + iterkeys = 
itervalues = iteritems = __iter__ + +cdef object IMMUTABLE_EMPTY_MAPPING = _ImmutableMapping() +del _ImmutableMapping + + +# the rules +# --------- +# any libxml C argument/variable is prefixed with c_ +# any non-public function/class is prefixed with an underscore +# instance creation is always through factories + +# what to do with libxml2/libxslt error messages? +# 0 : drop +# 1 : use log +DEF __DEBUG = 1 + +# maximum number of lines in the libxml2/xslt log if __DEBUG == 1 +DEF __MAX_LOG_SIZE = 100 + +# make the compiled-in debug state publicly available +DEBUG = __DEBUG + +# A struct to store a cached qualified tag name+href pair. +# While we can borrow the c_name from the document dict, +# PyPy requires us to store a Python reference for the +# namespace in order to keep the byte buffer alive. +cdef struct qname: + const_xmlChar* c_name + python.PyObject* href + +# initialize parser (and threading) +xmlparser.xmlInitParser() + +# global per-thread setup +tree.xmlThrDefIndentTreeOutput(1) +tree.xmlThrDefLineNumbersDefaultValue(1) + +_initThreadLogging() + +# filename encoding +cdef bytes _FILENAME_ENCODING = (sys.getfilesystemencoding() or sys.getdefaultencoding() or 'ascii').encode("UTF-8") +cdef char* _C_FILENAME_ENCODING = _cstr(_FILENAME_ENCODING) + +# set up some default namespace prefixes +cdef dict _DEFAULT_NAMESPACE_PREFIXES = { + b"http://www.w3.org/XML/1998/namespace": b'xml', + b"http://www.w3.org/1999/xhtml": b"html", + b"http://www.w3.org/1999/XSL/Transform": b"xsl", + b"http://www.w3.org/1999/02/22-rdf-syntax-ns#": b"rdf", + b"http://schemas.xmlsoap.org/wsdl/": b"wsdl", + # xml schema + b"http://www.w3.org/2001/XMLSchema": b"xs", + b"http://www.w3.org/2001/XMLSchema-instance": b"xsi", + # dublin core + b"http://purl.org/dc/elements/1.1/": b"dc", + # objectify + b"http://codespeak.net/lxml/objectify/pytype" : b"py", +} + +# To avoid runtime encoding overhead, we keep a Unicode copy +# of the uri-prefix mapping as (str, str) items view. +cdef object _DEFAULT_NAMESPACE_PREFIXES_ITEMS = [] + +cdef _update_default_namespace_prefixes_items(): + cdef bytes ns, prefix + global _DEFAULT_NAMESPACE_PREFIXES_ITEMS + _DEFAULT_NAMESPACE_PREFIXES_ITEMS = { + ns.decode('utf-8') : prefix.decode('utf-8') + for ns, prefix in _DEFAULT_NAMESPACE_PREFIXES.items() + }.items() + +_update_default_namespace_prefixes_items() + +cdef object _check_internal_prefix = re.compile(br"ns\d+$").match + +def register_namespace(prefix, uri): + """Registers a namespace prefix that newly created Elements in that + namespace will use. The registry is global, and any existing + mapping for either the given prefix or the namespace URI will be + removed. 
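+
+    An illustrative sketch (the prefix and URI below are made up)::
+
+        from lxml import etree
+        etree.register_namespace('ex', 'http://example.com/ns')
+        el = etree.Element('{http://example.com/ns}item')
+        # el should now serialise using the registered prefix, roughly as
+        # b'<ex:item xmlns:ex="http://example.com/ns"/>'
+        etree.tostring(el)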
+ """ + prefix_utf, uri_utf = _utf8(prefix), _utf8(uri) + if _check_internal_prefix(prefix_utf): + raise ValueError("Prefix format reserved for internal use") + _tagValidOrRaise(prefix_utf) + _uriValidOrRaise(uri_utf) + if (uri_utf == b"http://www.w3.org/XML/1998/namespace" and prefix_utf != b'xml' + or prefix_utf == b'xml' and uri_utf != b"http://www.w3.org/XML/1998/namespace"): + raise ValueError("Cannot change the 'xml' prefix of the XML namespace") + for k, v in list(_DEFAULT_NAMESPACE_PREFIXES.items()): + if k == uri_utf or v == prefix_utf: + del _DEFAULT_NAMESPACE_PREFIXES[k] + _DEFAULT_NAMESPACE_PREFIXES[uri_utf] = prefix_utf + _update_default_namespace_prefixes_items() + + +# Error superclass for ElementTree compatibility +cdef class Error(Exception): + pass + +# module level superclass for all exceptions +cdef class LxmlError(Error): + """Main exception base class for lxml. All other exceptions inherit from + this one. + """ + def __init__(self, message, error_log=None): + super(_Error, self).__init__(message) + if error_log is None: + self.error_log = __copyGlobalErrorLog() + else: + self.error_log = error_log.copy() + +cdef object _Error = Error + + +# superclass for all syntax errors +class LxmlSyntaxError(LxmlError, SyntaxError): + """Base class for all syntax errors. + """ + +cdef class C14NError(LxmlError): + """Error during C14N serialisation. + """ + +# version information +cdef __unpackDottedVersion(version): + version_list = [] + l = (version.decode("ascii").replace('-', '.').split('.') + [0]*4)[:4] + for item in l: + try: + item = int(item) + except ValueError: + if item.startswith('dev'): + count = item[3:] + item = -300 + elif item.startswith('alpha'): + count = item[5:] + item = -200 + elif item.startswith('beta'): + count = item[4:] + item = -100 + else: + count = 0 + if count: + item += int(count) + version_list.append(item) + return tuple(version_list) + +cdef __unpackIntVersion(int c_version): + return ( + ((c_version // (100*100)) % 100), + ((c_version // 100) % 100), + (c_version % 100) + ) + +cdef int _LIBXML_VERSION_INT +try: + _LIBXML_VERSION_INT = int( + re.match('[0-9]+', (tree.xmlParserVersion).decode("ascii")).group(0)) +except Exception: + print("Unknown libxml2 version: " + (tree.xmlParserVersion).decode("latin1")) + _LIBXML_VERSION_INT = 0 + +LIBXML_VERSION = __unpackIntVersion(_LIBXML_VERSION_INT) +LIBXML_COMPILED_VERSION = __unpackIntVersion(tree.LIBXML_VERSION) +LXML_VERSION = __unpackDottedVersion(tree.LXML_VERSION_STRING) + +__version__ = tree.LXML_VERSION_STRING.decode("ascii") + + +# class for temporary storage of Python references, +# used e.g. 
for XPath results +@cython.final +@cython.internal +cdef class _TempStore: + cdef list _storage + def __init__(self): + self._storage = [] + + cdef int add(self, obj) except -1: + self._storage.append(obj) + return 0 + + cdef int clear(self) except -1: + del self._storage[:] + return 0 + + +# class for temporarily storing exceptions raised in extensions +@cython.internal +cdef class _ExceptionContext: + cdef object _exc_info + cdef int clear(self) except -1: + self._exc_info = None + return 0 + + cdef void _store_raised(self) noexcept: + try: + self._exc_info = sys.exc_info() + except BaseException as e: + self._store_exception(e) + finally: + return # and swallow any further exceptions + + cdef int _store_exception(self, exception) except -1: + self._exc_info = (exception, None, None) + return 0 + + cdef bint _has_raised(self) except -1: + return self._exc_info is not None + + cdef int _raise_if_stored(self) except -1: + if self._exc_info is None: + return 0 + type, value, traceback = self._exc_info + self._exc_info = None + if value is None and traceback is None: + raise type + else: + raise type, value, traceback + + +# type of a function that steps from node to node +ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*) + + +################################################################################ +# Include submodules + +include "proxy.pxi" # Proxy handling (element backpointers/memory/etc.) +include "apihelpers.pxi" # Private helper functions +include "xmlerror.pxi" # Error and log handling + + +################################################################################ +# Public Python API + +@cython.final +@cython.freelist(8) +cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]: + """Internal base class to reference a libxml document. + + When instances of this class are garbage collected, the libxml + document is cleaned up. 
+ """ + cdef int _ns_counter + cdef bytes _prefix_tail + cdef xmlDoc* _c_doc + cdef _BaseParser _parser + + def __dealloc__(self): + # if there are no more references to the document, it is safe + # to clean the whole thing up, as all nodes have a reference to + # the document + tree.xmlFreeDoc(self._c_doc) + + @cython.final + cdef getroot(self): + # return an element proxy for the document root + cdef xmlNode* c_node + c_node = tree.xmlDocGetRootElement(self._c_doc) + if c_node is NULL: + return None + return _elementFactory(self, c_node) + + @cython.final + cdef bint hasdoctype(self) noexcept: + # DOCTYPE gets parsed into internal subset (xmlDTD*) + return self._c_doc is not NULL and self._c_doc.intSubset is not NULL + + @cython.final + cdef getdoctype(self): + # get doctype info: root tag, public/system ID (or None if not known) + cdef tree.xmlDtd* c_dtd + cdef xmlNode* c_root_node + public_id = None + sys_url = None + c_dtd = self._c_doc.intSubset + if c_dtd is not NULL: + if c_dtd.ExternalID is not NULL: + public_id = funicode(c_dtd.ExternalID) + if c_dtd.SystemID is not NULL: + sys_url = funicode(c_dtd.SystemID) + c_dtd = self._c_doc.extSubset + if c_dtd is not NULL: + if not public_id and c_dtd.ExternalID is not NULL: + public_id = funicode(c_dtd.ExternalID) + if not sys_url and c_dtd.SystemID is not NULL: + sys_url = funicode(c_dtd.SystemID) + c_root_node = tree.xmlDocGetRootElement(self._c_doc) + if c_root_node is NULL: + root_name = None + else: + root_name = funicode(c_root_node.name) + return root_name, public_id, sys_url + + @cython.final + cdef getxmlinfo(self): + # return XML version and encoding (or None if not known) + cdef xmlDoc* c_doc = self._c_doc + if c_doc.version is NULL: + version = None + else: + version = funicode(c_doc.version) + if c_doc.encoding is NULL: + encoding = None + else: + encoding = funicode(c_doc.encoding) + return version, encoding + + @cython.final + cdef isstandalone(self): + # returns True for "standalone=true", + # False for "standalone=false", None if not provided + if self._c_doc.standalone == -1: + return None + else: + return (self._c_doc.standalone == 1) + + @cython.final + cdef bytes buildNewPrefix(self): + # get a new unique prefix ("nsX") for this document + cdef bytes ns + if self._ns_counter < len(_PREFIX_CACHE): + ns = _PREFIX_CACHE[self._ns_counter] + else: + ns = python.PyBytes_FromFormat("ns%d", self._ns_counter) + if self._prefix_tail is not None: + ns += self._prefix_tail + self._ns_counter += 1 + if self._ns_counter < 0: + # overflow! + self._ns_counter = 0 + if self._prefix_tail is None: + self._prefix_tail = b"A" + else: + self._prefix_tail += b"A" + return ns + + @cython.final + cdef xmlNs* _findOrBuildNodeNs(self, xmlNode* c_node, + const_xmlChar* c_href, const_xmlChar* c_prefix, + bint is_attribute) except NULL: + """Get or create namespace structure for a node. Reuses the prefix if + possible. 
+ """ + cdef xmlNs* c_ns + cdef xmlNs* c_doc_ns + cdef python.PyObject* dict_result + if c_node.type != tree.XML_ELEMENT_NODE: + assert c_node.type == tree.XML_ELEMENT_NODE, \ + "invalid node type %d, expected %d" % ( + c_node.type, tree.XML_ELEMENT_NODE) + # look for existing ns declaration + c_ns = _searchNsByHref(c_node, c_href, is_attribute) + if c_ns is not NULL: + if is_attribute and c_ns.prefix is NULL: + # do not put namespaced attributes into the default + # namespace as this would break serialisation + pass + else: + return c_ns + + # none found => determine a suitable new prefix + if c_prefix is NULL: + dict_result = python.PyDict_GetItem( + _DEFAULT_NAMESPACE_PREFIXES, c_href) + if dict_result is not NULL: + prefix = dict_result + else: + prefix = self.buildNewPrefix() + c_prefix = _xcstr(prefix) + + # make sure the prefix is not in use already + while tree.xmlSearchNs(self._c_doc, c_node, c_prefix) is not NULL: + prefix = self.buildNewPrefix() + c_prefix = _xcstr(prefix) + + # declare the namespace and return it + c_ns = tree.xmlNewNs(c_node, c_href, c_prefix) + if c_ns is NULL: + raise MemoryError() + return c_ns + + @cython.final + cdef int _setNodeNs(self, xmlNode* c_node, const_xmlChar* c_href) except -1: + "Lookup namespace structure and set it for the node." + c_ns = self._findOrBuildNodeNs(c_node, c_href, NULL, 0) + tree.xmlSetNs(c_node, c_ns) + +cdef tuple __initPrefixCache(): + cdef int i + return tuple([ python.PyBytes_FromFormat("ns%d", i) + for i in range(30) ]) + +cdef tuple _PREFIX_CACHE = __initPrefixCache() + +cdef _Document _documentFactory(xmlDoc* c_doc, _BaseParser parser): + cdef _Document result + result = _Document.__new__(_Document) + result._c_doc = c_doc + result._ns_counter = 0 + result._prefix_tail = None + if parser is None: + parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser() + result._parser = parser + return result + + +cdef object _find_invalid_public_id_characters = re.compile( + ur"[^\x20\x0D\x0Aa-zA-Z0-9'()+,./:=?;!*#@$_%-]+").search + + +cdef class DocInfo: + "Document information provided by parser and DTD." + cdef _Document _doc + def __cinit__(self, tree): + "Create a DocInfo object for an ElementTree object or root Element." + self._doc = _documentOrRaise(tree) + root_name, public_id, system_url = self._doc.getdoctype() + if not root_name and (public_id or system_url): + raise ValueError, "Could not find root node" + + @property + def root_name(self): + """Returns the name of the root node as defined by the DOCTYPE.""" + root_name, public_id, system_url = self._doc.getdoctype() + return root_name + + @cython.final + cdef tree.xmlDtd* _get_c_dtd(self): + """"Return the DTD. Create it if it does not yet exist.""" + cdef xmlDoc* c_doc = self._doc._c_doc + cdef xmlNode* c_root_node + cdef const_xmlChar* c_name + + if c_doc.intSubset: + return c_doc.intSubset + + c_root_node = tree.xmlDocGetRootElement(c_doc) + c_name = c_root_node.name if c_root_node else NULL + return tree.xmlCreateIntSubset(c_doc, c_name, NULL, NULL) + + def clear(self): + """Removes DOCTYPE and internal subset from the document.""" + cdef xmlDoc* c_doc = self._doc._c_doc + cdef tree.xmlNode* c_dtd = c_doc.intSubset + if c_dtd is NULL: + return + tree.xmlUnlinkNode(c_dtd) + tree.xmlFreeNode(c_dtd) + + property public_id: + """Public ID of the DOCTYPE. + + Mutable. May be set to a valid string or None. If a DTD does not + exist, setting this variable (even to None) will create one. 
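+
+        An illustrative sketch (the document and identifier are made up)::
+
+            from lxml import etree
+            tree = etree.fromstring('<root/>').getroottree()
+            # setting the value creates a DTD node if none exists yet
+            tree.docinfo.public_id = '-//EXAMPLE//DTD Example 1.0//EN'
+            print(tree.docinfo.public_id)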
+ """ + def __get__(self): + root_name, public_id, system_url = self._doc.getdoctype() + return public_id + + def __set__(self, value): + cdef xmlChar* c_value = NULL + if value is not None: + match = _find_invalid_public_id_characters(value) + if match: + raise ValueError, f'Invalid character(s) {match.group(0)!r} in public_id.' + value = _utf8(value) + c_value = tree.xmlStrdup(_xcstr(value)) + if not c_value: + raise MemoryError() + + c_dtd = self._get_c_dtd() + if not c_dtd: + tree.xmlFree(c_value) + raise MemoryError() + if c_dtd.ExternalID: + tree.xmlFree(c_dtd.ExternalID) + c_dtd.ExternalID = c_value + + property system_url: + """System ID of the DOCTYPE. + + Mutable. May be set to a valid string or None. If a DTD does not + exist, setting this variable (even to None) will create one. + """ + def __get__(self): + root_name, public_id, system_url = self._doc.getdoctype() + return system_url + + def __set__(self, value): + cdef xmlChar* c_value = NULL + if value is not None: + bvalue = _utf8(value) + # sys_url may be any valid unicode string that can be + # enclosed in single quotes or quotes. + if b"'" in bvalue and b'"' in bvalue: + raise ValueError( + 'System URL may not contain both single (\') and double quotes (").') + c_value = tree.xmlStrdup(_xcstr(bvalue)) + if not c_value: + raise MemoryError() + + c_dtd = self._get_c_dtd() + if not c_dtd: + tree.xmlFree(c_value) + raise MemoryError() + if c_dtd.SystemID: + tree.xmlFree(c_dtd.SystemID) + c_dtd.SystemID = c_value + + @property + def xml_version(self): + """Returns the XML version as declared by the document.""" + xml_version, encoding = self._doc.getxmlinfo() + return xml_version + + @property + def encoding(self): + """Returns the encoding name as declared by the document.""" + xml_version, encoding = self._doc.getxmlinfo() + return encoding + + @property + def standalone(self): + """Returns the standalone flag as declared by the document. The possible + values are True (``standalone='yes'``), False + (``standalone='no'`` or flag not provided in the declaration), + and None (unknown or no declaration found). Note that a + normal truth test on this value will always tell if the + ``standalone`` flag was set to ``'yes'`` or not. + """ + return self._doc.isstandalone() + + property URL: + "The source URL of the document (or None if unknown)." + def __get__(self): + if self._doc._c_doc.URL is NULL: + return None + return _decodeFilename(self._doc._c_doc.URL) + def __set__(self, url): + url = _encodeFilename(url) + c_oldurl = self._doc._c_doc.URL + if url is None: + self._doc._c_doc.URL = NULL + else: + self._doc._c_doc.URL = tree.xmlStrdup(_xcstr(url)) + if c_oldurl is not NULL: + tree.xmlFree(c_oldurl) + + @property + def doctype(self): + """Returns a DOCTYPE declaration string for the document.""" + root_name, public_id, system_url = self._doc.getdoctype() + if system_url: + # If '"' in system_url, we must escape it with single + # quotes, otherwise escape with double quotes. If url + # contains both a single quote and a double quote, XML + # standard is being violated. 
+ if '"' in system_url: + quoted_system_url = f"'{system_url}'" + else: + quoted_system_url = f'"{system_url}"' + if public_id: + if system_url: + return f'<!DOCTYPE {root_name} PUBLIC "{public_id}" {quoted_system_url}>' + else: + return f'<!DOCTYPE {root_name} PUBLIC "{public_id}">' + elif system_url: + return f'<!DOCTYPE {root_name} SYSTEM {quoted_system_url}>' + elif self._doc.hasdoctype(): + return f'<!DOCTYPE {root_name}>' + else: + return '' + + @property + def internalDTD(self): + """Returns a DTD validator based on the internal subset of the document.""" + return _dtdFactory(self._doc._c_doc.intSubset) + + @property + def externalDTD(self): + """Returns a DTD validator based on the external subset of the document.""" + return _dtdFactory(self._doc._c_doc.extSubset) + + +@cython.no_gc_clear +cdef public class _Element [ type LxmlElementType, object LxmlElement ]: + """Element class. + + References a document object and a libxml node. + + By pointing to a Document instance, a reference is kept to + _Document as long as there is some pointer to a node in it. + """ + cdef _Document _doc + cdef xmlNode* _c_node + cdef object _tag + + def _init(self): + """_init(self) + + Called after object initialisation. Custom subclasses may override + this if they recursively call _init() in the superclasses. + """ + + @cython.linetrace(False) + @cython.profile(False) + def __dealloc__(self): + #print("trying to free node:", self._c_node) + #displayNode(self._c_node, 0) + if self._c_node is not NULL: + _unregisterProxy(self) + attemptDeallocation(self._c_node) + + # MANIPULATORS + + def __setitem__(self, x, value): + """__setitem__(self, x, value) + + Replaces the given subelement index or slice. + """ + cdef xmlNode* c_node = NULL + cdef xmlNode* c_next + cdef xmlDoc* c_source_doc + cdef _Element element + cdef bint left_to_right + cdef Py_ssize_t slicelength = 0, step = 0 + _assertValidNode(self) + if value is None: + raise ValueError, "cannot assign None" + if isinstance(x, slice): + # slice assignment + _findChildSlice(x, self._c_node, &c_node, &step, &slicelength) + if step > 0: + left_to_right = 1 + else: + left_to_right = 0 + step = -step + _replaceSlice(self, c_node, slicelength, step, left_to_right, value) + return + else: + # otherwise: normal item assignment + element = value + _assertValidNode(element) + c_node = _findChild(self._c_node, x) + if c_node is NULL: + raise IndexError, "list index out of range" + c_source_doc = element._c_node.doc + c_next = element._c_node.next + _removeText(c_node.next) + tree.xmlReplaceNode(c_node, element._c_node) + _moveTail(c_next, element._c_node) + moveNodeToDocument(self._doc, c_source_doc, element._c_node) + if not attemptDeallocation(c_node): + moveNodeToDocument(self._doc, c_node.doc, c_node) + + def __delitem__(self, x): + """__delitem__(self, x) + + Deletes the given subelement or a slice.
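+
+        An illustrative sketch (element names are made up)::
+
+            from lxml import etree
+            root = etree.fromstring('<root><a/><b/><c/></root>')
+            del root[0]      # removes the first child, <a/>
+            del root[0:2]    # removes the remaining two children as a slice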
+ """ + cdef xmlNode* c_node = NULL + cdef xmlNode* c_next + cdef Py_ssize_t step = 0, slicelength = 0 + _assertValidNode(self) + if isinstance(x, slice): + # slice deletion + if _isFullSlice(x): + c_node = self._c_node.children + if c_node is not NULL: + if not _isElement(c_node): + c_node = _nextElement(c_node) + while c_node is not NULL: + c_next = _nextElement(c_node) + _removeNode(self._doc, c_node) + c_node = c_next + else: + _findChildSlice(x, self._c_node, &c_node, &step, &slicelength) + _deleteSlice(self._doc, c_node, slicelength, step) + else: + # item deletion + c_node = _findChild(self._c_node, x) + if c_node is NULL: + raise IndexError, f"index out of range: {x}" + _removeNode(self._doc, c_node) + + def __deepcopy__(self, memo): + "__deepcopy__(self, memo)" + return self.__copy__() + + def __copy__(self): + "__copy__(self)" + cdef xmlDoc* c_doc + cdef xmlNode* c_node + cdef _Document new_doc + _assertValidNode(self) + c_doc = _copyDocRoot(self._doc._c_doc, self._c_node) # recursive + new_doc = _documentFactory(c_doc, self._doc._parser) + root = new_doc.getroot() + if root is not None: + return root + # Comment/PI + c_node = c_doc.children + while c_node is not NULL and c_node.type != self._c_node.type: + c_node = c_node.next + if c_node is NULL: + return None + return _elementFactory(new_doc, c_node) + + def set(self, key, value): + """set(self, key, value) + + Sets an element attribute. + In HTML documents (not XML or XHTML), the value None is allowed and creates + an attribute without value (just the attribute name). + """ + _assertValidNode(self) + _setAttributeValue(self, key, value) + + def append(self, _Element element not None): + """append(self, element) + + Adds a subelement to the end of this element. + """ + _assertValidNode(self) + _assertValidNode(element) + _appendChild(self, element) + + def addnext(self, _Element element not None): + """addnext(self, element) + + Adds the element as a following sibling directly after this + element. + + This is normally used to set a processing instruction or comment after + the root node of a document. Note that tail text is automatically + discarded when adding at the root level. + """ + _assertValidNode(self) + _assertValidNode(element) + if self._c_node.parent != NULL and not _isElement(self._c_node.parent): + if element._c_node.type not in (tree.XML_PI_NODE, tree.XML_COMMENT_NODE): + raise TypeError, "Only processing instructions and comments can be siblings of the root element" + element.tail = None + _appendSibling(self, element) + + def addprevious(self, _Element element not None): + """addprevious(self, element) + + Adds the element as a preceding sibling directly before this + element. + + This is normally used to set a processing instruction or comment + before the root node of a document. Note that tail text is + automatically discarded when adding at the root level. + """ + _assertValidNode(self) + _assertValidNode(element) + if self._c_node.parent != NULL and not _isElement(self._c_node.parent): + if element._c_node.type != tree.XML_PI_NODE: + if element._c_node.type != tree.XML_COMMENT_NODE: + raise TypeError, "Only processing instructions and comments can be siblings of the root element" + element.tail = None + _prependSibling(self, element) + + def extend(self, elements): + """extend(self, elements) + + Extends the current children by the elements in the iterable. 
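+
+        An illustrative sketch (element names are made up)::
+
+            from lxml import etree
+            root = etree.fromstring('<root/>')
+            root.extend([etree.Element('a'), etree.Element('b')])
+            # root now has the two new elements appended as its children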
+ """ + cdef _Element element + _assertValidNode(self) + for element in elements: + if element is None: + raise TypeError, "Node must not be None" + _assertValidNode(element) + _appendChild(self, element) + + def clear(self, bint keep_tail=False): + """clear(self, keep_tail=False) + + Resets an element. This function removes all subelements, clears + all attributes and sets the text and tail properties to None. + + Pass ``keep_tail=True`` to leave the tail text untouched. + """ + cdef xmlAttr* c_attr + cdef xmlAttr* c_attr_next + cdef xmlNode* c_node + cdef xmlNode* c_node_next + _assertValidNode(self) + c_node = self._c_node + # remove self.text and self.tail + _removeText(c_node.children) + if not keep_tail: + _removeText(c_node.next) + # remove all attributes + c_attr = c_node.properties + if c_attr: + c_node.properties = NULL + tree.xmlFreePropList(c_attr) + # remove all subelements + c_node = c_node.children + if c_node and not _isElement(c_node): + c_node = _nextElement(c_node) + while c_node is not NULL: + c_node_next = _nextElement(c_node) + _removeNode(self._doc, c_node) + c_node = c_node_next + + def insert(self, index: int, _Element element not None): + """insert(self, index, element) + + Inserts a subelement at the given position in this element + """ + cdef xmlNode* c_node + cdef xmlNode* c_next + cdef xmlDoc* c_source_doc + _assertValidNode(self) + _assertValidNode(element) + c_node = _findChild(self._c_node, index) + if c_node is NULL: + _appendChild(self, element) + return + # prevent cycles + if _isAncestorOrSame(element._c_node, self._c_node): + raise ValueError("cannot append parent to itself") + c_source_doc = element._c_node.doc + c_next = element._c_node.next + tree.xmlAddPrevSibling(c_node, element._c_node) + _moveTail(c_next, element._c_node) + moveNodeToDocument(self._doc, c_source_doc, element._c_node) + + def remove(self, _Element element not None): + """remove(self, element) + + Removes a matching subelement. Unlike the find methods, this + method compares elements based on identity, not on tag value + or contents. + """ + cdef xmlNode* c_node + cdef xmlNode* c_next + _assertValidNode(self) + _assertValidNode(element) + c_node = element._c_node + if c_node.parent is not self._c_node: + raise ValueError, "Element is not a child of this node." + c_next = element._c_node.next + tree.xmlUnlinkNode(c_node) + _moveTail(c_next, c_node) + # fix namespace declarations + moveNodeToDocument(self._doc, c_node.doc, c_node) + + def replace(self, _Element old_element not None, + _Element new_element not None): + """replace(self, old_element, new_element) + + Replaces a subelement with the element passed as second argument. + """ + cdef xmlNode* c_old_node + cdef xmlNode* c_old_next + cdef xmlNode* c_new_node + cdef xmlNode* c_new_next + cdef xmlDoc* c_source_doc + _assertValidNode(self) + _assertValidNode(old_element) + _assertValidNode(new_element) + c_old_node = old_element._c_node + if c_old_node.parent is not self._c_node: + raise ValueError, "Element is not a child of this node." 
+ c_new_node = new_element._c_node + # prevent cycles + if _isAncestorOrSame(c_new_node, self._c_node): + raise ValueError("cannot append parent to itself") + # replace node + c_old_next = c_old_node.next + c_new_next = c_new_node.next + c_source_doc = c_new_node.doc + tree.xmlReplaceNode(c_old_node, c_new_node) + _moveTail(c_new_next, c_new_node) + _moveTail(c_old_next, c_old_node) + moveNodeToDocument(self._doc, c_source_doc, c_new_node) + # fix namespace declarations + moveNodeToDocument(self._doc, c_old_node.doc, c_old_node) + + # PROPERTIES + property tag: + """Element tag + """ + def __get__(self): + if self._tag is not None: + return self._tag + _assertValidNode(self) + self._tag = _namespacedName(self._c_node) + return self._tag + + def __set__(self, value): + cdef _BaseParser parser + _assertValidNode(self) + ns, name = _getNsTag(value) + parser = self._doc._parser + if parser is not None and parser._for_html: + _htmlTagValidOrRaise(name) + else: + _tagValidOrRaise(name) + self._tag = value + tree.xmlNodeSetName(self._c_node, _xcstr(name)) + if ns is None: + self._c_node.ns = NULL + else: + self._doc._setNodeNs(self._c_node, _xcstr(ns)) + + @property + def attrib(self): + """Element attribute dictionary. Where possible, use get(), set(), + keys(), values() and items() to access element attributes. + """ + return _Attrib.__new__(_Attrib, self) + + property text: + """Text before the first subelement. This is either a string or + the value None, if there was no text. + """ + def __get__(self): + _assertValidNode(self) + return _collectText(self._c_node.children) + + def __set__(self, value): + _assertValidNode(self) + if isinstance(value, QName): + value = _resolveQNameText(self, value).decode('utf8') + _setNodeText(self._c_node, value) + + # using 'del el.text' is the wrong thing to do + #def __del__(self): + # _setNodeText(self._c_node, None) + + property tail: + """Text after this element's end tag, but before the next sibling + element's start tag. This is either a string or the value None, if + there was no text. + """ + def __get__(self): + _assertValidNode(self) + return _collectText(self._c_node.next) + + def __set__(self, value): + _assertValidNode(self) + _setTailText(self._c_node, value) + + # using 'del el.tail' is the wrong thing to do + #def __del__(self): + # _setTailText(self._c_node, None) + + # not in ElementTree, read-only + @property + def prefix(self): + """Namespace prefix or None. + """ + if self._c_node.ns is not NULL: + if self._c_node.ns.prefix is not NULL: + return funicode(self._c_node.ns.prefix) + return None + + # not in ElementTree, read-only + property sourceline: + """Original line number as found by the parser or None if unknown. + """ + def __get__(self): + cdef long line + _assertValidNode(self) + line = tree.xmlGetLineNo(self._c_node) + return line if line > 0 else None + + def __set__(self, line): + _assertValidNode(self) + if line <= 0: + self._c_node.line = 0 + else: + self._c_node.line = line + + # not in ElementTree, read-only + @property + def nsmap(self): + """Namespace prefix->URI mapping known in the context of this + Element. This includes all namespace declarations of the + parents. + + Note that changing the returned dict has no effect on the Element. + """ + _assertValidNode(self) + return _build_nsmap(self._c_node) + + # not in ElementTree, read-only + property base: + """The base URI of the Element (xml:base or HTML base URL). + None if the base URI is unknown. 
+ + Note that the value depends on the URL of the document that + holds the Element if there is no xml:base attribute on the + Element or its ancestors. + + Setting this property will set an xml:base attribute on the + Element, regardless of the document type (XML or HTML). + """ + def __get__(self): + _assertValidNode(self) + c_base = tree.xmlNodeGetBase(self._doc._c_doc, self._c_node) + if c_base is NULL: + if self._doc._c_doc.URL is NULL: + return None + return _decodeFilename(self._doc._c_doc.URL) + try: + base = _decodeFilename(c_base) + finally: + tree.xmlFree(c_base) + return base + + def __set__(self, url): + _assertValidNode(self) + if url is None: + c_base = NULL + else: + url = _encodeFilename(url) + c_base = _xcstr(url) + tree.xmlNodeSetBase(self._c_node, c_base) + + # ACCESSORS + def __repr__(self): + "__repr__(self)" + return "" % (self.tag, id(self)) + + def __getitem__(self, x): + """Returns the subelement at the given position or the requested + slice. + """ + cdef xmlNode* c_node = NULL + cdef Py_ssize_t step = 0, slicelength = 0 + cdef Py_ssize_t c, i + cdef _node_to_node_function next_element + cdef list result + _assertValidNode(self) + if isinstance(x, slice): + # slicing + if _isFullSlice(x): + return _collectChildren(self) + _findChildSlice(x, self._c_node, &c_node, &step, &slicelength) + if c_node is NULL: + return [] + if step > 0: + next_element = _nextElement + else: + step = -step + next_element = _previousElement + result = [] + c = 0 + while c_node is not NULL and c < slicelength: + result.append(_elementFactory(self._doc, c_node)) + c += 1 + for i in range(step): + c_node = next_element(c_node) + if c_node is NULL: + break + return result + else: + # indexing + c_node = _findChild(self._c_node, x) + if c_node is NULL: + raise IndexError, "list index out of range" + return _elementFactory(self._doc, c_node) + + def __len__(self): + """__len__(self) + + Returns the number of subelements. + """ + _assertValidNode(self) + return _countElements(self._c_node.children) + + def __bool__(self): + """__bool__(self)""" + import warnings + warnings.warn( + "The behavior of this method will change in future versions. " + "Use specific 'len(elem)' or 'elem is not None' test instead.", + FutureWarning + ) + # emulate old behaviour + _assertValidNode(self) + return _hasChild(self._c_node) + + def __contains__(self, element): + "__contains__(self, element)" + cdef xmlNode* c_node + _assertValidNode(self) + if not isinstance(element, _Element): + return 0 + c_node = (<_Element>element)._c_node + return c_node is not NULL and c_node.parent is self._c_node + + def __iter__(self): + "__iter__(self)" + return ElementChildIterator(self) + + def __reversed__(self): + "__reversed__(self)" + return ElementChildIterator(self, reversed=True) + + def index(self, child: _Element, start: int = None, stop: int = None): + """index(self, child, start=None, stop=None) + + Find the position of the child within the parent. + + This method is not part of the original ElementTree API. + """ + cdef Py_ssize_t k, l + cdef Py_ssize_t c_start, c_stop + cdef xmlNode* c_child + cdef xmlNode* c_start_node + _assertValidNode(self) + _assertValidNode(child) + c_child = child._c_node + if c_child.parent is not self._c_node: + raise ValueError, "Element is not a child of this node." 
+ + # handle the unbounded search straight away (normal case) + if stop is None and (start is None or start == 0): + k = 0 + c_child = c_child.prev + while c_child is not NULL: + if _isElement(c_child): + k += 1 + c_child = c_child.prev + return k + + # check indices + if start is None: + c_start = 0 + else: + c_start = start + if stop is None: + c_stop = 0 + else: + c_stop = stop + if c_stop == 0 or \ + c_start >= c_stop and (c_stop > 0 or c_start < 0): + raise ValueError, "list.index(x): x not in slice" + + # for negative slice indices, check slice before searching index + if c_start < 0 or c_stop < 0: + # start from right, at most up to leftmost(c_start, c_stop) + if c_start < c_stop: + k = -c_start + else: + k = -c_stop + c_start_node = self._c_node.last + l = 1 + while c_start_node != c_child and l < k: + if _isElement(c_start_node): + l += 1 + c_start_node = c_start_node.prev + if c_start_node == c_child: + # found! before slice end? + if c_stop < 0 and l <= -c_stop: + raise ValueError, "list.index(x): x not in slice" + elif c_start < 0: + raise ValueError, "list.index(x): x not in slice" + + # now determine the index backwards from child + c_child = c_child.prev + k = 0 + if c_stop > 0: + # we can optimize: stop after c_stop elements if not found + while c_child != NULL and k < c_stop: + if _isElement(c_child): + k += 1 + c_child = c_child.prev + if k < c_stop: + return k + else: + # traverse all + while c_child != NULL: + if _isElement(c_child): + k = k + 1 + c_child = c_child.prev + if c_start > 0: + if k >= c_start: + return k + else: + return k + if c_start != 0 or c_stop != 0: + raise ValueError, "list.index(x): x not in slice" + else: + raise ValueError, "list.index(x): x not in list" + + def get(self, key, default=None): + """get(self, key, default=None) + + Gets an element attribute. + """ + _assertValidNode(self) + return _getAttributeValue(self, key, default) + + def keys(self): + """keys(self) + + Gets a list of attribute names. The names are returned in an + arbitrary order (just like for an ordinary Python dictionary). + """ + _assertValidNode(self) + return _collectAttributes(self._c_node, 1) + + def values(self): + """values(self) + + Gets element attribute values as a sequence of strings. The + attributes are returned in an arbitrary order. + """ + _assertValidNode(self) + return _collectAttributes(self._c_node, 2) + + def items(self): + """items(self) + + Gets element attributes, as a sequence. The attributes are returned in + an arbitrary order. + """ + _assertValidNode(self) + return _collectAttributes(self._c_node, 3) + + def getchildren(self): + """getchildren(self) + + Returns all direct children. The elements are returned in document + order. + + :deprecated: Note that this method has been deprecated as of + ElementTree 1.3 and lxml 2.0. New code should use + ``list(element)`` or simply iterate over elements. + """ + _assertValidNode(self) + return _collectChildren(self) + + def getparent(self): + """getparent(self) + + Returns the parent of this element or None for the root element. + """ + cdef xmlNode* c_node + #_assertValidNode(self) # not needed + c_node = _parentElement(self._c_node) + if c_node is NULL: + return None + return _elementFactory(self._doc, c_node) + + def getnext(self): + """getnext(self) + + Returns the following sibling of this element or None. 
+ """ + cdef xmlNode* c_node + #_assertValidNode(self) # not needed + c_node = _nextElement(self._c_node) + if c_node is NULL: + return None + return _elementFactory(self._doc, c_node) + + def getprevious(self): + """getprevious(self) + + Returns the preceding sibling of this element or None. + """ + cdef xmlNode* c_node + #_assertValidNode(self) # not needed + c_node = _previousElement(self._c_node) + if c_node is NULL: + return None + return _elementFactory(self._doc, c_node) + + def itersiblings(self, tag=None, *tags, preceding=False): + """itersiblings(self, tag=None, *tags, preceding=False) + + Iterate over the following or preceding siblings of this element. + + The direction is determined by the 'preceding' keyword which + defaults to False, i.e. forward iteration over the following + siblings. When True, the iterator yields the preceding + siblings in reverse document order, i.e. starting right before + the current element and going backwards. + + Can be restricted to find only elements with specific tags, + see `iter`. + """ + if preceding: + if self._c_node and not self._c_node.prev: + return ITER_EMPTY + elif self._c_node and not self._c_node.next: + return ITER_EMPTY + if tag is not None: + tags += (tag,) + return SiblingsIterator(self, tags, preceding=preceding) + + def iterancestors(self, tag=None, *tags): + """iterancestors(self, tag=None, *tags) + + Iterate over the ancestors of this element (from parent to parent). + + Can be restricted to find only elements with specific tags, + see `iter`. + """ + if self._c_node and not self._c_node.parent: + return ITER_EMPTY + if tag is not None: + tags += (tag,) + return AncestorsIterator(self, tags) + + def iterdescendants(self, tag=None, *tags): + """iterdescendants(self, tag=None, *tags) + + Iterate over the descendants of this element in document order. + + As opposed to ``el.iter()``, this iterator does not yield the element + itself. The returned elements can be restricted to find only elements + with specific tags, see `iter`. + """ + if self._c_node and not self._c_node.children: + return ITER_EMPTY + if tag is not None: + tags += (tag,) + return ElementDepthFirstIterator(self, tags, inclusive=False) + + def iterchildren(self, tag=None, *tags, reversed=False): + """iterchildren(self, tag=None, *tags, reversed=False) + + Iterate over the children of this element. + + As opposed to using normal iteration on this element, the returned + elements can be reversed with the 'reversed' keyword and restricted + to find only elements with specific tags, see `iter`. + """ + if self._c_node and not self._c_node.children: + return ITER_EMPTY + if tag is not None: + tags += (tag,) + return ElementChildIterator(self, tags, reversed=reversed) + + def getroottree(self): + """getroottree(self) + + Return an ElementTree for the root node of the document that + contains this element. + + This is the same as following element.getparent() up the tree until it + returns None (for the root element) and then build an ElementTree for + the last parent that was returned.""" + _assertValidDoc(self._doc) + return _elementTreeFactory(self._doc, None) + + def getiterator(self, tag=None, *tags): + """getiterator(self, tag=None, *tags) + + Returns a sequence or iterator of all elements in the subtree in + document order (depth first pre-order), starting with this + element. + + Can be restricted to find only elements with specific tags, + see `iter`. + + :deprecated: Note that this method is deprecated as of + ElementTree 1.3 and lxml 2.0. 
It returns an iterator in + lxml, which diverges from the original ElementTree + behaviour. If you want an efficient iterator, use the + ``element.iter()`` method instead. You should only use this + method in new code if you require backwards compatibility + with older versions of lxml or ElementTree. + """ + if tag is not None: + tags += (tag,) + return ElementDepthFirstIterator(self, tags) + + def iter(self, tag=None, *tags): + """iter(self, tag=None, *tags) + + Iterate over all elements in the subtree in document order (depth + first pre-order), starting with this element. + + Can be restricted to find only elements with specific tags: + pass ``"{ns}localname"`` as tag. Either or both of ``ns`` and + ``localname`` can be ``*`` for a wildcard; ``ns`` can be empty + for no namespace. ``"localname"`` is equivalent to ``"{}localname"`` + (i.e. no namespace) but ``"*"`` is ``"{*}*"`` (any or no namespace), + not ``"{}*"``. + + You can also pass the Element, Comment, ProcessingInstruction and + Entity factory functions to look only for the specific element type. + + Passing multiple tags (or a sequence of tags) instead of a single tag + will let the iterator return all elements matching any of these tags, + in document order. + """ + if tag is not None: + tags += (tag,) + return ElementDepthFirstIterator(self, tags) + + def itertext(self, tag=None, *tags, with_tail=True): + """itertext(self, tag=None, *tags, with_tail=True) + + Iterates over the text content of a subtree. + + You can pass tag names to restrict text content to specific elements, + see `iter`. + + You can set the ``with_tail`` keyword argument to ``False`` to skip + over tail text. + """ + if tag is not None: + tags += (tag,) + return ElementTextIterator(self, tags, with_tail=with_tail) + + def makeelement(self, _tag, attrib=None, nsmap=None, **_extra): + """makeelement(self, _tag, attrib=None, nsmap=None, **_extra) + + Creates a new element associated with the same document. + """ + _assertValidDoc(self._doc) + return _makeElement(_tag, NULL, self._doc, None, None, None, + attrib, nsmap, _extra) + + def find(self, path, namespaces=None): + """find(self, path, namespaces=None) + + Finds the first matching subelement, by tag name or path. + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. + """ + if isinstance(path, QName): + path = (path).text + return _elementpath.find(self, path, namespaces, with_prefixes=not _isHtmlDocument(self)) + + def findtext(self, path, default=None, namespaces=None): + """findtext(self, path, default=None, namespaces=None) + + Finds text for the first matching subelement, by tag name or path. + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. + """ + if isinstance(path, QName): + path = (path).text + return _elementpath.findtext(self, path, default, namespaces, with_prefixes=not _isHtmlDocument(self)) + + def findall(self, path, namespaces=None): + """findall(self, path, namespaces=None) + + Finds all matching subelements, by tag name or path. + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. 
+ """ + if isinstance(path, QName): + path = (path).text + return _elementpath.findall(self, path, namespaces, with_prefixes=not _isHtmlDocument(self)) + + def iterfind(self, path, namespaces=None): + """iterfind(self, path, namespaces=None) + + Iterates over all matching subelements, by tag name or path. + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. + """ + if isinstance(path, QName): + path = (path).text + return _elementpath.iterfind(self, path, namespaces, with_prefixes=not _isHtmlDocument(self)) + + def xpath(self, _path, *, namespaces=None, extensions=None, + smart_strings=True, **_variables): + """xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables) + + Evaluate an xpath expression using the element as context node. + """ + evaluator = XPathElementEvaluator(self, namespaces=namespaces, + extensions=extensions, + smart_strings=smart_strings) + return evaluator(_path, **_variables) + + def cssselect(self, expr, *, translator='xml'): + """ + Run the CSS expression on this element and its children, + returning a list of the results. + + Equivalent to lxml.cssselect.CSSSelect(expr)(self) -- note + that pre-compiling the expression can provide a substantial + speedup. + """ + # Do the import here to make the dependency optional. + from lxml.cssselect import CSSSelector + return CSSSelector(expr, translator=translator)(self) + + +cdef extern from "includes/etree_defs.h": + # macro call to 't->tp_new()' for fast instantiation + cdef object NEW_ELEMENT "PY_NEW" (object t) + + +@cython.linetrace(False) +cdef _Element _elementFactory(_Document doc, xmlNode* c_node): + cdef _Element result + result = getProxy(c_node) + if result is not None: + return result + if c_node is NULL: + return None + + element_class = LOOKUP_ELEMENT_CLASS( + ELEMENT_CLASS_LOOKUP_STATE, doc, c_node) + if hasProxy(c_node): + # prevent re-entry race condition - we just called into Python + return getProxy(c_node) + result = NEW_ELEMENT(element_class) + if hasProxy(c_node): + # prevent re-entry race condition - we just called into Python + result._c_node = NULL + return getProxy(c_node) + + _registerProxy(result, doc, c_node) + if element_class is not _Element: + result._init() + return result + + +@cython.internal +cdef class __ContentOnlyElement(_Element): + cdef int _raiseImmutable(self) except -1: + raise TypeError, "this element does not have children or attributes" + + def set(self, key, value): + "set(self, key, value)" + self._raiseImmutable() + + def append(self, value): + "append(self, value)" + self._raiseImmutable() + + def insert(self, index, value): + "insert(self, index, value)" + self._raiseImmutable() + + def __setitem__(self, index, value): + "__setitem__(self, index, value)" + self._raiseImmutable() + + @property + def attrib(self): + return IMMUTABLE_EMPTY_MAPPING + + property text: + def __get__(self): + _assertValidNode(self) + return funicodeOrEmpty(self._c_node.content) + + def __set__(self, value): + cdef tree.xmlDict* c_dict + _assertValidNode(self) + if value is None: + c_text = NULL + else: + value = _utf8(value) + c_text = _xcstr(value) + tree.xmlNodeSetContent(self._c_node, c_text) + + # ACCESSORS + def __getitem__(self, x): + "__getitem__(self, x)" + if isinstance(x, slice): + return [] + else: + raise IndexError, "list index out of range" + + def __len__(self): + "__len__(self)" + return 0 + + def get(self, key, default=None): + "get(self, key, 
default=None)" + return None + + def keys(self): + "keys(self)" + return [] + + def items(self): + "items(self)" + return [] + + def values(self): + "values(self)" + return [] + +cdef class _Comment(__ContentOnlyElement): + @property + def tag(self): + return Comment + + def __repr__(self): + return "" % self.text + +cdef class _ProcessingInstruction(__ContentOnlyElement): + @property + def tag(self): + return ProcessingInstruction + + property target: + # not in ElementTree + def __get__(self): + _assertValidNode(self) + return funicode(self._c_node.name) + + def __set__(self, value): + _assertValidNode(self) + value = _utf8(value) + c_text = _xcstr(value) + tree.xmlNodeSetName(self._c_node, c_text) + + def __repr__(self): + text = self.text + if text: + return "" % (self.target, text) + else: + return "" % self.target + + def get(self, key, default=None): + """get(self, key, default=None) + + Try to parse pseudo-attributes from the text content of the + processing instruction, search for one with the given key as + name and return its associated value. + + Note that this is only a convenience method for the most + common case that all text content is structured in + attribute-like name-value pairs with properly quoted values. + It is not guaranteed to work for all possible text content. + """ + return self.attrib.get(key, default) + + @property + def attrib(self): + """Returns a dict containing all pseudo-attributes that can be + parsed from the text content of this processing instruction. + Note that modifying the dict currently has no effect on the + XML node, although this is not guaranteed to stay this way. + """ + return { attr : (value1 or value2) + for attr, value1, value2 in _FIND_PI_ATTRIBUTES(' ' + self.text) } + +cdef object _FIND_PI_ATTRIBUTES = re.compile(r'\s+(\w+)\s*=\s*(?:\'([^\']*)\'|"([^"]*)")', re.U).findall + +cdef class _Entity(__ContentOnlyElement): + @property + def tag(self): + return Entity + + property name: + # not in ElementTree + def __get__(self): + _assertValidNode(self) + return funicode(self._c_node.name) + + def __set__(self, value): + _assertValidNode(self) + value_utf = _utf8(value) + if b'&' in value_utf or b';' in value_utf: + raise ValueError, f"Invalid entity name '{value}'" + tree.xmlNodeSetName(self._c_node, _xcstr(value_utf)) + + @property + def text(self): + # FIXME: should this be None or '&[VALUE];' or the resolved + # entity value ? + _assertValidNode(self) + return f'&{funicode(self._c_node.name)};' + + def __repr__(self): + return "&%s;" % self.name + + +cdef class QName: + """QName(text_or_uri_or_element, tag=None) + + QName wrapper for qualified XML names. + + Pass a tag name by itself or a namespace URI and a tag name to + create a qualified name. Alternatively, pass an Element to + extract its tag name. ``None`` as first argument is ignored in + order to allow for generic 2-argument usage. + + The ``text`` property holds the qualified name in + ``{namespace}tagname`` notation. The ``namespace`` and + ``localname`` properties hold the respective parts of the tag + name. + + You can pass QName objects wherever a tag name is expected. Also, + setting Element text from a QName will resolve the namespace prefix + on assignment and set a qualified text value. This is helpful in XML + languages like SOAP or XML-Schema that use prefixed tag names in + their text content. 
+ """ + cdef readonly unicode text + cdef readonly unicode localname + cdef readonly unicode namespace + def __init__(self, text_or_uri_or_element, tag=None): + if text_or_uri_or_element is None: + # Allow None as no namespace. + text_or_uri_or_element, tag = tag, None + if not _isString(text_or_uri_or_element): + if isinstance(text_or_uri_or_element, _Element): + text_or_uri_or_element = (<_Element>text_or_uri_or_element).tag + if not _isString(text_or_uri_or_element): + raise ValueError, f"Invalid input tag of type {type(text_or_uri_or_element)!r}" + elif isinstance(text_or_uri_or_element, QName): + text_or_uri_or_element = (text_or_uri_or_element).text + elif text_or_uri_or_element is not None: + text_or_uri_or_element = unicode(text_or_uri_or_element) + else: + raise ValueError, f"Invalid input tag of type {type(text_or_uri_or_element)!r}" + + ns_utf, tag_utf = _getNsTag(text_or_uri_or_element) + if tag is not None: + # either ('ns', 'tag') or ('{ns}oldtag', 'newtag') + if ns_utf is None: + ns_utf = tag_utf # case 1: namespace ended up as tag name + tag_utf = _utf8(tag) + _tagValidOrRaise(tag_utf) + self.localname = (tag_utf).decode('utf8') + if ns_utf is None: + self.namespace = None + self.text = self.localname + else: + self.namespace = (ns_utf).decode('utf8') + self.text = "{%s}%s" % (self.namespace, self.localname) + def __str__(self): + return self.text + def __hash__(self): + return hash(self.text) + def __richcmp__(self, other, int op): + try: + if type(other) is QName: + other = (other).text + elif not isinstance(other, unicode): + other = unicode(other) + except (ValueError, UnicodeDecodeError): + return NotImplemented + return python.PyObject_RichCompare(self.text, other, op) + + +cdef public class _ElementTree [ type LxmlElementTreeType, + object LxmlElementTree ]: + cdef _Document _doc + cdef _Element _context_node + + # Note that _doc is only used to store the original document if we do not + # have a _context_node. All methods should prefer self._context_node._doc + # to honour tree restructuring. _doc can happily be None! + + @cython.final + cdef int _assertHasRoot(self) except -1: + """We have to take care here: the document may not have a root node! + This can happen if ElementTree() is called without any argument and + the caller 'forgets' to call parse() afterwards, so this is a bug in + the caller program. + """ + assert self._context_node is not None, \ + "ElementTree not initialized, missing root" + return 0 + + def parse(self, source, _BaseParser parser=None, *, base_url=None): + """parse(self, source, parser=None, base_url=None) + + Updates self with the content of source and returns its root. + """ + cdef _Document doc = None + try: + doc = _parseDocument(source, parser, base_url) + except _TargetParserResult as result_container: + # raises a TypeError if we don't get an _Element + self._context_node = result_container.result + else: + self._context_node = doc.getroot() + self._doc = None if self._context_node is not None else doc + return self._context_node + + def _setroot(self, _Element root not None): + """_setroot(self, root) + + Relocate the ElementTree to a new root node. + """ + _assertValidNode(root) + if root._c_node.type != tree.XML_ELEMENT_NODE: + raise TypeError, "Only elements can be the root of an ElementTree" + self._context_node = root + self._doc = None + + def getroot(self): + """getroot(self) + + Gets the root element for this tree. 
+ """ + return self._context_node + + def __copy__(self): + return _elementTreeFactory(self._doc, self._context_node) + + def __deepcopy__(self, memo): + cdef _Element root + cdef _Document doc + cdef xmlDoc* c_doc + if self._context_node is not None: + root = self._context_node.__copy__() + assert root is not None + _assertValidNode(root) + _copyNonElementSiblings(self._context_node._c_node, root._c_node) + return _elementTreeFactory(None, root) + elif self._doc is not None: + _assertValidDoc(self._doc) + c_doc = tree.xmlCopyDoc(self._doc._c_doc, 1) + if c_doc is NULL: + raise MemoryError() + doc = _documentFactory(c_doc, self._doc._parser) + return _elementTreeFactory(doc, None) + else: + # so what ... + return self + + # not in ElementTree + @property + def docinfo(self) -> DocInfo: + """Information about the document provided by parser and DTD.""" + self._assertHasRoot() + return DocInfo(self._context_node._doc) + + # not in ElementTree, read-only + @property + def parser(self): + """The parser that was used to parse the document in this ElementTree. + """ + if self._context_node is not None and \ + self._context_node._doc is not None: + return self._context_node._doc._parser + if self._doc is not None: + return self._doc._parser + return None + + def write(self, file, *, encoding=None, method="xml", + bint pretty_print=False, xml_declaration=None, bint with_tail=True, + standalone=None, doctype=None, compression=0, + bint exclusive=False, inclusive_ns_prefixes=None, + bint with_comments=True, bint strip_text=False, + docstring=None): + """write(self, file, encoding=None, method="xml", + pretty_print=False, xml_declaration=None, with_tail=True, + standalone=None, doctype=None, compression=0, + exclusive=False, inclusive_ns_prefixes=None, + with_comments=True, strip_text=False) + + Write the tree to a filename, file or file-like object. + + Defaults to ASCII encoding and writing a declaration as needed. + + The keyword argument 'method' selects the output method: + 'xml', 'html', 'text', 'c14n' or 'c14n2'. Default is 'xml'. + + With ``method="c14n"`` (C14N version 1), the options ``exclusive``, + ``with_comments`` and ``inclusive_ns_prefixes`` request exclusive + C14N, include comments, and list the inclusive prefixes respectively. + + With ``method="c14n2"`` (C14N version 2), the ``with_comments`` and + ``strip_text`` options control the output of comments and text space + according to C14N 2.0. + + Passing a boolean value to the ``standalone`` option will + output an XML declaration with the corresponding + ``standalone`` flag. + + The ``doctype`` option allows passing in a plain string that will + be serialised before the XML tree. Note that passing in non + well-formed content here will make the XML output non well-formed. + Also, an existing doctype in the document tree will not be removed + when serialising an ElementTree instance. + + The ``compression`` option enables GZip compression level 1-9. + + The ``inclusive_ns_prefixes`` should be a list of namespace strings + (i.e. ['xs', 'xsi']) that will be promoted to the top-level element + during exclusive C14N serialisation. This parameter is ignored if + exclusive mode=False. + + If exclusive=True and no list is provided, a namespace will only be + rendered if it is used by the immediate parent or one of its attributes + and its prefix and values have not already been rendered by an ancestor + of the namespace node's parent element. 
+ """ + cdef bint write_declaration + cdef int is_standalone + + self._assertHasRoot() + _assertValidNode(self._context_node) + if compression is None or compression < 0: + compression = 0 + + # C14N serialisation + if method in ('c14n', 'c14n2'): + if encoding is not None: + raise ValueError("Cannot specify encoding with C14N") + if xml_declaration: + raise ValueError("Cannot enable XML declaration in C14N") + + if method == 'c14n': + _tofilelikeC14N(file, self._context_node, exclusive, with_comments, + compression, inclusive_ns_prefixes) + else: # c14n2 + with _open_utf8_file(file, compression=compression) as f: + target = C14NWriterTarget( + f.write, with_comments=with_comments, strip_text=strip_text) + _tree_to_target(self, target) + return + + if not with_comments: + raise ValueError("Can only discard comments in C14N serialisation") + # suppress decl. in default case (purely for ElementTree compatibility) + if xml_declaration is not None: + write_declaration = xml_declaration + if encoding is None: + encoding = 'ASCII' + else: + encoding = encoding.upper() + elif encoding is None: + encoding = 'ASCII' + write_declaration = 0 + else: + encoding = encoding.upper() + write_declaration = encoding not in ( + 'US-ASCII', 'ASCII', 'UTF8', 'UTF-8') + if standalone is None: + is_standalone = -1 + elif standalone: + write_declaration = 1 + is_standalone = 1 + else: + write_declaration = 1 + is_standalone = 0 + + if docstring is not None and doctype is None: + import warnings + warnings.warn( + "The 'docstring' option is deprecated. Use 'doctype' instead.", + DeprecationWarning) + doctype = docstring + + _tofilelike(file, self._context_node, encoding, doctype, method, + write_declaration, 1, pretty_print, with_tail, + is_standalone, compression) + + def getpath(self, _Element element not None): + """getpath(self, element) + + Returns a structural, absolute XPath expression to find the element. + + For namespaced elements, the expression uses prefixes from the + document, which therefore need to be provided in order to make any + use of the expression in XPath. + + Also see the method getelementpath(self, element), which returns a + self-contained ElementPath expression. + """ + cdef _Document doc + cdef _Element root + cdef xmlDoc* c_doc + _assertValidNode(element) + if self._context_node is not None: + root = self._context_node + doc = root._doc + elif self._doc is not None: + doc = self._doc + root = doc.getroot() + else: + raise ValueError, "Element is not in this tree." + _assertValidDoc(doc) + _assertValidNode(root) + if element._doc is not doc: + raise ValueError, "Element is not in this tree." + + c_doc = _fakeRootDoc(doc._c_doc, root._c_node) + c_path = tree.xmlGetNodePath(element._c_node) + _destroyFakeDoc(doc._c_doc, c_doc) + if c_path is NULL: + raise MemoryError() + path = funicode(c_path) + tree.xmlFree(c_path) + return path + + def getelementpath(self, _Element element not None): + """getelementpath(self, element) + + Returns a structural, absolute ElementPath expression to find the + element. This path can be used in the .find() method to look up + the element, provided that the elements along the path and their + list of immediate children were not modified in between. + + ElementPath has the advantage over an XPath expression (as returned + by the .getpath() method) that it does not require additional prefix + declarations. It is always self-contained. 
+ """ + cdef _Element root + cdef Py_ssize_t count + _assertValidNode(element) + if element._c_node.type != tree.XML_ELEMENT_NODE: + raise ValueError, "input is not an Element" + if self._context_node is not None: + root = self._context_node + elif self._doc is not None: + root = self._doc.getroot() + else: + raise ValueError, "Element is not in this tree" + _assertValidNode(root) + if element._doc is not root._doc: + raise ValueError, "Element is not in this tree" + + path = [] + c_element = element._c_node + while c_element is not root._c_node: + c_name = c_element.name + c_href = _getNs(c_element) + tag = _namespacedNameFromNsName(c_href, c_name) + if c_href is NULL: + c_href = b'' # no namespace (NULL is wildcard) + # use tag[N] if there are preceding siblings with the same tag + count = 0 + c_node = c_element.prev + while c_node is not NULL: + if c_node.type == tree.XML_ELEMENT_NODE: + if _tagMatches(c_node, c_href, c_name): + count += 1 + c_node = c_node.prev + if count: + tag = f'{tag}[{count+1}]' + else: + # use tag[1] if there are following siblings with the same tag + c_node = c_element.next + while c_node is not NULL: + if c_node.type == tree.XML_ELEMENT_NODE: + if _tagMatches(c_node, c_href, c_name): + tag += '[1]' + break + c_node = c_node.next + + path.append(tag) + c_element = c_element.parent + if c_element is NULL or c_element.type != tree.XML_ELEMENT_NODE: + raise ValueError, "Element is not in this tree." + if not path: + return '.' + path.reverse() + return '/'.join(path) + + def getiterator(self, tag=None, *tags): + """getiterator(self, *tags, tag=None) + + Returns a sequence or iterator of all elements in document order + (depth first pre-order), starting with the root element. + + Can be restricted to find only elements with specific tags, + see `_Element.iter`. + + :deprecated: Note that this method is deprecated as of + ElementTree 1.3 and lxml 2.0. It returns an iterator in + lxml, which diverges from the original ElementTree + behaviour. If you want an efficient iterator, use the + ``tree.iter()`` method instead. You should only use this + method in new code if you require backwards compatibility + with older versions of lxml or ElementTree. + """ + root = self.getroot() + if root is None: + return ITER_EMPTY + if tag is not None: + tags += (tag,) + return root.getiterator(*tags) + + def iter(self, tag=None, *tags): + """iter(self, tag=None, *tags) + + Creates an iterator for the root element. The iterator loops over + all elements in this tree, in document order. Note that siblings + of the root element (comments or processing instructions) are not + returned by the iterator. + + Can be restricted to find only elements with specific tags, + see `_Element.iter`. + """ + root = self.getroot() + if root is None: + return ITER_EMPTY + if tag is not None: + tags += (tag,) + return root.iter(*tags) + + def find(self, path, namespaces=None): + """find(self, path, namespaces=None) + + Finds the first toplevel element with given tag. Same as + ``tree.getroot().find(path)``. + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. + """ + self._assertHasRoot() + root = self.getroot() + if _isString(path): + if path[:1] == "/": + path = "." + path + from warnings import warn + warn( + "This search incorrectly ignores the root element, and will be " + "fixed in a future version. 
If you rely on the current " + f"behaviour, change it to {path!r}", + FutureWarning, stacklevel=1 + ) + return root.find(path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """findtext(self, path, default=None, namespaces=None) + + Finds the text for the first element matching the ElementPath + expression. Same as getroot().findtext(path) + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. + """ + self._assertHasRoot() + root = self.getroot() + if _isString(path): + if path[:1] == "/": + path = "." + path + from warnings import warn + warn( + "This search incorrectly ignores the root element, and will be " + "fixed in a future version. If you rely on the current " + f"behaviour, change it to {path!r}", + FutureWarning, stacklevel=1 + ) + return root.findtext(path, default, namespaces) + + def findall(self, path, namespaces=None): + """findall(self, path, namespaces=None) + + Finds all elements matching the ElementPath expression. Same as + getroot().findall(path). + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. + """ + self._assertHasRoot() + root = self.getroot() + if _isString(path): + if path[:1] == "/": + path = "." + path + from warnings import warn + warn( + "This search incorrectly ignores the root element, and will be " + "fixed in a future version. If you rely on the current " + f"behaviour, change it to {path!r}", + FutureWarning, stacklevel=1 + ) + return root.findall(path, namespaces) + + def iterfind(self, path, namespaces=None): + """iterfind(self, path, namespaces=None) + + Iterates over all elements matching the ElementPath expression. + Same as getroot().iterfind(path). + + The optional ``namespaces`` argument accepts a + prefix-to-namespace mapping that allows the usage of XPath + prefixes in the path expression. + """ + self._assertHasRoot() + root = self.getroot() + if _isString(path): + if path[:1] == "/": + path = "." + path + from warnings import warn + warn( + "This search incorrectly ignores the root element, and will be " + "fixed in a future version. If you rely on the current " + f"behaviour, change it to {path!r}", + FutureWarning, stacklevel=1 + ) + return root.iterfind(path, namespaces) + + def xpath(self, _path, *, namespaces=None, extensions=None, + smart_strings=True, **_variables): + """xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables) + + XPath evaluate in context of document. + + ``namespaces`` is an optional dictionary with prefix to namespace URI + mappings, used by XPath. ``extensions`` defines additional extension + functions. + + Returns a list (nodeset), or bool, float or string. + + In case of a list result, return Element for element nodes, + string for text and attribute values. + + Note: if you are going to apply multiple XPath expressions + against the same document, it is more efficient to use + XPathEvaluator directly. + """ + self._assertHasRoot() + evaluator = XPathDocumentEvaluator(self, namespaces=namespaces, + extensions=extensions, + smart_strings=smart_strings) + return evaluator(_path, **_variables) + + def xslt(self, _xslt, extensions=None, access_control=None, **_kw): + """xslt(self, _xslt, extensions=None, access_control=None, **_kw) + + Transform this document using other document. 
+ + xslt is a tree that should be XSLT + keyword parameters are XSLT transformation parameters. + + Returns the transformed tree. + + Note: if you are going to apply the same XSLT stylesheet against + multiple documents, it is more efficient to use the XSLT + class directly. + """ + self._assertHasRoot() + style = XSLT(_xslt, extensions=extensions, + access_control=access_control) + return style(self, **_kw) + + def relaxng(self, relaxng): + """relaxng(self, relaxng) + + Validate this document using other document. + + The relaxng argument is a tree that should contain a Relax NG schema. + + Returns True or False, depending on whether validation + succeeded. + + Note: if you are going to apply the same Relax NG schema against + multiple documents, it is more efficient to use the RelaxNG + class directly. + """ + self._assertHasRoot() + schema = RelaxNG(relaxng) + return schema.validate(self) + + def xmlschema(self, xmlschema): + """xmlschema(self, xmlschema) + + Validate this document using other document. + + The xmlschema argument is a tree that should contain an XML Schema. + + Returns True or False, depending on whether validation + succeeded. + + Note: If you are going to apply the same XML Schema against + multiple documents, it is more efficient to use the XMLSchema + class directly. + """ + self._assertHasRoot() + schema = XMLSchema(xmlschema) + return schema.validate(self) + + def xinclude(self): + """xinclude(self) + + Process the XInclude nodes in this document and include the + referenced XML fragments. + + There is support for loading files through the file system, HTTP and + FTP. + + Note that XInclude does not support custom resolvers in Python space + due to restrictions of libxml2 <= 2.6.29. + """ + self._assertHasRoot() + XInclude()(self._context_node) + + def write_c14n(self, file, *, bint exclusive=False, bint with_comments=True, + compression=0, inclusive_ns_prefixes=None): + """write_c14n(self, file, exclusive=False, with_comments=True, + compression=0, inclusive_ns_prefixes=None) + + C14N write of document. Always writes UTF-8. + + The ``compression`` option enables GZip compression level 1-9. + + The ``inclusive_ns_prefixes`` should be a list of namespace strings + (i.e. ['xs', 'xsi']) that will be promoted to the top-level element + during exclusive C14N serialisation. This parameter is ignored if + exclusive mode=False. + + If exclusive=True and no list is provided, a namespace will only be + rendered if it is used by the immediate parent or one of its attributes + and its prefix and values have not already been rendered by an ancestor + of the namespace node's parent element. + + NOTE: This method is deprecated as of lxml 4.4 and will be removed in a + future release. Use ``.write(f, method="c14n")`` instead. 
+ """ + self._assertHasRoot() + _assertValidNode(self._context_node) + if compression is None or compression < 0: + compression = 0 + + _tofilelikeC14N(file, self._context_node, exclusive, with_comments, + compression, inclusive_ns_prefixes) + +cdef _ElementTree _elementTreeFactory(_Document doc, _Element context_node): + return _newElementTree(doc, context_node, _ElementTree) + +cdef _ElementTree _newElementTree(_Document doc, _Element context_node, + object baseclass): + cdef _ElementTree result + result = baseclass() + if context_node is None and doc is not None: + context_node = doc.getroot() + if context_node is None: + _assertValidDoc(doc) + result._doc = doc + else: + _assertValidNode(context_node) + result._context_node = context_node + return result + + +@cython.final +@cython.freelist(16) +cdef class _Attrib: + """A dict-like proxy for the ``Element.attrib`` property. + """ + cdef _Element _element + def __cinit__(self, _Element element not None): + _assertValidNode(element) + self._element = element + + # MANIPULATORS + def __setitem__(self, key, value): + _assertValidNode(self._element) + _setAttributeValue(self._element, key, value) + + def __delitem__(self, key): + _assertValidNode(self._element) + _delAttribute(self._element, key) + + def update(self, sequence_or_dict): + _assertValidNode(self._element) + if isinstance(sequence_or_dict, (dict, _Attrib)): + sequence_or_dict = sequence_or_dict.items() + for key, value in sequence_or_dict: + _setAttributeValue(self._element, key, value) + + def pop(self, key, *default): + if len(default) > 1: + raise TypeError, f"pop expected at most 2 arguments, got {len(default)+1}" + _assertValidNode(self._element) + result = _getAttributeValue(self._element, key, None) + if result is None: + if not default: + raise KeyError, key + result = default[0] + else: + _delAttribute(self._element, key) + return result + + def clear(self): + _assertValidNode(self._element) + c_attrs = self._element._c_node.properties + if c_attrs: + self._element._c_node.properties = NULL + tree.xmlFreePropList(c_attrs) + + # ACCESSORS + def __repr__(self): + _assertValidNode(self._element) + return repr(dict( _collectAttributes(self._element._c_node, 3) )) + + def __copy__(self): + _assertValidNode(self._element) + return dict(_collectAttributes(self._element._c_node, 3)) + + def __deepcopy__(self, memo): + _assertValidNode(self._element) + return dict(_collectAttributes(self._element._c_node, 3)) + + def __getitem__(self, key): + _assertValidNode(self._element) + result = _getAttributeValue(self._element, key, None) + if result is None: + raise KeyError, key + return result + + def __bool__(self): + _assertValidNode(self._element) + cdef xmlAttr* c_attr = self._element._c_node.properties + while c_attr is not NULL: + if c_attr.type == tree.XML_ATTRIBUTE_NODE: + return 1 + c_attr = c_attr.next + return 0 + + def __len__(self): + _assertValidNode(self._element) + cdef xmlAttr* c_attr = self._element._c_node.properties + cdef Py_ssize_t c = 0 + while c_attr is not NULL: + if c_attr.type == tree.XML_ATTRIBUTE_NODE: + c += 1 + c_attr = c_attr.next + return c + + def get(self, key, default=None): + _assertValidNode(self._element) + return _getAttributeValue(self._element, key, default) + + def keys(self): + _assertValidNode(self._element) + return _collectAttributes(self._element._c_node, 1) + + def __iter__(self): + _assertValidNode(self._element) + return iter(_collectAttributes(self._element._c_node, 1)) + + def iterkeys(self): + _assertValidNode(self._element) + 
return iter(_collectAttributes(self._element._c_node, 1)) + + def values(self): + _assertValidNode(self._element) + return _collectAttributes(self._element._c_node, 2) + + def itervalues(self): + _assertValidNode(self._element) + return iter(_collectAttributes(self._element._c_node, 2)) + + def items(self): + _assertValidNode(self._element) + return _collectAttributes(self._element._c_node, 3) + + def iteritems(self): + _assertValidNode(self._element) + return iter(_collectAttributes(self._element._c_node, 3)) + + def has_key(self, key): + _assertValidNode(self._element) + return key in self + + def __contains__(self, key): + _assertValidNode(self._element) + cdef xmlNode* c_node + ns, tag = _getNsTag(key) + c_node = self._element._c_node + c_href = NULL if ns is None else _xcstr(ns) + return 1 if tree.xmlHasNsProp(c_node, _xcstr(tag), c_href) else 0 + + def __richcmp__(self, other, int op): + try: + one = dict(self.items()) + if not isinstance(other, dict): + other = dict(other) + except (TypeError, ValueError): + return NotImplemented + return python.PyObject_RichCompare(one, other, op) + +MutableMapping.register(_Attrib) + + +@cython.final +@cython.internal +cdef class _AttribIterator: + """Attribute iterator - for internal use only! + """ + # XML attributes must not be removed while running! + cdef _Element _node + cdef xmlAttr* _c_attr + cdef int _keysvalues # 1 - keys, 2 - values, 3 - items (key, value) + def __iter__(self): + return self + + def __next__(self): + cdef xmlAttr* c_attr + if self._node is None: + raise StopIteration + c_attr = self._c_attr + while c_attr is not NULL and c_attr.type != tree.XML_ATTRIBUTE_NODE: + c_attr = c_attr.next + if c_attr is NULL: + self._node = None + raise StopIteration + + self._c_attr = c_attr.next + if self._keysvalues == 1: + return _namespacedName(c_attr) + elif self._keysvalues == 2: + return _attributeValue(self._node._c_node, c_attr) + else: + return (_namespacedName(c_attr), + _attributeValue(self._node._c_node, c_attr)) + +cdef object _attributeIteratorFactory(_Element element, int keysvalues): + cdef _AttribIterator attribs + if element._c_node.properties is NULL: + return ITER_EMPTY + attribs = _AttribIterator() + attribs._node = element + attribs._c_attr = element._c_node.properties + attribs._keysvalues = keysvalues + return attribs + + +cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher, + type LxmlElementTagMatcherType ]: + """ + Dead but public. :) + """ + cdef object _pystrings + cdef int _node_type + cdef char* _href + cdef char* _name + cdef _initTagMatch(self, tag): + self._href = NULL + self._name = NULL + if tag is None: + self._node_type = 0 + elif tag is Comment: + self._node_type = tree.XML_COMMENT_NODE + elif tag is ProcessingInstruction: + self._node_type = tree.XML_PI_NODE + elif tag is Entity: + self._node_type = tree.XML_ENTITY_REF_NODE + elif tag is Element: + self._node_type = tree.XML_ELEMENT_NODE + else: + self._node_type = tree.XML_ELEMENT_NODE + self._pystrings = _getNsTag(tag) + if self._pystrings[0] is not None: + self._href = _cstr(self._pystrings[0]) + self._name = _cstr(self._pystrings[1]) + if self._name[0] == c'*' and self._name[1] == c'\0': + self._name = NULL + +cdef public class _ElementIterator(_ElementTagMatcher) [ + object LxmlElementIterator, type LxmlElementIteratorType ]: + """ + Dead but public. 
:) + """ + # we keep Python references here to control GC + cdef _Element _node + cdef _node_to_node_function _next_element + def __iter__(self): + return self + + cdef void _storeNext(self, _Element node): + cdef xmlNode* c_node + c_node = self._next_element(node._c_node) + while c_node is not NULL and \ + self._node_type != 0 and \ + (self._node_type != c_node.type or + not _tagMatches(c_node, self._href, self._name)): + c_node = self._next_element(c_node) + if c_node is NULL: + self._node = None + else: + # Python ref: + self._node = _elementFactory(node._doc, c_node) + + def __next__(self): + cdef xmlNode* c_node + cdef _Element current_node + if self._node is None: + raise StopIteration + # Python ref: + current_node = self._node + self._storeNext(current_node) + return current_node + +@cython.final +@cython.internal +cdef class _MultiTagMatcher: + """ + Match an xmlNode against a list of tags. + """ + cdef list _py_tags + cdef qname* _cached_tags + cdef size_t _tag_count + cdef size_t _cached_size + cdef _Document _cached_doc + cdef int _node_types + + def __cinit__(self, tags): + self._py_tags = [] + self.initTagMatch(tags) + + def __dealloc__(self): + self._clear() + + cdef bint rejectsAll(self) noexcept: + return not self._tag_count and not self._node_types + + cdef bint rejectsAllAttributes(self) noexcept: + return not self._tag_count + + cdef bint matchesType(self, int node_type) noexcept: + if node_type == tree.XML_ELEMENT_NODE and self._tag_count: + return True + return self._node_types & (1 << node_type) + + cdef void _clear(self) noexcept: + cdef size_t i, count + count = self._tag_count + self._tag_count = 0 + if self._cached_tags: + for i in range(count): + cpython.ref.Py_XDECREF(self._cached_tags[i].href) + python.lxml_free(self._cached_tags) + self._cached_tags = NULL + + cdef initTagMatch(self, tags): + self._cached_doc = None + del self._py_tags[:] + self._clear() + if tags is None or tags == (): + # no selection in tags argument => match anything + self._node_types = ( + 1 << tree.XML_COMMENT_NODE | + 1 << tree.XML_PI_NODE | + 1 << tree.XML_ENTITY_REF_NODE | + 1 << tree.XML_ELEMENT_NODE) + else: + self._node_types = 0 + self._storeTags(tags, set()) + + cdef _storeTags(self, tag, set seen): + if tag is Comment: + self._node_types |= 1 << tree.XML_COMMENT_NODE + elif tag is ProcessingInstruction: + self._node_types |= 1 << tree.XML_PI_NODE + elif tag is Entity: + self._node_types |= 1 << tree.XML_ENTITY_REF_NODE + elif tag is Element: + self._node_types |= 1 << tree.XML_ELEMENT_NODE + elif python._isString(tag): + if tag in seen: + return + seen.add(tag) + if tag in ('*', '{*}*'): + self._node_types |= 1 << tree.XML_ELEMENT_NODE + else: + href, name = _getNsTag(tag) + if name == b'*': + name = None + if href is None: + href = b'' # no namespace + elif href == b'*': + href = None # wildcard: any namespace, including none + self._py_tags.append((href, name)) + elif isinstance(tag, QName): + self._storeTags(tag.text, seen) + else: + # support a sequence of tags + for item in tag: + self._storeTags(item, seen) + + cdef inline int cacheTags(self, _Document doc, bint force_into_dict=False) except -1: + """ + Look up the tag names in the doc dict to enable string pointer comparisons. 
+ """ + cdef size_t dict_size = tree.xmlDictSize(doc._c_doc.dict) + if doc is self._cached_doc and dict_size == self._cached_size: + # doc and dict didn't change => names already cached + return 0 + self._tag_count = 0 + if not self._py_tags: + self._cached_doc = doc + self._cached_size = dict_size + return 0 + if not self._cached_tags: + self._cached_tags = python.lxml_malloc(len(self._py_tags), sizeof(qname)) + if not self._cached_tags: + self._cached_doc = None + raise MemoryError() + self._tag_count = _mapTagsToQnameMatchArray( + doc._c_doc, self._py_tags, self._cached_tags, force_into_dict) + self._cached_doc = doc + self._cached_size = dict_size + return 0 + + cdef inline bint matches(self, xmlNode* c_node) noexcept: + cdef qname* c_qname + if self._node_types & (1 << c_node.type): + return True + elif c_node.type == tree.XML_ELEMENT_NODE: + for c_qname in self._cached_tags[:self._tag_count]: + if _tagMatchesExactly(c_node, c_qname): + return True + return False + + cdef inline bint matchesNsTag(self, const_xmlChar* c_href, + const_xmlChar* c_name) noexcept: + cdef qname* c_qname + if self._node_types & (1 << tree.XML_ELEMENT_NODE): + return True + for c_qname in self._cached_tags[:self._tag_count]: + if _nsTagMatchesExactly(c_href, c_name, c_qname): + return True + return False + + cdef inline bint matchesAttribute(self, xmlAttr* c_attr) noexcept: + """Attribute matches differ from Element matches in that they do + not care about node types. + """ + cdef qname* c_qname + for c_qname in self._cached_tags[:self._tag_count]: + if _tagMatchesExactly(c_attr, c_qname): + return True + return False + +cdef class _ElementMatchIterator: + cdef _Element _node + cdef _node_to_node_function _next_element + cdef _MultiTagMatcher _matcher + + @cython.final + cdef _initTagMatcher(self, tags): + self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tags) + + def __iter__(self): + return self + + @cython.final + cdef int _storeNext(self, _Element node) except -1: + self._matcher.cacheTags(node._doc) + c_node = self._next_element(node._c_node) + while c_node is not NULL and not self._matcher.matches(c_node): + c_node = self._next_element(c_node) + # store Python ref to next node to make sure it's kept alive + self._node = _elementFactory(node._doc, c_node) if c_node is not NULL else None + return 0 + + def __next__(self): + cdef _Element current_node = self._node + if current_node is None: + raise StopIteration + self._storeNext(current_node) + return current_node + +cdef class ElementChildIterator(_ElementMatchIterator): + """ElementChildIterator(self, node, tag=None, reversed=False) + Iterates over the children of an element. + """ + def __cinit__(self, _Element node not None, tag=None, *, bint reversed=False): + cdef xmlNode* c_node + _assertValidNode(node) + self._initTagMatcher(tag) + if reversed: + c_node = _findChildBackwards(node._c_node, 0) + self._next_element = _previousElement + else: + c_node = _findChildForwards(node._c_node, 0) + self._next_element = _nextElement + self._matcher.cacheTags(node._doc) + while c_node is not NULL and not self._matcher.matches(c_node): + c_node = self._next_element(c_node) + # store Python ref to next node to make sure it's kept alive + self._node = _elementFactory(node._doc, c_node) if c_node is not NULL else None + +cdef class SiblingsIterator(_ElementMatchIterator): + """SiblingsIterator(self, node, tag=None, preceding=False) + Iterates over the siblings of an element. + + You can pass the boolean keyword ``preceding`` to specify the direction. 
+ """ + def __cinit__(self, _Element node not None, tag=None, *, bint preceding=False): + _assertValidNode(node) + self._initTagMatcher(tag) + if preceding: + self._next_element = _previousElement + else: + self._next_element = _nextElement + self._storeNext(node) + +cdef class AncestorsIterator(_ElementMatchIterator): + """AncestorsIterator(self, node, tag=None) + Iterates over the ancestors of an element (from parent to parent). + """ + def __cinit__(self, _Element node not None, tag=None): + _assertValidNode(node) + self._initTagMatcher(tag) + self._next_element = _parentElement + self._storeNext(node) + +cdef class ElementDepthFirstIterator: + """ElementDepthFirstIterator(self, node, tag=None, inclusive=True) + Iterates over an element and its sub-elements in document order (depth + first pre-order). + + Note that this also includes comments, entities and processing + instructions. To filter them out, check if the ``tag`` property + of the returned element is a string (i.e. not None and not a + factory function), or pass the ``Element`` factory for the ``tag`` + argument to receive only Elements. + + If the optional ``tag`` argument is not None, the iterator returns only + the elements that match the respective name and namespace. + + The optional boolean argument 'inclusive' defaults to True and can be set + to False to exclude the start element itself. + + Note that the behaviour of this iterator is completely undefined if the + tree it traverses is modified during iteration. + """ + # we keep Python references here to control GC + # keep the next Element after the one we return, and the (s)top node + cdef _Element _next_node + cdef _Element _top_node + cdef _MultiTagMatcher _matcher + def __cinit__(self, _Element node not None, tag=None, *, bint inclusive=True): + _assertValidNode(node) + self._top_node = node + self._next_node = node + self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag) + self._matcher.cacheTags(node._doc) + if not inclusive or not self._matcher.matches(node._c_node): + # find start node (this cannot raise StopIteration, self._next_node != None) + next(self) + + def __iter__(self): + return self + + def __next__(self): + cdef xmlNode* c_node + cdef _Element current_node = self._next_node + if current_node is None: + raise StopIteration + c_node = current_node._c_node + self._matcher.cacheTags(current_node._doc) + if not self._matcher._tag_count: + # no tag name was found in the dict => not in document either + # try to match by node type + c_node = self._nextNodeAnyTag(c_node) + else: + c_node = self._nextNodeMatchTag(c_node) + if c_node is NULL: + self._next_node = None + else: + self._next_node = _elementFactory(current_node._doc, c_node) + return current_node + + @cython.final + cdef xmlNode* _nextNodeAnyTag(self, xmlNode* c_node) noexcept: + cdef int node_types = self._matcher._node_types + if not node_types: + return NULL + tree.BEGIN_FOR_EACH_ELEMENT_FROM(self._top_node._c_node, c_node, 0) + if node_types & (1 << c_node.type): + return c_node + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + return NULL + + @cython.final + cdef xmlNode* _nextNodeMatchTag(self, xmlNode* c_node) noexcept: + tree.BEGIN_FOR_EACH_ELEMENT_FROM(self._top_node._c_node, c_node, 0) + if self._matcher.matches(c_node): + return c_node + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + return NULL + + +cdef class ElementTextIterator: + """ElementTextIterator(self, element, tag=None, with_tail=True) + Iterates over the text content of a subtree. 
+ + You can pass the ``tag`` keyword argument to restrict text content to a + specific tag name. + + You can set the ``with_tail`` keyword argument to ``False`` to skip over + tail text (e.g. if you know that it's only whitespace from pretty-printing). + """ + cdef object _events + cdef _Element _start_element + def __cinit__(self, _Element element not None, tag=None, *, bint with_tail=True): + _assertValidNode(element) + if with_tail: + events = ("start", "comment", "pi", "end") + else: + events = ("start",) + self._start_element = element + self._events = iterwalk(element, events=events, tag=tag) + + def __iter__(self): + return self + + def __next__(self): + cdef _Element element + result = None + while result is None: + event, element = next(self._events) # raises StopIteration + if event == "start": + result = element.text + elif element is not self._start_element: + result = element.tail + return result + + +cdef xmlNode* _createElement(xmlDoc* c_doc, object name_utf) except NULL: + cdef xmlNode* c_node + c_node = tree.xmlNewDocNode(c_doc, NULL, _xcstr(name_utf), NULL) + return c_node + +cdef xmlNode* _createComment(xmlDoc* c_doc, const_xmlChar* text) noexcept: + cdef xmlNode* c_node + c_node = tree.xmlNewDocComment(c_doc, text) + return c_node + +cdef xmlNode* _createPI(xmlDoc* c_doc, const_xmlChar* target, const_xmlChar* text) noexcept: + cdef xmlNode* c_node + c_node = tree.xmlNewDocPI(c_doc, target, text) + return c_node + +cdef xmlNode* _createEntity(xmlDoc* c_doc, const_xmlChar* name) noexcept: + cdef xmlNode* c_node + c_node = tree.xmlNewReference(c_doc, name) + return c_node + +# module-level API for ElementTree + +def Element(_tag, attrib=None, nsmap=None, **_extra): + """Element(_tag, attrib=None, nsmap=None, **_extra) + + Element factory. This function returns an object implementing the + Element interface. + + Also look at the `_Element.makeelement()` and + `_BaseParser.makeelement()` methods, which provide a faster way to + create an Element within a specific document or parser context. + """ + return _makeElement(_tag, NULL, None, None, None, None, + attrib, nsmap, _extra) + + +def Comment(text=None): + """Comment(text=None) + + Comment element factory. This factory function creates a special element that will + be serialized as an XML comment. + """ + cdef _Document doc + cdef xmlNode* c_node + cdef xmlDoc* c_doc + + if text is None: + text = b'' + else: + text = _utf8(text) + if b'--' in text or text.endswith(b'-'): + raise ValueError("Comment may not contain '--' or end with '-'") + + c_doc = _newXMLDoc() + doc = _documentFactory(c_doc, None) + c_node = _createComment(c_doc, _xcstr(text)) + tree.xmlAddChild(c_doc, c_node) + return _elementFactory(doc, c_node) + + +def ProcessingInstruction(target, text=None): + """ProcessingInstruction(target, text=None) + + ProcessingInstruction element factory. This factory function creates a + special element that will be serialized as an XML processing instruction. 
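A brief sketch of the Element, Comment and ProcessingInstruction factories documented above (the serialised bytes shown are the expected defaults):

    from lxml import etree

    el = etree.Element('content')
    comment = etree.Comment('a remark')
    pi = etree.ProcessingInstruction('xml-stylesheet', 'href="s.xsl"')
    print(etree.tostring(comment))   # b'<!--a remark-->'
    print(etree.tostring(pi))        # b'<?xml-stylesheet href="s.xsl"?>'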
+ """ + cdef _Document doc + cdef xmlNode* c_node + cdef xmlDoc* c_doc + + target = _utf8(target) + _tagValidOrRaise(target) + if target.lower() == b'xml': + raise ValueError, f"Invalid PI name '{target}'" + + if text is None: + text = b'' + else: + text = _utf8(text) + if b'?>' in text: + raise ValueError, "PI text must not contain '?>'" + + c_doc = _newXMLDoc() + doc = _documentFactory(c_doc, None) + c_node = _createPI(c_doc, _xcstr(target), _xcstr(text)) + tree.xmlAddChild(c_doc, c_node) + return _elementFactory(doc, c_node) + +PI = ProcessingInstruction + + +cdef class CDATA: + """CDATA(data) + + CDATA factory. This factory creates an opaque data object that + can be used to set Element text. The usual way to use it is:: + + >>> el = Element('content') + >>> el.text = CDATA('a string') + + >>> print(el.text) + a string + >>> print(tostring(el, encoding="unicode")) + + """ + cdef bytes _utf8_data + def __cinit__(self, data): + _utf8_data = _utf8(data) + if b']]>' in _utf8_data: + raise ValueError, "']]>' not allowed inside CDATA" + self._utf8_data = _utf8_data + + +def Entity(name): + """Entity(name) + + Entity factory. This factory function creates a special element + that will be serialized as an XML entity reference or character + reference. Note, however, that entities will not be automatically + declared in the document. A document that uses entity references + requires a DTD to define the entities. + """ + cdef _Document doc + cdef xmlNode* c_node + cdef xmlDoc* c_doc + name_utf = _utf8(name) + c_name = _xcstr(name_utf) + if c_name[0] == c'#': + if not _characterReferenceIsValid(c_name + 1): + raise ValueError, f"Invalid character reference: '{name}'" + elif not _xmlNameIsValid(c_name): + raise ValueError, f"Invalid entity reference: '{name}'" + c_doc = _newXMLDoc() + doc = _documentFactory(c_doc, None) + c_node = _createEntity(c_doc, c_name) + tree.xmlAddChild(c_doc, c_node) + return _elementFactory(doc, c_node) + + +def SubElement(_Element _parent not None, _tag, + attrib=None, nsmap=None, **_extra): + """SubElement(_parent, _tag, attrib=None, nsmap=None, **_extra) + + Subelement factory. This function creates an element instance, and + appends it to an existing element. + """ + return _makeSubElement(_parent, _tag, None, None, attrib, nsmap, _extra) + + +def ElementTree(_Element element=None, *, file=None, _BaseParser parser=None): + """ElementTree(element=None, file=None, parser=None) + + ElementTree wrapper class. + """ + cdef xmlNode* c_next + cdef xmlNode* c_node + cdef xmlNode* c_node_copy + cdef xmlDoc* c_doc + cdef _ElementTree etree + cdef _Document doc + + if element is not None: + doc = element._doc + elif file is not None: + try: + doc = _parseDocument(file, parser, None) + except _TargetParserResult as result_container: + return result_container.result + else: + c_doc = _newXMLDoc() + doc = _documentFactory(c_doc, parser) + + return _elementTreeFactory(doc, element) + + +def HTML(text, _BaseParser parser=None, *, base_url=None): + """HTML(text, parser=None, base_url=None) + + Parses an HTML document from a string constant. Returns the root + node (or the result returned by a parser target). This function + can be used to embed "HTML literals" in Python code. + + To override the parser with a different ``HTMLParser`` you can pass it to + the ``parser`` keyword argument. + + The ``base_url`` keyword argument allows to set the original base URL of + the document to support relative Paths when looking up external entities + (DTD, XInclude, ...). 
+ """ + cdef _Document doc + if parser is None: + parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser() + if not isinstance(parser, HTMLParser): + parser = __DEFAULT_HTML_PARSER + try: + doc = _parseMemoryDocument(text, base_url, parser) + return doc.getroot() + except _TargetParserResult as result_container: + return result_container.result + + +def XML(text, _BaseParser parser=None, *, base_url=None): + """XML(text, parser=None, base_url=None) + + Parses an XML document or fragment from a string constant. + Returns the root node (or the result returned by a parser target). + This function can be used to embed "XML literals" in Python code, + like in + + >>> root = XML("") + >>> print(root.tag) + root + + To override the parser with a different ``XMLParser`` you can pass it to + the ``parser`` keyword argument. + + The ``base_url`` keyword argument allows to set the original base URL of + the document to support relative Paths when looking up external entities + (DTD, XInclude, ...). + """ + cdef _Document doc + if parser is None: + parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser() + if not isinstance(parser, XMLParser): + parser = __DEFAULT_XML_PARSER + try: + doc = _parseMemoryDocument(text, base_url, parser) + return doc.getroot() + except _TargetParserResult as result_container: + return result_container.result + + +def fromstring(text, _BaseParser parser=None, *, base_url=None): + """fromstring(text, parser=None, base_url=None) + + Parses an XML document or fragment from a string. Returns the + root node (or the result returned by a parser target). + + To override the default parser with a different parser you can pass it to + the ``parser`` keyword argument. + + The ``base_url`` keyword argument allows to set the original base URL of + the document to support relative Paths when looking up external entities + (DTD, XInclude, ...). + """ + cdef _Document doc + try: + doc = _parseMemoryDocument(text, base_url, parser) + return doc.getroot() + except _TargetParserResult as result_container: + return result_container.result + + +def fromstringlist(strings, _BaseParser parser=None): + """fromstringlist(strings, parser=None) + + Parses an XML document from a sequence of strings. Returns the + root node (or the result returned by a parser target). + + To override the default parser with a different parser you can pass it to + the ``parser`` keyword argument. + """ + cdef _Document doc + if isinstance(strings, (bytes, unicode)): + raise ValueError("passing a single string into fromstringlist() is not" + " efficient, use fromstring() instead") + if parser is None: + parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser() + feed = parser.feed + for data in strings: + feed(data) + return parser.close() + + +def iselement(element): + """iselement(element) + + Checks if an object appears to be a valid element object. + """ + return isinstance(element, _Element) and (<_Element>element)._c_node is not NULL + + +def indent(tree, space=" ", *, Py_ssize_t level=0): + """indent(tree, space=" ", level=0) + + Indent an XML document by inserting newlines and indentation space + after elements. + + *tree* is the ElementTree or Element to modify. The (root) element + itself will not be changed, but the tail text of all elements in its + subtree will be adapted. + + *space* is the whitespace to insert for each indentation level, two + space characters by default. + + *level* is the initial indentation level. 
Setting this to a higher + value than 0 can be used for indenting subtrees that are more deeply + nested inside of a document. + """ + root = _rootNodeOrRaise(tree) + if level < 0: + raise ValueError(f"Initial indentation level must be >= 0, got {level}") + if _hasChild(root._c_node): + space = _utf8(space) + indent = b"\n" + level * space + _indent_children(root._c_node, 1, space, [indent, indent + space]) + + +cdef int _indent_children(xmlNode* c_node, Py_ssize_t level, bytes one_space, list indentations) except -1: + # Reuse indentation strings for speed. + if len(indentations) <= level: + indentations.append(indentations[-1] + one_space) + + # Start a new indentation level for the first child. + child_indentation = indentations[level] + if not _hasNonWhitespaceText(c_node): + _setNodeText(c_node, child_indentation) + + # Recursively indent all children. + cdef xmlNode* c_child = _findChildForwards(c_node, 0) + while c_child is not NULL: + if _hasChild(c_child): + _indent_children(c_child, level+1, one_space, indentations) + c_next_child = _nextElement(c_child) + if not _hasNonWhitespaceTail(c_child): + if c_next_child is NULL: + # Dedent after the last child. + child_indentation = indentations[level-1] + _setTailText(c_child, child_indentation) + c_child = c_next_child + return 0 + + +def dump(_Element elem not None, *, bint pretty_print=True, bint with_tail=True): + """dump(elem, pretty_print=True, with_tail=True) + + Writes an element tree or element structure to sys.stdout. This function + should be used for debugging only. + """ + xml = tostring(elem, pretty_print=pretty_print, with_tail=with_tail, encoding='unicode') + if not pretty_print: + xml += '\n' + sys.stdout.write(xml) + + +def tostring(element_or_tree, *, encoding=None, method="xml", + xml_declaration=None, bint pretty_print=False, bint with_tail=True, + standalone=None, doctype=None, + # method='c14n' + bint exclusive=False, inclusive_ns_prefixes=None, + # method='c14n2' + bint with_comments=True, bint strip_text=False, + ): + """tostring(element_or_tree, encoding=None, method="xml", + xml_declaration=None, pretty_print=False, with_tail=True, + standalone=None, doctype=None, + exclusive=False, inclusive_ns_prefixes=None, + with_comments=True, strip_text=False, + ) + + Serialize an element to an encoded string representation of its XML + tree. + + Defaults to ASCII encoding without XML declaration. This + behaviour can be configured with the keyword arguments 'encoding' + (string) and 'xml_declaration' (bool). Note that changing the + encoding to a non UTF-8 compatible encoding will enable a + declaration by default. + + You can also serialise to a Unicode string without declaration by + passing the name ``'unicode'`` as encoding (or the ``str`` function + in Py3 or ``unicode`` in Py2). This changes the return value from + a byte string to an unencoded unicode string. + + The keyword argument 'pretty_print' (bool) enables formatted XML. + + The keyword argument 'method' selects the output method: 'xml', + 'html', plain 'text' (text content without tags), 'c14n' or 'c14n2'. + Default is 'xml'. + + With ``method="c14n"`` (C14N version 1), the options ``exclusive``, + ``with_comments`` and ``inclusive_ns_prefixes`` request exclusive + C14N, include comments, and list the inclusive prefixes respectively. + + With ``method="c14n2"`` (C14N version 2), the ``with_comments`` and + ``strip_text`` options control the output of comments and text space + according to C14N 2.0. 
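A sketch of indent() as described above, passing the indentation string explicitly:

    from lxml import etree

    root = etree.XML('<root><a><b/></a></root>')
    etree.indent(root, space='  ')    # insert newlines plus two-space indents
    print(etree.tostring(root, encoding='unicode'))
    # <root>
    #   <a>
    #     <b/>
    #   </a>
    # </root>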
+ + Passing a boolean value to the ``standalone`` option will output + an XML declaration with the corresponding ``standalone`` flag. + + The ``doctype`` option allows passing in a plain string that will + be serialised before the XML tree. Note that passing in non + well-formed content here will make the XML output non well-formed. + Also, an existing doctype in the document tree will not be removed + when serialising an ElementTree instance. + + You can prevent the tail text of the element from being serialised + by passing the boolean ``with_tail`` option. This has no impact + on the tail text of children, which will always be serialised. + """ + cdef bint write_declaration + cdef int is_standalone + # C14N serialisation + if method in ('c14n', 'c14n2'): + if encoding is not None: + raise ValueError("Cannot specify encoding with C14N") + if xml_declaration: + raise ValueError("Cannot enable XML declaration in C14N") + if method == 'c14n': + return _tostringC14N(element_or_tree, exclusive, with_comments, inclusive_ns_prefixes) + else: + out = BytesIO() + target = C14NWriterTarget( + utf8_writer(out).write, + with_comments=with_comments, strip_text=strip_text) + _tree_to_target(element_or_tree, target) + return out.getvalue() + if not with_comments: + raise ValueError("Can only discard comments in C14N serialisation") + if strip_text: + raise ValueError("Can only strip text in C14N 2.0 serialisation") + if encoding is unicode or (encoding is not None and encoding.lower() == 'unicode'): + if xml_declaration: + raise ValueError, \ + "Serialisation to unicode must not request an XML declaration" + write_declaration = 0 + encoding = unicode + elif xml_declaration is None: + # by default, write an XML declaration only for non-standard encodings + write_declaration = encoding is not None and encoding.upper() not in \ + ('ASCII', 'UTF-8', 'UTF8', 'US-ASCII') + else: + write_declaration = xml_declaration + if encoding is None: + encoding = 'ASCII' + if standalone is None: + is_standalone = -1 + elif standalone: + write_declaration = 1 + is_standalone = 1 + else: + write_declaration = 1 + is_standalone = 0 + + if isinstance(element_or_tree, _Element): + return _tostring(<_Element>element_or_tree, encoding, doctype, method, + write_declaration, 0, pretty_print, with_tail, + is_standalone) + elif isinstance(element_or_tree, _ElementTree): + return _tostring((<_ElementTree>element_or_tree)._context_node, + encoding, doctype, method, write_declaration, 1, + pretty_print, with_tail, is_standalone) + else: + raise TypeError, f"Type '{python._fqtypename(element_or_tree).decode('utf8')}' cannot be serialized." + + + +def tostringlist(element_or_tree, *args, **kwargs): + """tostringlist(element_or_tree, *args, **kwargs) + + Serialize an element to an encoded string representation of its XML + tree, stored in a list of partial strings. + + This is purely for ElementTree 1.3 compatibility. The result is a + single string wrapped in a list. + """ + return [tostring(element_or_tree, *args, **kwargs)] + + +def tounicode(element_or_tree, *, method="xml", bint pretty_print=False, + bint with_tail=True, doctype=None): + """tounicode(element_or_tree, method="xml", pretty_print=False, + with_tail=True, doctype=None) + + Serialize an element to the Python unicode representation of its XML + tree. + + :deprecated: use ``tostring(el, encoding='unicode')`` instead. 
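A small sketch of the serialisation options documented above for tostring():

    from lxml import etree

    root = etree.XML('<root><a>text</a></root>')
    etree.tostring(root)                                      # b'<root><a>text</a></root>'
    etree.tostring(root, encoding='unicode')                  # str result, no declaration
    etree.tostring(root, xml_declaration=True, encoding='UTF-8')
    etree.tostring(root, method='text')                       # b'text'
    etree.tostring(root, method='c14n')                       # canonical XML 1.0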
+ + Note that the result does not carry an XML encoding declaration and is + therefore not necessarily suited for serialization to byte streams without + further treatment. + + The boolean keyword argument 'pretty_print' enables formatted XML. + + The keyword argument 'method' selects the output method: 'xml', + 'html' or plain 'text'. + + You can prevent the tail text of the element from being serialised + by passing the boolean ``with_tail`` option. This has no impact + on the tail text of children, which will always be serialised. + """ + if isinstance(element_or_tree, _Element): + return _tostring(<_Element>element_or_tree, unicode, doctype, method, + 0, 0, pretty_print, with_tail, -1) + elif isinstance(element_or_tree, _ElementTree): + return _tostring((<_ElementTree>element_or_tree)._context_node, + unicode, doctype, method, 0, 1, pretty_print, + with_tail, -1) + else: + raise TypeError, f"Type '{type(element_or_tree)}' cannot be serialized." + + +def parse(source, _BaseParser parser=None, *, base_url=None): + """parse(source, parser=None, base_url=None) + + Return an ElementTree object loaded with source elements. If no parser + is provided as second argument, the default parser is used. + + The ``source`` can be any of the following: + + - a file name/path + - a file object + - a file-like object + - a URL using the HTTP or FTP protocol + + To parse from a string, use the ``fromstring()`` function instead. + + Note that it is generally faster to parse from a file path or URL + than from an open file object or file-like object. Transparent + decompression from gzip compressed sources is supported (unless + explicitly disabled in libxml2). + + The ``base_url`` keyword allows setting a URL for the document + when parsing from a file-like object. This is needed when looking + up external entities (DTD, XInclude, ...) with relative paths. + """ + cdef _Document doc + try: + doc = _parseDocument(source, parser, base_url) + return _elementTreeFactory(doc, None) + except _TargetParserResult as result_container: + return result_container.result + + +def adopt_external_document(capsule, _BaseParser parser=None): + """adopt_external_document(capsule, parser=None) + + Unpack a libxml2 document pointer from a PyCapsule and wrap it in an + lxml ElementTree object. + + This allows external libraries to build XML/HTML trees using libxml2 + and then pass them efficiently into lxml for further processing. + + If a ``parser`` is provided, it will be used for configuring the + lxml document. No parsing will be done. + + The capsule must have the name ``"libxml2:xmlDoc"`` and its pointer + value must reference a correct libxml2 document of type ``xmlDoc*``. + The creator of the capsule must take care to correctly clean up the + document using an appropriate capsule destructor. By default, the + libxml2 document will be copied to let lxml safely own the memory + of the internal tree that it uses. + + If the capsule context is non-NULL, it must point to a C string that + can be compared using ``strcmp()``. If the context string equals + ``"destructor:xmlFreeDoc"``, the libxml2 document will not be copied + but the capsule invalidated instead by clearing its destructor and + name. That way, lxml takes ownership of the libxml2 document in memory + without creating a copy first, and the capsule destructor will not be + called. The document will then eventually be cleaned up by lxml using + the libxml2 API function ``xmlFreeDoc()`` once it is no longer used. 
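A minimal sketch of parse() as documented above, reading from a file-like object:

    from io import BytesIO
    from lxml import etree

    tree = etree.parse(BytesIO(b'<root><a/></root>'))
    root = tree.getroot()
    # equivalently, from a file path or URL:
    # tree = etree.parse('data.xml')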
+ + If no copy is made, later modifications of the tree outside of lxml + should not be attempted after transferring the ownership. + """ + cdef xmlDoc* c_doc + cdef bint is_owned = False + c_doc = python.lxml_unpack_xmldoc_capsule(capsule, &is_owned) + doc = _adoptForeignDoc(c_doc, parser, is_owned) + return _elementTreeFactory(doc, None) + + +################################################################################ +# Include submodules + +include "readonlytree.pxi" # Read-only implementation of Element proxies +include "classlookup.pxi" # Element class lookup mechanisms +include "nsclasses.pxi" # Namespace implementation and registry +include "docloader.pxi" # Support for custom document loaders +include "parser.pxi" # XML and HTML parsers +include "saxparser.pxi" # SAX-like Parser interface and tree builder +include "parsertarget.pxi" # ET Parser target +include "serializer.pxi" # XML output functions +include "iterparse.pxi" # incremental XML parsing +include "xmlid.pxi" # XMLID and IDDict +include "xinclude.pxi" # XInclude +include "cleanup.pxi" # Cleanup and recursive element removal functions + + +################################################################################ +# Include submodules for XPath and XSLT + +include "extensions.pxi" # XPath/XSLT extension functions +include "xpath.pxi" # XPath evaluation +include "xslt.pxi" # XSL transformations +include "xsltext.pxi" # XSL extension elements + + +################################################################################ +# Validation + +cdef class DocumentInvalid(LxmlError): + """Validation error. + + Raised by all document validators when their ``assertValid(tree)`` + method fails. + """ + + +cdef class _Validator: + "Base class for XML validators." + cdef _ErrorLog _error_log + def __cinit__(self): + self._error_log = _ErrorLog() + + def validate(self, etree): + """validate(self, etree) + + Validate the document using this schema. + + Returns true if document is valid, false if not. + """ + return self(etree) + + def assertValid(self, etree): + """assertValid(self, etree) + + Raises `DocumentInvalid` if the document does not comply with the schema. + """ + if not self(etree): + raise DocumentInvalid(self._error_log._buildExceptionMessage( + "Document does not comply with schema"), + self._error_log) + + def assert_(self, etree): + """assert_(self, etree) + + Raises `AssertionError` if the document does not comply with the schema. 
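A sketch of the validator interface documented below (validate/assertValid), using XMLSchema as one concrete validator; the schema document here is illustrative:

    from lxml import etree

    schema_doc = etree.XML(
        '<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">'
        '<xs:element name="root"/></xs:schema>')
    schema = etree.XMLSchema(schema_doc)
    doc = etree.XML('<root/>')
    assert schema.validate(doc)    # returns True/False
    schema.assertValid(doc)        # raises DocumentInvalid on failure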
+ """ + if not self(etree): + raise AssertionError, self._error_log._buildExceptionMessage( + "Document does not comply with schema") + + cpdef _append_log_message(self, int domain, int type, int level, int line, + message, filename): + self._error_log._receiveGeneric(domain, type, level, line, message, + filename) + + cpdef _clear_error_log(self): + self._error_log.clear() + + @property + def error_log(self): + """The log of validation errors and warnings.""" + assert self._error_log is not None, "XPath evaluator not initialised" + return self._error_log.copy() + +include "dtd.pxi" # DTD +include "relaxng.pxi" # RelaxNG +include "xmlschema.pxi" # XMLSchema +include "schematron.pxi" # Schematron (requires libxml2 2.6.21+) + +################################################################################ +# Public C API + +include "public-api.pxi" + +################################################################################ +# Other stuff + +include "debug.pxi" diff --git a/venv/lib/python3.10/site-packages/lxml/etree_api.h b/venv/lib/python3.10/site-packages/lxml/etree_api.h new file mode 100644 index 0000000000000000000000000000000000000000..b0ebb13dbd535c0b4bcb4ef39536aaf0aa0f0ffe --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/etree_api.h @@ -0,0 +1,195 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE_API__lxml__etree +#define __PYX_HAVE_API__lxml__etree +#ifdef __MINGW64__ +#define MS_WIN64 +#endif +#include "Python.h" +#include "etree.h" + +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_deepcopyNodeToDocument)(struct LxmlDocument *, xmlNode *) = 0; +#define deepcopyNodeToDocument __pyx_api_f_4lxml_5etree_deepcopyNodeToDocument +static struct LxmlElementTree *(*__pyx_api_f_4lxml_5etree_elementTreeFactory)(struct LxmlElement *) = 0; +#define elementTreeFactory __pyx_api_f_4lxml_5etree_elementTreeFactory +static struct LxmlElementTree *(*__pyx_api_f_4lxml_5etree_newElementTree)(struct LxmlElement *, PyObject *) = 0; +#define newElementTree __pyx_api_f_4lxml_5etree_newElementTree +static struct LxmlElementTree *(*__pyx_api_f_4lxml_5etree_adoptExternalDocument)(xmlDoc *, PyObject *, int) = 0; +#define adoptExternalDocument __pyx_api_f_4lxml_5etree_adoptExternalDocument +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_elementFactory)(struct LxmlDocument *, xmlNode *) = 0; +#define elementFactory __pyx_api_f_4lxml_5etree_elementFactory +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_makeElement)(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *) = 0; +#define makeElement __pyx_api_f_4lxml_5etree_makeElement +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_makeSubElement)(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *) = 0; +#define makeSubElement __pyx_api_f_4lxml_5etree_makeSubElement +static void (*__pyx_api_f_4lxml_5etree_setElementClassLookupFunction)(_element_class_lookup_function, PyObject *) = 0; +#define setElementClassLookupFunction __pyx_api_f_4lxml_5etree_setElementClassLookupFunction +static PyObject *(*__pyx_api_f_4lxml_5etree_lookupDefaultElementClass)(PyObject *, PyObject *, xmlNode *) = 0; +#define lookupDefaultElementClass __pyx_api_f_4lxml_5etree_lookupDefaultElementClass +static PyObject *(*__pyx_api_f_4lxml_5etree_lookupNamespaceElementClass)(PyObject *, PyObject *, xmlNode *) = 0; +#define lookupNamespaceElementClass __pyx_api_f_4lxml_5etree_lookupNamespaceElementClass +static PyObject *(*__pyx_api_f_4lxml_5etree_callLookupFallback)(struct 
LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *) = 0; +#define callLookupFallback __pyx_api_f_4lxml_5etree_callLookupFallback +static int (*__pyx_api_f_4lxml_5etree_tagMatches)(xmlNode *, const xmlChar *, const xmlChar *) = 0; +#define tagMatches __pyx_api_f_4lxml_5etree_tagMatches +static struct LxmlDocument *(*__pyx_api_f_4lxml_5etree_documentOrRaise)(PyObject *) = 0; +#define documentOrRaise __pyx_api_f_4lxml_5etree_documentOrRaise +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_rootNodeOrRaise)(PyObject *) = 0; +#define rootNodeOrRaise __pyx_api_f_4lxml_5etree_rootNodeOrRaise +static int (*__pyx_api_f_4lxml_5etree_hasText)(xmlNode *) = 0; +#define hasText __pyx_api_f_4lxml_5etree_hasText +static int (*__pyx_api_f_4lxml_5etree_hasTail)(xmlNode *) = 0; +#define hasTail __pyx_api_f_4lxml_5etree_hasTail +static PyObject *(*__pyx_api_f_4lxml_5etree_textOf)(xmlNode *) = 0; +#define textOf __pyx_api_f_4lxml_5etree_textOf +static PyObject *(*__pyx_api_f_4lxml_5etree_tailOf)(xmlNode *) = 0; +#define tailOf __pyx_api_f_4lxml_5etree_tailOf +static int (*__pyx_api_f_4lxml_5etree_setNodeText)(xmlNode *, PyObject *) = 0; +#define setNodeText __pyx_api_f_4lxml_5etree_setNodeText +static int (*__pyx_api_f_4lxml_5etree_setTailText)(xmlNode *, PyObject *) = 0; +#define setTailText __pyx_api_f_4lxml_5etree_setTailText +static PyObject *(*__pyx_api_f_4lxml_5etree_attributeValue)(xmlNode *, xmlAttr *) = 0; +#define attributeValue __pyx_api_f_4lxml_5etree_attributeValue +static PyObject *(*__pyx_api_f_4lxml_5etree_attributeValueFromNsName)(xmlNode *, const xmlChar *, const xmlChar *) = 0; +#define attributeValueFromNsName __pyx_api_f_4lxml_5etree_attributeValueFromNsName +static PyObject *(*__pyx_api_f_4lxml_5etree_getAttributeValue)(struct LxmlElement *, PyObject *, PyObject *) = 0; +#define getAttributeValue __pyx_api_f_4lxml_5etree_getAttributeValue +static PyObject *(*__pyx_api_f_4lxml_5etree_iterattributes)(struct LxmlElement *, int) = 0; +#define iterattributes __pyx_api_f_4lxml_5etree_iterattributes +static PyObject *(*__pyx_api_f_4lxml_5etree_collectAttributes)(xmlNode *, int) = 0; +#define collectAttributes __pyx_api_f_4lxml_5etree_collectAttributes +static int (*__pyx_api_f_4lxml_5etree_setAttributeValue)(struct LxmlElement *, PyObject *, PyObject *) = 0; +#define setAttributeValue __pyx_api_f_4lxml_5etree_setAttributeValue +static int (*__pyx_api_f_4lxml_5etree_delAttribute)(struct LxmlElement *, PyObject *) = 0; +#define delAttribute __pyx_api_f_4lxml_5etree_delAttribute +static int (*__pyx_api_f_4lxml_5etree_delAttributeFromNsName)(xmlNode *, const xmlChar *, const xmlChar *) = 0; +#define delAttributeFromNsName __pyx_api_f_4lxml_5etree_delAttributeFromNsName +static int (*__pyx_api_f_4lxml_5etree_hasChild)(xmlNode *) = 0; +#define hasChild __pyx_api_f_4lxml_5etree_hasChild +static xmlNode *(*__pyx_api_f_4lxml_5etree_findChild)(xmlNode *, Py_ssize_t) = 0; +#define findChild __pyx_api_f_4lxml_5etree_findChild +static xmlNode *(*__pyx_api_f_4lxml_5etree_findChildForwards)(xmlNode *, Py_ssize_t) = 0; +#define findChildForwards __pyx_api_f_4lxml_5etree_findChildForwards +static xmlNode *(*__pyx_api_f_4lxml_5etree_findChildBackwards)(xmlNode *, Py_ssize_t) = 0; +#define findChildBackwards __pyx_api_f_4lxml_5etree_findChildBackwards +static xmlNode *(*__pyx_api_f_4lxml_5etree_nextElement)(xmlNode *) = 0; +#define nextElement __pyx_api_f_4lxml_5etree_nextElement +static xmlNode *(*__pyx_api_f_4lxml_5etree_previousElement)(xmlNode *) = 0; +#define previousElement 
__pyx_api_f_4lxml_5etree_previousElement +static void (*__pyx_api_f_4lxml_5etree_appendChild)(struct LxmlElement *, struct LxmlElement *) = 0; +#define appendChild __pyx_api_f_4lxml_5etree_appendChild +static int (*__pyx_api_f_4lxml_5etree_appendChildToElement)(struct LxmlElement *, struct LxmlElement *) = 0; +#define appendChildToElement __pyx_api_f_4lxml_5etree_appendChildToElement +static PyObject *(*__pyx_api_f_4lxml_5etree_pyunicode)(const xmlChar *) = 0; +#define pyunicode __pyx_api_f_4lxml_5etree_pyunicode +static PyObject *(*__pyx_api_f_4lxml_5etree_utf8)(PyObject *) = 0; +#define utf8 __pyx_api_f_4lxml_5etree_utf8 +static PyObject *(*__pyx_api_f_4lxml_5etree_getNsTag)(PyObject *) = 0; +#define getNsTag __pyx_api_f_4lxml_5etree_getNsTag +static PyObject *(*__pyx_api_f_4lxml_5etree_getNsTagWithEmptyNs)(PyObject *) = 0; +#define getNsTagWithEmptyNs __pyx_api_f_4lxml_5etree_getNsTagWithEmptyNs +static PyObject *(*__pyx_api_f_4lxml_5etree_namespacedName)(xmlNode *) = 0; +#define namespacedName __pyx_api_f_4lxml_5etree_namespacedName +static PyObject *(*__pyx_api_f_4lxml_5etree_namespacedNameFromNsName)(const xmlChar *, const xmlChar *) = 0; +#define namespacedNameFromNsName __pyx_api_f_4lxml_5etree_namespacedNameFromNsName +static void (*__pyx_api_f_4lxml_5etree_iteratorStoreNext)(struct LxmlElementIterator *, struct LxmlElement *) = 0; +#define iteratorStoreNext __pyx_api_f_4lxml_5etree_iteratorStoreNext +static void (*__pyx_api_f_4lxml_5etree_initTagMatch)(struct LxmlElementTagMatcher *, PyObject *) = 0; +#define initTagMatch __pyx_api_f_4lxml_5etree_initTagMatch +static xmlNs *(*__pyx_api_f_4lxml_5etree_findOrBuildNodeNsPrefix)(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *) = 0; +#define findOrBuildNodeNsPrefix __pyx_api_f_4lxml_5etree_findOrBuildNodeNsPrefix +#ifndef __PYX_HAVE_RT_ImportFunction_3_0_10 +#define __PYX_HAVE_RT_ImportFunction_3_0_10 +static int __Pyx_ImportFunction_3_0_10(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + + +static int import_lxml__etree(void) { + PyObject *module = 0; + module = PyImport_ImportModule("lxml.etree"); + if (!module) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "deepcopyNodeToDocument", (void (**)(void))&__pyx_api_f_4lxml_5etree_deepcopyNodeToDocument, "struct LxmlElement *(struct LxmlDocument *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "elementTreeFactory", (void (**)(void))&__pyx_api_f_4lxml_5etree_elementTreeFactory, "struct LxmlElementTree *(struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "newElementTree", (void (**)(void))&__pyx_api_f_4lxml_5etree_newElementTree, "struct LxmlElementTree *(struct LxmlElement *, PyObject *)") < 0) goto bad; + if 
(__Pyx_ImportFunction_3_0_10(module, "adoptExternalDocument", (void (**)(void))&__pyx_api_f_4lxml_5etree_adoptExternalDocument, "struct LxmlElementTree *(xmlDoc *, PyObject *, int)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "elementFactory", (void (**)(void))&__pyx_api_f_4lxml_5etree_elementFactory, "struct LxmlElement *(struct LxmlDocument *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "makeElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_makeElement, "struct LxmlElement *(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "makeSubElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_makeSubElement, "struct LxmlElement *(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setElementClassLookupFunction", (void (**)(void))&__pyx_api_f_4lxml_5etree_setElementClassLookupFunction, "void (_element_class_lookup_function, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "lookupDefaultElementClass", (void (**)(void))&__pyx_api_f_4lxml_5etree_lookupDefaultElementClass, "PyObject *(PyObject *, PyObject *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "lookupNamespaceElementClass", (void (**)(void))&__pyx_api_f_4lxml_5etree_lookupNamespaceElementClass, "PyObject *(PyObject *, PyObject *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "callLookupFallback", (void (**)(void))&__pyx_api_f_4lxml_5etree_callLookupFallback, "PyObject *(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "tagMatches", (void (**)(void))&__pyx_api_f_4lxml_5etree_tagMatches, "int (xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "documentOrRaise", (void (**)(void))&__pyx_api_f_4lxml_5etree_documentOrRaise, "struct LxmlDocument *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "rootNodeOrRaise", (void (**)(void))&__pyx_api_f_4lxml_5etree_rootNodeOrRaise, "struct LxmlElement *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "hasText", (void (**)(void))&__pyx_api_f_4lxml_5etree_hasText, "int (xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "hasTail", (void (**)(void))&__pyx_api_f_4lxml_5etree_hasTail, "int (xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "textOf", (void (**)(void))&__pyx_api_f_4lxml_5etree_textOf, "PyObject *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "tailOf", (void (**)(void))&__pyx_api_f_4lxml_5etree_tailOf, "PyObject *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setNodeText", (void (**)(void))&__pyx_api_f_4lxml_5etree_setNodeText, "int (xmlNode *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setTailText", (void (**)(void))&__pyx_api_f_4lxml_5etree_setTailText, "int (xmlNode *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "attributeValue", (void (**)(void))&__pyx_api_f_4lxml_5etree_attributeValue, "PyObject *(xmlNode *, xmlAttr *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "attributeValueFromNsName", (void (**)(void))&__pyx_api_f_4lxml_5etree_attributeValueFromNsName, "PyObject *(xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + if 
(__Pyx_ImportFunction_3_0_10(module, "getAttributeValue", (void (**)(void))&__pyx_api_f_4lxml_5etree_getAttributeValue, "PyObject *(struct LxmlElement *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "iterattributes", (void (**)(void))&__pyx_api_f_4lxml_5etree_iterattributes, "PyObject *(struct LxmlElement *, int)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "collectAttributes", (void (**)(void))&__pyx_api_f_4lxml_5etree_collectAttributes, "PyObject *(xmlNode *, int)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setAttributeValue", (void (**)(void))&__pyx_api_f_4lxml_5etree_setAttributeValue, "int (struct LxmlElement *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "delAttribute", (void (**)(void))&__pyx_api_f_4lxml_5etree_delAttribute, "int (struct LxmlElement *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "delAttributeFromNsName", (void (**)(void))&__pyx_api_f_4lxml_5etree_delAttributeFromNsName, "int (xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "hasChild", (void (**)(void))&__pyx_api_f_4lxml_5etree_hasChild, "int (xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findChild", (void (**)(void))&__pyx_api_f_4lxml_5etree_findChild, "xmlNode *(xmlNode *, Py_ssize_t)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findChildForwards", (void (**)(void))&__pyx_api_f_4lxml_5etree_findChildForwards, "xmlNode *(xmlNode *, Py_ssize_t)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findChildBackwards", (void (**)(void))&__pyx_api_f_4lxml_5etree_findChildBackwards, "xmlNode *(xmlNode *, Py_ssize_t)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "nextElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_nextElement, "xmlNode *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "previousElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_previousElement, "xmlNode *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "appendChild", (void (**)(void))&__pyx_api_f_4lxml_5etree_appendChild, "void (struct LxmlElement *, struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "appendChildToElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_appendChildToElement, "int (struct LxmlElement *, struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyunicode", (void (**)(void))&__pyx_api_f_4lxml_5etree_pyunicode, "PyObject *(const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "utf8", (void (**)(void))&__pyx_api_f_4lxml_5etree_utf8, "PyObject *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "getNsTag", (void (**)(void))&__pyx_api_f_4lxml_5etree_getNsTag, "PyObject *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "getNsTagWithEmptyNs", (void (**)(void))&__pyx_api_f_4lxml_5etree_getNsTagWithEmptyNs, "PyObject *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "namespacedName", (void (**)(void))&__pyx_api_f_4lxml_5etree_namespacedName, "PyObject *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "namespacedNameFromNsName", (void (**)(void))&__pyx_api_f_4lxml_5etree_namespacedNameFromNsName, "PyObject *(const xmlChar *, const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "iteratorStoreNext", (void (**)(void))&__pyx_api_f_4lxml_5etree_iteratorStoreNext, 
"void (struct LxmlElementIterator *, struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "initTagMatch", (void (**)(void))&__pyx_api_f_4lxml_5etree_initTagMatch, "void (struct LxmlElementTagMatcher *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findOrBuildNodeNsPrefix", (void (**)(void))&__pyx_api_f_4lxml_5etree_findOrBuildNodeNsPrefix, "xmlNs *(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + Py_DECREF(module); module = 0; + return 0; + bad: + Py_XDECREF(module); + return -1; +} + +#endif /* !__PYX_HAVE_API__lxml__etree */ diff --git a/venv/lib/python3.10/site-packages/lxml/extensions.pxi b/venv/lib/python3.10/site-packages/lxml/extensions.pxi new file mode 100644 index 0000000000000000000000000000000000000000..2a2c94ecc7a6baae223ff7d613bf76d1b699be99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/extensions.pxi @@ -0,0 +1,833 @@ +# support for extension functions in XPath and XSLT + +cdef class XPathError(LxmlError): + """Base class of all XPath errors. + """ + +cdef class XPathEvalError(XPathError): + """Error during XPath evaluation. + """ + +cdef class XPathFunctionError(XPathEvalError): + """Internal error looking up an XPath extension function. + """ + +cdef class XPathResultError(XPathEvalError): + """Error handling an XPath result. + """ + + +# forward declarations + +ctypedef int (*_register_function)(void* ctxt, name_utf, ns_uri_utf) +cdef class _ExsltRegExp + +################################################################################ +# Base class for XSLT and XPath evaluation contexts: functions, namespaces, ... + +@cython.internal +cdef class _BaseContext: + cdef xpath.xmlXPathContext* _xpathCtxt + cdef _Document _doc + cdef dict _extensions + cdef list _namespaces + cdef list _global_namespaces + cdef dict _utf_refs + cdef dict _function_cache + cdef dict _eval_context_dict + cdef bint _build_smart_strings + # for exception handling and temporary reference keeping: + cdef _TempStore _temp_refs + cdef set _temp_documents + cdef _ExceptionContext _exc + cdef _ErrorLog _error_log + + def __cinit__(self): + self._xpathCtxt = NULL + + def __init__(self, namespaces, extensions, error_log, enable_regexp, + build_smart_strings): + cdef _ExsltRegExp _regexp + cdef dict new_extensions + cdef list ns + self._utf_refs = {} + self._global_namespaces = [] + self._function_cache = {} + self._eval_context_dict = None + self._error_log = error_log + + if extensions is not None: + # convert extensions to UTF-8 + if isinstance(extensions, dict): + extensions = (extensions,) + # format: [ {(ns, name):function} ] -> {(ns_utf, name_utf):function} + new_extensions = {} + for extension in extensions: + for (ns_uri, name), function in extension.items(): + if name is None: + raise ValueError, "extensions must have non empty names" + ns_utf = self._to_utf(ns_uri) + name_utf = self._to_utf(name) + new_extensions[(ns_utf, name_utf)] = function + extensions = new_extensions or None + + if namespaces is not None: + if isinstance(namespaces, dict): + namespaces = namespaces.items() + if namespaces: + ns = [] + for prefix, ns_uri in namespaces: + if prefix is None or not prefix: + raise TypeError, \ + "empty namespace prefix is not supported in XPath" + if ns_uri is None or not ns_uri: + raise TypeError, \ + "setting default namespace is not supported in XPath" + prefix_utf = self._to_utf(prefix) + ns_uri_utf = self._to_utf(ns_uri) + ns.append( (prefix_utf, ns_uri_utf) ) + namespaces = ns + else: 
+ namespaces = None + + self._doc = None + self._exc = _ExceptionContext() + self._extensions = extensions + self._namespaces = namespaces + self._temp_refs = _TempStore() + self._temp_documents = set() + self._build_smart_strings = build_smart_strings + + if enable_regexp: + _regexp = _ExsltRegExp() + _regexp._register_in_context(self) + + cdef _BaseContext _copy(self): + cdef _BaseContext context + if self._namespaces is not None: + namespaces = self._namespaces[:] + else: + namespaces = None + context = self.__class__(namespaces, None, self._error_log, False, + self._build_smart_strings) + if self._extensions is not None: + context._extensions = self._extensions.copy() + return context + + cdef bytes _to_utf(self, s): + "Convert to UTF-8 and keep a reference to the encoded string" + cdef python.PyObject* dict_result + if s is None: + return None + dict_result = python.PyDict_GetItem(self._utf_refs, s) + if dict_result is not NULL: + return dict_result + utf = _utf8(s) + self._utf_refs[s] = utf + if python.IS_PYPY: + # use C level refs, PyPy refs are not enough! + python.Py_INCREF(utf) + return utf + + cdef void _set_xpath_context(self, xpath.xmlXPathContext* xpathCtxt) noexcept: + self._xpathCtxt = xpathCtxt + xpathCtxt.userData = self + # Need a cast here because older libxml2 releases do not use 'const' in the functype. + xpathCtxt.error = _receiveXPathError + + @cython.final + cdef _register_context(self, _Document doc): + self._doc = doc + self._exc.clear() + + @cython.final + cdef _cleanup_context(self): + #xpath.xmlXPathRegisteredNsCleanup(self._xpathCtxt) + #self.unregisterGlobalNamespaces() + if python.IS_PYPY: + # clean up double refs in PyPy (see "_to_utf()" method) + for ref in self._utf_refs.itervalues(): + python.Py_DECREF(ref) + self._utf_refs.clear() + self._eval_context_dict = None + self._doc = None + + @cython.final + cdef _release_context(self): + if self._xpathCtxt is not NULL: + self._xpathCtxt.userData = NULL + self._xpathCtxt = NULL + + # namespaces (internal UTF-8 methods with leading '_') + + cdef addNamespace(self, prefix, ns_uri): + cdef list namespaces + if prefix is None: + raise TypeError, "empty prefix is not supported in XPath" + prefix_utf = self._to_utf(prefix) + ns_uri_utf = self._to_utf(ns_uri) + new_item = (prefix_utf, ns_uri_utf) + if self._namespaces is None: + self._namespaces = [new_item] + else: + namespaces = [] + for item in self._namespaces: + if item[0] == prefix_utf: + item = new_item + new_item = None + namespaces.append(item) + if new_item is not None: + namespaces.append(new_item) + self._namespaces = namespaces + if self._xpathCtxt is not NULL: + xpath.xmlXPathRegisterNs( + self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf)) + + cdef registerNamespace(self, prefix, ns_uri): + if prefix is None: + raise TypeError, "empty prefix is not supported in XPath" + prefix_utf = self._to_utf(prefix) + ns_uri_utf = self._to_utf(ns_uri) + self._global_namespaces.append(prefix_utf) + xpath.xmlXPathRegisterNs(self._xpathCtxt, + _xcstr(prefix_utf), _xcstr(ns_uri_utf)) + + cdef registerLocalNamespaces(self): + if self._namespaces is None: + return + for prefix_utf, ns_uri_utf in self._namespaces: + xpath.xmlXPathRegisterNs( + self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf)) + + cdef registerGlobalNamespaces(self): + cdef list ns_prefixes = _find_all_extension_prefixes() + if python.PyList_GET_SIZE(ns_prefixes) > 0: + for prefix_utf, ns_uri_utf in ns_prefixes: + self._global_namespaces.append(prefix_utf) + xpath.xmlXPathRegisterNs( + 
self._xpathCtxt, _xcstr(prefix_utf), _xcstr(ns_uri_utf)) + + cdef unregisterGlobalNamespaces(self): + if python.PyList_GET_SIZE(self._global_namespaces) > 0: + for prefix_utf in self._global_namespaces: + xpath.xmlXPathRegisterNs(self._xpathCtxt, + _xcstr(prefix_utf), NULL) + del self._global_namespaces[:] + + cdef void _unregisterNamespace(self, prefix_utf) noexcept: + xpath.xmlXPathRegisterNs(self._xpathCtxt, + _xcstr(prefix_utf), NULL) + + # extension functions + + cdef int _addLocalExtensionFunction(self, ns_utf, name_utf, function) except -1: + if self._extensions is None: + self._extensions = {} + self._extensions[(ns_utf, name_utf)] = function + return 0 + + cdef registerGlobalFunctions(self, void* ctxt, + _register_function reg_func): + cdef python.PyObject* dict_result + cdef dict d + for ns_utf, ns_functions in __FUNCTION_NAMESPACE_REGISTRIES.iteritems(): + dict_result = python.PyDict_GetItem( + self._function_cache, ns_utf) + if dict_result is not NULL: + d = dict_result + else: + d = {} + self._function_cache[ns_utf] = d + for name_utf, function in ns_functions.iteritems(): + d[name_utf] = function + reg_func(ctxt, name_utf, ns_utf) + + cdef registerLocalFunctions(self, void* ctxt, + _register_function reg_func): + cdef python.PyObject* dict_result + cdef dict d + if self._extensions is None: + return # done + last_ns = None + d = None + for (ns_utf, name_utf), function in self._extensions.iteritems(): + if ns_utf is not last_ns or d is None: + last_ns = ns_utf + dict_result = python.PyDict_GetItem( + self._function_cache, ns_utf) + if dict_result is not NULL: + d = dict_result + else: + d = {} + self._function_cache[ns_utf] = d + d[name_utf] = function + reg_func(ctxt, name_utf, ns_utf) + + cdef unregisterAllFunctions(self, void* ctxt, + _register_function unreg_func): + for ns_utf, functions in self._function_cache.iteritems(): + for name_utf in functions: + unreg_func(ctxt, name_utf, ns_utf) + + cdef unregisterGlobalFunctions(self, void* ctxt, + _register_function unreg_func): + for ns_utf, functions in self._function_cache.items(): + for name_utf in functions: + if self._extensions is None or \ + (ns_utf, name_utf) not in self._extensions: + unreg_func(ctxt, name_utf, ns_utf) + + @cython.final + cdef _find_cached_function(self, const_xmlChar* c_ns_uri, const_xmlChar* c_name): + """Lookup an extension function in the cache and return it. 
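These registrations surface in the public evaluators as the ``namespaces`` and ``extensions`` keyword arguments; a minimal sketch, with the namespace URI and function name chosen for illustration:

    from lxml import etree

    root = etree.XML('<root xmlns:n="http://example.org/ns"><n:item/></root>')
    # prefix -> URI mapping registered for this evaluation
    print(root.xpath('count(//x:item)', namespaces={'x': 'http://example.org/ns'}))  # 1.0

    def hello(context, s):    # extension function; first argument is the context
        return 'Hello %s' % s

    find = etree.XPath('my:hello("world")',
                       namespaces={'my': 'http://example.org/myfuncs'},
                       extensions={('http://example.org/myfuncs', 'hello'): hello})
    print(find(root))         # 'Hello world'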
+ + Parameters: c_ns_uri may be NULL, c_name must not be NULL + """ + cdef python.PyObject* c_dict + cdef python.PyObject* dict_result + c_dict = python.PyDict_GetItem( + self._function_cache, None if c_ns_uri is NULL else c_ns_uri) + if c_dict is not NULL: + dict_result = python.PyDict_GetItem( + c_dict, c_name) + if dict_result is not NULL: + return dict_result + return None + + # Python access to the XPath context for extension functions + + @property + def context_node(self): + cdef xmlNode* c_node + if self._xpathCtxt is NULL: + raise XPathError, \ + "XPath context is only usable during the evaluation" + c_node = self._xpathCtxt.node + if c_node is NULL: + raise XPathError, "no context node" + if c_node.doc != self._xpathCtxt.doc: + raise XPathError, \ + "document-external context nodes are not supported" + if self._doc is None: + raise XPathError, "document context is missing" + return _elementFactory(self._doc, c_node) + + @property + def eval_context(self): + if self._eval_context_dict is None: + self._eval_context_dict = {} + return self._eval_context_dict + + # Python reference keeping during XPath function evaluation + + @cython.final + cdef _release_temp_refs(self): + "Free temporarily referenced objects from this context." + self._temp_refs.clear() + self._temp_documents.clear() + + @cython.final + cdef _hold(self, obj): + """A way to temporarily hold references to nodes in the evaluator. + + This is needed because otherwise nodes created in XPath extension + functions would be reference counted too soon, during the XPath + evaluation. This is most important in the case of exceptions. + """ + cdef _Element element + if isinstance(obj, _Element): + self._temp_refs.add(obj) + self._temp_documents.add((<_Element>obj)._doc) + return + elif _isString(obj) or not python.PySequence_Check(obj): + return + for o in obj: + if isinstance(o, _Element): + #print "Holding element:", element._c_node + self._temp_refs.add(o) + #print "Holding document:", element._doc._c_doc + self._temp_documents.add((<_Element>o)._doc) + + @cython.final + cdef _Document _findDocumentForNode(self, xmlNode* c_node): + """If an XPath expression returns an element from a different + document than the current context document, we call this to + see if it was possibly created by an extension and is a known + document instance. + """ + cdef _Document doc + for doc in self._temp_documents: + if doc is not None and doc._c_doc is c_node.doc: + return doc + return None + + +# libxml2 keeps these error messages in a static array in its code +# and doesn't give us access to them ... + +cdef tuple LIBXML2_XPATH_ERROR_MESSAGES = ( + b"Ok", + b"Number encoding", + b"Unfinished literal", + b"Start of literal", + b"Expected $ for variable reference", + b"Undefined variable", + b"Invalid predicate", + b"Invalid expression", + b"Missing closing curly brace", + b"Unregistered function", + b"Invalid operand", + b"Invalid type", + b"Invalid number of arguments", + b"Invalid context size", + b"Invalid context position", + b"Memory allocation error", + b"Syntax error", + b"Resource error", + b"Sub resource error", + b"Undefined namespace prefix", + b"Encoding error", + b"Char out of XML range", + b"Invalid or incomplete context", + b"Stack usage error", + b"Forbidden variable\n", + b"?? 
Unknown error ??\n", +) + +cdef void _forwardXPathError(void* c_ctxt, const xmlerror.xmlError* c_error) noexcept with gil: + cdef xmlerror.xmlError error + cdef int xpath_code + if c_error.message is not NULL: + error.message = c_error.message + else: + xpath_code = c_error.code - xmlerror.XML_XPATH_EXPRESSION_OK + if 0 <= xpath_code < len(LIBXML2_XPATH_ERROR_MESSAGES): + error.message = _cstr(LIBXML2_XPATH_ERROR_MESSAGES[xpath_code]) + else: + error.message = b"unknown error" + error.domain = c_error.domain + error.code = c_error.code + error.level = c_error.level + error.line = c_error.line + error.int2 = c_error.int1 # column + error.file = c_error.file + error.node = NULL + + (<_BaseContext>c_ctxt)._error_log._receive(&error) + +cdef void _receiveXPathError(void* c_context, const xmlerror.xmlError* error) noexcept nogil: + if not __DEBUG: + return + if c_context is NULL: + _forwardError(NULL, error) + else: + _forwardXPathError(c_context, error) + + +def Extension(module, function_mapping=None, *, ns=None): + """Extension(module, function_mapping=None, ns=None) + + Build a dictionary of extension functions from the functions + defined in a module or the methods of an object. + + As second argument, you can pass an additional mapping of + attribute names to XPath function names, or a list of function + names that should be taken. + + The ``ns`` keyword argument accepts a namespace URI for the XPath + functions. + """ + cdef dict functions = {} + if isinstance(function_mapping, dict): + for function_name, xpath_name in function_mapping.items(): + functions[(ns, xpath_name)] = getattr(module, function_name) + else: + if function_mapping is None: + function_mapping = [ name for name in dir(module) + if not name.startswith('_') ] + for function_name in function_mapping: + functions[(ns, function_name)] = getattr(module, function_name) + return functions + +################################################################################ +# EXSLT regexp implementation + +@cython.final +@cython.internal +cdef class _ExsltRegExp: + cdef dict _compile_map + def __cinit__(self): + self._compile_map = {} + + cdef _make_string(self, value): + if _isString(value): + return value + elif isinstance(value, list): + # node set: take recursive text concatenation of first element + if python.PyList_GET_SIZE(value) == 0: + return '' + firstnode = value[0] + if _isString(firstnode): + return firstnode + elif isinstance(firstnode, _Element): + c_text = tree.xmlNodeGetContent((<_Element>firstnode)._c_node) + if c_text is NULL: + raise MemoryError() + try: + return funicode(c_text) + finally: + tree.xmlFree(c_text) + else: + return unicode(firstnode) + else: + return unicode(value) + + cdef _compile(self, rexp, ignore_case): + cdef python.PyObject* c_result + rexp = self._make_string(rexp) + key = (rexp, ignore_case) + c_result = python.PyDict_GetItem(self._compile_map, key) + if c_result is not NULL: + return c_result + py_flags = re.UNICODE + if ignore_case: + py_flags = py_flags | re.IGNORECASE + rexp_compiled = re.compile(rexp, py_flags) + self._compile_map[key] = rexp_compiled + return rexp_compiled + + def test(self, ctxt, s, rexp, flags=''): + flags = self._make_string(flags) + s = self._make_string(s) + rexpc = self._compile(rexp, 'i' in flags) + if rexpc.search(s) is None: + return False + else: + return True + + def match(self, ctxt, s, rexp, flags=''): + cdef list result_list + flags = self._make_string(flags) + s = self._make_string(s) + rexpc = self._compile(rexp, 'i' in flags) + if 'g' in flags: 
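A sketch of the EXSLT regular-expression functions registered above, under the namespace URI given in _register_in_context (regexp support is assumed to be enabled, which is the default):

    from lxml import etree

    root = etree.XML('<root><a>abc123</a><a>xyz</a></root>')
    ns = {'re': 'http://exslt.org/regular-expressions'}
    print(root.xpath(r'//a[re:test(text(), "\d+")]/text()', namespaces=ns))  # ['abc123']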
+ results = rexpc.findall(s) + if not results: + return () + else: + result = rexpc.search(s) + if not result: + return () + results = [ result.group() ] + results.extend( result.groups('') ) + result_list = [] + root = Element('matches') + for s_match in results: + if python.PyTuple_CheckExact(s_match): + s_match = ''.join(s_match) + elem = SubElement(root, 'match') + elem.text = s_match + result_list.append(elem) + return result_list + + def replace(self, ctxt, s, rexp, flags, replacement): + replacement = self._make_string(replacement) + flags = self._make_string(flags) + s = self._make_string(s) + rexpc = self._compile(rexp, 'i' in flags) + count: object = 0 if 'g' in flags else 1 + return rexpc.sub(replacement, s, count) + + cdef _register_in_context(self, _BaseContext context): + ns = b"http://exslt.org/regular-expressions" + context._addLocalExtensionFunction(ns, b"test", self.test) + context._addLocalExtensionFunction(ns, b"match", self.match) + context._addLocalExtensionFunction(ns, b"replace", self.replace) + + +################################################################################ +# helper functions + +cdef xpath.xmlXPathObject* _wrapXPathObject(object obj, _Document doc, + _BaseContext context) except NULL: + cdef xpath.xmlNodeSet* resultSet + cdef _Element fake_node = None + cdef xmlNode* c_node + + if isinstance(obj, unicode): + obj = _utf8(obj) + if isinstance(obj, bytes): + # libxml2 copies the string value + return xpath.xmlXPathNewCString(_cstr(obj)) + if isinstance(obj, bool): + return xpath.xmlXPathNewBoolean(obj) + if python.PyNumber_Check(obj): + return xpath.xmlXPathNewFloat(obj) + if obj is None: + resultSet = xpath.xmlXPathNodeSetCreate(NULL) + elif isinstance(obj, _Element): + resultSet = xpath.xmlXPathNodeSetCreate((<_Element>obj)._c_node) + elif python.PySequence_Check(obj): + resultSet = xpath.xmlXPathNodeSetCreate(NULL) + try: + for value in obj: + if isinstance(value, _Element): + if context is not None: + context._hold(value) + xpath.xmlXPathNodeSetAdd(resultSet, (<_Element>value)._c_node) + else: + if context is None or doc is None: + raise XPathResultError, \ + f"Non-Element values not supported at this point - got {value!r}" + # support strings by appending text nodes to an Element + if isinstance(value, unicode): + value = _utf8(value) + if isinstance(value, bytes): + if fake_node is None: + fake_node = _makeElement("text-root", NULL, doc, None, + None, None, None, None, None) + context._hold(fake_node) + else: + # append a comment node to keep the text nodes separate + c_node = tree.xmlNewDocComment(doc._c_doc, "") + if c_node is NULL: + raise MemoryError() + tree.xmlAddChild(fake_node._c_node, c_node) + context._hold(value) + c_node = tree.xmlNewDocText(doc._c_doc, _xcstr(value)) + if c_node is NULL: + raise MemoryError() + tree.xmlAddChild(fake_node._c_node, c_node) + xpath.xmlXPathNodeSetAdd(resultSet, c_node) + else: + raise XPathResultError, \ + f"This is not a supported node-set result: {value!r}" + except: + xpath.xmlXPathFreeNodeSet(resultSet) + raise + else: + raise XPathResultError, f"Unknown return type: {python._fqtypename(obj).decode('utf8')}" + return xpath.xmlXPathWrapNodeSet(resultSet) + +cdef object _unwrapXPathObject(xpath.xmlXPathObject* xpathObj, + _Document doc, _BaseContext context): + if xpathObj.type == xpath.XPATH_UNDEFINED: + raise XPathResultError, "Undefined xpath result" + elif xpathObj.type == xpath.XPATH_NODESET: + return _createNodeSetResult(xpathObj, doc, context) + elif xpathObj.type == xpath.XPATH_BOOLEAN: + 
return xpathObj.boolval + elif xpathObj.type == xpath.XPATH_NUMBER: + return xpathObj.floatval + elif xpathObj.type == xpath.XPATH_STRING: + stringval = funicode(xpathObj.stringval) + if context._build_smart_strings: + stringval = _elementStringResultFactory( + stringval, None, None, False) + return stringval + elif xpathObj.type == xpath.XPATH_POINT: + raise NotImplementedError, "XPATH_POINT" + elif xpathObj.type == xpath.XPATH_RANGE: + raise NotImplementedError, "XPATH_RANGE" + elif xpathObj.type == xpath.XPATH_LOCATIONSET: + raise NotImplementedError, "XPATH_LOCATIONSET" + elif xpathObj.type == xpath.XPATH_USERS: + raise NotImplementedError, "XPATH_USERS" + elif xpathObj.type == xpath.XPATH_XSLT_TREE: + return _createNodeSetResult(xpathObj, doc, context) + else: + raise XPathResultError, f"Unknown xpath result {xpathObj.type}" + +cdef object _createNodeSetResult(xpath.xmlXPathObject* xpathObj, _Document doc, + _BaseContext context): + cdef xmlNode* c_node + cdef int i + cdef list result + result = [] + if xpathObj.nodesetval is NULL: + return result + for i in range(xpathObj.nodesetval.nodeNr): + c_node = xpathObj.nodesetval.nodeTab[i] + _unpackNodeSetEntry(result, c_node, doc, context, + xpathObj.type == xpath.XPATH_XSLT_TREE) + return result + +cdef _unpackNodeSetEntry(list results, xmlNode* c_node, _Document doc, + _BaseContext context, bint is_fragment): + cdef xmlNode* c_child + if _isElement(c_node): + if c_node.doc != doc._c_doc and c_node.doc._private is NULL: + # XXX: works, but maybe not always the right thing to do? + # XPath: only runs when extensions create or copy trees + # -> we store Python refs to these, so that is OK + # XSLT: can it leak when merging trees from multiple sources? + c_node = tree.xmlDocCopyNode(c_node, doc._c_doc, 1) + # FIXME: call _instantiateElementFromXPath() instead? + results.append( + _fakeDocElementFactory(doc, c_node)) + elif c_node.type == tree.XML_TEXT_NODE or \ + c_node.type == tree.XML_CDATA_SECTION_NODE or \ + c_node.type == tree.XML_ATTRIBUTE_NODE: + results.append( + _buildElementStringResult(doc, c_node, context)) + elif c_node.type == tree.XML_NAMESPACE_DECL: + results.append( (funicodeOrNone((c_node).prefix), + funicodeOrNone((c_node).href)) ) + elif c_node.type == tree.XML_DOCUMENT_NODE or \ + c_node.type == tree.XML_HTML_DOCUMENT_NODE: + # ignored for everything but result tree fragments + if is_fragment: + c_child = c_node.children + while c_child is not NULL: + _unpackNodeSetEntry(results, c_child, doc, context, 0) + c_child = c_child.next + elif c_node.type == tree.XML_XINCLUDE_START or \ + c_node.type == tree.XML_XINCLUDE_END: + pass + else: + raise NotImplementedError, \ + f"Not yet implemented result node type: {c_node.type}" + +cdef void _freeXPathObject(xpath.xmlXPathObject* xpathObj) noexcept: + """Free the XPath object, but *never* free the *content* of node sets. + Python dealloc will do that for us. + """ + if xpathObj.nodesetval is not NULL: + xpath.xmlXPathFreeNodeSet(xpathObj.nodesetval) + xpathObj.nodesetval = NULL + xpath.xmlXPathFreeObject(xpathObj) + +cdef _Element _instantiateElementFromXPath(xmlNode* c_node, _Document doc, + _BaseContext context): + # NOTE: this may copy the element - only call this when it can't leak + if c_node.doc != doc._c_doc and c_node.doc._private is NULL: + # not from the context document and not from a fake document + # either => may still be from a known document, e.g. 
one + # created by an extension function + node_doc = context._findDocumentForNode(c_node) + if node_doc is None: + # not from a known document at all! => can only make a + # safety copy here + c_node = tree.xmlDocCopyNode(c_node, doc._c_doc, 1) + else: + doc = node_doc + return _fakeDocElementFactory(doc, c_node) + +################################################################################ +# special str/unicode subclasses + +@cython.final +cdef class _ElementUnicodeResult(unicode): + cdef _Element _parent + cdef readonly object attrname + cdef readonly bint is_tail + + def getparent(self): + return self._parent + + @property + def is_text(self): + return self._parent is not None and not (self.is_tail or self.attrname is not None) + + @property + def is_attribute(self): + return self.attrname is not None + +cdef object _elementStringResultFactory(string_value, _Element parent, + attrname, bint is_tail): + result = _ElementUnicodeResult(string_value) + result._parent = parent + result.is_tail = is_tail + result.attrname = attrname + return result + +cdef object _buildElementStringResult(_Document doc, xmlNode* c_node, + _BaseContext context): + cdef _Element parent = None + cdef object attrname = None + cdef xmlNode* c_element + cdef bint is_tail + + if c_node.type == tree.XML_ATTRIBUTE_NODE: + attrname = _namespacedName(c_node) + is_tail = 0 + s = tree.xmlNodeGetContent(c_node) + try: + value = funicode(s) + finally: + tree.xmlFree(s) + c_element = NULL + else: + #assert c_node.type == tree.XML_TEXT_NODE or c_node.type == tree.XML_CDATA_SECTION_NODE, "invalid node type" + # may be tail text or normal text + value = funicode(c_node.content) + c_element = _previousElement(c_node) + is_tail = c_element is not NULL + + if not context._build_smart_strings: + return value + + if c_element is NULL: + # non-tail text or attribute text + c_element = c_node.parent + while c_element is not NULL and not _isElement(c_element): + c_element = c_element.parent + + if c_element is not NULL: + parent = _instantiateElementFromXPath(c_element, doc, context) + + return _elementStringResultFactory( + value, parent, attrname, is_tail) + +################################################################################ +# callbacks for XPath/XSLT extension functions + +cdef void _extension_function_call(_BaseContext context, function, + xpath.xmlXPathParserContext* ctxt, int nargs) noexcept: + cdef _Document doc + cdef xpath.xmlXPathObject* obj + cdef list args + cdef int i + doc = context._doc + try: + args = [] + for i in range(nargs): + obj = xpath.valuePop(ctxt) + o = _unwrapXPathObject(obj, doc, context) + _freeXPathObject(obj) + args.append(o) + args.reverse() + + res = function(context, *args) + # wrap result for XPath consumption + obj = _wrapXPathObject(res, doc, context) + # prevent Python from deallocating elements handed to libxml2 + context._hold(res) + xpath.valuePush(ctxt, obj) + except: + xpath.xmlXPathErr(ctxt, xpath.XPATH_EXPR_ERROR) + context._exc._store_raised() + finally: + return # swallow any further exceptions + +# lookup the function by name and call it + +cdef void _xpath_function_call(xpath.xmlXPathParserContext* ctxt, + int nargs) noexcept with gil: + cdef _BaseContext context + cdef xpath.xmlXPathContext* rctxt = ctxt.context + context = <_BaseContext> rctxt.userData + try: + function = context._find_cached_function(rctxt.functionURI, rctxt.function) + if function is not None: + _extension_function_call(context, function, ctxt, nargs) + else: + xpath.xmlXPathErr(ctxt, 
xpath.XPATH_UNKNOWN_FUNC_ERROR) + context._exc._store_exception(XPathFunctionError( + f"XPath function '{_namespacedNameFromNsName(rctxt.functionURI, rctxt.function)}' not found")) + except: + # may not be the right error, but we need to tell libxml2 *something* + xpath.xmlXPathErr(ctxt, xpath.XPATH_UNKNOWN_FUNC_ERROR) + context._exc._store_raised() + finally: + return # swallow any further exceptions diff --git a/venv/lib/python3.10/site-packages/lxml/includes/htmlparser.pxd b/venv/lib/python3.10/site-packages/lxml/includes/htmlparser.pxd new file mode 100644 index 0000000000000000000000000000000000000000..31dcc406cdc3d006afe811e7e1f778b56407510f --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/includes/htmlparser.pxd @@ -0,0 +1,56 @@ +from libc.string cimport const_char + +from lxml.includes.tree cimport xmlDoc +from lxml.includes.tree cimport xmlInputReadCallback, xmlInputCloseCallback +from lxml.includes.xmlparser cimport xmlParserCtxt, xmlSAXHandler, xmlSAXHandlerV1 + +cdef extern from "libxml/HTMLparser.h" nogil: + ctypedef enum htmlParserOption: + HTML_PARSE_NOERROR # suppress error reports + HTML_PARSE_NOWARNING # suppress warning reports + HTML_PARSE_PEDANTIC # pedantic error reporting + HTML_PARSE_NOBLANKS # remove blank nodes + HTML_PARSE_NONET # Forbid network access + # libxml2 2.6.21+ only: + HTML_PARSE_RECOVER # Relaxed parsing + HTML_PARSE_COMPACT # compact small text nodes + # libxml2 2.7.7+ only: + HTML_PARSE_NOIMPLIED # Do not add implied html/body... elements + # libxml2 2.7.8+ only: + HTML_PARSE_NODEFDTD # do not default a doctype if not found + # libxml2 2.8.0+ only: + XML_PARSE_IGNORE_ENC # ignore internal document encoding hint + + xmlSAXHandlerV1 htmlDefaultSAXHandler + + cdef xmlParserCtxt* htmlCreateMemoryParserCtxt( + char* buffer, int size) + cdef xmlParserCtxt* htmlCreateFileParserCtxt( + char* filename, char* encoding) + cdef xmlParserCtxt* htmlCreatePushParserCtxt(xmlSAXHandler* sax, + void* user_data, + char* chunk, int size, + char* filename, int enc) + cdef void htmlFreeParserCtxt(xmlParserCtxt* ctxt) + cdef void htmlCtxtReset(xmlParserCtxt* ctxt) + cdef int htmlCtxtUseOptions(xmlParserCtxt* ctxt, int options) + cdef int htmlParseDocument(xmlParserCtxt* ctxt) + cdef int htmlParseChunk(xmlParserCtxt* ctxt, + char* chunk, int size, int terminate) + + cdef xmlDoc* htmlCtxtReadFile(xmlParserCtxt* ctxt, + char* filename, const_char* encoding, + int options) + cdef xmlDoc* htmlCtxtReadDoc(xmlParserCtxt* ctxt, + char* buffer, char* URL, const_char* encoding, + int options) + cdef xmlDoc* htmlCtxtReadIO(xmlParserCtxt* ctxt, + xmlInputReadCallback ioread, + xmlInputCloseCallback ioclose, + void* ioctx, + char* URL, const_char* encoding, + int options) + cdef xmlDoc* htmlCtxtReadMemory(xmlParserCtxt* ctxt, + char* buffer, int size, + char* filename, const_char* encoding, + int options) diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/__init__.py b/venv/lib/python3.10/site-packages/lxml/isoschematron/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a157a8224e7941828a9fbf8c9994bc5325ee7b05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/__init__.py @@ -0,0 +1,348 @@ +"""The ``lxml.isoschematron`` package implements ISO Schematron support on top +of the pure-xslt 'skeleton' implementation. 
+""" + +import sys +import os.path +from lxml import etree as _etree # due to validator __init__ signature + + +# some compat stuff, borrowed from lxml.html +try: + unicode +except NameError: + # Python 3 + unicode = str +try: + basestring +except NameError: + # Python 3 + basestring = str + + +__all__ = ['extract_xsd', 'extract_rng', 'iso_dsdl_include', + 'iso_abstract_expand', 'iso_svrl_for_xslt1', + 'svrl_validation_errors', 'schematron_schema_valid', + 'stylesheet_params', 'Schematron'] + + +# some namespaces +#FIXME: Maybe lxml should provide a dedicated place for common namespace +#FIXME: definitions? +XML_SCHEMA_NS = "http://www.w3.org/2001/XMLSchema" +RELAXNG_NS = "http://relaxng.org/ns/structure/1.0" +SCHEMATRON_NS = "http://purl.oclc.org/dsdl/schematron" +SVRL_NS = "http://purl.oclc.org/dsdl/svrl" + + +# some helpers +_schematron_root = '{%s}schema' % SCHEMATRON_NS +_xml_schema_root = '{%s}schema' % XML_SCHEMA_NS +_resources_dir = os.path.join(os.path.dirname(__file__), 'resources') + + +# the iso-schematron skeleton implementation steps aka xsl transformations +extract_xsd = _etree.XSLT(_etree.parse( + os.path.join(_resources_dir, 'xsl', 'XSD2Schtrn.xsl'))) +extract_rng = _etree.XSLT(_etree.parse( + os.path.join(_resources_dir, 'xsl', 'RNG2Schtrn.xsl'))) +iso_dsdl_include = _etree.XSLT(_etree.parse( + os.path.join(_resources_dir, 'xsl', 'iso-schematron-xslt1', + 'iso_dsdl_include.xsl'))) +iso_abstract_expand = _etree.XSLT(_etree.parse( + os.path.join(_resources_dir, 'xsl', 'iso-schematron-xslt1', + 'iso_abstract_expand.xsl'))) +iso_svrl_for_xslt1 = _etree.XSLT(_etree.parse( + os.path.join(_resources_dir, + 'xsl', 'iso-schematron-xslt1', 'iso_svrl_for_xslt1.xsl'))) + + +# svrl result accessors +svrl_validation_errors = _etree.XPath( + '//svrl:failed-assert', namespaces={'svrl': SVRL_NS}) + +# RelaxNG validator for schematron schemas +schematron_schema_valid_supported = False +try: + schematron_schema_valid = _etree.RelaxNG( + file=os.path.join(_resources_dir, 'rng', 'iso-schematron.rng')) + schematron_schema_valid_supported = True +except _etree.RelaxNGParseError: + # Some distributions delete the file due to licensing issues. + def schematron_schema_valid(arg): + raise NotImplementedError("Validating the ISO schematron requires iso-schematron.rng") + + +def stylesheet_params(**kwargs): + """Convert keyword args to a dictionary of stylesheet parameters. + XSL stylesheet parameters must be XPath expressions, i.e.: + + * string expressions, like "'5'" + * simple (number) expressions, like "5" + * valid XPath expressions, like "/a/b/text()" + + This function converts native Python keyword arguments to stylesheet + parameters following these rules: + If an arg is a string wrap it with XSLT.strparam(). + If an arg is an XPath object use its path string. + If arg is None raise TypeError. + Else convert arg to string. + """ + result = {} + for key, val in kwargs.items(): + if isinstance(val, basestring): + val = _etree.XSLT.strparam(val) + elif val is None: + raise TypeError('None not allowed as a stylesheet parameter') + elif not isinstance(val, _etree.XPath): + val = unicode(val) + result[key] = val + return result + + +# helper function for use in Schematron __init__ +def _stylesheet_param_dict(paramsDict, kwargsDict): + """Return a copy of paramsDict, updated with kwargsDict entries, wrapped as + stylesheet arguments. + kwargsDict entries with a value of None are ignored. 
+ """ + # beware of changing mutable default arg + paramsDict = dict(paramsDict) + for k, v in kwargsDict.items(): + if v is not None: # None values do not override + paramsDict[k] = v + paramsDict = stylesheet_params(**paramsDict) + return paramsDict + + +class Schematron(_etree._Validator): + """An ISO Schematron validator. + + Pass a root Element or an ElementTree to turn it into a validator. + Alternatively, pass a filename as keyword argument 'file' to parse from + the file system. + + Schematron is a less well known, but very powerful schema language. + The main idea is to use the capabilities of XPath to put restrictions on + the structure and the content of XML documents. + + The standard behaviour is to fail on ``failed-assert`` findings only + (``ASSERTS_ONLY``). To change this, you can either pass a report filter + function to the ``error_finder`` parameter (e.g. ``ASSERTS_AND_REPORTS`` + or a custom ``XPath`` object), or subclass isoschematron.Schematron for + complete control of the validation process. + + Built on the Schematron language 'reference' skeleton pure-xslt + implementation, the validator is created as an XSLT 1.0 stylesheet using + these steps: + + 0) (Extract from XML Schema or RelaxNG schema) + 1) Process inclusions + 2) Process abstract patterns + 3) Compile the schematron schema to XSLT + + The ``include`` and ``expand`` keyword arguments can be used to switch off + steps 1) and 2). + To set parameters for steps 1), 2) and 3) hand parameter dictionaries to the + keyword arguments ``include_params``, ``expand_params`` or + ``compile_params``. + For convenience, the compile-step parameter ``phase`` is also exposed as a + keyword argument ``phase``. This takes precedence if the parameter is also + given in the parameter dictionary. + + If ``store_schematron`` is set to True, the (included-and-expanded) + schematron document tree is stored and available through the ``schematron`` + property. + If ``store_xslt`` is set to True, the validation XSLT document tree will be + stored and can be retrieved through the ``validator_xslt`` property. + With ``store_report`` set to True (default: False), the resulting validation + report document gets stored and can be accessed as the ``validation_report`` + property. + + If ``validate_schema`` is set to False, the validation of the schema file + itself is disabled. Validation happens by default after building the full + schema, unless the schema validation file cannot be found at import time, + in which case the validation gets disabled. Some lxml distributions exclude + this file due to licensing issues. ISO-Schematron validation can then still + be used normally, but the schemas themselves cannot be validated. + + Here is a usage example:: + + >>> from lxml import etree + >>> from lxml.isoschematron import Schematron + + >>> schematron = Schematron(etree.XML(''' + ... + ... + ... id is the only permitted attribute name + ... + ... Attribute + ... is forbidden + ... + ... + ... + ... '''), + ... error_finder=Schematron.ASSERTS_AND_REPORTS) + + >>> xml = etree.XML(''' + ... + ... + ... + ... + ... ''') + + >>> schematron.validate(xml) + False + + >>> xml = etree.XML(''' + ... + ... + ... + ... + ... 
''') + + >>> schematron.validate(xml) + True + """ + + # libxml2 error categorization for validation errors + _domain = _etree.ErrorDomains.SCHEMATRONV + _level = _etree.ErrorLevels.ERROR + _error_type = _etree.ErrorTypes.SCHEMATRONV_ASSERT + + # convenience definitions for common behaviours + ASSERTS_ONLY = svrl_validation_errors # Default + ASSERTS_AND_REPORTS = _etree.XPath( + '//svrl:failed-assert | //svrl:successful-report', + namespaces={'svrl': SVRL_NS}) + + def _extract(self, element): + """Extract embedded schematron schema from non-schematron host schema. + This method will only be called by __init__ if the given schema document + is not a schematron schema by itself. + Must return a schematron schema document tree or None. + """ + schematron = None + if element.tag == _xml_schema_root: + schematron = self._extract_xsd(element) + elif element.nsmap[element.prefix] == RELAXNG_NS: + # RelaxNG does not have a single unique root element + schematron = self._extract_rng(element) + return schematron + + # customization points + # etree.XSLT objects that provide the extract, include, expand, compile + # steps + _extract_xsd = extract_xsd + _extract_rng = extract_rng + _include = iso_dsdl_include + _expand = iso_abstract_expand + _compile = iso_svrl_for_xslt1 + + # etree.xpath object that determines input document validity when applied to + # the svrl result report; must return a list of result elements (empty if + # valid) + _validation_errors = ASSERTS_ONLY + + def __init__(self, etree=None, file=None, include=True, expand=True, + include_params={}, expand_params={}, compile_params={}, + store_schematron=False, store_xslt=False, store_report=False, + phase=None, error_finder=ASSERTS_ONLY, + validate_schema=schematron_schema_valid_supported): + super().__init__() + + self._store_report = store_report + self._schematron = None + self._validator_xslt = None + self._validation_report = None + if error_finder is not self.ASSERTS_ONLY: + self._validation_errors = error_finder + + # parse schema document, may be a schematron schema or an XML Schema or + # a RelaxNG schema with embedded schematron rules + root = None + try: + if etree is not None: + if _etree.iselement(etree): + root = etree + else: + root = etree.getroot() + elif file is not None: + root = _etree.parse(file).getroot() + except Exception: + raise _etree.SchematronParseError( + "No tree or file given: %s" % sys.exc_info()[1]) + if root is None: + raise ValueError("Empty tree") + if root.tag == _schematron_root: + schematron = root + else: + schematron = self._extract(root) + if schematron is None: + raise _etree.SchematronParseError( + "Document is not a schematron schema or schematron-extractable") + # perform the iso-schematron skeleton implementation steps to get a + # validating xslt + if include: + schematron = self._include(schematron, **include_params) + if expand: + schematron = self._expand(schematron, **expand_params) + if validate_schema and not schematron_schema_valid(schematron): + raise _etree.SchematronParseError( + "invalid schematron schema: %s" % + schematron_schema_valid.error_log) + if store_schematron: + self._schematron = schematron + # add new compile keyword args here if exposing them + compile_kwargs = {'phase': phase} + compile_params = _stylesheet_param_dict(compile_params, compile_kwargs) + validator_xslt = self._compile(schematron, **compile_params) + if store_xslt: + self._validator_xslt = validator_xslt + self._validator = _etree.XSLT(validator_xslt) + + def __call__(self, etree): + """Validate 
doc using Schematron. + + Returns true if document is valid, false if not. + """ + self._clear_error_log() + result = self._validator(etree) + if self._store_report: + self._validation_report = result + errors = self._validation_errors(result) + if errors: + if _etree.iselement(etree): + fname = etree.getroottree().docinfo.URL or '' + else: + fname = etree.docinfo.URL or '' + for error in errors: + # Does svrl report the line number, anywhere? Don't think so. + self._append_log_message( + domain=self._domain, type=self._error_type, + level=self._level, line=0, + message=_etree.tostring(error, encoding='unicode'), + filename=fname) + return False + return True + + @property + def schematron(self): + """ISO-schematron schema document (None if object has been initialized + with store_schematron=False). + """ + return self._schematron + + @property + def validator_xslt(self): + """ISO-schematron skeleton implementation XSLT validator document (None + if object has been initialized with store_xslt=False). + """ + return self._validator_xslt + + @property + def validation_report(self): + """ISO-schematron validation result report (None if result-storing has + been turned off). + """ + return self._validation_report
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/lxml/isoschematron/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecc93b246ff2f8f7ecc6b82c636cdeff546357f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/isoschematron/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/rng/iso-schematron.rng b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/rng/iso-schematron.rng new file mode 100644 index 0000000000000000000000000000000000000000..a4f504af1f7d6f01f7523d447b9304f417c01800 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/rng/iso-schematron.rng @@ -0,0 +1,709 @@ +[709 lines of RELAX NG markup; tags stripped, content not recoverable]
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/RNG2Schtrn.xsl b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/RNG2Schtrn.xsl new file mode 100644 index 0000000000000000000000000000000000000000..21a5d2a069cab9fa327d9a3cd4e4d56c21bb10db --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/RNG2Schtrn.xsl @@ -0,0 +1,75 @@ +[75 lines of XSLT markup; tags stripped, content not recoverable]
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/XSD2Schtrn.xsl b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/XSD2Schtrn.xsl new file mode 100644 index 0000000000000000000000000000000000000000..de0c9ea700d20c78111660e5fe8bf4ddc5a88137 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/XSD2Schtrn.xsl @@ -0,0 +1,77 @@ +[77 lines of XSLT markup; tags stripped, content not recoverable]
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_abstract_expand.xsl b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_abstract_expand.xsl new file mode 100644 index 0000000000000000000000000000000000000000..5018395234799dd65d53a339daaddf445a09dbea --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_abstract_expand.xsl @@ -0,0 +1,313 @@ +[313 lines of XSLT markup; tags stripped, content not recoverable] \ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_dsdl_include.xsl b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_dsdl_include.xsl new file mode 100644 index 0000000000000000000000000000000000000000..44e5573b73077e015d404935479bdc9344c5ea4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_dsdl_include.xsl @@ -0,0 +1,1160 @@ +[1160 lines of XSLT markup; tags stripped, content not recoverable] \ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_message.xsl b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_message.xsl new file mode 100644 index 0000000000000000000000000000000000000000..d59b8f38fe0bf28bc089b033c2acdd314839b8cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_message.xsl @@ -0,0 +1,55 @@ +[55 lines of XSLT markup; tags stripped, content not recoverable] \ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_skeleton_for_xslt1.xsl b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_skeleton_for_xslt1.xsl new file mode 100644 index 0000000000000000000000000000000000000000..b0e7175cfff34fb05d631622c9429f0adcda8d5d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_skeleton_for_xslt1.xsl @@ -0,0 +1,1796 @@ +[1796 lines of XSLT markup; tags stripped, content not recoverable]
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_svrl_for_xslt1.xsl b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_svrl_for_xslt1.xsl new file mode 100644 index 0000000000000000000000000000000000000000..dae74ff6a2bd56a4f21147905c3a1661baf0b3ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_svrl_for_xslt1.xsl @@ -0,0 +1,588 @@ +[588 lines of XSLT markup; tags stripped, content not recoverable]
diff --git a/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/readme.txt b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/readme.txt new file mode 100644 index 0000000000000000000000000000000000000000..e5d6dfcd9e9c2787c32d98b0063a1fa1ec3236ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/isoschematron/resources/xsl/iso-schematron-xslt1/readme.txt @@ -0,0 +1,84 @@ +ISO SCHEMATRON 2010 + +XSLT implementation by Rick Jelliffe with assistance from members of Schematron-love-in maillist. + +2010-04-21 + +Two distributions are available. One is for XSLT1 engines. +The other is for XSLT2 engines, such as SAXON 9. + + +This version of Schematron splits the process into a pipeline of several different XSLT stages. + +1) First, preprocess your Schematron schema with iso_dsdl_include.xsl. +This is a macro processor to assemble the schema from various parts. +If your schema is not in separate parts, you can skip this stage. +This stage also generates error messages for some common XPath syntax problems. + +2) Second, preprocess the output from stage 1 with iso_abstract_expand.xsl. +This is a macro processor to convert abstract patterns to real patterns. +If your schema does not use abstract patterns, you can skip this +stage. + +3) Third, compile the Schematron schema into an XSLT script. +This will typically use iso_svrl_for_xslt1.xsl or iso_svrl_for_xslt2.xsl +(which in turn invoke iso_schematron_skeleton_for_xslt1.xsl or iso_schematron_skeleton_for_saxon.xsl) +However, other "meta-stylesheets" are also in common use; the principle of operation is the same. +If your schema uses Schematron phases, supply these as command line/invocation parameters +to this process. + +4) Fourth, run the script generated by stage 3 against the document being validated. +If you are using the SVRL script, then the output of validation will be an XML document. +If your schema uses Schematron parameters, supply these as command line/invocation parameters +to this process. + + +The XSLT2 distribution also features several next generation features, +such as validating multiple documents. See the source code for details. + +Schematron assertions can be written in any language, of course; the file +sch-messages-en.xhtml contains the diagnostics messages from the XSLT2 skeleton +in English, and this can be used as template to localize the skeleton's +error messages. Note that typically programming errors in Schematron are XPath +errors, which requires localized messages from the XSLT engine. + +ANT +--- +To give an example of how to process a document, here is a sample ANT task.
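As a rough equivalent of such a pipeline, the following sketch drives the same four stages from Python with lxml.etree; the schema and document file names are illustrative, while the stylesheet names are the ones shipped alongside this readme:

from lxml import etree

# Stages 1-3: turn the Schematron schema into a validating, SVRL-producing XSLT.
include = etree.XSLT(etree.parse("iso_dsdl_include.xsl"))        # stage 1: assemble inclusions
expand = etree.XSLT(etree.parse("iso_abstract_expand.xsl"))      # stage 2: expand abstract patterns
to_svrl = etree.XSLT(etree.parse("iso_svrl_for_xslt1.xsl"))      # stage 3: compile to XSLT

schema = etree.parse("schema.sch")                               # illustrative schema file
validator = etree.XSLT(to_svrl(expand(include(schema))))

# Stage 4: run the generated validator against the instance document.
svrl = validator(etree.parse("document.xml"))                    # illustrative instance file
failed = svrl.xpath("//svrl:failed-assert",
                    namespaces={"svrl": "http://purl.oclc.org/dsdl/svrl"})
print("valid" if not failed else "%d failed assertion(s)" % len(failed))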
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/venv/lib/python3.10/site-packages/lxml/iterparse.pxi b/venv/lib/python3.10/site-packages/lxml/iterparse.pxi new file mode 100644 index 0000000000000000000000000000000000000000..f569b865ee980c16c7c8a679ae0f5475745817cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/iterparse.pxi @@ -0,0 +1,438 @@ +# iterparse -- event-driven parsing + +DEF __ITERPARSE_CHUNK_SIZE = 32768 + +cdef class iterparse: + """iterparse(self, source, events=("end",), tag=None, \ + attribute_defaults=False, dtd_validation=False, \ + load_dtd=False, no_network=True, remove_blank_text=False, \ + remove_comments=False, remove_pis=False, encoding=None, \ + html=False, recover=None, huge_tree=False, schema=None) + + Incremental parser. + + Parses XML into a tree and generates tuples (event, element) in a + SAX-like fashion. ``event`` is any of 'start', 'end', 'start-ns', + 'end-ns'. + + For 'start' and 'end', ``element`` is the Element that the parser just + found opening or closing. For 'start-ns', it is a tuple (prefix, URI) of + a new namespace declaration. For 'end-ns', it is simply None. Note that + all start and end events are guaranteed to be properly nested. + + The keyword argument ``events`` specifies a sequence of event type names + that should be generated. By default, only 'end' events will be + generated. + + The additional ``tag`` argument restricts the 'start' and 'end' events to + those elements that match the given tag. The ``tag`` argument can also be + a sequence of tags to allow matching more than one tag. By default, + events are generated for all elements. Note that the 'start-ns' and + 'end-ns' events are not impacted by this restriction. + + The other keyword arguments in the constructor are mainly based on the + libxml2 parser configuration. A DTD will also be loaded if validation or + attribute default values are requested. 
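A rough usage sketch (the file name is illustrative; clearing handled elements is the usual way to keep memory bounded):

    for event, element in iterparse("data.xml", events=("start", "end")):
        print(event, element.tag)
        if event == "end":
            element.clear()   # drop the subtree once it has been processed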
+ + Available boolean keyword arguments: + - attribute_defaults: read default attributes from DTD + - dtd_validation: validate (if DTD is available) + - load_dtd: use DTD for parsing + - no_network: prevent network access for related files + - remove_blank_text: discard blank text nodes + - remove_comments: discard comments + - remove_pis: discard processing instructions + - strip_cdata: replace CDATA sections by normal text content (default: True) + - compact: safe memory for short text content (default: True) + - resolve_entities: replace entities by their text value (default: True) + - huge_tree: disable security restrictions and support very deep trees + and very long text content (only affects libxml2 2.7+) + - html: parse input as HTML (default: XML) + - recover: try hard to parse through broken input (default: True for HTML, + False otherwise) + + Other keyword arguments: + - encoding: override the document encoding + - schema: an XMLSchema to validate against + """ + cdef _FeedParser _parser + cdef object _tag + cdef object _events + cdef readonly object root + cdef object _source + cdef object _filename + cdef object _error + cdef bint _close_source_after_read + + def __init__(self, source, events=("end",), *, tag=None, + attribute_defaults=False, dtd_validation=False, + load_dtd=False, no_network=True, remove_blank_text=False, + compact=True, resolve_entities=True, remove_comments=False, + remove_pis=False, strip_cdata=True, encoding=None, + html=False, recover=None, huge_tree=False, collect_ids=True, + XMLSchema schema=None): + if not hasattr(source, 'read'): + source = _getFSPathOrObject(source) + self._filename = source + self._source = open(source, 'rb') + self._close_source_after_read = True + else: + self._filename = _getFilenameForFile(source) + self._source = source + self._close_source_after_read = False + + if recover is None: + recover = html + + if html: + # make sure we're not looking for namespaces + events = [event for event in events + if event not in ('start-ns', 'end-ns')] + parser = HTMLPullParser( + events, + tag=tag, + recover=recover, + base_url=self._filename, + encoding=encoding, + remove_blank_text=remove_blank_text, + remove_comments=remove_comments, + remove_pis=remove_pis, + strip_cdata=strip_cdata, + no_network=no_network, + target=None, # TODO + schema=schema, + compact=compact) + else: + parser = XMLPullParser( + events, + tag=tag, + recover=recover, + base_url=self._filename, + encoding=encoding, + attribute_defaults=attribute_defaults, + dtd_validation=dtd_validation, + load_dtd=load_dtd, + no_network=no_network, + schema=schema, + huge_tree=huge_tree, + remove_blank_text=remove_blank_text, + resolve_entities=resolve_entities, + remove_comments=remove_comments, + remove_pis=remove_pis, + strip_cdata=strip_cdata, + collect_ids=True, + target=None, # TODO + compact=compact) + + self._events = parser.read_events() + self._parser = parser + + @property + def error_log(self): + """The error log of the last (or current) parser run. + """ + return self._parser.feed_error_log + + @property + def resolvers(self): + """The custom resolver registry of the last (or current) parser run. + """ + return self._parser.resolvers + + @property + def version(self): + """The version of the underlying XML parser.""" + return self._parser.version + + def set_element_class_lookup(self, ElementClassLookup lookup = None): + """set_element_class_lookup(self, lookup = None) + + Set a lookup scheme for element classes generated from this parser. 
+ + Reset it by passing None or nothing. + """ + self._parser.set_element_class_lookup(lookup) + + def makeelement(self, _tag, attrib=None, nsmap=None, **_extra): + """makeelement(self, _tag, attrib=None, nsmap=None, **_extra) + + Creates a new element associated with this parser. + """ + self._parser.makeelement( + _tag, attrib=None, nsmap=None, **_extra) + + @cython.final + cdef _close_source(self): + if self._source is None: + return + if not self._close_source_after_read: + self._source = None + return + try: + close = self._source.close + except AttributeError: + close = None + finally: + self._source = None + if close is not None: + close() + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self._events) + except StopIteration: + pass + context = <_SaxParserContext>self._parser._getPushParserContext() + if self._source is not None: + done = False + while not done: + try: + done = self._read_more_events(context) + return next(self._events) + except StopIteration: + pass # no events yet + except Exception as e: + self._error = e + self._close_source() + try: + return next(self._events) + except StopIteration: + break + # nothing left to read or return + if self._error is not None: + error = self._error + self._error = None + raise error + if (context._validator is not None + and not context._validator.isvalid()): + _raiseParseError(context._c_ctxt, self._filename, + context._error_log) + # no errors => all done + raise StopIteration + + @cython.final + cdef bint _read_more_events(self, _SaxParserContext context) except -123: + data = self._source.read(__ITERPARSE_CHUNK_SIZE) + if not isinstance(data, bytes): + self._close_source() + raise TypeError("reading file objects must return bytes objects") + if not data: + try: + self.root = self._parser.close() + finally: + self._close_source() + return True + self._parser.feed(data) + return False + + +cdef enum _IterwalkSkipStates: + IWSKIP_NEXT_IS_START + IWSKIP_SKIP_NEXT + IWSKIP_CAN_SKIP + IWSKIP_CANNOT_SKIP + + +cdef class iterwalk: + """iterwalk(self, element_or_tree, events=("end",), tag=None) + + A tree walker that generates events from an existing tree as if it + was parsing XML data with ``iterparse()``. + + Just as for ``iterparse()``, the ``tag`` argument can be a single tag or a + sequence of tags. + + After receiving a 'start' or 'start-ns' event, the children and + descendants of the current element can be excluded from iteration + by calling the ``skip_subtree()`` method. + """ + cdef _MultiTagMatcher _matcher + cdef list _node_stack + cdef list _events + cdef object _pop_event + cdef object _include_siblings + cdef int _index + cdef int _event_filter + cdef _IterwalkSkipStates _skip_state + + def __init__(self, element_or_tree, events=("end",), tag=None): + cdef _Element root + cdef int ns_count + root = _rootNodeOrRaise(element_or_tree) + self._event_filter = _buildParseEventFilter(events) + if tag is None or tag == '*': + self._matcher = None + else: + self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag) + self._node_stack = [] + self._events = [] + self._pop_event = self._events.pop + self._skip_state = IWSKIP_CANNOT_SKIP # ignore all skip requests by default + + if self._event_filter: + self._index = 0 + if self._matcher is not None and self._event_filter & PARSE_EVENT_FILTER_START: + self._matcher.cacheTags(root._doc) + + # When processing an ElementTree, add events for the preceding comments/PIs. 
+ if self._event_filter & (PARSE_EVENT_FILTER_COMMENT | PARSE_EVENT_FILTER_PI): + if isinstance(element_or_tree, _ElementTree): + self._include_siblings = root + for elem in list(root.itersiblings(preceding=True))[::-1]: + if self._event_filter & PARSE_EVENT_FILTER_COMMENT and elem.tag is Comment: + self._events.append(('comment', elem)) + elif self._event_filter & PARSE_EVENT_FILTER_PI and elem.tag is PI: + self._events.append(('pi', elem)) + + ns_count = self._start_node(root) + self._node_stack.append( (root, ns_count) ) + else: + self._index = -1 + + def __iter__(self): + return self + + def __next__(self): + cdef xmlNode* c_child + cdef _Element node + cdef _Element next_node + cdef int ns_count = 0 + if self._events: + return self._next_event() + if self._matcher is not None and self._index >= 0: + node = self._node_stack[self._index][0] + self._matcher.cacheTags(node._doc) + + # find next node + while self._index >= 0: + node = self._node_stack[self._index][0] + + if self._skip_state == IWSKIP_SKIP_NEXT: + c_child = NULL + else: + c_child = self._process_non_elements( + node._doc, _findChildForwards(node._c_node, 0)) + self._skip_state = IWSKIP_CANNOT_SKIP + + while c_child is NULL: + # back off through parents + self._index -= 1 + node = self._end_node() + if self._index < 0: + break + c_child = self._process_non_elements( + node._doc, _nextElement(node._c_node)) + + if c_child is not NULL: + next_node = _elementFactory(node._doc, c_child) + if self._event_filter & (PARSE_EVENT_FILTER_START | + PARSE_EVENT_FILTER_START_NS): + ns_count = self._start_node(next_node) + elif self._event_filter & PARSE_EVENT_FILTER_END_NS: + ns_count = _countNsDefs(next_node._c_node) + self._node_stack.append( (next_node, ns_count) ) + self._index += 1 + if self._events: + return self._next_event() + + if self._include_siblings is not None: + node, self._include_siblings = self._include_siblings, None + self._process_non_elements(node._doc, _nextElement(node._c_node)) + if self._events: + return self._next_event() + + raise StopIteration + + @cython.final + cdef xmlNode* _process_non_elements(self, _Document doc, xmlNode* c_node): + while c_node is not NULL and c_node.type != tree.XML_ELEMENT_NODE: + if c_node.type == tree.XML_COMMENT_NODE: + if self._event_filter & PARSE_EVENT_FILTER_COMMENT: + self._events.append( + ("comment", _elementFactory(doc, c_node))) + c_node = _nextElement(c_node) + elif c_node.type == tree.XML_PI_NODE: + if self._event_filter & PARSE_EVENT_FILTER_PI: + self._events.append( + ("pi", _elementFactory(doc, c_node))) + c_node = _nextElement(c_node) + else: + break + return c_node + + @cython.final + cdef _next_event(self): + if self._skip_state == IWSKIP_NEXT_IS_START: + if self._events[0][0] in ('start', 'start-ns'): + self._skip_state = IWSKIP_CAN_SKIP + return self._pop_event(0) + + def skip_subtree(self): + """Prevent descending into the current subtree. + Instead, the next returned event will be the 'end' event of the current element + (if included), ignoring any children or descendants. + + This has no effect right after an 'end' or 'end-ns' event. 
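A rough usage sketch (the tree and the tag name are illustrative):

    walker = iterwalk(tree, events=("start", "end"))
    for event, element in walker:
        if event == "start" and element.tag == "bulky":
            walker.skip_subtree()   # descendants of 'bulky' produce no events; its own 'end' still does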
+ """ + if self._skip_state == IWSKIP_CAN_SKIP: + self._skip_state = IWSKIP_SKIP_NEXT + + @cython.final + cdef int _start_node(self, _Element node) except -1: + cdef int ns_count + if self._event_filter & PARSE_EVENT_FILTER_START_NS: + ns_count = _appendStartNsEvents(node._c_node, self._events) + if self._events: + self._skip_state = IWSKIP_NEXT_IS_START + elif self._event_filter & PARSE_EVENT_FILTER_END_NS: + ns_count = _countNsDefs(node._c_node) + else: + ns_count = 0 + if self._event_filter & PARSE_EVENT_FILTER_START: + if self._matcher is None or self._matcher.matches(node._c_node): + self._events.append( ("start", node) ) + self._skip_state = IWSKIP_NEXT_IS_START + return ns_count + + @cython.final + cdef _Element _end_node(self): + cdef _Element node + cdef int i, ns_count + node, ns_count = self._node_stack.pop() + if self._event_filter & PARSE_EVENT_FILTER_END: + if self._matcher is None or self._matcher.matches(node._c_node): + self._events.append( ("end", node) ) + if self._event_filter & PARSE_EVENT_FILTER_END_NS and ns_count: + event = ("end-ns", None) + for i in range(ns_count): + self._events.append(event) + return node + + +cdef int _countNsDefs(xmlNode* c_node) noexcept: + cdef xmlNs* c_ns + cdef int count + count = 0 + c_ns = c_node.nsDef + while c_ns is not NULL: + count += (c_ns.href is not NULL) + c_ns = c_ns.next + return count + + +cdef int _appendStartNsEvents(xmlNode* c_node, list event_list) except -1: + cdef xmlNs* c_ns + cdef int count + count = 0 + c_ns = c_node.nsDef + while c_ns is not NULL: + if c_ns.href: + ns_tuple = (funicodeOrEmpty(c_ns.prefix), + funicode(c_ns.href)) + event_list.append( ("start-ns", ns_tuple) ) + count += 1 + c_ns = c_ns.next + return count diff --git a/venv/lib/python3.10/site-packages/lxml/lxml.etree.h b/venv/lib/python3.10/site-packages/lxml/lxml.etree.h new file mode 100644 index 0000000000000000000000000000000000000000..5ffc7ba32670f056a6415ab60ffb8240fb6d4a28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/lxml.etree.h @@ -0,0 +1,248 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE__lxml__etree +#define __PYX_HAVE__lxml__etree + +#include "Python.h" +struct LxmlDocument; +struct LxmlElement; +struct LxmlElementTree; +struct LxmlElementTagMatcher; +struct LxmlElementIterator; +struct LxmlElementBase; +struct LxmlElementClassLookup; +struct LxmlFallbackElementClassLookup; + +/* "lxml/etree.pyx":333 + * + * # type of a function that steps from node to node + * ctypedef public xmlNode* (*_node_to_node_function)(xmlNode*) # <<<<<<<<<<<<<< + * + * + */ +typedef xmlNode *(*_node_to_node_function)(xmlNode *); + +/* "lxml/etree.pyx":349 + * @cython.final + * @cython.freelist(8) + * cdef public class _Document [ type LxmlDocumentType, object LxmlDocument ]: # <<<<<<<<<<<<<< + * """Internal base class to reference a libxml document. + * + */ +struct LxmlDocument { + PyObject_HEAD + struct __pyx_vtabstruct_4lxml_5etree__Document *__pyx_vtab; + int _ns_counter; + PyObject *_prefix_tail; + xmlDoc *_c_doc; + struct __pyx_obj_4lxml_5etree__BaseParser *_parser; +}; + +/* "lxml/etree.pyx":698 + * + * @cython.no_gc_clear + * cdef public class _Element [ type LxmlElementType, object LxmlElement ]: # <<<<<<<<<<<<<< + * """Element class. 
+ * + */ +struct LxmlElement { + PyObject_HEAD + struct LxmlDocument *_doc; + xmlNode *_c_node; + PyObject *_tag; +}; + +/* "lxml/etree.pyx":1872 + * + * + * cdef public class _ElementTree [ type LxmlElementTreeType, # <<<<<<<<<<<<<< + * object LxmlElementTree ]: + * cdef _Document _doc + */ +struct LxmlElementTree { + PyObject_HEAD + struct __pyx_vtabstruct_4lxml_5etree__ElementTree *__pyx_vtab; + struct LxmlDocument *_doc; + struct LxmlElement *_context_node; +}; + +/* "lxml/etree.pyx":2646 + * + * + * cdef public class _ElementTagMatcher [ object LxmlElementTagMatcher, # <<<<<<<<<<<<<< + * type LxmlElementTagMatcherType ]: + * """ + */ +struct LxmlElementTagMatcher { + PyObject_HEAD + struct __pyx_vtabstruct_4lxml_5etree__ElementTagMatcher *__pyx_vtab; + PyObject *_pystrings; + int _node_type; + char *_href; + char *_name; +}; + +/* "lxml/etree.pyx":2677 + * self._name = NULL + * + * cdef public class _ElementIterator(_ElementTagMatcher) [ # <<<<<<<<<<<<<< + * object LxmlElementIterator, type LxmlElementIteratorType ]: + * """ + */ +struct LxmlElementIterator { + struct LxmlElementTagMatcher __pyx_base; + struct LxmlElement *_node; + _node_to_node_function _next_element; +}; + +/* "src/lxml/classlookup.pxi":6 + * # Custom Element classes + * + * cdef public class ElementBase(_Element) [ type LxmlElementBaseType, # <<<<<<<<<<<<<< + * object LxmlElementBase ]: + * """ElementBase(*children, attrib=None, nsmap=None, **_extra) + */ +struct LxmlElementBase { + struct LxmlElement __pyx_base; +}; + +/* "src/lxml/classlookup.pxi":210 + * # Element class lookup + * + * ctypedef public object (*_element_class_lookup_function)(object, _Document, xmlNode*) # <<<<<<<<<<<<<< + * + * # class to store element class lookup functions + */ +typedef PyObject *(*_element_class_lookup_function)(PyObject *, struct LxmlDocument *, xmlNode *); + +/* "src/lxml/classlookup.pxi":213 + * + * # class to store element class lookup functions + * cdef public class ElementClassLookup [ type LxmlElementClassLookupType, # <<<<<<<<<<<<<< + * object LxmlElementClassLookup ]: + * """ElementClassLookup(self) + */ +struct LxmlElementClassLookup { + PyObject_HEAD + _element_class_lookup_function _lookup_function; +}; + +/* "src/lxml/classlookup.pxi":221 + * + * + * cdef public class FallbackElementClassLookup(ElementClassLookup) \ # <<<<<<<<<<<<<< + * [ type LxmlFallbackElementClassLookupType, + * object LxmlFallbackElementClassLookup ]: + */ +struct LxmlFallbackElementClassLookup { + struct LxmlElementClassLookup __pyx_base; + struct __pyx_vtabstruct_4lxml_5etree_FallbackElementClassLookup *__pyx_vtab; + struct LxmlElementClassLookup *fallback; + _element_class_lookup_function _fallback_function; +}; + +#ifndef __PYX_HAVE_API__lxml__etree + +#ifdef CYTHON_EXTERN_C + #undef __PYX_EXTERN_C + #define __PYX_EXTERN_C CYTHON_EXTERN_C +#elif defined(__PYX_EXTERN_C) + #ifdef _MSC_VER + #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") + #else + #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. 
+ #endif +#else + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#ifndef DL_IMPORT + #define DL_IMPORT(_T) _T +#endif + +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlDocumentType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTreeType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementTagMatcherType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementIteratorType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementBaseType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlElementClassLookupType; +__PYX_EXTERN_C DL_IMPORT(PyTypeObject) LxmlFallbackElementClassLookupType; + +__PYX_EXTERN_C struct LxmlElement *deepcopyNodeToDocument(struct LxmlDocument *, xmlNode *); +__PYX_EXTERN_C struct LxmlElementTree *elementTreeFactory(struct LxmlElement *); +__PYX_EXTERN_C struct LxmlElementTree *newElementTree(struct LxmlElement *, PyObject *); +__PYX_EXTERN_C struct LxmlElementTree *adoptExternalDocument(xmlDoc *, PyObject *, int); +__PYX_EXTERN_C struct LxmlElement *elementFactory(struct LxmlDocument *, xmlNode *); +__PYX_EXTERN_C struct LxmlElement *makeElement(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); +__PYX_EXTERN_C struct LxmlElement *makeSubElement(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); +__PYX_EXTERN_C void setElementClassLookupFunction(_element_class_lookup_function, PyObject *); +__PYX_EXTERN_C PyObject *lookupDefaultElementClass(PyObject *, PyObject *, xmlNode *); +__PYX_EXTERN_C PyObject *lookupNamespaceElementClass(PyObject *, PyObject *, xmlNode *); +__PYX_EXTERN_C PyObject *callLookupFallback(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *); +__PYX_EXTERN_C int tagMatches(xmlNode *, const xmlChar *, const xmlChar *); +__PYX_EXTERN_C struct LxmlDocument *documentOrRaise(PyObject *); +__PYX_EXTERN_C struct LxmlElement *rootNodeOrRaise(PyObject *); +__PYX_EXTERN_C int hasText(xmlNode *); +__PYX_EXTERN_C int hasTail(xmlNode *); +__PYX_EXTERN_C PyObject *textOf(xmlNode *); +__PYX_EXTERN_C PyObject *tailOf(xmlNode *); +__PYX_EXTERN_C int setNodeText(xmlNode *, PyObject *); +__PYX_EXTERN_C int setTailText(xmlNode *, PyObject *); +__PYX_EXTERN_C PyObject *attributeValue(xmlNode *, xmlAttr *); +__PYX_EXTERN_C PyObject *attributeValueFromNsName(xmlNode *, const xmlChar *, const xmlChar *); +__PYX_EXTERN_C PyObject *getAttributeValue(struct LxmlElement *, PyObject *, PyObject *); +__PYX_EXTERN_C PyObject *iterattributes(struct LxmlElement *, int); +__PYX_EXTERN_C PyObject *collectAttributes(xmlNode *, int); +__PYX_EXTERN_C int setAttributeValue(struct LxmlElement *, PyObject *, PyObject *); +__PYX_EXTERN_C int delAttribute(struct LxmlElement *, PyObject *); +__PYX_EXTERN_C int delAttributeFromNsName(xmlNode *, const xmlChar *, const xmlChar *); +__PYX_EXTERN_C int hasChild(xmlNode *); +__PYX_EXTERN_C xmlNode *findChild(xmlNode *, Py_ssize_t); +__PYX_EXTERN_C xmlNode *findChildForwards(xmlNode *, Py_ssize_t); +__PYX_EXTERN_C xmlNode *findChildBackwards(xmlNode *, Py_ssize_t); +__PYX_EXTERN_C xmlNode *nextElement(xmlNode *); +__PYX_EXTERN_C xmlNode *previousElement(xmlNode *); +__PYX_EXTERN_C void appendChild(struct LxmlElement *, struct LxmlElement *); +__PYX_EXTERN_C int appendChildToElement(struct LxmlElement *, struct LxmlElement *); +__PYX_EXTERN_C PyObject *pyunicode(const xmlChar *); +__PYX_EXTERN_C PyObject *utf8(PyObject *); +__PYX_EXTERN_C 
PyObject *getNsTag(PyObject *); +__PYX_EXTERN_C PyObject *getNsTagWithEmptyNs(PyObject *); +__PYX_EXTERN_C PyObject *namespacedName(xmlNode *); +__PYX_EXTERN_C PyObject *namespacedNameFromNsName(const xmlChar *, const xmlChar *); +__PYX_EXTERN_C void iteratorStoreNext(struct LxmlElementIterator *, struct LxmlElement *); +__PYX_EXTERN_C void initTagMatch(struct LxmlElementTagMatcher *, PyObject *); +__PYX_EXTERN_C xmlNs *findOrBuildNodeNsPrefix(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *); + +#endif /* !__PYX_HAVE_API__lxml__etree */ + +/* WARNING: the interface of the module init function changed in CPython 3.5. */ +/* It now returns a PyModuleDef instance instead of a PyModule instance. */ + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initetree(void); +#else +/* WARNING: Use PyImport_AppendInittab("etree", PyInit_etree) instead of calling PyInit_etree directly from Python 3.5 */ +PyMODINIT_FUNC PyInit_etree(void); + +#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L)) +#if defined(__cplusplus) && __cplusplus >= 201402L +[[deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")]] inline +#elif defined(__GNUC__) || defined(__clang__) +__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly."), __unused__)) __inline__ +#elif defined(_MSC_VER) +__declspec(deprecated("Use PyImport_AppendInittab(\"etree\", PyInit_etree) instead of calling PyInit_etree directly.")) __inline +#endif +static PyObject* __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyObject* res) { + return res; +} +#define PyInit_etree() __PYX_WARN_IF_PyInit_etree_INIT_CALLED(PyInit_etree()) +#endif +#endif + +#endif /* !__PYX_HAVE__lxml__etree */ diff --git a/venv/lib/python3.10/site-packages/lxml/lxml.etree_api.h b/venv/lib/python3.10/site-packages/lxml/lxml.etree_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5efcb431f45d1cb937ce5a9fbf047b6e339fd72d --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/lxml.etree_api.h @@ -0,0 +1,195 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE_API__lxml__etree +#define __PYX_HAVE_API__lxml__etree +#ifdef __MINGW64__ +#define MS_WIN64 +#endif +#include "Python.h" +#include "lxml.etree.h" + +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_deepcopyNodeToDocument)(struct LxmlDocument *, xmlNode *) = 0; +#define deepcopyNodeToDocument __pyx_api_f_4lxml_5etree_deepcopyNodeToDocument +static struct LxmlElementTree *(*__pyx_api_f_4lxml_5etree_elementTreeFactory)(struct LxmlElement *) = 0; +#define elementTreeFactory __pyx_api_f_4lxml_5etree_elementTreeFactory +static struct LxmlElementTree *(*__pyx_api_f_4lxml_5etree_newElementTree)(struct LxmlElement *, PyObject *) = 0; +#define newElementTree __pyx_api_f_4lxml_5etree_newElementTree +static struct LxmlElementTree *(*__pyx_api_f_4lxml_5etree_adoptExternalDocument)(xmlDoc *, PyObject *, int) = 0; +#define adoptExternalDocument __pyx_api_f_4lxml_5etree_adoptExternalDocument +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_elementFactory)(struct LxmlDocument *, xmlNode *) = 0; +#define elementFactory __pyx_api_f_4lxml_5etree_elementFactory +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_makeElement)(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *) = 0; +#define makeElement __pyx_api_f_4lxml_5etree_makeElement 
+static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_makeSubElement)(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *) = 0; +#define makeSubElement __pyx_api_f_4lxml_5etree_makeSubElement +static void (*__pyx_api_f_4lxml_5etree_setElementClassLookupFunction)(_element_class_lookup_function, PyObject *) = 0; +#define setElementClassLookupFunction __pyx_api_f_4lxml_5etree_setElementClassLookupFunction +static PyObject *(*__pyx_api_f_4lxml_5etree_lookupDefaultElementClass)(PyObject *, PyObject *, xmlNode *) = 0; +#define lookupDefaultElementClass __pyx_api_f_4lxml_5etree_lookupDefaultElementClass +static PyObject *(*__pyx_api_f_4lxml_5etree_lookupNamespaceElementClass)(PyObject *, PyObject *, xmlNode *) = 0; +#define lookupNamespaceElementClass __pyx_api_f_4lxml_5etree_lookupNamespaceElementClass +static PyObject *(*__pyx_api_f_4lxml_5etree_callLookupFallback)(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *) = 0; +#define callLookupFallback __pyx_api_f_4lxml_5etree_callLookupFallback +static int (*__pyx_api_f_4lxml_5etree_tagMatches)(xmlNode *, const xmlChar *, const xmlChar *) = 0; +#define tagMatches __pyx_api_f_4lxml_5etree_tagMatches +static struct LxmlDocument *(*__pyx_api_f_4lxml_5etree_documentOrRaise)(PyObject *) = 0; +#define documentOrRaise __pyx_api_f_4lxml_5etree_documentOrRaise +static struct LxmlElement *(*__pyx_api_f_4lxml_5etree_rootNodeOrRaise)(PyObject *) = 0; +#define rootNodeOrRaise __pyx_api_f_4lxml_5etree_rootNodeOrRaise +static int (*__pyx_api_f_4lxml_5etree_hasText)(xmlNode *) = 0; +#define hasText __pyx_api_f_4lxml_5etree_hasText +static int (*__pyx_api_f_4lxml_5etree_hasTail)(xmlNode *) = 0; +#define hasTail __pyx_api_f_4lxml_5etree_hasTail +static PyObject *(*__pyx_api_f_4lxml_5etree_textOf)(xmlNode *) = 0; +#define textOf __pyx_api_f_4lxml_5etree_textOf +static PyObject *(*__pyx_api_f_4lxml_5etree_tailOf)(xmlNode *) = 0; +#define tailOf __pyx_api_f_4lxml_5etree_tailOf +static int (*__pyx_api_f_4lxml_5etree_setNodeText)(xmlNode *, PyObject *) = 0; +#define setNodeText __pyx_api_f_4lxml_5etree_setNodeText +static int (*__pyx_api_f_4lxml_5etree_setTailText)(xmlNode *, PyObject *) = 0; +#define setTailText __pyx_api_f_4lxml_5etree_setTailText +static PyObject *(*__pyx_api_f_4lxml_5etree_attributeValue)(xmlNode *, xmlAttr *) = 0; +#define attributeValue __pyx_api_f_4lxml_5etree_attributeValue +static PyObject *(*__pyx_api_f_4lxml_5etree_attributeValueFromNsName)(xmlNode *, const xmlChar *, const xmlChar *) = 0; +#define attributeValueFromNsName __pyx_api_f_4lxml_5etree_attributeValueFromNsName +static PyObject *(*__pyx_api_f_4lxml_5etree_getAttributeValue)(struct LxmlElement *, PyObject *, PyObject *) = 0; +#define getAttributeValue __pyx_api_f_4lxml_5etree_getAttributeValue +static PyObject *(*__pyx_api_f_4lxml_5etree_iterattributes)(struct LxmlElement *, int) = 0; +#define iterattributes __pyx_api_f_4lxml_5etree_iterattributes +static PyObject *(*__pyx_api_f_4lxml_5etree_collectAttributes)(xmlNode *, int) = 0; +#define collectAttributes __pyx_api_f_4lxml_5etree_collectAttributes +static int (*__pyx_api_f_4lxml_5etree_setAttributeValue)(struct LxmlElement *, PyObject *, PyObject *) = 0; +#define setAttributeValue __pyx_api_f_4lxml_5etree_setAttributeValue +static int (*__pyx_api_f_4lxml_5etree_delAttribute)(struct LxmlElement *, PyObject *) = 0; +#define delAttribute __pyx_api_f_4lxml_5etree_delAttribute +static int (*__pyx_api_f_4lxml_5etree_delAttributeFromNsName)(xmlNode *, const xmlChar *, const xmlChar *) 
= 0; +#define delAttributeFromNsName __pyx_api_f_4lxml_5etree_delAttributeFromNsName +static int (*__pyx_api_f_4lxml_5etree_hasChild)(xmlNode *) = 0; +#define hasChild __pyx_api_f_4lxml_5etree_hasChild +static xmlNode *(*__pyx_api_f_4lxml_5etree_findChild)(xmlNode *, Py_ssize_t) = 0; +#define findChild __pyx_api_f_4lxml_5etree_findChild +static xmlNode *(*__pyx_api_f_4lxml_5etree_findChildForwards)(xmlNode *, Py_ssize_t) = 0; +#define findChildForwards __pyx_api_f_4lxml_5etree_findChildForwards +static xmlNode *(*__pyx_api_f_4lxml_5etree_findChildBackwards)(xmlNode *, Py_ssize_t) = 0; +#define findChildBackwards __pyx_api_f_4lxml_5etree_findChildBackwards +static xmlNode *(*__pyx_api_f_4lxml_5etree_nextElement)(xmlNode *) = 0; +#define nextElement __pyx_api_f_4lxml_5etree_nextElement +static xmlNode *(*__pyx_api_f_4lxml_5etree_previousElement)(xmlNode *) = 0; +#define previousElement __pyx_api_f_4lxml_5etree_previousElement +static void (*__pyx_api_f_4lxml_5etree_appendChild)(struct LxmlElement *, struct LxmlElement *) = 0; +#define appendChild __pyx_api_f_4lxml_5etree_appendChild +static int (*__pyx_api_f_4lxml_5etree_appendChildToElement)(struct LxmlElement *, struct LxmlElement *) = 0; +#define appendChildToElement __pyx_api_f_4lxml_5etree_appendChildToElement +static PyObject *(*__pyx_api_f_4lxml_5etree_pyunicode)(const xmlChar *) = 0; +#define pyunicode __pyx_api_f_4lxml_5etree_pyunicode +static PyObject *(*__pyx_api_f_4lxml_5etree_utf8)(PyObject *) = 0; +#define utf8 __pyx_api_f_4lxml_5etree_utf8 +static PyObject *(*__pyx_api_f_4lxml_5etree_getNsTag)(PyObject *) = 0; +#define getNsTag __pyx_api_f_4lxml_5etree_getNsTag +static PyObject *(*__pyx_api_f_4lxml_5etree_getNsTagWithEmptyNs)(PyObject *) = 0; +#define getNsTagWithEmptyNs __pyx_api_f_4lxml_5etree_getNsTagWithEmptyNs +static PyObject *(*__pyx_api_f_4lxml_5etree_namespacedName)(xmlNode *) = 0; +#define namespacedName __pyx_api_f_4lxml_5etree_namespacedName +static PyObject *(*__pyx_api_f_4lxml_5etree_namespacedNameFromNsName)(const xmlChar *, const xmlChar *) = 0; +#define namespacedNameFromNsName __pyx_api_f_4lxml_5etree_namespacedNameFromNsName +static void (*__pyx_api_f_4lxml_5etree_iteratorStoreNext)(struct LxmlElementIterator *, struct LxmlElement *) = 0; +#define iteratorStoreNext __pyx_api_f_4lxml_5etree_iteratorStoreNext +static void (*__pyx_api_f_4lxml_5etree_initTagMatch)(struct LxmlElementTagMatcher *, PyObject *) = 0; +#define initTagMatch __pyx_api_f_4lxml_5etree_initTagMatch +static xmlNs *(*__pyx_api_f_4lxml_5etree_findOrBuildNodeNsPrefix)(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *) = 0; +#define findOrBuildNodeNsPrefix __pyx_api_f_4lxml_5etree_findOrBuildNodeNsPrefix +#ifndef __PYX_HAVE_RT_ImportFunction_3_0_10 +#define __PYX_HAVE_RT_ImportFunction_3_0_10 +static int __Pyx_ImportFunction_3_0_10(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = 
PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + + +static int import_lxml__etree(void) { + PyObject *module = 0; + module = PyImport_ImportModule("lxml.etree"); + if (!module) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "deepcopyNodeToDocument", (void (**)(void))&__pyx_api_f_4lxml_5etree_deepcopyNodeToDocument, "struct LxmlElement *(struct LxmlDocument *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "elementTreeFactory", (void (**)(void))&__pyx_api_f_4lxml_5etree_elementTreeFactory, "struct LxmlElementTree *(struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "newElementTree", (void (**)(void))&__pyx_api_f_4lxml_5etree_newElementTree, "struct LxmlElementTree *(struct LxmlElement *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "adoptExternalDocument", (void (**)(void))&__pyx_api_f_4lxml_5etree_adoptExternalDocument, "struct LxmlElementTree *(xmlDoc *, PyObject *, int)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "elementFactory", (void (**)(void))&__pyx_api_f_4lxml_5etree_elementFactory, "struct LxmlElement *(struct LxmlDocument *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "makeElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_makeElement, "struct LxmlElement *(PyObject *, struct LxmlDocument *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "makeSubElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_makeSubElement, "struct LxmlElement *(struct LxmlElement *, PyObject *, PyObject *, PyObject *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setElementClassLookupFunction", (void (**)(void))&__pyx_api_f_4lxml_5etree_setElementClassLookupFunction, "void (_element_class_lookup_function, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "lookupDefaultElementClass", (void (**)(void))&__pyx_api_f_4lxml_5etree_lookupDefaultElementClass, "PyObject *(PyObject *, PyObject *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "lookupNamespaceElementClass", (void (**)(void))&__pyx_api_f_4lxml_5etree_lookupNamespaceElementClass, "PyObject *(PyObject *, PyObject *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "callLookupFallback", (void (**)(void))&__pyx_api_f_4lxml_5etree_callLookupFallback, "PyObject *(struct LxmlFallbackElementClassLookup *, struct LxmlDocument *, xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "tagMatches", (void (**)(void))&__pyx_api_f_4lxml_5etree_tagMatches, "int (xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "documentOrRaise", (void (**)(void))&__pyx_api_f_4lxml_5etree_documentOrRaise, "struct LxmlDocument *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "rootNodeOrRaise", (void (**)(void))&__pyx_api_f_4lxml_5etree_rootNodeOrRaise, "struct LxmlElement *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "hasText", (void (**)(void))&__pyx_api_f_4lxml_5etree_hasText, "int (xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "hasTail", (void (**)(void))&__pyx_api_f_4lxml_5etree_hasTail, "int (xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "textOf", (void (**)(void))&__pyx_api_f_4lxml_5etree_textOf, "PyObject *(xmlNode 
*)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "tailOf", (void (**)(void))&__pyx_api_f_4lxml_5etree_tailOf, "PyObject *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setNodeText", (void (**)(void))&__pyx_api_f_4lxml_5etree_setNodeText, "int (xmlNode *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setTailText", (void (**)(void))&__pyx_api_f_4lxml_5etree_setTailText, "int (xmlNode *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "attributeValue", (void (**)(void))&__pyx_api_f_4lxml_5etree_attributeValue, "PyObject *(xmlNode *, xmlAttr *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "attributeValueFromNsName", (void (**)(void))&__pyx_api_f_4lxml_5etree_attributeValueFromNsName, "PyObject *(xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "getAttributeValue", (void (**)(void))&__pyx_api_f_4lxml_5etree_getAttributeValue, "PyObject *(struct LxmlElement *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "iterattributes", (void (**)(void))&__pyx_api_f_4lxml_5etree_iterattributes, "PyObject *(struct LxmlElement *, int)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "collectAttributes", (void (**)(void))&__pyx_api_f_4lxml_5etree_collectAttributes, "PyObject *(xmlNode *, int)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "setAttributeValue", (void (**)(void))&__pyx_api_f_4lxml_5etree_setAttributeValue, "int (struct LxmlElement *, PyObject *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "delAttribute", (void (**)(void))&__pyx_api_f_4lxml_5etree_delAttribute, "int (struct LxmlElement *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "delAttributeFromNsName", (void (**)(void))&__pyx_api_f_4lxml_5etree_delAttributeFromNsName, "int (xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "hasChild", (void (**)(void))&__pyx_api_f_4lxml_5etree_hasChild, "int (xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findChild", (void (**)(void))&__pyx_api_f_4lxml_5etree_findChild, "xmlNode *(xmlNode *, Py_ssize_t)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findChildForwards", (void (**)(void))&__pyx_api_f_4lxml_5etree_findChildForwards, "xmlNode *(xmlNode *, Py_ssize_t)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findChildBackwards", (void (**)(void))&__pyx_api_f_4lxml_5etree_findChildBackwards, "xmlNode *(xmlNode *, Py_ssize_t)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "nextElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_nextElement, "xmlNode *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "previousElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_previousElement, "xmlNode *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "appendChild", (void (**)(void))&__pyx_api_f_4lxml_5etree_appendChild, "void (struct LxmlElement *, struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "appendChildToElement", (void (**)(void))&__pyx_api_f_4lxml_5etree_appendChildToElement, "int (struct LxmlElement *, struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "pyunicode", (void (**)(void))&__pyx_api_f_4lxml_5etree_pyunicode, "PyObject *(const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "utf8", (void 
(**)(void))&__pyx_api_f_4lxml_5etree_utf8, "PyObject *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "getNsTag", (void (**)(void))&__pyx_api_f_4lxml_5etree_getNsTag, "PyObject *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "getNsTagWithEmptyNs", (void (**)(void))&__pyx_api_f_4lxml_5etree_getNsTagWithEmptyNs, "PyObject *(PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "namespacedName", (void (**)(void))&__pyx_api_f_4lxml_5etree_namespacedName, "PyObject *(xmlNode *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "namespacedNameFromNsName", (void (**)(void))&__pyx_api_f_4lxml_5etree_namespacedNameFromNsName, "PyObject *(const xmlChar *, const xmlChar *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "iteratorStoreNext", (void (**)(void))&__pyx_api_f_4lxml_5etree_iteratorStoreNext, "void (struct LxmlElementIterator *, struct LxmlElement *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "initTagMatch", (void (**)(void))&__pyx_api_f_4lxml_5etree_initTagMatch, "void (struct LxmlElementTagMatcher *, PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_10(module, "findOrBuildNodeNsPrefix", (void (**)(void))&__pyx_api_f_4lxml_5etree_findOrBuildNodeNsPrefix, "xmlNs *(struct LxmlDocument *, xmlNode *, const xmlChar *, const xmlChar *)") < 0) goto bad; + Py_DECREF(module); module = 0; + return 0; + bad: + Py_XDECREF(module); + return -1; +} + +#endif /* !__PYX_HAVE_API__lxml__etree */ diff --git a/venv/lib/python3.10/site-packages/lxml/nsclasses.pxi b/venv/lib/python3.10/site-packages/lxml/nsclasses.pxi new file mode 100644 index 0000000000000000000000000000000000000000..a3c86f0e0140557c0f3f2c5a2557dab1c62bfe3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/nsclasses.pxi @@ -0,0 +1,281 @@ +# module-level API for namespace implementations + +cdef class LxmlRegistryError(LxmlError): + """Base class of lxml registry errors. + """ + +cdef class NamespaceRegistryError(LxmlRegistryError): + """Error registering a namespace extension. + """ + + +@cython.internal +cdef class _NamespaceRegistry: + "Dictionary-like namespace registry" + cdef object _ns_uri + cdef bytes _ns_uri_utf + cdef dict _entries + cdef char* _c_ns_uri_utf + def __cinit__(self, ns_uri): + self._ns_uri = ns_uri + if ns_uri is None: + self._ns_uri_utf = None + self._c_ns_uri_utf = NULL + else: + self._ns_uri_utf = _utf8(ns_uri) + self._c_ns_uri_utf = _cstr(self._ns_uri_utf) + self._entries = {} + + def update(self, class_dict_iterable): + """update(self, class_dict_iterable) + + Forgivingly update the registry. + + ``class_dict_iterable`` may be a dict or some other iterable + that yields (name, value) pairs. + + If a value does not match the required type for this registry, + or if the name starts with '_', it will be silently discarded. 
+ This allows registrations at the module or class level using + vars(), globals() etc.""" + if hasattr(class_dict_iterable, 'items'): + class_dict_iterable = class_dict_iterable.items() + for name, item in class_dict_iterable: + if (name is None or name[:1] != '_') and callable(item): + self[name] = item + + def __getitem__(self, name): + if name is not None: + name = _utf8(name) + return self._get(name) + + def __delitem__(self, name): + if name is not None: + name = _utf8(name) + del self._entries[name] + + cdef object _get(self, object name): + cdef python.PyObject* dict_result + dict_result = python.PyDict_GetItem(self._entries, name) + if dict_result is NULL: + raise KeyError, "Name not registered." + return dict_result + + cdef object _getForString(self, char* name): + cdef python.PyObject* dict_result + dict_result = python.PyDict_GetItem(self._entries, name) + if dict_result is NULL: + raise KeyError, "Name not registered." + return dict_result + + def __iter__(self): + return iter(self._entries) + + def items(self): + return list(self._entries.items()) + + def iteritems(self): + return iter(self._entries.items()) + + def clear(self): + self._entries.clear() + + def __call__(self, obj): + # Usage as decorator: + # ns = lookup.get_namespace("...") + # @ns('abc') + # class element(ElementBase): pass + # + # @ns + # class elementname(ElementBase): pass + + if obj is None or python._isString(obj): + # @ns(None) or @ns('tag') + return partial(self.__deco, obj) + # plain @ns decorator + self[obj.__name__] = obj + return obj + + def __deco(self, name, obj): + self[name] = obj + return obj + + +@cython.final +@cython.internal +cdef class _ClassNamespaceRegistry(_NamespaceRegistry): + "Dictionary-like registry for namespace implementation classes" + def __setitem__(self, name, item): + if not isinstance(item, type) or not issubclass(item, ElementBase): + raise NamespaceRegistryError, \ + "Registered element classes must be subtypes of ElementBase" + if name is not None: + name = _utf8(name) + self._entries[name] = item + + def __repr__(self): + return "Namespace(%r)" % self._ns_uri + + +cdef class ElementNamespaceClassLookup(FallbackElementClassLookup): + """ElementNamespaceClassLookup(self, fallback=None) + + Element class lookup scheme that searches the Element class in the + Namespace registry. + + Usage: + + >>> lookup = ElementNamespaceClassLookup() + >>> ns_elements = lookup.get_namespace("http://schema.org/Movie") + + >>> @ns_elements + ... class movie(ElementBase): + ... "Element implementation for 'movie' tag (using class name) in schema namespace." + + >>> @ns_elements("movie") + ... class MovieElement(ElementBase): + ... "Element implementation for 'movie' tag (explicit tag name) in schema namespace." + """ + cdef dict _namespace_registries + def __cinit__(self): + self._namespace_registries = {} + + def __init__(self, ElementClassLookup fallback=None): + FallbackElementClassLookup.__init__(self, fallback) + self._lookup_function = _find_nselement_class + + def get_namespace(self, ns_uri): + """get_namespace(self, ns_uri) + + Retrieve the namespace object associated with the given URI. + Pass None for the empty namespace. 
+ + Creates a new namespace object if it does not yet exist.""" + if ns_uri: + ns_utf = _utf8(ns_uri) + else: + ns_utf = None + try: + return self._namespace_registries[ns_utf] + except KeyError: + registry = self._namespace_registries[ns_utf] = \ + _ClassNamespaceRegistry(ns_uri) + return registry + +cdef object _find_nselement_class(state, _Document doc, xmlNode* c_node): + cdef python.PyObject* dict_result + cdef ElementNamespaceClassLookup lookup + cdef _NamespaceRegistry registry + if state is None: + return _lookupDefaultElementClass(None, doc, c_node) + + lookup = state + if c_node.type != tree.XML_ELEMENT_NODE: + return _callLookupFallback(lookup, doc, c_node) + + c_namespace_utf = _getNs(c_node) + if c_namespace_utf is not NULL: + dict_result = python.PyDict_GetItem( + lookup._namespace_registries, c_namespace_utf) + else: + dict_result = python.PyDict_GetItem( + lookup._namespace_registries, None) + if dict_result is not NULL: + registry = <_NamespaceRegistry>dict_result + classes = registry._entries + + if c_node.name is not NULL: + dict_result = python.PyDict_GetItem( + classes, c_node.name) + else: + dict_result = NULL + + if dict_result is NULL: + dict_result = python.PyDict_GetItem(classes, None) + + if dict_result is not NULL: + return dict_result + return _callLookupFallback(lookup, doc, c_node) + + +################################################################################ +# XPath extension functions + +cdef dict __FUNCTION_NAMESPACE_REGISTRIES +__FUNCTION_NAMESPACE_REGISTRIES = {} + +def FunctionNamespace(ns_uri): + """FunctionNamespace(ns_uri) + + Retrieve the function namespace object associated with the given + URI. + + Creates a new one if it does not yet exist. A function namespace + can only be used to register extension functions. + + Usage: + + >>> ns_functions = FunctionNamespace("http://schema.org/Movie") + + >>> @ns_functions # uses function name + ... def add2(x): + ... return x + 2 + + >>> @ns_functions("add3") # uses explicit name + ... def add_three(x): + ... return x + 3 + """ + ns_utf = _utf8(ns_uri) if ns_uri else None + try: + return __FUNCTION_NAMESPACE_REGISTRIES[ns_utf] + except KeyError: + registry = __FUNCTION_NAMESPACE_REGISTRIES[ns_utf] = \ + _XPathFunctionNamespaceRegistry(ns_uri) + return registry + +@cython.internal +cdef class _FunctionNamespaceRegistry(_NamespaceRegistry): + def __setitem__(self, name, item): + if not callable(item): + raise NamespaceRegistryError, \ + "Registered functions must be callable." + if not name: + raise ValueError, \ + "extensions must have non empty names" + self._entries[_utf8(name)] = item + + def __repr__(self): + return "FunctionNamespace(%r)" % self._ns_uri + +@cython.final +@cython.internal +cdef class _XPathFunctionNamespaceRegistry(_FunctionNamespaceRegistry): + cdef object _prefix + cdef bytes _prefix_utf + + property prefix: + "Namespace prefix for extension functions." + def __del__(self): + self._prefix = None # no prefix configured + self._prefix_utf = None + def __get__(self): + if self._prefix is None: + return '' + else: + return self._prefix + def __set__(self, prefix): + if prefix == '': + prefix = None # empty prefix + self._prefix_utf = _utf8(prefix) if prefix is not None else None + self._prefix = prefix + +cdef list _find_all_extension_prefixes(): + "Internal lookup function to find all function prefixes for XSLT/XPath." 
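+    # Collect one (prefix, namespace URI) pair per registered function
+    # namespace that has both a prefix and a namespace URI configured;
+    # registries without a prefix are skipped.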
+ cdef _XPathFunctionNamespaceRegistry registry + cdef list ns_prefixes = [] + for registry in __FUNCTION_NAMESPACE_REGISTRIES.itervalues(): + if registry._prefix_utf is not None: + if registry._ns_uri_utf is not None: + ns_prefixes.append( + (registry._prefix_utf, registry._ns_uri_utf)) + return ns_prefixes diff --git a/venv/lib/python3.10/site-packages/lxml/objectify.pyx b/venv/lib/python3.10/site-packages/lxml/objectify.pyx new file mode 100644 index 0000000000000000000000000000000000000000..0ff922262ed55ef2d04bdea0f196bd724b736550 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/objectify.pyx @@ -0,0 +1,2145 @@ +# cython: binding=True +# cython: auto_pickle=False +# cython: language_level=3 + +""" +The ``lxml.objectify`` module implements a Python object API for XML. +It is based on `lxml.etree`. +""" + +cimport cython + +from lxml.includes.etreepublic cimport _Document, _Element, ElementBase, ElementClassLookup +from lxml.includes.etreepublic cimport elementFactory, import_lxml__etree, textOf, pyunicode +from lxml.includes.tree cimport const_xmlChar, _xcstr +from lxml cimport python +from lxml.includes cimport tree + +cimport lxml.includes.etreepublic as cetree +cimport libc.string as cstring_h # not to be confused with stdlib 'string' +from libc.string cimport const_char + +__all__ = ['BoolElement', 'DataElement', 'E', 'Element', 'ElementMaker', + 'FloatElement', 'IntElement', 'NoneElement', + 'NumberElement', 'ObjectPath', 'ObjectifiedDataElement', + 'ObjectifiedElement', 'ObjectifyElementClassLookup', + 'PYTYPE_ATTRIBUTE', 'PyType', 'StringElement', 'SubElement', + 'XML', 'annotate', 'deannotate', 'dump', 'enable_recursive_str', + 'fromstring', 'getRegisteredTypes', 'makeparser', 'parse', + 'pyannotate', 'pytypename', 'set_default_parser', + 'set_pytype_attribute_tag', 'xsiannotate'] + +cdef object etree +from lxml import etree +# initialize C-API of lxml.etree +import_lxml__etree() + +__version__ = etree.__version__ + +cdef object _float_is_inf, _float_is_nan +from math import isinf as _float_is_inf, isnan as _float_is_nan + +cdef object re +import re + +cdef tuple IGNORABLE_ERRORS = (ValueError, TypeError) +cdef object is_special_method = re.compile('__.*__$').match + + +cdef object _typename(object t): + cdef const_char* c_name + c_name = python._fqtypename(t) + s = cstring_h.strrchr(c_name, c'.') + if s is not NULL: + c_name = s + 1 + return pyunicode(c_name) + + +# namespace/name for "pytype" hint attribute +cdef object PYTYPE_NAMESPACE +cdef bytes PYTYPE_NAMESPACE_UTF8 +cdef const_xmlChar* _PYTYPE_NAMESPACE + +cdef object PYTYPE_ATTRIBUTE_NAME +cdef bytes PYTYPE_ATTRIBUTE_NAME_UTF8 +cdef const_xmlChar* _PYTYPE_ATTRIBUTE_NAME + +PYTYPE_ATTRIBUTE = None + +cdef unicode TREE_PYTYPE_NAME = "TREE" + +cdef tuple _unicodeAndUtf8(s): + return s, python.PyUnicode_AsUTF8String(s) + +def set_pytype_attribute_tag(attribute_tag=None): + """set_pytype_attribute_tag(attribute_tag=None) + Change name and namespace of the XML attribute that holds Python type + information. + + Do not use this unless you know what you are doing. + + Reset by calling without argument. 
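+
+    For illustration, a minimal sketch (the namespace URI used here is an
+    arbitrary example)::
+
+        >>> set_pytype_attribute_tag("{http://example.org/ns}mytype")
+        >>> set_pytype_attribute_tag()   # reset to the default shown below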
+ + Default: "{http://codespeak.net/lxml/objectify/pytype}pytype" + """ + global PYTYPE_ATTRIBUTE, _PYTYPE_NAMESPACE, _PYTYPE_ATTRIBUTE_NAME + global PYTYPE_NAMESPACE, PYTYPE_NAMESPACE_UTF8 + global PYTYPE_ATTRIBUTE_NAME, PYTYPE_ATTRIBUTE_NAME_UTF8 + if attribute_tag is None: + PYTYPE_NAMESPACE, PYTYPE_NAMESPACE_UTF8 = \ + _unicodeAndUtf8("http://codespeak.net/lxml/objectify/pytype") + PYTYPE_ATTRIBUTE_NAME, PYTYPE_ATTRIBUTE_NAME_UTF8 = \ + _unicodeAndUtf8("pytype") + else: + PYTYPE_NAMESPACE_UTF8, PYTYPE_ATTRIBUTE_NAME_UTF8 = \ + cetree.getNsTag(attribute_tag) + PYTYPE_NAMESPACE = PYTYPE_NAMESPACE_UTF8.decode('utf8') + PYTYPE_ATTRIBUTE_NAME = PYTYPE_ATTRIBUTE_NAME_UTF8.decode('utf8') + + _PYTYPE_NAMESPACE = PYTYPE_NAMESPACE_UTF8 + _PYTYPE_ATTRIBUTE_NAME = PYTYPE_ATTRIBUTE_NAME_UTF8 + PYTYPE_ATTRIBUTE = cetree.namespacedNameFromNsName( + _PYTYPE_NAMESPACE, _PYTYPE_ATTRIBUTE_NAME) + +set_pytype_attribute_tag() + + +# namespaces for XML Schema +cdef object XML_SCHEMA_NS, XML_SCHEMA_NS_UTF8 +XML_SCHEMA_NS, XML_SCHEMA_NS_UTF8 = \ + _unicodeAndUtf8("http://www.w3.org/2001/XMLSchema") +cdef const_xmlChar* _XML_SCHEMA_NS = _xcstr(XML_SCHEMA_NS_UTF8) + +cdef object XML_SCHEMA_INSTANCE_NS, XML_SCHEMA_INSTANCE_NS_UTF8 +XML_SCHEMA_INSTANCE_NS, XML_SCHEMA_INSTANCE_NS_UTF8 = \ + _unicodeAndUtf8("http://www.w3.org/2001/XMLSchema-instance") +cdef const_xmlChar* _XML_SCHEMA_INSTANCE_NS = _xcstr(XML_SCHEMA_INSTANCE_NS_UTF8) + +cdef object XML_SCHEMA_INSTANCE_NIL_ATTR = "{%s}nil" % XML_SCHEMA_INSTANCE_NS +cdef object XML_SCHEMA_INSTANCE_TYPE_ATTR = "{%s}type" % XML_SCHEMA_INSTANCE_NS + + +################################################################################ +# Element class for the main API + +cdef class ObjectifiedElement(ElementBase): + """Main XML Element class. + + Element children are accessed as object attributes. Multiple children + with the same name are available through a list index. Example:: + + >>> root = XML("01") + >>> second_c2 = root.c1.c2[1] + >>> print(second_c2.text) + 1 + + Note that you cannot (and must not) instantiate this class or its + subclasses. + """ + def __iter__(self): + """Iterate over self and all siblings with the same tag. + """ + parent = self.getparent() + if parent is None: + return iter([self]) + return etree.ElementChildIterator(parent, tag=self.tag) + + def __str__(self): + if __RECURSIVE_STR: + return _dump(self, 0) + else: + return textOf(self._c_node) or '' + + # pickle support for objectified Element + def __reduce__(self): + return fromstring, (etree.tostring(self),) + + @property + def text(self): + return textOf(self._c_node) + + @property + def __dict__(self): + """A fake implementation for __dict__ to support dir() etc. + + Note that this only considers the first child with a given name. + """ + cdef _Element child + cdef dict children + c_ns = tree._getNs(self._c_node) + tag = "{%s}*" % pyunicode(c_ns) if c_ns is not NULL else None + children = {} + for child in etree.ElementChildIterator(self, tag=tag): + if c_ns is NULL and tree._getNs(child._c_node) is not NULL: + continue + name = pyunicode(child._c_node.name) + if name not in children: + children[name] = child + return children + + def __len__(self): + """Count self and siblings with the same tag. + """ + return _countSiblings(self._c_node) + + def countchildren(self): + """countchildren(self) + + Return the number of children of this element, regardless of their + name. 
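+
+        For illustration, a minimal sketch (tag names are arbitrary)::
+
+            >>> root = fromstring("<root><a/><a/><b/></root>")
+            >>> root.countchildren()
+            3
+            >>> len(root.a)   # len() counts same-tag siblings instead
+            2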
+ """ + # copied from etree + cdef Py_ssize_t c + cdef tree.xmlNode* c_node + c = 0 + c_node = self._c_node.children + while c_node is not NULL: + if tree._isElement(c_node): + c += 1 + c_node = c_node.next + return c + + def getchildren(self): + """getchildren(self) + + Returns a sequence of all direct children. The elements are + returned in document order. + """ + cdef tree.xmlNode* c_node + result = [] + c_node = self._c_node.children + while c_node is not NULL: + if tree._isElement(c_node): + result.append(cetree.elementFactory(self._doc, c_node)) + c_node = c_node.next + return result + + def __getattr__(self, tag): + """Return the (first) child with the given tag name. If no namespace + is provided, the child will be looked up in the same one as self. + """ + return _lookupChildOrRaise(self, tag) + + def __setattr__(self, tag, value): + """Set the value of the (first) child with the given tag name. If no + namespace is provided, the child will be looked up in the same one as + self. + """ + cdef _Element element + # properties are looked up /after/ __setattr__, so we must emulate them + if tag == 'text' or tag == 'pyval': + # read-only ! + raise TypeError, f"attribute '{tag}' of '{_typename(self)}' objects is not writable" + elif tag == 'tail': + cetree.setTailText(self._c_node, value) + return + elif tag == 'tag': + ElementBase.tag.__set__(self, value) + return + elif tag == 'base': + ElementBase.base.__set__(self, value) + return + tag = _buildChildTag(self, tag) + element = _lookupChild(self, tag) + if element is None: + _appendValue(self, tag, value) + else: + _replaceElement(element, value) + + def __delattr__(self, tag): + child = _lookupChildOrRaise(self, tag) + self.remove(child) + + def addattr(self, tag, value): + """addattr(self, tag, value) + + Add a child value to the element. + + As opposed to append(), it sets a data value, not an element. + """ + _appendValue(self, _buildChildTag(self, tag), value) + + def __getitem__(self, key): + """Return a sibling, counting from the first child of the parent. The + method behaves like both a dict and a sequence. + + * If argument is an integer, returns the sibling at that position. + + * If argument is a string, does the same as getattr(). This can be + used to provide namespaces for element lookup, or to look up + children with special names (``text`` etc.). + + * If argument is a slice object, returns the matching slice. + """ + cdef tree.xmlNode* c_self_node + cdef tree.xmlNode* c_parent + cdef tree.xmlNode* c_node + cdef Py_ssize_t c_index + if python._isString(key): + return _lookupChildOrRaise(self, key) + elif isinstance(key, slice): + return list(self)[key] + # normal item access + c_index = key # raises TypeError if necessary + c_self_node = self._c_node + c_parent = c_self_node.parent + if c_parent is NULL: + if c_index == 0 or c_index == -1: + return self + raise IndexError, unicode(key) + if c_index < 0: + c_node = c_parent.last + else: + c_node = c_parent.children + c_node = _findFollowingSibling( + c_node, tree._getNs(c_self_node), c_self_node.name, c_index) + if c_node is NULL: + raise IndexError, unicode(key) + return elementFactory(self._doc, c_node) + + def __setitem__(self, key, value): + """Set the value of a sibling, counting from the first child of the + parent. Implements key assignment, item assignment and slice + assignment. + + * If argument is an integer, sets the sibling at that position. + + * If argument is a string, does the same as setattr(). This is used + to provide namespaces for element lookup. 
+ + * If argument is a sequence (list, tuple, etc.), assign the contained + items to the siblings. + """ + cdef _Element element + cdef tree.xmlNode* c_node + if python._isString(key): + key = _buildChildTag(self, key) + element = _lookupChild(self, key) + if element is None: + _appendValue(self, key, value) + else: + _replaceElement(element, value) + return + + if self._c_node.parent is NULL: + # the 'root[i] = ...' case + raise TypeError, "assignment to root element is invalid" + + if isinstance(key, slice): + # slice assignment + _setSlice(key, self, value) + else: + # normal index assignment + if key < 0: + c_node = self._c_node.parent.last + else: + c_node = self._c_node.parent.children + c_node = _findFollowingSibling( + c_node, tree._getNs(self._c_node), self._c_node.name, key) + if c_node is NULL: + raise IndexError, unicode(key) + element = elementFactory(self._doc, c_node) + _replaceElement(element, value) + + def __delitem__(self, key): + parent = self.getparent() + if parent is None: + raise TypeError, "deleting items not supported by root element" + if isinstance(key, slice): + # slice deletion + del_items = list(self)[key] + remove = parent.remove + for el in del_items: + remove(el) + else: + # normal index deletion + sibling = self.__getitem__(key) + parent.remove(sibling) + + def descendantpaths(self, prefix=None): + """descendantpaths(self, prefix=None) + + Returns a list of object path expressions for all descendants. + """ + if prefix is not None and not python._isString(prefix): + prefix = '.'.join(prefix) + return _build_descendant_paths(self._c_node, prefix) + + +cdef inline bint _tagMatches(tree.xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name): + if c_node.name != c_name: + return 0 + if c_href == NULL: + return 1 + c_node_href = tree._getNs(c_node) + if c_node_href == NULL: + return c_href[0] == c'\0' + return tree.xmlStrcmp(c_node_href, c_href) == 0 + + +cdef Py_ssize_t _countSiblings(tree.xmlNode* c_start_node): + cdef tree.xmlNode* c_node + cdef Py_ssize_t count + c_tag = c_start_node.name + c_href = tree._getNs(c_start_node) + count = 1 + c_node = c_start_node.next + while c_node is not NULL: + if c_node.type == tree.XML_ELEMENT_NODE and \ + _tagMatches(c_node, c_href, c_tag): + count += 1 + c_node = c_node.next + c_node = c_start_node.prev + while c_node is not NULL: + if c_node.type == tree.XML_ELEMENT_NODE and \ + _tagMatches(c_node, c_href, c_tag): + count += 1 + c_node = c_node.prev + return count + +cdef tree.xmlNode* _findFollowingSibling(tree.xmlNode* c_node, + const_xmlChar* href, const_xmlChar* name, + Py_ssize_t index): + cdef tree.xmlNode* (*next)(tree.xmlNode*) + if index >= 0: + next = cetree.nextElement + else: + index = -1 - index + next = cetree.previousElement + while c_node is not NULL: + if c_node.type == tree.XML_ELEMENT_NODE and \ + _tagMatches(c_node, href, name): + index = index - 1 + if index < 0: + return c_node + c_node = next(c_node) + return NULL + +cdef object _lookupChild(_Element parent, tag): + cdef tree.xmlNode* c_result + cdef tree.xmlNode* c_node + c_node = parent._c_node + ns, tag = cetree.getNsTagWithEmptyNs(tag) + c_tag = tree.xmlDictExists( + c_node.doc.dict, _xcstr(tag), python.PyBytes_GET_SIZE(tag)) + if c_tag is NULL: + return None # not in the hash map => not in the tree + if ns is None: + # either inherit ns from parent or use empty (i.e. 
no) namespace + c_href = tree._getNs(c_node) or '' + else: + c_href = _xcstr(ns) + c_result = _findFollowingSibling(c_node.children, c_href, c_tag, 0) + if c_result is NULL: + return None + return elementFactory(parent._doc, c_result) + +cdef object _lookupChildOrRaise(_Element parent, tag): + element = _lookupChild(parent, tag) + if element is None: + raise AttributeError, "no such child: " + _buildChildTag(parent, tag) + return element + +cdef object _buildChildTag(_Element parent, tag): + ns, tag = cetree.getNsTag(tag) + c_tag = _xcstr(tag) + c_href = tree._getNs(parent._c_node) if ns is None else _xcstr(ns) + return cetree.namespacedNameFromNsName(c_href, c_tag) + +cdef _replaceElement(_Element element, value): + cdef _Element new_element + if isinstance(value, _Element): + # deep copy the new element + new_element = cetree.deepcopyNodeToDocument( + element._doc, (<_Element>value)._c_node) + new_element.tag = element.tag + elif isinstance(value, (list, tuple)): + element[:] = value + return + else: + new_element = element.makeelement(element.tag) + _setElementValue(new_element, value) + element.getparent().replace(element, new_element) + +cdef _appendValue(_Element parent, tag, value): + cdef _Element new_element + if isinstance(value, _Element): + # deep copy the new element + new_element = cetree.deepcopyNodeToDocument( + parent._doc, (<_Element>value)._c_node) + new_element.tag = tag + cetree.appendChildToElement(parent, new_element) + elif isinstance(value, (list, tuple)): + for item in value: + _appendValue(parent, tag, item) + else: + new_element = cetree.makeElement( + tag, parent._doc, None, None, None, None, None) + _setElementValue(new_element, value) + cetree.appendChildToElement(parent, new_element) + +cdef _setElementValue(_Element element, value): + if value is None: + cetree.setAttributeValue( + element, XML_SCHEMA_INSTANCE_NIL_ATTR, "true") + elif isinstance(value, _Element): + _replaceElement(element, value) + return + else: + cetree.delAttributeFromNsName( + element._c_node, _XML_SCHEMA_INSTANCE_NS, "nil") + if python._isString(value): + pytype_name = "str" + py_type = _PYTYPE_DICT.get(pytype_name) + else: + pytype_name = _typename(value) + py_type = _PYTYPE_DICT.get(pytype_name) + if py_type is not None: + value = py_type.stringify(value) + else: + value = unicode(value) + if py_type is not None: + cetree.setAttributeValue(element, PYTYPE_ATTRIBUTE, pytype_name) + else: + cetree.delAttributeFromNsName( + element._c_node, _PYTYPE_NAMESPACE, _PYTYPE_ATTRIBUTE_NAME) + cetree.setNodeText(element._c_node, value) + +cdef _setSlice(sliceobject, _Element target, items): + cdef _Element parent + cdef tree.xmlNode* c_node + cdef Py_ssize_t c_step, c_start, pos + # collect existing slice + if (sliceobject).step is None: + c_step = 1 + else: + c_step = (sliceobject).step + if c_step == 0: + raise ValueError, "Invalid slice" + cdef list del_items = target[sliceobject] + + # collect new values + new_items = [] + tag = target.tag + for item in items: + if isinstance(item, _Element): + # deep copy the new element + new_element = cetree.deepcopyNodeToDocument( + target._doc, (<_Element>item)._c_node) + new_element.tag = tag + else: + new_element = cetree.makeElement( + tag, target._doc, None, None, None, None, None) + _setElementValue(new_element, item) + new_items.append(new_element) + + # sanity check - raise what a list would raise + if c_step != 1 and len(del_items) != len(new_items): + raise ValueError, \ + f"attempt to assign sequence of size {len(new_items)} to extended slice 
of size {len(del_items)}" + + # replace existing items + pos = 0 + parent = target.getparent() + replace = parent.replace + while pos < len(new_items) and pos < len(del_items): + replace(del_items[pos], new_items[pos]) + pos += 1 + # remove leftover items + if pos < len(del_items): + remove = parent.remove + while pos < len(del_items): + remove(del_items[pos]) + pos += 1 + # append remaining new items + if pos < len(new_items): + # the sanity check above guarantees (step == 1) + if pos > 0: + item = new_items[pos-1] + else: + if (sliceobject).start > 0: + c_node = parent._c_node.children + else: + c_node = parent._c_node.last + c_node = _findFollowingSibling( + c_node, tree._getNs(target._c_node), target._c_node.name, + (sliceobject).start - 1) + if c_node is NULL: + while pos < len(new_items): + cetree.appendChildToElement(parent, new_items[pos]) + pos += 1 + return + item = cetree.elementFactory(parent._doc, c_node) + while pos < len(new_items): + add = item.addnext + item = new_items[pos] + add(item) + pos += 1 + +################################################################################ +# Data type support in subclasses + +cdef class ObjectifiedDataElement(ObjectifiedElement): + """This is the base class for all data type Elements. Subclasses should + override the 'pyval' property and possibly the __str__ method. + """ + @property + def pyval(self): + return textOf(self._c_node) + + def __str__(self): + return textOf(self._c_node) or '' + + def __repr__(self): + return textOf(self._c_node) or '' + + def _setText(self, s): + """For use in subclasses only. Don't use unless you know what you are + doing. + """ + cetree.setNodeText(self._c_node, s) + + +cdef class NumberElement(ObjectifiedDataElement): + cdef object _parse_value + + def _setValueParser(self, function): + """Set the function that parses the Python value from a string. + + Do not use this unless you know what you are doing. 
+ """ + self._parse_value = function + + @property + def pyval(self): + return _parseNumber(self) + + def __int__(self): + return int(_parseNumber(self)) + + def __float__(self): + return float(_parseNumber(self)) + + def __complex__(self): + return complex(_parseNumber(self)) + + def __str__(self): + return unicode(_parseNumber(self)) + + def __repr__(self): + return repr(_parseNumber(self)) + + def __oct__(self): + return oct(_parseNumber(self)) + + def __hex__(self): + return hex(_parseNumber(self)) + + def __richcmp__(self, other, int op): + return _richcmpPyvals(self, other, op) + + def __hash__(self): + return hash(_parseNumber(self)) + + def __add__(self, other): + return _numericValueOf(self) + _numericValueOf(other) + + def __radd__(self, other): + return _numericValueOf(other) + _numericValueOf(self) + + def __sub__(self, other): + return _numericValueOf(self) - _numericValueOf(other) + + def __rsub__(self, other): + return _numericValueOf(other) - _numericValueOf(self) + + def __mul__(self, other): + return _numericValueOf(self) * _numericValueOf(other) + + def __rmul__(self, other): + return _numericValueOf(other) * _numericValueOf(self) + + def __div__(self, other): + return _numericValueOf(self) / _numericValueOf(other) + + def __rdiv__(self, other): + return _numericValueOf(other) / _numericValueOf(self) + + def __truediv__(self, other): + return _numericValueOf(self) / _numericValueOf(other) + + def __rtruediv__(self, other): + return _numericValueOf(other) / _numericValueOf(self) + + def __floordiv__(self, other): + return _numericValueOf(self) // _numericValueOf(other) + + def __rfloordiv__(self, other): + return _numericValueOf(other) // _numericValueOf(self) + + def __mod__(self, other): + return _numericValueOf(self) % _numericValueOf(other) + + def __rmod__(self, other): + return _numericValueOf(other) % _numericValueOf(self) + + def __divmod__(self, other): + return divmod(_numericValueOf(self), _numericValueOf(other)) + + def __rdivmod__(self, other): + return divmod(_numericValueOf(other), _numericValueOf(self)) + + def __pow__(self, other, modulo): + if modulo is None: + return _numericValueOf(self) ** _numericValueOf(other) + else: + return pow(_numericValueOf(self), _numericValueOf(other), modulo) + + def __rpow__(self, other, modulo): + if modulo is None: + return _numericValueOf(other) ** _numericValueOf(self) + else: + return pow(_numericValueOf(other), _numericValueOf(self), modulo) + + def __neg__(self): + return - _numericValueOf(self) + + def __pos__(self): + return + _numericValueOf(self) + + def __abs__(self): + return abs( _numericValueOf(self) ) + + def __bool__(self): + return bool(_numericValueOf(self)) + + def __invert__(self): + return ~ _numericValueOf(self) + + def __lshift__(self, other): + return _numericValueOf(self) << _numericValueOf(other) + + def __rlshift__(self, other): + return _numericValueOf(other) << _numericValueOf(self) + + def __rshift__(self, other): + return _numericValueOf(self) >> _numericValueOf(other) + + def __rrshift__(self, other): + return _numericValueOf(other) >> _numericValueOf(self) + + def __and__(self, other): + return _numericValueOf(self) & _numericValueOf(other) + + def __rand__(self, other): + return _numericValueOf(other) & _numericValueOf(self) + + def __or__(self, other): + return _numericValueOf(self) | _numericValueOf(other) + + def __ror__(self, other): + return _numericValueOf(other) | _numericValueOf(self) + + def __xor__(self, other): + return _numericValueOf(self) ^ _numericValueOf(other) + + def 
__rxor__(self, other): + return _numericValueOf(other) ^ _numericValueOf(self) + + +cdef class IntElement(NumberElement): + def _init(self): + self._parse_value = int + + def __index__(self): + return int(_parseNumber(self)) + + +cdef class FloatElement(NumberElement): + def _init(self): + self._parse_value = float + + +cdef class StringElement(ObjectifiedDataElement): + """String data class. + + Note that this class does *not* support the sequence protocol of strings: + len(), iter(), str_attr[0], str_attr[0:1], etc. are *not* supported. + Instead, use the .text attribute to get a 'real' string. + """ + @property + def pyval(self): + return textOf(self._c_node) or '' + + def __repr__(self): + return repr(textOf(self._c_node) or '') + + def strlen(self): + text = textOf(self._c_node) + if text is None: + return 0 + else: + return len(text) + + def __bool__(self): + return bool(textOf(self._c_node)) + + def __richcmp__(self, other, int op): + return _richcmpPyvals(self, other, op) + + def __hash__(self): + return hash(textOf(self._c_node) or '') + + def __add__(self, other): + text = _strValueOf(self) + other = _strValueOf(other) + return text + other + + def __radd__(self, other): + text = _strValueOf(self) + other = _strValueOf(other) + return other + text + + def __mul__(self, other): + if isinstance(self, StringElement): + return (textOf((self)._c_node) or '') * _numericValueOf(other) + elif isinstance(other, StringElement): + return _numericValueOf(self) * (textOf((other)._c_node) or '') + else: + return NotImplemented + + def __rmul__(self, other): + return _numericValueOf(other) * (textOf((self)._c_node) or '') + + def __mod__(self, other): + return (_strValueOf(self) or '') % other + + def __int__(self): + return int(textOf(self._c_node)) + + def __float__(self): + return float(textOf(self._c_node)) + + def __complex__(self): + return complex(textOf(self._c_node)) + + +cdef class NoneElement(ObjectifiedDataElement): + def __str__(self): + return "None" + + def __repr__(self): + return "None" + + def __bool__(self): + return False + + def __richcmp__(self, other, int op): + if other is None or self is None: + return python.PyObject_RichCompare(None, None, op) + if isinstance(self, NoneElement): + return python.PyObject_RichCompare(None, other, op) + else: + return python.PyObject_RichCompare(self, None, op) + + def __hash__(self): + return hash(None) + + @property + def pyval(self): + return None + + +cdef class BoolElement(IntElement): + """Boolean type base on string values: 'true' or 'false'. + + Note that this inherits from IntElement to mimic the behaviour of + Python's bool type. 
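+
+    Illustrative sketch of the resulting behaviour (assuming the default
+    objectify parser)::
+
+        >>> from lxml import objectify
+        >>> root = objectify.fromstring("<root><b>true</b></root>")
+        >>> root.b
+        True
+        >>> bool(root.b), int(root.b), root.b == True
+        (True, 1, True)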
+ """ + def _init(self): + self._parse_value = _parseBool # wraps as Python callable + + def __bool__(self): + return _parseBool(textOf(self._c_node)) + + def __int__(self): + return 0 + _parseBool(textOf(self._c_node)) + + def __float__(self): + return 0.0 + _parseBool(textOf(self._c_node)) + + def __richcmp__(self, other, int op): + return _richcmpPyvals(self, other, op) + + def __hash__(self): + return hash(_parseBool(textOf(self._c_node))) + + def __str__(self): + return unicode(_parseBool(textOf(self._c_node))) + + def __repr__(self): + return repr(_parseBool(textOf(self._c_node))) + + @property + def pyval(self): + return _parseBool(textOf(self._c_node)) + + +cdef _checkBool(s): + cdef int value = -1 + if s is not None: + value = __parseBoolAsInt(s) + if value == -1: + raise ValueError + + +cdef bint _parseBool(s) except -1: + cdef int value + if s is None: + return False + value = __parseBoolAsInt(s) + if value == -1: + raise ValueError, f"Invalid boolean value: '{s}'" + return value + + +cdef inline int __parseBoolAsInt(text) except -2: + if text == 'false': + return 0 + elif text == 'true': + return 1 + elif text == '0': + return 0 + elif text == '1': + return 1 + return -1 + + +cdef object _parseNumber(NumberElement element): + return element._parse_value(textOf(element._c_node)) + + +cdef enum NumberParserState: + NPS_SPACE_PRE = 0 + NPS_SIGN = 1 + NPS_DIGITS = 2 + NPS_POINT_LEAD = 3 + NPS_POINT = 4 + NPS_FRACTION = 5 + NPS_EXP = 6 + NPS_EXP_SIGN = 7 + NPS_DIGITS_EXP = 8 + NPS_SPACE_TAIL = 9 + NPS_INF1 = 20 + NPS_INF2 = 21 + NPS_INF3 = 22 + NPS_NAN1 = 23 + NPS_NAN2 = 24 + NPS_NAN3 = 25 + NPS_ERROR = 99 + + +ctypedef fused bytes_unicode: + bytes + unicode + + +cdef _checkNumber(bytes_unicode s, bint allow_float): + cdef Py_UCS4 c + cdef NumberParserState state = NPS_SPACE_PRE + + for c in s: + if c in '0123456789': + if state in (NPS_DIGITS, NPS_FRACTION, NPS_DIGITS_EXP): + pass + elif state in (NPS_SPACE_PRE, NPS_SIGN): + state = NPS_DIGITS + elif state in (NPS_POINT_LEAD, NPS_POINT): + state = NPS_FRACTION + elif state in (NPS_EXP, NPS_EXP_SIGN): + state = NPS_DIGITS_EXP + else: + state = NPS_ERROR + else: + if c == '.': + if state in (NPS_SPACE_PRE, NPS_SIGN): + state = NPS_POINT_LEAD + elif state == NPS_DIGITS: + state = NPS_POINT + else: + state = NPS_ERROR + if not allow_float: + state = NPS_ERROR + elif c in '-+': + if state == NPS_SPACE_PRE: + state = NPS_SIGN + elif state == NPS_EXP: + state = NPS_EXP_SIGN + else: + state = NPS_ERROR + elif c == 'E': + if state in (NPS_DIGITS, NPS_POINT, NPS_FRACTION): + state = NPS_EXP + else: + state = NPS_ERROR + if not allow_float: + state = NPS_ERROR + # Allow INF and NaN. XMLSchema requires case, we don't, like Python. + elif c in 'iI': + state = NPS_INF1 if allow_float and state in (NPS_SPACE_PRE, NPS_SIGN) else NPS_ERROR + elif c in 'fF': + state = NPS_INF3 if state == NPS_INF2 else NPS_ERROR + elif c in 'aA': + state = NPS_NAN2 if state == NPS_NAN1 else NPS_ERROR + elif c in 'nN': + # Python also allows [+-]NaN, so let's accept that. + if state in (NPS_SPACE_PRE, NPS_SIGN): + state = NPS_NAN1 if allow_float else NPS_ERROR + elif state == NPS_NAN2: + state = NPS_NAN3 + elif state == NPS_INF1: + state = NPS_INF2 + else: + state = NPS_ERROR + # Allow spaces around text values. 
+ else: + if c.isspace() if (bytes_unicode is unicode) else c in b'\x09\x0a\x0b\x0c\x0d\x20': + if state in (NPS_SPACE_PRE, NPS_SPACE_TAIL): + pass + elif state in (NPS_DIGITS, NPS_POINT, NPS_FRACTION, NPS_DIGITS_EXP, NPS_INF3, NPS_NAN3): + state = NPS_SPACE_TAIL + else: + state = NPS_ERROR + else: + state = NPS_ERROR + + if state == NPS_ERROR: + break + + if state not in (NPS_DIGITS, NPS_FRACTION, NPS_POINT, NPS_DIGITS_EXP, NPS_INF3, NPS_NAN3, NPS_SPACE_TAIL): + raise ValueError + + +cdef _checkInt(s): + return _checkNumber(s, allow_float=False) + + +cdef _checkFloat(s): + return _checkNumber(s, allow_float=True) + + +cdef object _strValueOf(obj): + if python._isString(obj): + return obj + if isinstance(obj, _Element): + return textOf((<_Element>obj)._c_node) or '' + if obj is None: + return '' + return unicode(obj) + + +cdef object _numericValueOf(obj): + if isinstance(obj, NumberElement): + return _parseNumber(obj) + try: + # not always numeric, but Python will raise the right exception + return obj.pyval + except AttributeError: + pass + return obj + + +cdef _richcmpPyvals(left, right, int op): + left = getattr(left, 'pyval', left) + right = getattr(right, 'pyval', right) + return python.PyObject_RichCompare(left, right, op) + + +################################################################################ +# Python type registry + +cdef class PyType: + """PyType(self, name, type_check, type_class, stringify=None) + User defined type. + + Named type that contains a type check function, a type class that + inherits from ObjectifiedDataElement and an optional "stringification" + function. The type check must take a string as argument and raise + ValueError or TypeError if it cannot handle the string value. It may be + None in which case it is not considered for type guessing. For registered + named types, the 'stringify' function (or unicode() if None) is used to + convert a Python object with type name 'name' to the string representation + stored in the XML tree. + + Example:: + + PyType('int', int, MyIntClass).register() + + Note that the order in which types are registered matters. The first + matching type will be used. + """ + cdef readonly object name + cdef readonly object type_check + cdef readonly object stringify + cdef object _type + cdef list _schema_types + def __init__(self, name, type_check, type_class, stringify=None): + if isinstance(name, bytes): + name = (name).decode('ascii') + elif not isinstance(name, unicode): + raise TypeError, "Type name must be a string" + if type_check is not None and not callable(type_check): + raise TypeError, "Type check function must be callable (or None)" + if name != TREE_PYTYPE_NAME and \ + not issubclass(type_class, ObjectifiedDataElement): + raise TypeError, \ + "Data classes must inherit from ObjectifiedDataElement" + self.name = name + self._type = type_class + self.type_check = type_check + if stringify is None: + stringify = unicode + self.stringify = stringify + self._schema_types = [] + + def __repr__(self): + return "PyType(%s, %s)" % (self.name, self._type.__name__) + + def register(self, before=None, after=None): + """register(self, before=None, after=None) + + Register the type. + + The additional keyword arguments 'before' and 'after' accept a + sequence of type names that must appear before/after the new type in + the type list. If any of them is not currently known, it is simply + ignored. Raises ValueError if the dependencies cannot be fulfilled. 
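+
+        Illustrative sketch of registering a user defined type (adapted from
+        the lxml.objectify documentation; ``ChristmasDate`` and
+        ``checkChristmasDate`` are example names)::
+
+            >>> from lxml import objectify
+            >>> class ChristmasDate(objectify.ObjectifiedDataElement):
+            ...     def call_santa(self):
+            ...         print("Ho ho ho!")
+            >>> def checkChristmasDate(s):
+            ...     if not s.startswith("24.12."):
+            ...         raise ValueError
+            >>> objectify.PyType("date", checkChristmasDate, ChristmasDate).register()
+            >>> root = objectify.fromstring("<root><date>24.12.2016</date></root>")
+            >>> root.date.call_santa()
+            Ho ho ho!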
+ """ + if self.name == TREE_PYTYPE_NAME: + raise ValueError, "Cannot register tree type" + if self.type_check is not None: + for item in _TYPE_CHECKS: + if item[0] is self.type_check: + _TYPE_CHECKS.remove(item) + break + entry = (self.type_check, self) + first_pos = 0 + last_pos = -1 + if before or after: + if before is None: + before = () + elif after is None: + after = () + for i, (check, pytype) in enumerate(_TYPE_CHECKS): + if last_pos == -1 and pytype.name in before: + last_pos = i + if pytype.name in after: + first_pos = i+1 + if last_pos == -1: + _TYPE_CHECKS.append(entry) + elif first_pos > last_pos: + raise ValueError, "inconsistent before/after dependencies" + else: + _TYPE_CHECKS.insert(last_pos, entry) + + _PYTYPE_DICT[self.name] = self + for xs_type in self._schema_types: + _SCHEMA_TYPE_DICT[xs_type] = self + + def unregister(self): + "unregister(self)" + if _PYTYPE_DICT.get(self.name) is self: + del _PYTYPE_DICT[self.name] + for xs_type, pytype in list(_SCHEMA_TYPE_DICT.items()): + if pytype is self: + del _SCHEMA_TYPE_DICT[xs_type] + if self.type_check is None: + return + try: + _TYPE_CHECKS.remove( (self.type_check, self) ) + except ValueError: + pass + + property xmlSchemaTypes: + """The list of XML Schema datatypes this Python type maps to. + + Note that this must be set before registering the type! + """ + def __get__(self): + return self._schema_types + def __set__(self, types): + self._schema_types = list(map(unicode, types)) + + +cdef dict _PYTYPE_DICT = {} +cdef dict _SCHEMA_TYPE_DICT = {} +cdef list _TYPE_CHECKS = [] + +cdef unicode _xml_bool(value): + return "true" if value else "false" + +cdef unicode _xml_float(value): + if _float_is_inf(value): + if value > 0: + return "INF" + return "-INF" + if _float_is_nan(value): + return "NaN" + return unicode(repr(value)) + +cdef _pytypename(obj): + return "str" if python._isString(obj) else _typename(obj) + +def pytypename(obj): + """pytypename(obj) + + Find the name of the corresponding PyType for a Python object. + """ + return _pytypename(obj) + +cdef _registerPyTypes(): + pytype = PyType('int', _checkInt, IntElement) # wraps functions for Python + pytype.xmlSchemaTypes = ("integer", "int", "short", "byte", "unsignedShort", + "unsignedByte", "nonPositiveInteger", + "negativeInteger", "long", "nonNegativeInteger", + "unsignedLong", "unsignedInt", "positiveInteger",) + pytype.register() + + # 'long' type just for backwards compatibility + pytype = PyType('long', None, IntElement) + pytype.register() + + pytype = PyType('float', _checkFloat, FloatElement, _xml_float) # wraps functions for Python + pytype.xmlSchemaTypes = ("double", "float") + pytype.register() + + pytype = PyType('bool', _checkBool, BoolElement, _xml_bool) # wraps functions for Python + pytype.xmlSchemaTypes = ("boolean",) + pytype.register() + + pytype = PyType('str', None, StringElement) + pytype.xmlSchemaTypes = ("string", "normalizedString", "token", "language", + "Name", "NCName", "ID", "IDREF", "ENTITY", + "NMTOKEN", ) + pytype.register() + + # since lxml 2.0 + pytype = PyType('NoneType', None, NoneElement) + pytype.register() + + # backwards compatibility + pytype = PyType('none', None, NoneElement) + pytype.register() + +# non-registered PyType for inner tree elements +cdef PyType TREE_PYTYPE = PyType(TREE_PYTYPE_NAME, None, ObjectifiedElement) + +_registerPyTypes() + +def getRegisteredTypes(): + """getRegisteredTypes() + + Returns a list of the currently registered PyType objects. 
+ + To add a new type, retrieve this list and call unregister() for all + entries. Then add the new type at a suitable position (possibly replacing + an existing one) and call register() for all entries. + + This is necessary if the new type interferes with the type check functions + of existing ones (normally only int/float/bool) and must the tried before + other types. To add a type that is not yet parsable by the current type + check functions, you can simply register() it, which will append it to the + end of the type list. + """ + cdef list types = [] + cdef set known = set() + for check, pytype in _TYPE_CHECKS: + name = pytype.name + if name not in known: + known.add(name) + types.append(pytype) + for pytype in _PYTYPE_DICT.values(): + name = pytype.name + if name not in known: + known.add(name) + types.append(pytype) + return types + +cdef PyType _guessPyType(value, PyType defaulttype): + if value is None: + return None + for type_check, tested_pytype in _TYPE_CHECKS: + try: + type_check(value) + return tested_pytype + except IGNORABLE_ERRORS: + # could not be parsed as the specified type => ignore + pass + return defaulttype + +cdef object _guessElementClass(tree.xmlNode* c_node): + value = textOf(c_node) + if value is None: + return None + if value == '': + return StringElement + + for type_check, pytype in _TYPE_CHECKS: + try: + type_check(value) + return (pytype)._type + except IGNORABLE_ERRORS: + pass + return None + +################################################################################ +# adapted ElementMaker supports registered PyTypes + +@cython.final +@cython.internal +cdef class _ObjectifyElementMakerCaller: + cdef object _tag + cdef object _nsmap + cdef object _element_factory + cdef bint _annotate + + def __call__(self, *children, **attrib): + "__call__(self, *children, **attrib)" + cdef _ObjectifyElementMakerCaller elementMaker + cdef _Element element + cdef _Element childElement + cdef bint has_children + cdef bint has_string_value + if self._element_factory is None: + element = _makeElement(self._tag, None, attrib, self._nsmap) + else: + element = self._element_factory(self._tag, attrib, self._nsmap) + + pytype_name = None + has_children = False + has_string_value = False + for child in children: + if child is None: + if len(children) == 1: + cetree.setAttributeValue( + element, XML_SCHEMA_INSTANCE_NIL_ATTR, "true") + elif python._isString(child): + _add_text(element, child) + has_string_value = True + elif isinstance(child, _Element): + cetree.appendChildToElement(element, <_Element>child) + has_children = True + elif isinstance(child, _ObjectifyElementMakerCaller): + elementMaker = <_ObjectifyElementMakerCaller>child + if elementMaker._element_factory is None: + cetree.makeSubElement(element, elementMaker._tag, + None, None, None, None) + else: + childElement = elementMaker._element_factory( + elementMaker._tag) + cetree.appendChildToElement(element, childElement) + has_children = True + elif isinstance(child, dict): + for name, value in child.items(): + # keyword arguments in attrib take precedence + if name in attrib: + continue + pytype = _PYTYPE_DICT.get(_typename(value)) + if pytype is not None: + value = (pytype).stringify(value) + elif not python._isString(value): + value = unicode(value) + cetree.setAttributeValue(element, name, value) + else: + if pytype_name is not None: + # concatenation always makes the result a string + has_string_value = True + pytype_name = _typename(child) + pytype = _PYTYPE_DICT.get(_typename(child)) + if pytype is not 
None: + _add_text(element, (pytype).stringify(child)) + else: + has_string_value = True + child = unicode(child) + _add_text(element, child) + + if self._annotate and not has_children: + if has_string_value: + cetree.setAttributeValue(element, PYTYPE_ATTRIBUTE, "str") + elif pytype_name is not None: + cetree.setAttributeValue(element, PYTYPE_ATTRIBUTE, pytype_name) + + return element + +cdef _add_text(_Element elem, text): + # add text to the tree in construction, either as element text or + # tail text, depending on the current tree state + cdef tree.xmlNode* c_child + c_child = cetree.findChildBackwards(elem._c_node, 0) + if c_child is not NULL: + old = cetree.tailOf(c_child) + if old is not None: + text = old + text + cetree.setTailText(c_child, text) + else: + old = cetree.textOf(elem._c_node) + if old is not None: + text = old + text + cetree.setNodeText(elem._c_node, text) + +cdef class ElementMaker: + """ElementMaker(self, namespace=None, nsmap=None, annotate=True, makeelement=None) + + An ElementMaker that can be used for constructing trees. + + Example:: + + >>> M = ElementMaker(annotate=False) + >>> attributes = {'class': 'par'} + >>> html = M.html( M.body( M.p('hello', attributes, M.br, 'objectify', style="font-weight: bold") ) ) + + >>> from lxml.etree import tostring + >>> print(tostring(html, method='html').decode('ascii')) +

+      <html><body><p style="font-weight: bold" class="par">hello<br>objectify</p></body></html>
+ + To create tags that are not valid Python identifiers, call the factory + directly and pass the tag name as first argument:: + + >>> root = M('tricky-tag', 'some text') + >>> print(root.tag) + tricky-tag + >>> print(root.text) + some text + + Note that this module has a predefined ElementMaker instance called ``E``. + """ + cdef object _makeelement + cdef object _namespace + cdef object _nsmap + cdef bint _annotate + cdef dict _cache + def __init__(self, *, namespace=None, nsmap=None, annotate=True, + makeelement=None): + if nsmap is None: + nsmap = _DEFAULT_NSMAP if annotate else {} + self._nsmap = nsmap + self._namespace = None if namespace is None else "{%s}" % namespace + self._annotate = annotate + if makeelement is not None: + if not callable(makeelement): + raise TypeError( + f"argument of 'makeelement' parameter must be callable, got {type(makeelement)}") + self._makeelement = makeelement + else: + self._makeelement = None + self._cache = {} + + @cython.final + cdef _build_element_maker(self, tag, bint caching): + cdef _ObjectifyElementMakerCaller element_maker + element_maker = _ObjectifyElementMakerCaller.__new__(_ObjectifyElementMakerCaller) + if self._namespace is not None and tag[0] != "{": + element_maker._tag = self._namespace + tag + else: + element_maker._tag = tag + element_maker._nsmap = self._nsmap + element_maker._annotate = self._annotate + element_maker._element_factory = self._makeelement + if caching: + if len(self._cache) > 200: + self._cache.clear() + self._cache[tag] = element_maker + return element_maker + + def __getattr__(self, tag): + element_maker = self._cache.get(tag) + if element_maker is None: + return self._build_element_maker(tag, caching=True) + return element_maker + + def __call__(self, tag, *args, **kwargs): + element_maker = self._cache.get(tag) + if element_maker is None: + element_maker = self._build_element_maker( + tag, caching=not is_special_method(tag)) + return element_maker(*args, **kwargs) + +################################################################################ +# Recursive element dumping + +cdef bint __RECURSIVE_STR = 0 # default: off + +def enable_recursive_str(on=True): + """enable_recursive_str(on=True) + + Enable a recursively generated tree representation for str(element), + based on objectify.dump(element). + """ + global __RECURSIVE_STR + __RECURSIVE_STR = on + +def dump(_Element element not None): + """dump(_Element element not None) + + Return a recursively generated string representation of an element. 
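+
+    Small illustrative sketch of the kind of output produced (assuming the
+    default objectify parser and no annotations on the tree)::
+
+        >>> from lxml import objectify
+        >>> root = objectify.fromstring("<root><a>5</a><b>text</b></root>")
+        >>> print(objectify.dump(root))
+        root = None [ObjectifiedElement]
+            a = 5 [IntElement]
+            b = 'text' [StringElement]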
+ """ + return _dump(element, 0) + +cdef object _dump(_Element element, int indent): + indentstr = " " * indent + if isinstance(element, ObjectifiedDataElement): + value = repr(element) + else: + value = textOf(element._c_node) + if value is not None: + if not value.strip(): + value = None + else: + value = repr(value) + result = f"{indentstr}{element.tag} = {value} [{_typename(element)}]\n" + xsi_ns = "{%s}" % XML_SCHEMA_INSTANCE_NS + pytype_ns = "{%s}" % PYTYPE_NAMESPACE + for name, value in sorted(cetree.iterattributes(element, 3)): + if '{' in name: + if name == PYTYPE_ATTRIBUTE: + if value == TREE_PYTYPE_NAME: + continue + else: + name = name.replace(pytype_ns, 'py:') + name = name.replace(xsi_ns, 'xsi:') + result += f"{indentstr} * {name} = {value!r}\n" + + indent += 1 + for child in element.iterchildren(): + result += _dump(child, indent) + if indent == 1: + return result[:-1] # strip last '\n' + else: + return result + + +################################################################################ +# Pickle support for objectified ElementTree + +def __unpickleElementTree(data): + return etree.ElementTree(fromstring(data)) + +cdef _setupPickle(elementTreeReduceFunction): + import copyreg + copyreg.pickle(etree._ElementTree, + elementTreeReduceFunction, __unpickleElementTree) + +def pickleReduceElementTree(obj): + return __unpickleElementTree, (etree.tostring(obj),) + +_setupPickle(pickleReduceElementTree) +del pickleReduceElementTree + +################################################################################ +# Element class lookup + +cdef class ObjectifyElementClassLookup(ElementClassLookup): + """ObjectifyElementClassLookup(self, tree_class=None, empty_data_class=None) + Element class lookup method that uses the objectify classes. + """ + cdef object empty_data_class + cdef object tree_class + def __init__(self, tree_class=None, empty_data_class=None): + """Lookup mechanism for objectify. + + The default Element classes can be replaced by passing subclasses of + ObjectifiedElement and ObjectifiedDataElement as keyword arguments. + 'tree_class' defines inner tree classes (defaults to + ObjectifiedElement), 'empty_data_class' defines the default class for + empty data elements (defaults to StringElement). 
+ """ + self._lookup_function = _lookupElementClass + if tree_class is None: + tree_class = ObjectifiedElement + self.tree_class = tree_class + if empty_data_class is None: + empty_data_class = StringElement + self.empty_data_class = empty_data_class + +cdef object _lookupElementClass(state, _Document doc, tree.xmlNode* c_node): + cdef ObjectifyElementClassLookup lookup + lookup = state + # if element has children => no data class + if cetree.hasChild(c_node): + return lookup.tree_class + + # if element is defined as xsi:nil, return NoneElement class + if "true" == cetree.attributeValueFromNsName( + c_node, _XML_SCHEMA_INSTANCE_NS, "nil"): + return NoneElement + + # check for Python type hint + value = cetree.attributeValueFromNsName( + c_node, _PYTYPE_NAMESPACE, _PYTYPE_ATTRIBUTE_NAME) + if value is not None: + if value == TREE_PYTYPE_NAME: + return lookup.tree_class + py_type = _PYTYPE_DICT.get(value) + if py_type is not None: + return py_type._type + # unknown 'pyval' => try to figure it out ourself, just go on + + # check for XML Schema type hint + value = cetree.attributeValueFromNsName( + c_node, _XML_SCHEMA_INSTANCE_NS, "type") + + if value is not None: + schema_type = _SCHEMA_TYPE_DICT.get(value) + if schema_type is None and ':' in value: + prefix, value = value.split(':', 1) + schema_type = _SCHEMA_TYPE_DICT.get(value) + if schema_type is not None: + return schema_type._type + + # otherwise determine class based on text content type + el_class = _guessElementClass(c_node) + if el_class is not None: + return el_class + + # if element is a root node => default to tree node + if c_node.parent is NULL or not tree._isElement(c_node.parent): + return lookup.tree_class + + return lookup.empty_data_class + + +################################################################################ +# Type annotations + +cdef PyType _check_type(tree.xmlNode* c_node, PyType pytype): + if pytype is None: + return None + value = textOf(c_node) + try: + pytype.type_check(value) + return pytype + except IGNORABLE_ERRORS: + # could not be parsed as the specified type => ignore + pass + return None + +def pyannotate(element_or_tree, *, ignore_old=False, ignore_xsi=False, + empty_pytype=None): + """pyannotate(element_or_tree, ignore_old=False, ignore_xsi=False, empty_pytype=None) + + Recursively annotates the elements of an XML tree with 'pytype' + attributes. + + If the 'ignore_old' keyword argument is True (the default), current 'pytype' + attributes will be ignored and replaced. Otherwise, they will be checked + and only replaced if they no longer fit the current text value. + + Setting the keyword argument ``ignore_xsi`` to True makes the function + additionally ignore existing ``xsi:type`` annotations. The default is to + use them as a type hint. + + The default annotation of empty elements can be set with the + ``empty_pytype`` keyword argument. The default is not to annotate empty + elements. Pass 'str', for example, to make string values the default. + """ + cdef _Element element + element = cetree.rootNodeOrRaise(element_or_tree) + _annotate(element, 0, 1, ignore_xsi, ignore_old, None, empty_pytype) + +def xsiannotate(element_or_tree, *, ignore_old=False, ignore_pytype=False, + empty_type=None): + """xsiannotate(element_or_tree, ignore_old=False, ignore_pytype=False, empty_type=None) + + Recursively annotates the elements of an XML tree with 'xsi:type' + attributes. + + If the 'ignore_old' keyword argument is True (the default), current + 'xsi:type' attributes will be ignored and replaced. 
Otherwise, they will be + checked and only replaced if they no longer fit the current text value. + + Note that the mapping from Python types to XSI types is usually ambiguous. + Currently, only the first XSI type name in the corresponding PyType + definition will be used for annotation. Thus, you should consider naming + the widest type first if you define additional types. + + Setting the keyword argument ``ignore_pytype`` to True makes the function + additionally ignore existing ``pytype`` annotations. The default is to + use them as a type hint. + + The default annotation of empty elements can be set with the + ``empty_type`` keyword argument. The default is not to annotate empty + elements. Pass 'string', for example, to make string values the default. + """ + cdef _Element element + element = cetree.rootNodeOrRaise(element_or_tree) + _annotate(element, 1, 0, ignore_old, ignore_pytype, empty_type, None) + +def annotate(element_or_tree, *, ignore_old=True, ignore_xsi=False, + empty_pytype=None, empty_type=None, annotate_xsi=0, + annotate_pytype=1): + """annotate(element_or_tree, ignore_old=True, ignore_xsi=False, empty_pytype=None, empty_type=None, annotate_xsi=0, annotate_pytype=1) + + Recursively annotates the elements of an XML tree with 'xsi:type' + and/or 'py:pytype' attributes. + + If the 'ignore_old' keyword argument is True (the default), current + 'py:pytype' attributes will be ignored for the type annotation. Set to False + if you want reuse existing 'py:pytype' information (iff appropriate for the + element text value). + + If the 'ignore_xsi' keyword argument is False (the default), existing + 'xsi:type' attributes will be used for the type annotation, if they fit the + element text values. + + Note that the mapping from Python types to XSI types is usually ambiguous. + Currently, only the first XSI type name in the corresponding PyType + definition will be used for annotation. Thus, you should consider naming + the widest type first if you define additional types. + + The default 'py:pytype' annotation of empty elements can be set with the + ``empty_pytype`` keyword argument. Pass 'str', for example, to make + string values the default. + + The default 'xsi:type' annotation of empty elements can be set with the + ``empty_type`` keyword argument. The default is not to annotate empty + elements. Pass 'string', for example, to make string values the default. + + The keyword arguments 'annotate_xsi' (default: 0) and 'annotate_pytype' + (default: 1) control which kind(s) of annotation to use. 
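+
+    Illustrative sketch (assuming the default parser and the standard
+    registered PyTypes; ``PYTYPE_ATTRIBUTE`` is the fully qualified name of
+    the 'py:pytype' attribute)::
+
+        >>> from lxml import objectify
+        >>> root = objectify.fromstring("<root><a>5</a><b>text</b></root>")
+        >>> objectify.annotate(root)
+        >>> root.a.get(objectify.PYTYPE_ATTRIBUTE)
+        'int'
+        >>> root.b.get(objectify.PYTYPE_ATTRIBUTE)
+        'str'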
+ """ + cdef _Element element + element = cetree.rootNodeOrRaise(element_or_tree) + _annotate(element, annotate_xsi, annotate_pytype, ignore_xsi, + ignore_old, empty_type, empty_pytype) + + +cdef _annotate(_Element element, bint annotate_xsi, bint annotate_pytype, + bint ignore_xsi, bint ignore_pytype, + empty_type_name, empty_pytype_name): + cdef _Document doc + cdef tree.xmlNode* c_node + cdef PyType empty_pytype, StrType, NoneType + + if not annotate_xsi and not annotate_pytype: + return + + if empty_type_name is not None: + if isinstance(empty_type_name, bytes): + empty_type_name = (empty_type_name).decode("ascii") + empty_pytype = _SCHEMA_TYPE_DICT.get(empty_type_name) + elif empty_pytype_name is not None: + if isinstance(empty_pytype_name, bytes): + empty_pytype_name = (empty_pytype_name).decode("ascii") + empty_pytype = _PYTYPE_DICT.get(empty_pytype_name) + else: + empty_pytype = None + + StrType = _PYTYPE_DICT.get('str') + NoneType = _PYTYPE_DICT.get('NoneType') + + doc = element._doc + c_node = element._c_node + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_node, c_node, 1) + if c_node.type == tree.XML_ELEMENT_NODE: + _annotate_element(c_node, doc, annotate_xsi, annotate_pytype, + ignore_xsi, ignore_pytype, + empty_type_name, empty_pytype, StrType, NoneType) + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + +cdef int _annotate_element(tree.xmlNode* c_node, _Document doc, + bint annotate_xsi, bint annotate_pytype, + bint ignore_xsi, bint ignore_pytype, + empty_type_name, PyType empty_pytype, + PyType StrType, PyType NoneType) except -1: + cdef tree.xmlNs* c_ns + cdef PyType pytype = None + typename = None + istree = 0 + + # if element is defined as xsi:nil, represent it as None + if cetree.attributeValueFromNsName( + c_node, _XML_SCHEMA_INSTANCE_NS, "nil") == "true": + pytype = NoneType + + if pytype is None and not ignore_xsi: + # check that old xsi type value is valid + typename = cetree.attributeValueFromNsName( + c_node, _XML_SCHEMA_INSTANCE_NS, "type") + if typename is not None: + pytype = _SCHEMA_TYPE_DICT.get(typename) + if pytype is None and ':' in typename: + prefix, typename = typename.split(':', 1) + pytype = _SCHEMA_TYPE_DICT.get(typename) + if pytype is not None and pytype is not StrType: + # StrType does not have a typecheck but is the default + # anyway, so just accept it if given as type + # information + pytype = _check_type(c_node, pytype) + if pytype is None: + typename = None + + if pytype is None and not ignore_pytype: + # check that old pytype value is valid + old_pytypename = cetree.attributeValueFromNsName( + c_node, _PYTYPE_NAMESPACE, _PYTYPE_ATTRIBUTE_NAME) + if old_pytypename is not None: + if old_pytypename == TREE_PYTYPE_NAME: + if not cetree.hasChild(c_node): + # only case where we should keep it, + # everything else is clear enough + pytype = TREE_PYTYPE + else: + if old_pytypename == 'none': + # transition from lxml 1.x + old_pytypename = "NoneType" + pytype = _PYTYPE_DICT.get(old_pytypename) + if pytype is not None and pytype is not StrType: + # StrType does not have a typecheck but is the + # default anyway, so just accept it if given as + # type information + pytype = _check_type(c_node, pytype) + + if pytype is None: + # try to guess type + if not cetree.hasChild(c_node): + # element has no children => data class + pytype = _guessPyType(textOf(c_node), StrType) + else: + istree = 1 + + if pytype is None: + # use default type for empty elements + if cetree.hasText(c_node): + pytype = StrType + else: + pytype = empty_pytype + if typename is None: + typename = 
empty_type_name + + if pytype is not None: + if typename is None: + if not istree: + if pytype._schema_types: + # pytype->xsi:type is a 1:n mapping + # simply take the first + typename = pytype._schema_types[0] + elif typename not in pytype._schema_types: + typename = pytype._schema_types[0] + + if annotate_xsi: + if typename is None or istree: + cetree.delAttributeFromNsName( + c_node, _XML_SCHEMA_INSTANCE_NS, "type") + else: + # update or create attribute + typename_utf8 = cetree.utf8(typename) + c_ns = cetree.findOrBuildNodeNsPrefix( + doc, c_node, _XML_SCHEMA_NS, 'xsd') + if c_ns is not NULL: + if b':' in typename_utf8: + prefix, name = typename_utf8.split(b':', 1) + if c_ns.prefix is NULL or c_ns.prefix[0] == c'\0': + typename_utf8 = name + elif tree.xmlStrcmp(_xcstr(prefix), c_ns.prefix) != 0: + typename_utf8 = (c_ns.prefix) + b':' + name + elif c_ns.prefix is not NULL and c_ns.prefix[0] != c'\0': + typename_utf8 = (c_ns.prefix) + b':' + typename_utf8 + c_ns = cetree.findOrBuildNodeNsPrefix( + doc, c_node, _XML_SCHEMA_INSTANCE_NS, 'xsi') + tree.xmlSetNsProp(c_node, c_ns, "type", _xcstr(typename_utf8)) + + if annotate_pytype: + if pytype is None: + # delete attribute if it exists + cetree.delAttributeFromNsName( + c_node, _PYTYPE_NAMESPACE, _PYTYPE_ATTRIBUTE_NAME) + else: + # update or create attribute + c_ns = cetree.findOrBuildNodeNsPrefix( + doc, c_node, _PYTYPE_NAMESPACE, 'py') + pytype_name = cetree.utf8(pytype.name) + tree.xmlSetNsProp(c_node, c_ns, _PYTYPE_ATTRIBUTE_NAME, + _xcstr(pytype_name)) + if pytype is NoneType: + c_ns = cetree.findOrBuildNodeNsPrefix( + doc, c_node, _XML_SCHEMA_INSTANCE_NS, 'xsi') + tree.xmlSetNsProp(c_node, c_ns, "nil", "true") + + return 0 + +cdef object _strip_attributes = etree.strip_attributes +cdef object _cleanup_namespaces = etree.cleanup_namespaces + +def deannotate(element_or_tree, *, bint pytype=True, bint xsi=True, + bint xsi_nil=False, bint cleanup_namespaces=False): + """deannotate(element_or_tree, pytype=True, xsi=True, xsi_nil=False, cleanup_namespaces=False) + + Recursively de-annotate the elements of an XML tree by removing 'py:pytype' + and/or 'xsi:type' attributes and/or 'xsi:nil' attributes. + + If the 'pytype' keyword argument is True (the default), 'py:pytype' + attributes will be removed. If the 'xsi' keyword argument is True (the + default), 'xsi:type' attributes will be removed. + If the 'xsi_nil' keyword argument is True (default: False), 'xsi:nil' + attributes will be removed. + + Note that this does not touch the namespace declarations by + default. If you want to remove unused namespace declarations from + the tree, pass the option ``cleanup_namespaces=True``. 
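+
+    Illustrative sketch of a round trip through annotate()/deannotate(),
+    assuming no other annotations are present in the input::
+
+        >>> from lxml import etree, objectify
+        >>> root = objectify.fromstring("<root><a>5</a></root>")
+        >>> objectify.annotate(root)
+        >>> objectify.deannotate(root, cleanup_namespaces=True)
+        >>> print(etree.tostring(root).decode("ascii"))
+        <root><a>5</a></root>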
+ """ + cdef list attribute_names = [] + + if pytype: + attribute_names.append(PYTYPE_ATTRIBUTE) + if xsi: + attribute_names.append(XML_SCHEMA_INSTANCE_TYPE_ATTR) + if xsi_nil: + attribute_names.append(XML_SCHEMA_INSTANCE_NIL_ATTR) + + _strip_attributes(element_or_tree, *attribute_names) + if cleanup_namespaces: + _cleanup_namespaces(element_or_tree) + +################################################################################ +# Module level parser setup + +cdef object __DEFAULT_PARSER +__DEFAULT_PARSER = etree.XMLParser(remove_blank_text=True) +__DEFAULT_PARSER.set_element_class_lookup( ObjectifyElementClassLookup() ) + +cdef object objectify_parser +objectify_parser = __DEFAULT_PARSER + +def set_default_parser(new_parser = None): + """set_default_parser(new_parser = None) + + Replace the default parser used by objectify's Element() and + fromstring() functions. + + The new parser must be an etree.XMLParser. + + Call without arguments to reset to the original parser. + """ + global objectify_parser + if new_parser is None: + objectify_parser = __DEFAULT_PARSER + elif isinstance(new_parser, etree.XMLParser): + objectify_parser = new_parser + else: + raise TypeError, "parser must inherit from lxml.etree.XMLParser" + +def makeparser(**kw): + """makeparser(remove_blank_text=True, **kw) + + Create a new XML parser for objectify trees. + + You can pass all keyword arguments that are supported by + ``etree.XMLParser()``. Note that this parser defaults to removing + blank text. You can disable this by passing the + ``remove_blank_text`` boolean keyword option yourself. + """ + if 'remove_blank_text' not in kw: + kw['remove_blank_text'] = True + parser = etree.XMLParser(**kw) + parser.set_element_class_lookup( ObjectifyElementClassLookup() ) + return parser + +cdef _Element _makeElement(tag, text, attrib, nsmap): + return cetree.makeElement(tag, None, objectify_parser, text, None, attrib, nsmap) + +################################################################################ +# Module level factory functions + +cdef object _fromstring +_fromstring = etree.fromstring + +SubElement = etree.SubElement + +def fromstring(xml, parser=None, *, base_url=None): + """fromstring(xml, parser=None, base_url=None) + + Objectify specific version of the lxml.etree fromstring() function + that uses the objectify parser. + + You can pass a different parser as second argument. + + The ``base_url`` keyword argument allows to set the original base URL of + the document to support relative Paths when looking up external entities + (DTD, XInclude, ...). + """ + if parser is None: + parser = objectify_parser + return _fromstring(xml, parser, base_url=base_url) + +def XML(xml, parser=None, *, base_url=None): + """XML(xml, parser=None, base_url=None) + + Objectify specific version of the lxml.etree XML() literal factory + that uses the objectify parser. + + You can pass a different parser as second argument. + + The ``base_url`` keyword argument allows to set the original base URL of + the document to support relative Paths when looking up external entities + (DTD, XInclude, ...). + """ + if parser is None: + parser = objectify_parser + return _fromstring(xml, parser, base_url=base_url) + +cdef object _parse +_parse = etree.parse + +def parse(f, parser=None, *, base_url=None): + """parse(f, parser=None, base_url=None) + + Parse a file or file-like object with the objectify parser. + + You can pass a different parser as second argument. 
+ + The ``base_url`` keyword allows setting a URL for the document + when parsing from a file-like object. This is needed when looking + up external entities (DTD, XInclude, ...) with relative paths. + """ + if parser is None: + parser = objectify_parser + return _parse(f, parser, base_url=base_url) + +cdef dict _DEFAULT_NSMAP = { + "py" : PYTYPE_NAMESPACE, + "xsi" : XML_SCHEMA_INSTANCE_NS, + "xsd" : XML_SCHEMA_NS +} + +E = ElementMaker() + +def Element(_tag, attrib=None, nsmap=None, *, _pytype=None, **_attributes): + """Element(_tag, attrib=None, nsmap=None, _pytype=None, **_attributes) + + Objectify specific version of the lxml.etree Element() factory that + always creates a structural (tree) element. + + NOTE: requires parser based element class lookup activated in lxml.etree! + """ + if attrib is not None: + if _attributes: + attrib = dict(attrib) + attrib.update(_attributes) + _attributes = attrib + if _pytype is None: + _pytype = TREE_PYTYPE_NAME + if nsmap is None: + nsmap = _DEFAULT_NSMAP + _attributes[PYTYPE_ATTRIBUTE] = _pytype + return _makeElement(_tag, None, _attributes, nsmap) + +def DataElement(_value, attrib=None, nsmap=None, *, _pytype=None, _xsi=None, + **_attributes): + """DataElement(_value, attrib=None, nsmap=None, _pytype=None, _xsi=None, **_attributes) + + Create a new element from a Python value and XML attributes taken from + keyword arguments or a dictionary passed as second argument. + + Automatically adds a 'pytype' attribute for the Python type of the value, + if the type can be identified. If '_pytype' or '_xsi' are among the + keyword arguments, they will be used instead. + + If the _value argument is an ObjectifiedDataElement instance, its py:pytype, + xsi:type and other attributes and nsmap are reused unless they are redefined + in attrib and/or keyword arguments. + """ + if nsmap is None: + nsmap = _DEFAULT_NSMAP + if attrib is not None and attrib: + if _attributes: + attrib = dict(attrib) + attrib.update(_attributes) + _attributes = attrib + if isinstance(_value, ObjectifiedElement): + if _pytype is None: + if _xsi is None and not _attributes and nsmap is _DEFAULT_NSMAP: + # special case: no change! 
+ return _value.__copy__() + if isinstance(_value, ObjectifiedDataElement): + # reuse existing nsmap unless redefined in nsmap parameter + temp = _value.nsmap + if temp is not None and temp: + temp = dict(temp) + temp.update(nsmap) + nsmap = temp + # reuse existing attributes unless redefined in attrib/_attributes + temp = _value.attrib + if temp is not None and temp: + temp = dict(temp) + temp.update(_attributes) + _attributes = temp + # reuse existing xsi:type or py:pytype attributes, unless provided as + # arguments + if _xsi is None and _pytype is None: + _xsi = _attributes.get(XML_SCHEMA_INSTANCE_TYPE_ATTR) + _pytype = _attributes.get(PYTYPE_ATTRIBUTE) + + if _xsi is not None: + if ':' in _xsi: + prefix, name = _xsi.split(':', 1) + ns = nsmap.get(prefix) + if ns != XML_SCHEMA_NS: + raise ValueError, "XSD types require the XSD namespace" + elif nsmap is _DEFAULT_NSMAP: + name = _xsi + _xsi = 'xsd:' + _xsi + else: + name = _xsi + for prefix, ns in nsmap.items(): + if ns == XML_SCHEMA_NS: + if prefix is not None and prefix: + _xsi = prefix + ':' + _xsi + break + else: + raise ValueError, "XSD types require the XSD namespace" + _attributes[XML_SCHEMA_INSTANCE_TYPE_ATTR] = _xsi + if _pytype is None: + # allow using unregistered or even wrong xsi:type names + py_type = _SCHEMA_TYPE_DICT.get(_xsi) + if py_type is None: + py_type = _SCHEMA_TYPE_DICT.get(name) + if py_type is not None: + _pytype = py_type.name + + if _pytype is None: + _pytype = _pytypename(_value) + + if _value is None and _pytype != "str": + _pytype = _pytype or "NoneType" + strval = None + elif python._isString(_value): + strval = _value + elif isinstance(_value, bool): + if _value: + strval = "true" + else: + strval = "false" + else: + py_type = _PYTYPE_DICT.get(_pytype) + stringify = unicode if py_type is None else py_type.stringify + strval = stringify(_value) + + if _pytype is not None: + if _pytype == "NoneType" or _pytype == "none": + strval = None + _attributes[XML_SCHEMA_INSTANCE_NIL_ATTR] = "true" + else: + # check if type information from arguments is valid + py_type = _PYTYPE_DICT.get(_pytype) + if py_type is not None: + if py_type.type_check is not None: + py_type.type_check(strval) + _attributes[PYTYPE_ATTRIBUTE] = _pytype + + return _makeElement("value", strval, _attributes, nsmap) + + +################################################################################ +# ObjectPath + +include "objectpath.pxi" diff --git a/venv/lib/python3.10/site-packages/lxml/objectpath.pxi b/venv/lib/python3.10/site-packages/lxml/objectpath.pxi new file mode 100644 index 0000000000000000000000000000000000000000..e562a365015830bfd3d24650d1109fe891c31039 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/objectpath.pxi @@ -0,0 +1,332 @@ +################################################################################ +# ObjectPath + +ctypedef struct _ObjectPath: + const_xmlChar* href + const_xmlChar* name + Py_ssize_t index + + +cdef object _NO_DEFAULT = object() + + +cdef class ObjectPath: + """ObjectPath(path) + Immutable object that represents a compiled object path. 
+ + Example for a path: 'root.child[1].{other}child[25]' + """ + cdef readonly object find + cdef list _path + cdef object _path_str + cdef _ObjectPath* _c_path + cdef Py_ssize_t _path_len + def __init__(self, path): + if python._isString(path): + self._path = _parse_object_path_string(path) + self._path_str = path + else: + self._path = _parse_object_path_list(path) + self._path_str = '.'.join(path) + self._path_len = len(self._path) + self._c_path = _build_object_path_segments(self._path) + self.find = self.__call__ + + def __dealloc__(self): + if self._c_path is not NULL: + python.lxml_free(self._c_path) + + def __str__(self): + return self._path_str + + def __call__(self, _Element root not None, *_default): + """Follow the attribute path in the object structure and return the + target attribute value. + + If it it not found, either returns a default value (if one was passed + as second argument) or raises AttributeError. + """ + if _default: + if len(_default) > 1: + raise TypeError, "invalid number of arguments: needs one or two" + default = _default[0] + else: + default = _NO_DEFAULT + return _find_object_path(root, self._c_path, self._path_len, default) + + def hasattr(self, _Element root not None): + "hasattr(self, root)" + try: + _find_object_path(root, self._c_path, self._path_len, _NO_DEFAULT) + except AttributeError: + return False + return True + + def setattr(self, _Element root not None, value): + """setattr(self, root, value) + + Set the value of the target element in a subtree. + + If any of the children on the path does not exist, it is created. + """ + _create_object_path(root, self._c_path, self._path_len, 1, value) + + def addattr(self, _Element root not None, value): + """addattr(self, root, value) + + Append a value to the target element in a subtree. + + If any of the children on the path does not exist, it is created. + """ + _create_object_path(root, self._c_path, self._path_len, 0, value) + + +cdef object __MATCH_PATH_SEGMENT = re.compile( + r"(\.?)\s*(?:\{([^}]*)\})?\s*([^.{}\[\]\s]+)\s*(?:\[\s*([-0-9]+)\s*\])?", + re.U).match + +cdef tuple _RELATIVE_PATH_SEGMENT = (None, None, 0) + + +cdef list _parse_object_path_string(_path): + """Parse object path string into a (ns, name, index) list. + """ + cdef bint has_dot + cdef unicode path + new_path = [] + if isinstance(_path, bytes): + path = (_path).decode('ascii') + elif type(_path) is not unicode: + path = unicode(_path) + else: + path = _path + path = path.strip() + if path == '.': + return [_RELATIVE_PATH_SEGMENT] + path_pos = 0 + while path: + match = __MATCH_PATH_SEGMENT(path, path_pos) + if match is None: + break + + dot, ns, name, index = match.groups() + index = int(index) if index else 0 + has_dot = dot == '.' + if not new_path: + if has_dot: + # path '.child' => ignore root + new_path.append(_RELATIVE_PATH_SEGMENT) + elif index: + raise ValueError, "index not allowed on root node" + elif not has_dot: + raise ValueError, "invalid path" + if ns is not None: + ns = python.PyUnicode_AsUTF8String(ns) + name = python.PyUnicode_AsUTF8String(name) + new_path.append( (ns, name, index) ) + + path_pos = match.end() + if not new_path or len(path) > path_pos: + raise ValueError, "invalid path" + return new_path + + +cdef list _parse_object_path_list(path): + """Parse object path sequence into a (ns, name, index) list. 
+ """ + new_path = [] + for item in path: + item = item.strip() + if not new_path and item == '': + # path '.child' => ignore root + ns = name = None + index = 0 + else: + ns, name = cetree.getNsTag(item) + c_name = _xcstr(name) + index_pos = tree.xmlStrchr(c_name, c'[') + if index_pos is NULL: + index = 0 + else: + index_end = tree.xmlStrchr(index_pos + 1, c']') + if index_end is NULL: + raise ValueError, "index must be enclosed in []" + index = int(index_pos[1:index_end - index_pos]) + if not new_path and index != 0: + raise ValueError, "index not allowed on root node" + name = c_name[:index_pos - c_name] + new_path.append( (ns, name, index) ) + if not new_path: + raise ValueError, "invalid path" + return new_path + + +cdef _ObjectPath* _build_object_path_segments(list path_list) except NULL: + cdef _ObjectPath* c_path + cdef _ObjectPath* c_path_segments + c_path_segments = <_ObjectPath*>python.lxml_malloc(len(path_list), sizeof(_ObjectPath)) + if c_path_segments is NULL: + raise MemoryError() + c_path = c_path_segments + for href, name, index in path_list: + c_path[0].href = _xcstr(href) if href is not None else NULL + c_path[0].name = _xcstr(name) if name is not None else NULL + c_path[0].index = index + c_path += 1 + return c_path_segments + + +cdef _find_object_path(_Element root, _ObjectPath* c_path, Py_ssize_t c_path_len, default_value): + """Follow the path to find the target element. + """ + cdef tree.xmlNode* c_node + cdef Py_ssize_t c_index + c_node = root._c_node + c_name = c_path[0].name + c_href = c_path[0].href + if c_href is NULL or c_href[0] == c'\0': + c_href = tree._getNs(c_node) + if not cetree.tagMatches(c_node, c_href, c_name): + if default_value is not _NO_DEFAULT: + return default_value + else: + raise ValueError( + f"root element does not match: need {cetree.namespacedNameFromNsName(c_href, c_name)}, got {root.tag}") + + while c_node is not NULL: + c_path_len -= 1 + if c_path_len <= 0: + break + + c_path += 1 + if c_path[0].href is not NULL: + c_href = c_path[0].href # otherwise: keep parent namespace + c_name = tree.xmlDictExists(c_node.doc.dict, c_path[0].name, -1) + if c_name is NULL: + c_name = c_path[0].name + c_node = NULL + break + c_index = c_path[0].index + c_node = c_node.last if c_index < 0 else c_node.children + c_node = _findFollowingSibling(c_node, c_href, c_name, c_index) + + if c_node is not NULL: + return cetree.elementFactory(root._doc, c_node) + elif default_value is not _NO_DEFAULT: + return default_value + else: + tag = cetree.namespacedNameFromNsName(c_href, c_name) + raise AttributeError, f"no such child: {tag}" + + +cdef _create_object_path(_Element root, _ObjectPath* c_path, + Py_ssize_t c_path_len, int replace, value): + """Follow the path to find the target element, build the missing children + as needed and set the target element to 'value'. If replace is true, an + existing value is replaced, otherwise the new value is added. 
+ """ + cdef _Element child + cdef tree.xmlNode* c_node + cdef tree.xmlNode* c_child + cdef Py_ssize_t c_index + if c_path_len == 1: + raise TypeError, "cannot update root node" + + c_node = root._c_node + c_name = c_path[0].name + c_href = c_path[0].href + if c_href is NULL or c_href[0] == c'\0': + c_href = tree._getNs(c_node) + if not cetree.tagMatches(c_node, c_href, c_name): + raise ValueError( + f"root element does not match: need {cetree.namespacedNameFromNsName(c_href, c_name)}, got {root.tag}") + + while c_path_len > 1: + c_path_len -= 1 + c_path += 1 + if c_path[0].href is not NULL: + c_href = c_path[0].href # otherwise: keep parent namespace + c_index = c_path[0].index + c_name = tree.xmlDictExists(c_node.doc.dict, c_path[0].name, -1) + if c_name is NULL: + c_name = c_path[0].name + c_child = NULL + else: + c_child = c_node.last if c_index < 0 else c_node.children + c_child = _findFollowingSibling(c_child, c_href, c_name, c_index) + + if c_child is not NULL: + c_node = c_child + elif c_index != 0: + raise TypeError, "creating indexed path attributes is not supported" + elif c_path_len == 1: + _appendValue(cetree.elementFactory(root._doc, c_node), + cetree.namespacedNameFromNsName(c_href, c_name), + value) + return + else: + child = cetree.makeSubElement( + cetree.elementFactory(root._doc, c_node), + cetree.namespacedNameFromNsName(c_href, c_name), + None, None, None, None) + c_node = child._c_node + + # if we get here, the entire path was already there + if replace: + element = cetree.elementFactory(root._doc, c_node) + _replaceElement(element, value) + else: + _appendValue(cetree.elementFactory(root._doc, c_node.parent), + cetree.namespacedName(c_node), value) + + +cdef list _build_descendant_paths(tree.xmlNode* c_node, prefix_string): + """Returns a list of all descendant paths. + """ + cdef list path, path_list + tag = cetree.namespacedName(c_node) + if prefix_string: + if prefix_string[-1] != '.': + prefix_string += '.' + prefix_string = prefix_string + tag + else: + prefix_string = tag + path = [prefix_string] + path_list = [] + _recursive_build_descendant_paths(c_node, path, path_list) + return path_list + + +cdef int _recursive_build_descendant_paths(tree.xmlNode* c_node, + list path, list path_list) except -1: + """Fills the list 'path_list' with all descendant paths, initial prefix + being in the list 'path'. 
+ """ + cdef tree.xmlNode* c_child + tags = {} + path_list.append('.'.join(path)) + c_href = tree._getNs(c_node) + c_child = c_node.children + while c_child is not NULL: + while c_child.type != tree.XML_ELEMENT_NODE: + c_child = c_child.next + if c_child is NULL: + return 0 + if c_href is tree._getNs(c_child): + tag = pyunicode(c_child.name) + elif c_href is not NULL and tree._getNs(c_child) is NULL: + # special case: parent has namespace, child does not + tag = '{}' + pyunicode(c_child.name) + else: + tag = cetree.namespacedName(c_child) + count = tags.get(tag) + if count is None: + tags[tag] = 1 + else: + tags[tag] = count + 1 + tag += f'[{count}]' + path.append(tag) + _recursive_build_descendant_paths(c_child, path, path_list) + del path[-1] + c_child = c_child.next + return 0 diff --git a/venv/lib/python3.10/site-packages/lxml/parser.pxi b/venv/lib/python3.10/site-packages/lxml/parser.pxi new file mode 100644 index 0000000000000000000000000000000000000000..ff07dcdd3ebd341a8e02b7cebb9c01f3255a2edd --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/parser.pxi @@ -0,0 +1,1994 @@ +# Parsers for XML and HTML + +from lxml.includes cimport xmlparser +from lxml.includes cimport htmlparser + + +class ParseError(LxmlSyntaxError): + """Syntax error while parsing an XML document. + + For compatibility with ElementTree 1.3 and later. + """ + def __init__(self, message, code, line, column, filename=None): + super(_ParseError, self).__init__(message) + self.lineno, self.offset = (line, column - 1) + self.code = code + self.filename = filename + + @property + def position(self): + return self.lineno, self.offset + 1 + + @position.setter + def position(self, new_pos): + self.lineno, column = new_pos + self.offset = column - 1 + +cdef object _ParseError = ParseError + + +class XMLSyntaxError(ParseError): + """Syntax error while parsing an XML document. + """ + +cdef class ParserError(LxmlError): + """Internal lxml parser error. + """ + + +@cython.final +@cython.internal +cdef class _ParserDictionaryContext: + # Global parser context to share the string dictionary. + # + # This class is a delegate singleton! + # + # It creates _ParserDictionaryContext objects for each thread to keep thread state, + # but those must never be used directly. Always stick to using the static + # __GLOBAL_PARSER_CONTEXT as defined below the class. + # + + cdef tree.xmlDict* _c_dict + cdef _BaseParser _default_parser + cdef list _implied_parser_contexts + + def __cinit__(self): + self._c_dict = NULL + self._implied_parser_contexts = [] + + def __dealloc__(self): + if self._c_dict is not NULL: + xmlparser.xmlDictFree(self._c_dict) + + cdef int initMainParserContext(self) except -1: + """Put the global context into the thread dictionary of the main + thread. 
To be called once and only in the main thread.""" + thread_dict = python.PyThreadState_GetDict() + if thread_dict is not NULL: + (thread_dict)["_ParserDictionaryContext"] = self + + cdef _ParserDictionaryContext _findThreadParserContext(self): + "Find (or create) the _ParserDictionaryContext object for the current thread" + cdef _ParserDictionaryContext context + thread_dict = python.PyThreadState_GetDict() + if thread_dict is NULL: + return self + d = thread_dict + result = python.PyDict_GetItem(d, "_ParserDictionaryContext") + if result is not NULL: + return result + context = <_ParserDictionaryContext>_ParserDictionaryContext.__new__(_ParserDictionaryContext) + d["_ParserDictionaryContext"] = context + return context + + cdef int setDefaultParser(self, _BaseParser parser) except -1: + "Set the default parser for the current thread" + cdef _ParserDictionaryContext context + context = self._findThreadParserContext() + context._default_parser = parser + + cdef _BaseParser getDefaultParser(self): + "Return (or create) the default parser of the current thread" + cdef _ParserDictionaryContext context + context = self._findThreadParserContext() + if context._default_parser is None: + if self._default_parser is None: + self._default_parser = __DEFAULT_XML_PARSER._copy() + if context is not self: + context._default_parser = self._default_parser._copy() + return context._default_parser + + cdef tree.xmlDict* _getThreadDict(self, tree.xmlDict* default): + "Return the thread-local dict or create a new one if necessary." + cdef _ParserDictionaryContext context + context = self._findThreadParserContext() + if context._c_dict is NULL: + # thread dict not yet set up => use default or create a new one + if default is not NULL: + context._c_dict = default + xmlparser.xmlDictReference(default) + return default + if self._c_dict is NULL: + self._c_dict = xmlparser.xmlDictCreate() + if context is not self: + context._c_dict = xmlparser.xmlDictCreateSub(self._c_dict) + return context._c_dict + + cdef int initThreadDictRef(self, tree.xmlDict** c_dict_ref) except -1: + c_dict = c_dict_ref[0] + c_thread_dict = self._getThreadDict(c_dict) + if c_dict is c_thread_dict: + return 0 + if c_dict is not NULL: + xmlparser.xmlDictFree(c_dict) + c_dict_ref[0] = c_thread_dict + xmlparser.xmlDictReference(c_thread_dict) + + cdef int initParserDict(self, xmlparser.xmlParserCtxt* pctxt) except -1: + "Assure we always use the same string dictionary." + self.initThreadDictRef(&pctxt.dict) + pctxt.dictNames = 1 + + cdef int initXPathParserDict(self, xpath.xmlXPathContext* pctxt) except -1: + "Assure we always use the same string dictionary." + self.initThreadDictRef(&pctxt.dict) + + cdef int initDocDict(self, xmlDoc* result) except -1: + "Store dict of last object parsed if no shared dict yet" + # XXX We also free the result dict here if there already was one. + # This case should only occur for new documents with empty dicts, + # otherwise we'd free data that's in use => segfault + self.initThreadDictRef(&result.dict) + + cdef _ParserContext findImpliedContext(self): + """Return any current implied xml parser context for the current + thread. This is used when the resolver functions are called + with an xmlParserCtxt that was generated from within libxml2 + (i.e. 
without a _ParserContext) - which happens when parsing + schema and xinclude external references.""" + cdef _ParserDictionaryContext context + cdef _ParserContext implied_context + + # see if we have a current implied parser + context = self._findThreadParserContext() + if context._implied_parser_contexts: + implied_context = context._implied_parser_contexts[-1] + return implied_context + return None + + cdef int pushImpliedContextFromParser(self, _BaseParser parser) except -1: + "Push a new implied context object taken from the parser." + if parser is not None: + self.pushImpliedContext(parser._getParserContext()) + else: + self.pushImpliedContext(None) + + cdef int pushImpliedContext(self, _ParserContext parser_context) except -1: + "Push a new implied context object." + cdef _ParserDictionaryContext context + context = self._findThreadParserContext() + context._implied_parser_contexts.append(parser_context) + + cdef int popImpliedContext(self) except -1: + "Pop the current implied context object." + cdef _ParserDictionaryContext context + context = self._findThreadParserContext() + context._implied_parser_contexts.pop() + +cdef _ParserDictionaryContext __GLOBAL_PARSER_CONTEXT = _ParserDictionaryContext() +__GLOBAL_PARSER_CONTEXT.initMainParserContext() + +############################################################ +## support for Python unicode I/O +############################################################ + +# name of Python Py_UNICODE encoding as known to libxml2 +cdef const_char* _PY_UNICODE_ENCODING = NULL + +cdef int _setupPythonUnicode() except -1: + """Sets _PY_UNICODE_ENCODING to the internal encoding name of Python unicode + strings if libxml2 supports reading native Python unicode. This depends + on iconv and the local Python installation, so we simply check if we find + a matching encoding handler. + """ + cdef tree.xmlCharEncodingHandler* enchandler + cdef Py_ssize_t l + cdef const_char* enc + cdef Py_UNICODE *uchars = [c'<', c't', c'e', c's', c't', c'/', c'>'] + cdef const_xmlChar* buffer = uchars + # apparently, libxml2 can't detect UTF-16 on some systems + if (buffer[0] == c'<' and buffer[1] == c'\0' and + buffer[2] == c't' and buffer[3] == c'\0'): + enc = "UTF-16LE" + elif (buffer[0] == c'\0' and buffer[1] == c'<' and + buffer[2] == c'\0' and buffer[3] == c't'): + enc = "UTF-16BE" + else: + # let libxml2 give it a try + enc = _findEncodingName(buffer, sizeof(Py_UNICODE) * 7) + if enc is NULL: + # not my fault, it's YOUR broken system :) + return 0 + enchandler = tree.xmlFindCharEncodingHandler(enc) + if enchandler is not NULL: + global _PY_UNICODE_ENCODING + tree.xmlCharEncCloseFunc(enchandler) + _PY_UNICODE_ENCODING = enc + return 0 + +cdef const_char* _findEncodingName(const_xmlChar* buffer, int size): + "Work around bug in libxml2: find iconv name of encoding on our own." + cdef tree.xmlCharEncoding enc + enc = tree.xmlDetectCharEncoding(buffer, size) + if enc == tree.XML_CHAR_ENCODING_UTF16LE: + if size >= 4 and (buffer[0] == b'\xFF' and + buffer[1] == b'\xFE' and + buffer[2] == 0 and buffer[3] == 0): + return "UTF-32LE" # according to BOM + else: + return "UTF-16LE" + elif enc == tree.XML_CHAR_ENCODING_UTF16BE: + return "UTF-16BE" + elif enc == tree.XML_CHAR_ENCODING_UCS4LE: + return "UCS-4LE" + elif enc == tree.XML_CHAR_ENCODING_UCS4BE: + return "UCS-4BE" + elif enc == tree.XML_CHAR_ENCODING_NONE: + return NULL + else: + # returns a constant char*, no need to free it + return tree.xmlGetCharEncodingName(enc) + +# Python 3.12 removed support for "Py_UNICODE". 
+if python.PY_VERSION_HEX < 0x030C0000: + _setupPythonUnicode() + + +cdef unicode _find_PyUCS4EncodingName(): + """ + Find a suitable encoding for Py_UCS4 PyUnicode strings in libxml2. + """ + ustring = "\U0001F92A" + cdef const xmlChar* buffer = python.PyUnicode_DATA(ustring) + cdef Py_ssize_t py_buffer_len = python.PyUnicode_GET_LENGTH(ustring) + + encoding_name = '' + cdef tree.xmlCharEncoding enc = tree.xmlDetectCharEncoding(buffer, py_buffer_len) + enchandler = tree.xmlGetCharEncodingHandler(enc) + if enchandler is not NULL: + try: + if enchandler.name: + encoding_name = enchandler.name.decode('UTF-8') + finally: + tree.xmlCharEncCloseFunc(enchandler) + else: + c_name = tree.xmlGetCharEncodingName(enc) + if c_name: + encoding_name = c_name.decode('UTF-8') + + + if encoding_name and not encoding_name.endswith('LE') and not encoding_name.endswith('BE'): + encoding_name += 'BE' if python.PY_BIG_ENDIAN else 'LE' + return encoding_name or None + +_pyucs4_encoding_name = _find_PyUCS4EncodingName() + + +############################################################ +## support for file-like objects +############################################################ + +@cython.final +@cython.internal +cdef class _FileReaderContext: + cdef object _filelike + cdef object _encoding + cdef object _url + cdef object _bytes + cdef _ExceptionContext _exc_context + cdef Py_ssize_t _bytes_read + cdef char* _c_url + cdef bint _close_file_after_read + + def __cinit__(self, filelike, exc_context not None, url, encoding=None, bint close_file=False): + self._exc_context = exc_context + self._filelike = filelike + self._close_file_after_read = close_file + self._encoding = encoding + if url is None: + self._c_url = NULL + else: + url = _encodeFilename(url) + self._c_url = _cstr(url) + self._url = url + self._bytes = b'' + self._bytes_read = 0 + + cdef _close_file(self): + if self._filelike is None or not self._close_file_after_read: + return + try: + close = self._filelike.close + except AttributeError: + close = None + finally: + self._filelike = None + if close is not None: + close() + + cdef xmlparser.xmlParserInputBuffer* _createParserInputBuffer(self) noexcept: + cdef xmlparser.xmlParserInputBuffer* c_buffer = xmlparser.xmlAllocParserInputBuffer(0) + if c_buffer: + c_buffer.readcallback = _readFilelikeParser + c_buffer.context = self + return c_buffer + + cdef xmlparser.xmlParserInput* _createParserInput( + self, xmlparser.xmlParserCtxt* ctxt) noexcept: + cdef xmlparser.xmlParserInputBuffer* c_buffer = self._createParserInputBuffer() + if not c_buffer: + return NULL + return xmlparser.xmlNewIOInputStream(ctxt, c_buffer, 0) + + cdef tree.xmlDtd* _readDtd(self) noexcept: + cdef xmlparser.xmlParserInputBuffer* c_buffer = self._createParserInputBuffer() + if not c_buffer: + return NULL + with nogil: + return xmlparser.xmlIOParseDTD(NULL, c_buffer, 0) + + cdef xmlDoc* _readDoc(self, xmlparser.xmlParserCtxt* ctxt, int options) noexcept: + cdef xmlDoc* result + cdef void* c_callback_context = self + cdef char* c_encoding = _cstr(self._encoding) if self._encoding is not None else NULL + + orig_options = ctxt.options + with nogil: + if ctxt.html: + result = htmlparser.htmlCtxtReadIO( + ctxt, _readFilelikeParser, NULL, c_callback_context, + self._c_url, c_encoding, options) + if result is not NULL: + if _fixHtmlDictNames(ctxt.dict, result) < 0: + tree.xmlFreeDoc(result) + result = NULL + else: + result = xmlparser.xmlCtxtReadIO( + ctxt, _readFilelikeParser, NULL, c_callback_context, + self._c_url, c_encoding, options) + 
ctxt.options = orig_options # work around libxml2 problem + + try: + self._close_file() + except: + self._exc_context._store_raised() + finally: + return result # swallow any exceptions + + cdef int copyToBuffer(self, char* c_buffer, int c_requested) noexcept: + cdef int c_byte_count = 0 + cdef char* c_start + cdef Py_ssize_t byte_count, remaining + if self._bytes_read < 0: + return 0 + try: + byte_count = python.PyBytes_GET_SIZE(self._bytes) + remaining = byte_count - self._bytes_read + while c_requested > remaining: + c_start = _cstr(self._bytes) + self._bytes_read + cstring_h.memcpy(c_buffer, c_start, remaining) + c_byte_count += remaining + c_buffer += remaining + c_requested -= remaining + + self._bytes = self._filelike.read(c_requested) + if not isinstance(self._bytes, bytes): + if isinstance(self._bytes, unicode): + if self._encoding is None: + self._bytes = (self._bytes).encode('utf8') + else: + self._bytes = python.PyUnicode_AsEncodedString( + self._bytes, _cstr(self._encoding), NULL) + else: + self._close_file() + raise TypeError, \ + "reading from file-like objects must return byte strings or unicode strings" + + remaining = python.PyBytes_GET_SIZE(self._bytes) + if remaining == 0: + self._bytes_read = -1 + self._close_file() + return c_byte_count + self._bytes_read = 0 + + if c_requested > 0: + c_start = _cstr(self._bytes) + self._bytes_read + cstring_h.memcpy(c_buffer, c_start, c_requested) + c_byte_count += c_requested + self._bytes_read += c_requested + except: + c_byte_count = -1 + self._exc_context._store_raised() + try: + self._close_file() + except: + self._exc_context._store_raised() + finally: + return c_byte_count # swallow any exceptions + +cdef int _readFilelikeParser(void* ctxt, char* c_buffer, int c_size) noexcept with gil: + return (<_FileReaderContext>ctxt).copyToBuffer(c_buffer, c_size) + +cdef int _readFileParser(void* ctxt, char* c_buffer, int c_size) noexcept nogil: + return stdio.fread(c_buffer, 1, c_size, ctxt) + +############################################################ +## support for custom document loaders +############################################################ + +cdef xmlparser.xmlParserInput* _local_resolver(const_char* c_url, const_char* c_pubid, + xmlparser.xmlParserCtxt* c_context) noexcept with gil: + cdef _ResolverContext context + cdef xmlparser.xmlParserInput* c_input + cdef _InputDocument doc_ref + cdef _FileReaderContext file_context + # if there is no _ParserContext associated with the xmlParserCtxt + # passed, check to see if the thread state object has an implied + # context. + if c_context._private is not NULL: + context = <_ResolverContext>c_context._private + else: + context = __GLOBAL_PARSER_CONTEXT.findImpliedContext() + + if context is None: + if __DEFAULT_ENTITY_LOADER is NULL: + return NULL + with nogil: + # free the GIL as we might do serious I/O here (e.g. HTTP) + c_input = __DEFAULT_ENTITY_LOADER(c_url, c_pubid, c_context) + return c_input + + try: + if c_url is NULL: + url = None + else: + # parsing a related document (DTD etc.) => UTF-8 encoded URL? 
+ url = _decodeFilename(c_url) + if c_pubid is NULL: + pubid = None + else: + pubid = funicode(c_pubid) # always UTF-8 + + doc_ref = context._resolvers.resolve(url, pubid, context) + except: + context._store_raised() + return NULL + + if doc_ref is not None: + if doc_ref._type == PARSER_DATA_STRING: + data = doc_ref._data_bytes + filename = doc_ref._filename + if not filename: + filename = None + elif not isinstance(filename, bytes): + # most likely a text URL + filename = filename.encode('utf8') + if not isinstance(filename, bytes): + filename = None + + c_input = xmlparser.xmlNewInputStream(c_context) + if c_input is not NULL: + if filename is not None: + c_input.filename = tree.xmlStrdup(_xcstr(filename)) + c_input.base = _xcstr(data) + c_input.length = python.PyBytes_GET_SIZE(data) + c_input.cur = c_input.base + c_input.end = c_input.base + c_input.length + elif doc_ref._type == PARSER_DATA_FILENAME: + data = None + c_filename = _cstr(doc_ref._filename) + with nogil: + # free the GIL as we might do serious I/O here + c_input = xmlparser.xmlNewInputFromFile( + c_context, c_filename) + elif doc_ref._type == PARSER_DATA_FILE: + file_context = _FileReaderContext(doc_ref._file, context, url, + None, doc_ref._close_file) + c_input = file_context._createParserInput(c_context) + data = file_context + else: + data = None + c_input = NULL + + if data is not None: + context._storage.add(data) + if c_input is not NULL: + return c_input + + if __DEFAULT_ENTITY_LOADER is NULL: + return NULL + + with nogil: + # free the GIL as we might do serious I/O here (e.g. HTTP) + c_input = __DEFAULT_ENTITY_LOADER(c_url, c_pubid, c_context) + return c_input + +cdef xmlparser.xmlExternalEntityLoader __DEFAULT_ENTITY_LOADER +__DEFAULT_ENTITY_LOADER = xmlparser.xmlGetExternalEntityLoader() + + +cdef xmlparser.xmlExternalEntityLoader _register_document_loader() noexcept nogil: + cdef xmlparser.xmlExternalEntityLoader old = xmlparser.xmlGetExternalEntityLoader() + xmlparser.xmlSetExternalEntityLoader(_local_resolver) + return old + +cdef void _reset_document_loader(xmlparser.xmlExternalEntityLoader old) noexcept nogil: + xmlparser.xmlSetExternalEntityLoader(old) + + +############################################################ +## Parsers +############################################################ + +@cython.no_gc_clear # May have to call "self._validator.disconnect()" on dealloc. +@cython.internal +cdef class _ParserContext(_ResolverContext): + cdef _ErrorLog _error_log + cdef _ParserSchemaValidationContext _validator + cdef xmlparser.xmlParserCtxt* _c_ctxt + cdef xmlparser.xmlExternalEntityLoader _orig_loader + cdef python.PyThread_type_lock _lock + cdef _Document _doc + cdef bint _collect_ids + + def __cinit__(self): + self._c_ctxt = NULL + self._collect_ids = True + if not config.ENABLE_THREADING: + self._lock = NULL + else: + self._lock = python.PyThread_allocate_lock() + self._error_log = _ErrorLog() + + def __dealloc__(self): + if config.ENABLE_THREADING and self._lock is not NULL: + python.PyThread_free_lock(self._lock) + self._lock = NULL + if self._c_ctxt is not NULL: + if self._validator is not NULL and self._validator is not None: + # If the parser was not closed correctly (e.g. interrupted iterparse()), + # and the schema validator wasn't freed and cleaned up yet, the libxml2 SAX + # validator plug might still be in place, which will make xmlFreeParserCtxt() + # crash when trying to xmlFree() a static SAX handler. + # Thus, make sure we disconnect the handler interceptor here at the latest. 
+ self._validator.disconnect() + xmlparser.xmlFreeParserCtxt(self._c_ctxt) + + cdef _ParserContext _copy(self): + cdef _ParserContext context + context = self.__class__() + context._collect_ids = self._collect_ids + context._validator = self._validator.copy() + _initParserContext(context, self._resolvers._copy(), NULL) + return context + + cdef void _initParserContext(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept: + self._c_ctxt = c_ctxt + c_ctxt._private = self + + cdef void _resetParserContext(self) noexcept: + if self._c_ctxt is not NULL: + if self._c_ctxt.html: + htmlparser.htmlCtxtReset(self._c_ctxt) + self._c_ctxt.disableSAX = 0 # work around bug in libxml2 + else: + xmlparser.xmlClearParserCtxt(self._c_ctxt) + # work around bug in libxml2 [2.9.10 .. 2.9.14]: + # https://gitlab.gnome.org/GNOME/libxml2/-/issues/378 + self._c_ctxt.nsNr = 0 + + cdef int prepare(self, bint set_document_loader=True) except -1: + cdef int result + if config.ENABLE_THREADING and self._lock is not NULL: + with nogil: + result = python.PyThread_acquire_lock( + self._lock, python.WAIT_LOCK) + if result == 0: + raise ParserError, "parser locking failed" + self._error_log.clear() + self._doc = None + # Need a cast here because older libxml2 releases do not use 'const' in the functype. + self._c_ctxt.sax.serror = _receiveParserError + self._orig_loader = _register_document_loader() if set_document_loader else NULL + if self._validator is not None: + self._validator.connect(self._c_ctxt, self._error_log) + return 0 + + cdef int cleanup(self) except -1: + if self._orig_loader is not NULL: + _reset_document_loader(self._orig_loader) + try: + if self._validator is not None: + self._validator.disconnect() + self._resetParserContext() + self.clear() + self._doc = None + self._c_ctxt.sax.serror = NULL + finally: + if config.ENABLE_THREADING and self._lock is not NULL: + python.PyThread_release_lock(self._lock) + return 0 + + cdef object _handleParseResult(self, _BaseParser parser, + xmlDoc* result, filename): + c_doc = self._handleParseResultDoc(parser, result, filename) + if self._doc is not None and self._doc._c_doc is c_doc: + return self._doc + else: + return _documentFactory(c_doc, parser) + + cdef xmlDoc* _handleParseResultDoc(self, _BaseParser parser, + xmlDoc* result, filename) except NULL: + recover = parser._parse_options & xmlparser.XML_PARSE_RECOVER + return _handleParseResult(self, self._c_ctxt, result, + filename, recover, + free_doc=self._doc is None) + +cdef _initParserContext(_ParserContext context, + _ResolverRegistry resolvers, + xmlparser.xmlParserCtxt* c_ctxt): + _initResolverContext(context, resolvers) + if c_ctxt is not NULL: + context._initParserContext(c_ctxt) + +cdef void _forwardParserError(xmlparser.xmlParserCtxt* _parser_context, const xmlerror.xmlError* error) noexcept with gil: + (<_ParserContext>_parser_context._private)._error_log._receive(error) + +cdef void _receiveParserError(void* c_context, const xmlerror.xmlError* error) noexcept nogil: + if __DEBUG: + if c_context is NULL or (c_context)._private is NULL: + _forwardError(NULL, error) + else: + _forwardParserError(c_context, error) + +cdef int _raiseParseError(xmlparser.xmlParserCtxt* ctxt, filename, + _ErrorLog error_log) except -1: + if filename is not None and \ + ctxt.lastError.domain == xmlerror.XML_FROM_IO: + if isinstance(filename, bytes): + filename = _decodeFilenameWithLength( + filename, len(filename)) + if ctxt.lastError.message is not NULL: + try: + message = ctxt.lastError.message.decode('utf-8') + except 
UnicodeDecodeError: + # the filename may be in there => play it safe + message = ctxt.lastError.message.decode('iso8859-1') + message = f"Error reading file '{filename}': {message.strip()}" + else: + message = f"Error reading '{filename}'" + raise IOError, message + elif error_log: + raise error_log._buildParseException( + XMLSyntaxError, "Document is not well formed") + elif ctxt.lastError.message is not NULL: + message = ctxt.lastError.message.strip() + code = ctxt.lastError.code + line = ctxt.lastError.line + column = ctxt.lastError.int2 + if ctxt.lastError.line > 0: + message = f"line {line}: {message}" + raise XMLSyntaxError(message, code, line, column, filename) + else: + raise XMLSyntaxError(None, xmlerror.XML_ERR_INTERNAL_ERROR, 0, 0, + filename) + +cdef xmlDoc* _handleParseResult(_ParserContext context, + xmlparser.xmlParserCtxt* c_ctxt, + xmlDoc* result, filename, + bint recover, bint free_doc) except NULL: + cdef bint well_formed + if result is not NULL: + __GLOBAL_PARSER_CONTEXT.initDocDict(result) + + if c_ctxt.myDoc is not NULL: + if c_ctxt.myDoc is not result: + __GLOBAL_PARSER_CONTEXT.initDocDict(c_ctxt.myDoc) + tree.xmlFreeDoc(c_ctxt.myDoc) + c_ctxt.myDoc = NULL + + if result is not NULL: + if (context._validator is not None and + not context._validator.isvalid()): + well_formed = 0 # actually not 'valid', but anyway ... + elif (not c_ctxt.wellFormed and not c_ctxt.html and + c_ctxt.charset == tree.XML_CHAR_ENCODING_8859_1 and + [1 for error in context._error_log + if error.type == ErrorTypes.ERR_INVALID_CHAR]): + # An encoding error occurred and libxml2 switched from UTF-8 + # input to (undecoded) Latin-1, at some arbitrary point in the + # document. Better raise an error than allowing for a broken + # tree with mixed encodings. This is fixed in libxml2 2.12. 
+ well_formed = 0 + elif recover or (c_ctxt.wellFormed and + c_ctxt.lastError.level < xmlerror.XML_ERR_ERROR): + well_formed = 1 + elif not c_ctxt.replaceEntities and not c_ctxt.validate \ + and context is not None: + # in this mode, we ignore errors about undefined entities + for error in context._error_log.filter_from_errors(): + if error.type != ErrorTypes.WAR_UNDECLARED_ENTITY and \ + error.type != ErrorTypes.ERR_UNDECLARED_ENTITY: + well_formed = 0 + break + else: + well_formed = 1 + else: + well_formed = 0 + + if not well_formed: + if free_doc: + tree.xmlFreeDoc(result) + result = NULL + + if context is not None and context._has_raised(): + if result is not NULL: + if free_doc: + tree.xmlFreeDoc(result) + result = NULL + context._raise_if_stored() + + if result is NULL: + if context is not None: + _raiseParseError(c_ctxt, filename, context._error_log) + else: + _raiseParseError(c_ctxt, filename, None) + else: + if result.URL is NULL and filename is not None: + result.URL = tree.xmlStrdup(_xcstr(filename)) + if result.encoding is NULL: + result.encoding = tree.xmlStrdup("UTF-8") + + if context._validator is not None and \ + context._validator._add_default_attributes: + # we currently need to do this here as libxml2 does not + # support inserting default attributes during parse-time + # validation + context._validator.inject_default_attributes(result) + + return result + +cdef int _fixHtmlDictNames(tree.xmlDict* c_dict, xmlDoc* c_doc) noexcept nogil: + cdef xmlNode* c_node + if c_doc is NULL: + return 0 + c_node = c_doc.children + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_doc, c_node, 1) + if c_node.type == tree.XML_ELEMENT_NODE: + if _fixHtmlDictNodeNames(c_dict, c_node) < 0: + return -1 + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + return 0 + +cdef int _fixHtmlDictSubtreeNames(tree.xmlDict* c_dict, xmlDoc* c_doc, + xmlNode* c_start_node) noexcept nogil: + """ + Move names to the dict, iterating in document order, starting at + c_start_node. This is used in incremental parsing after each chunk. 
+ """ + cdef xmlNode* c_node + if not c_doc: + return 0 + if not c_start_node: + return _fixHtmlDictNames(c_dict, c_doc) + c_node = c_start_node + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_doc, c_node, 1) + if c_node.type == tree.XML_ELEMENT_NODE: + if _fixHtmlDictNodeNames(c_dict, c_node) < 0: + return -1 + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + return 0 + +cdef inline int _fixHtmlDictNodeNames(tree.xmlDict* c_dict, + xmlNode* c_node) noexcept nogil: + cdef xmlNode* c_attr + c_name = tree.xmlDictLookup(c_dict, c_node.name, -1) + if c_name is NULL: + return -1 + if c_name is not c_node.name: + tree.xmlFree(c_node.name) + c_node.name = c_name + c_attr = c_node.properties + while c_attr is not NULL: + c_name = tree.xmlDictLookup(c_dict, c_attr.name, -1) + if c_name is NULL: + return -1 + if c_name is not c_attr.name: + tree.xmlFree(c_attr.name) + c_attr.name = c_name + c_attr = c_attr.next + return 0 + + +@cython.internal +cdef class _BaseParser: + cdef ElementClassLookup _class_lookup + cdef _ResolverRegistry _resolvers + cdef _ParserContext _parser_context + cdef _ParserContext _push_parser_context + cdef int _parse_options + cdef bint _for_html + cdef bint _remove_comments + cdef bint _remove_pis + cdef bint _strip_cdata + cdef bint _collect_ids + cdef bint _resolve_external_entities + cdef XMLSchema _schema + cdef bytes _filename + cdef readonly object target + cdef object _default_encoding + cdef tuple _events_to_collect # (event_types, tag) + + def __init__(self, int parse_options, bint for_html, XMLSchema schema, + remove_comments, remove_pis, strip_cdata, collect_ids, + target, encoding, bint resolve_external_entities=True): + cdef tree.xmlCharEncodingHandler* enchandler + cdef int c_encoding + if not isinstance(self, (XMLParser, HTMLParser)): + raise TypeError, "This class cannot be instantiated" + + self._parse_options = parse_options + self.target = target + self._for_html = for_html + self._remove_comments = remove_comments + self._remove_pis = remove_pis + self._strip_cdata = strip_cdata + self._collect_ids = collect_ids + self._resolve_external_entities = resolve_external_entities + self._schema = schema + + self._resolvers = _ResolverRegistry() + + if encoding is None: + self._default_encoding = None + else: + encoding = _utf8(encoding) + enchandler = tree.xmlFindCharEncodingHandler(_cstr(encoding)) + if enchandler is NULL: + raise LookupError, f"unknown encoding: '{encoding}'" + tree.xmlCharEncCloseFunc(enchandler) + self._default_encoding = encoding + + cdef _setBaseURL(self, base_url): + self._filename = _encodeFilename(base_url) + + cdef _collectEvents(self, event_types, tag): + if event_types is None: + event_types = () + else: + event_types = tuple(set(event_types)) + _buildParseEventFilter(event_types) # purely for validation + self._events_to_collect = (event_types, tag) + + cdef _ParserContext _getParserContext(self): + cdef xmlparser.xmlParserCtxt* pctxt + if self._parser_context is None: + self._parser_context = self._createContext(self.target, None) + self._parser_context._collect_ids = self._collect_ids + if self._schema is not None: + self._parser_context._validator = \ + self._schema._newSaxValidator( + self._parse_options & xmlparser.XML_PARSE_DTDATTR) + pctxt = self._newParserCtxt() + _initParserContext(self._parser_context, self._resolvers, pctxt) + self._configureSaxContext(pctxt) + return self._parser_context + + cdef _ParserContext _getPushParserContext(self): + cdef xmlparser.xmlParserCtxt* pctxt + if self._push_parser_context is None: + 
self._push_parser_context = self._createContext( + self.target, self._events_to_collect) + self._push_parser_context._collect_ids = self._collect_ids + if self._schema is not None: + self._push_parser_context._validator = \ + self._schema._newSaxValidator( + self._parse_options & xmlparser.XML_PARSE_DTDATTR) + pctxt = self._newPushParserCtxt() + _initParserContext( + self._push_parser_context, self._resolvers, pctxt) + self._configureSaxContext(pctxt) + return self._push_parser_context + + cdef _ParserContext _createContext(self, target, events_to_collect): + cdef _SaxParserContext sax_context + if target is not None: + sax_context = _TargetParserContext(self) + (<_TargetParserContext>sax_context)._setTarget(target) + elif events_to_collect: + sax_context = _SaxParserContext(self) + else: + # nothing special to configure + return _ParserContext() + if events_to_collect: + events, tag = events_to_collect + sax_context._setEventFilter(events, tag) + return sax_context + + @cython.final + cdef int _configureSaxContext(self, xmlparser.xmlParserCtxt* pctxt) except -1: + if self._remove_comments: + pctxt.sax.comment = NULL + if self._remove_pis: + pctxt.sax.processingInstruction = NULL + if self._strip_cdata: + # hard switch-off for CDATA nodes => makes them plain text + pctxt.sax.cdataBlock = NULL + if not self._resolve_external_entities: + pctxt.sax.getEntity = _getInternalEntityOnly + + cdef int _registerHtmlErrorHandler(self, xmlparser.xmlParserCtxt* c_ctxt) except -1: + cdef xmlparser.xmlSAXHandler* sax = c_ctxt.sax + if sax is not NULL and sax.initialized and sax.initialized != xmlparser.XML_SAX2_MAGIC: + # need to extend SAX1 context to SAX2 to get proper error reports + if sax is &htmlparser.htmlDefaultSAXHandler: + sax = tree.xmlMalloc(sizeof(xmlparser.xmlSAXHandler)) + if sax is NULL: + raise MemoryError() + cstring_h.memcpy(sax, &htmlparser.htmlDefaultSAXHandler, + sizeof(htmlparser.htmlDefaultSAXHandler)) + c_ctxt.sax = sax + sax.initialized = xmlparser.XML_SAX2_MAGIC + # Need a cast here because older libxml2 releases do not use 'const' in the functype. + sax.serror = _receiveParserError + sax.startElementNs = NULL + sax.endElementNs = NULL + sax._private = NULL + return 0 + + cdef xmlparser.xmlParserCtxt* _newParserCtxt(self) except NULL: + cdef xmlparser.xmlParserCtxt* c_ctxt + if self._for_html: + c_ctxt = htmlparser.htmlCreateMemoryParserCtxt('dummy', 5) + if c_ctxt is not NULL: + self._registerHtmlErrorHandler(c_ctxt) + else: + c_ctxt = xmlparser.xmlNewParserCtxt() + if c_ctxt is NULL: + raise MemoryError + c_ctxt.sax.startDocument = _initSaxDocument + return c_ctxt + + cdef xmlparser.xmlParserCtxt* _newPushParserCtxt(self) except NULL: + cdef xmlparser.xmlParserCtxt* c_ctxt + cdef char* c_filename = _cstr(self._filename) if self._filename is not None else NULL + if self._for_html: + c_ctxt = htmlparser.htmlCreatePushParserCtxt( + NULL, NULL, NULL, 0, c_filename, tree.XML_CHAR_ENCODING_NONE) + if c_ctxt is not NULL: + self._registerHtmlErrorHandler(c_ctxt) + htmlparser.htmlCtxtUseOptions(c_ctxt, self._parse_options) + else: + c_ctxt = xmlparser.xmlCreatePushParserCtxt( + NULL, NULL, NULL, 0, c_filename) + if c_ctxt is not NULL: + xmlparser.xmlCtxtUseOptions(c_ctxt, self._parse_options) + if c_ctxt is NULL: + raise MemoryError() + c_ctxt.sax.startDocument = _initSaxDocument + return c_ctxt + + @property + def error_log(self): + """The error log of the last parser run. 
+ """ + cdef _ParserContext context + context = self._getParserContext() + return context._error_log.copy() + + @property + def resolvers(self): + """The custom resolver registry of this parser.""" + return self._resolvers + + @property + def version(self): + """The version of the underlying XML parser.""" + return "libxml2 %d.%d.%d" % LIBXML_VERSION + + def set_element_class_lookup(self, ElementClassLookup lookup = None): + """set_element_class_lookup(self, lookup = None) + + Set a lookup scheme for element classes generated from this parser. + + Reset it by passing None or nothing. + """ + self._class_lookup = lookup + + cdef _BaseParser _copy(self): + "Create a new parser with the same configuration." + cdef _BaseParser parser + parser = self.__class__() + parser._parse_options = self._parse_options + parser._for_html = self._for_html + parser._remove_comments = self._remove_comments + parser._remove_pis = self._remove_pis + parser._strip_cdata = self._strip_cdata + parser._filename = self._filename + parser._resolvers = self._resolvers + parser.target = self.target + parser._class_lookup = self._class_lookup + parser._default_encoding = self._default_encoding + parser._schema = self._schema + parser._events_to_collect = self._events_to_collect + return parser + + def copy(self): + """copy(self) + + Create a new parser with the same configuration. + """ + return self._copy() + + def makeelement(self, _tag, attrib=None, nsmap=None, **_extra): + """makeelement(self, _tag, attrib=None, nsmap=None, **_extra) + + Creates a new element associated with this parser. + """ + return _makeElement(_tag, NULL, None, self, None, None, + attrib, nsmap, _extra) + + # internal parser methods + + cdef xmlDoc* _parseUnicodeDoc(self, utext, char* c_filename) except NULL: + """Parse unicode document, share dictionary if possible. 
+ """ + cdef _ParserContext context + cdef xmlDoc* result + cdef xmlparser.xmlParserCtxt* pctxt + cdef Py_ssize_t py_buffer_len + cdef int buffer_len, c_kind + cdef const_char* c_text + cdef const_char* c_encoding = _PY_UNICODE_ENCODING + if python.PyUnicode_IS_READY(utext): + # PEP-393 string + c_text = python.PyUnicode_DATA(utext) + py_buffer_len = python.PyUnicode_GET_LENGTH(utext) + c_kind = python.PyUnicode_KIND(utext) + if c_kind == 1: + if python.PyUnicode_MAX_CHAR_VALUE(utext) <= 127: + c_encoding = 'UTF-8' + else: + c_encoding = 'ISO-8859-1' + elif c_kind == 2: + py_buffer_len *= 2 + if python.PY_BIG_ENDIAN: + c_encoding = 'UTF-16BE' # actually UCS-2 + else: + c_encoding = 'UTF-16LE' # actually UCS-2 + elif c_kind == 4: + py_buffer_len *= 4 + if python.PY_BIG_ENDIAN: + c_encoding = 'UTF-32BE' # actually UCS-4 + else: + c_encoding = 'UTF-32LE' # actually UCS-4 + else: + assert False, f"Illegal Unicode kind {c_kind}" + else: + # old Py_UNICODE string + py_buffer_len = python.PyUnicode_GET_DATA_SIZE(utext) + c_text = python.PyUnicode_AS_DATA(utext) + assert 0 <= py_buffer_len <= limits.INT_MAX + buffer_len = py_buffer_len + + context = self._getParserContext() + context.prepare() + try: + pctxt = context._c_ctxt + __GLOBAL_PARSER_CONTEXT.initParserDict(pctxt) + orig_options = pctxt.options + with nogil: + if self._for_html: + result = htmlparser.htmlCtxtReadMemory( + pctxt, c_text, buffer_len, c_filename, c_encoding, + self._parse_options) + if result is not NULL: + if _fixHtmlDictNames(pctxt.dict, result) < 0: + tree.xmlFreeDoc(result) + result = NULL + else: + result = xmlparser.xmlCtxtReadMemory( + pctxt, c_text, buffer_len, c_filename, c_encoding, + self._parse_options) + pctxt.options = orig_options # work around libxml2 problem + + return context._handleParseResultDoc(self, result, None) + finally: + context.cleanup() + + cdef xmlDoc* _parseDoc(self, char* c_text, int c_len, + char* c_filename) except NULL: + """Parse document, share dictionary if possible. 
+ """ + cdef _ParserContext context + cdef xmlDoc* result + cdef xmlparser.xmlParserCtxt* pctxt + cdef char* c_encoding + cdef tree.xmlCharEncoding enc + context = self._getParserContext() + context.prepare() + try: + pctxt = context._c_ctxt + __GLOBAL_PARSER_CONTEXT.initParserDict(pctxt) + + if self._default_encoding is None: + c_encoding = NULL + # libxml2 (at least 2.9.3) does not recognise UTF-32 BOMs + # NOTE: limit to problematic cases because it changes character offsets + if c_len >= 4 and (c_text[0] == b'\xFF' and c_text[1] == b'\xFE' and + c_text[2] == 0 and c_text[3] == 0): + c_encoding = "UTF-32LE" + c_text += 4 + c_len -= 4 + elif c_len >= 4 and (c_text[0] == 0 and c_text[1] == 0 and + c_text[2] == b'\xFE' and c_text[3] == b'\xFF'): + c_encoding = "UTF-32BE" + c_text += 4 + c_len -= 4 + else: + # no BOM => try to determine encoding + enc = tree.xmlDetectCharEncoding(c_text, c_len) + if enc == tree.XML_CHAR_ENCODING_UCS4LE: + c_encoding = 'UTF-32LE' + elif enc == tree.XML_CHAR_ENCODING_UCS4BE: + c_encoding = 'UTF-32BE' + else: + c_encoding = _cstr(self._default_encoding) + + orig_options = pctxt.options + with nogil: + if self._for_html: + result = htmlparser.htmlCtxtReadMemory( + pctxt, c_text, c_len, c_filename, + c_encoding, self._parse_options) + if result is not NULL: + if _fixHtmlDictNames(pctxt.dict, result) < 0: + tree.xmlFreeDoc(result) + result = NULL + else: + result = xmlparser.xmlCtxtReadMemory( + pctxt, c_text, c_len, c_filename, + c_encoding, self._parse_options) + pctxt.options = orig_options # work around libxml2 problem + + return context._handleParseResultDoc(self, result, None) + finally: + context.cleanup() + + cdef xmlDoc* _parseDocFromFile(self, char* c_filename) except NULL: + cdef _ParserContext context + cdef xmlDoc* result + cdef xmlparser.xmlParserCtxt* pctxt + cdef char* c_encoding + result = NULL + + context = self._getParserContext() + context.prepare() + try: + pctxt = context._c_ctxt + __GLOBAL_PARSER_CONTEXT.initParserDict(pctxt) + + if self._default_encoding is None: + c_encoding = NULL + else: + c_encoding = _cstr(self._default_encoding) + + orig_options = pctxt.options + with nogil: + if self._for_html: + result = htmlparser.htmlCtxtReadFile( + pctxt, c_filename, c_encoding, self._parse_options) + if result is not NULL: + if _fixHtmlDictNames(pctxt.dict, result) < 0: + tree.xmlFreeDoc(result) + result = NULL + else: + result = xmlparser.xmlCtxtReadFile( + pctxt, c_filename, c_encoding, self._parse_options) + pctxt.options = orig_options # work around libxml2 problem + + return context._handleParseResultDoc(self, result, c_filename) + finally: + context.cleanup() + + cdef xmlDoc* _parseDocFromFilelike(self, filelike, filename, + encoding) except NULL: + cdef _ParserContext context + cdef _FileReaderContext file_context + cdef xmlDoc* result + cdef xmlparser.xmlParserCtxt* pctxt + cdef char* c_filename + if not filename: + filename = None + + context = self._getParserContext() + context.prepare() + try: + pctxt = context._c_ctxt + __GLOBAL_PARSER_CONTEXT.initParserDict(pctxt) + file_context = _FileReaderContext( + filelike, context, filename, + encoding or self._default_encoding) + result = file_context._readDoc(pctxt, self._parse_options) + + return context._handleParseResultDoc( + self, result, filename) + finally: + context.cleanup() + + +cdef tree.xmlEntity* _getInternalEntityOnly(void* ctxt, const_xmlChar* name) noexcept nogil: + """ + Callback function to intercept the entity resolution when external entity loading is disabled. 
+ """ + cdef tree.xmlEntity* entity = xmlparser.xmlSAX2GetEntity(ctxt, name) + if not entity: + return NULL + if entity.etype not in ( + tree.xmlEntityType.XML_EXTERNAL_GENERAL_PARSED_ENTITY, + tree.xmlEntityType.XML_EXTERNAL_GENERAL_UNPARSED_ENTITY, + tree.xmlEntityType.XML_EXTERNAL_PARAMETER_ENTITY): + return entity + + # Reject all external entities and fail the parsing instead. There is currently + # no way in libxml2 to just prevent the entity resolution in this case. + cdef xmlerror.xmlError c_error + cdef xmlerror.xmlStructuredErrorFunc err_func + cdef xmlparser.xmlParserInput* parser_input + cdef void* err_context + + c_ctxt = ctxt + err_func = xmlerror.xmlStructuredError + if err_func: + parser_input = c_ctxt.input + # Copied from xmlVErrParser() in libxml2: get current input from stack. + if parser_input and parser_input.filename is NULL and c_ctxt.inputNr > 1: + parser_input = c_ctxt.inputTab[c_ctxt.inputNr - 2] + + c_error = xmlerror.xmlError( + domain=xmlerror.xmlErrorDomain.XML_FROM_PARSER, + code=xmlerror.xmlParserErrors.XML_ERR_EXT_ENTITY_STANDALONE, + level=xmlerror.xmlErrorLevel.XML_ERR_FATAL, + message=b"External entity resolution is disabled for security reasons " + b"when resolving '&%s;'. Use 'XMLParser(resolve_entities=True)' " + b"if you consider it safe to enable it.", + file=parser_input.filename, + node=entity, + str1= name, + str2=NULL, + str3=NULL, + line=parser_input.line if parser_input else 0, + int1=0, + int2=parser_input.col if parser_input else 0, + ) + err_context = xmlerror.xmlStructuredErrorContext + err_func(err_context, &c_error) + + c_ctxt.wellFormed = 0 + # The entity was looked up and does not need to be freed. + return NULL + + +cdef void _initSaxDocument(void* ctxt) noexcept with gil: + xmlparser.xmlSAX2StartDocument(ctxt) + c_ctxt = ctxt + c_doc = c_ctxt.myDoc + + # set up document dict + if c_doc and c_ctxt.dict and not c_doc.dict: + # I have no idea why libxml2 disables this - we need it + c_ctxt.dictNames = 1 + c_doc.dict = c_ctxt.dict + xmlparser.xmlDictReference(c_ctxt.dict) + + # set up XML ID hash table + if c_ctxt._private: + context = <_ParserContext>c_ctxt._private + if context._collect_ids: + # keep the global parser dict from filling up with XML IDs + if c_doc and not c_doc.ids: + # memory errors are not fatal here + c_dict = xmlparser.xmlDictCreate() + if c_dict: + c_doc.ids = tree.xmlHashCreateDict(0, c_dict) + xmlparser.xmlDictFree(c_dict) + else: + c_doc.ids = tree.xmlHashCreate(0) + else: + c_ctxt.loadsubset |= xmlparser.XML_SKIP_IDS + if c_doc and c_doc.ids and not tree.xmlHashSize(c_doc.ids): + # already initialised but empty => clear + tree.xmlHashFree(c_doc.ids, NULL) + c_doc.ids = NULL + + +############################################################ +## ET feed parser +############################################################ + +cdef class _FeedParser(_BaseParser): + cdef bint _feed_parser_running + + @property + def feed_error_log(self): + """The error log of the last (or current) run of the feed parser. + + Note that this is local to the feed parser and thus is + different from what the ``error_log`` property returns. + """ + return self._getPushParserContext()._error_log.copy() + + cpdef feed(self, data): + """feed(self, data) + + Feeds data to the parser. The argument should be an 8-bit string + buffer containing encoded data, although Unicode is supported as long + as both string types are not mixed. + + This is the main entry point to the consumer interface of a + parser. 
The parser will parse as much of the XML stream as it + can on each call. To finish parsing or to reset the parser, + call the ``close()`` method. Both methods may raise + ParseError if errors occur in the input data. If an error is + raised, there is no longer a need to call ``close()``. + + The feed parser interface is independent of the normal parser + usage. You can use the same parser as a feed parser and in + the ``parse()`` function concurrently. + """ + cdef _ParserContext context + cdef bytes bstring + cdef xmlparser.xmlParserCtxt* pctxt + cdef Py_ssize_t py_buffer_len, ustart + cdef const_char* char_data + cdef const_char* c_encoding + cdef int buffer_len + cdef int error + cdef bint recover = self._parse_options & xmlparser.XML_PARSE_RECOVER + + if isinstance(data, bytes): + if self._default_encoding is None: + c_encoding = NULL + else: + c_encoding = self._default_encoding + char_data = _cstr(data) + py_buffer_len = python.PyBytes_GET_SIZE(data) + ustart = 0 + elif isinstance(data, unicode): + c_encoding = b"UTF-8" + char_data = NULL + py_buffer_len = len( data) + ustart = 0 + else: + raise TypeError, "Parsing requires string data" + + context = self._getPushParserContext() + pctxt = context._c_ctxt + error = 0 + if not self._feed_parser_running: + context.prepare(set_document_loader=False) + self._feed_parser_running = 1 + c_filename = (_cstr(self._filename) + if self._filename is not None else NULL) + + # We have to give *mlCtxtResetPush() enough input to figure + # out the character encoding (at least four bytes), + # however if we give it all we got, we'll have nothing for + # *mlParseChunk() and things go wrong. + buffer_len = 0 + if char_data is not NULL: + buffer_len = 4 if py_buffer_len > 4 else py_buffer_len + orig_loader = _register_document_loader() + if self._for_html: + error = _htmlCtxtResetPush( + pctxt, char_data, buffer_len, c_filename, c_encoding, + self._parse_options) + else: + xmlparser.xmlCtxtUseOptions(pctxt, self._parse_options) + error = xmlparser.xmlCtxtResetPush( + pctxt, char_data, buffer_len, c_filename, c_encoding) + _reset_document_loader(orig_loader) + py_buffer_len -= buffer_len + char_data += buffer_len + if error: + raise MemoryError() + __GLOBAL_PARSER_CONTEXT.initParserDict(pctxt) + + #print pctxt.charset, 'NONE' if c_encoding is NULL else c_encoding + + fixup_error = 0 + while py_buffer_len > 0 and (error == 0 or recover): + if char_data is NULL: + # Unicode parsing by converting chunks to UTF-8 + buffer_len = 2**19 # len(bytes) <= 4 * (2**19) == 2 MiB + bstring = ( data)[ustart : ustart+buffer_len].encode('UTF-8') + ustart += buffer_len + py_buffer_len -= buffer_len # may end up < 0 + error, fixup_error = _parse_data_chunk(pctxt, bstring, len(bstring)) + else: + # Direct byte string parsing. 
+ buffer_len = py_buffer_len if py_buffer_len <= limits.INT_MAX else limits.INT_MAX + error, fixup_error = _parse_data_chunk(pctxt, char_data, buffer_len) + py_buffer_len -= buffer_len + char_data += buffer_len + + if fixup_error: + context.store_exception(MemoryError()) + + if context._has_raised(): + # propagate Python exceptions immediately + recover = 0 + error = 1 + break + + if error and not pctxt.replaceEntities and not pctxt.validate: + # in this mode, we ignore errors about undefined entities + for entry in context._error_log.filter_from_errors(): + if entry.type != ErrorTypes.WAR_UNDECLARED_ENTITY and \ + entry.type != ErrorTypes.ERR_UNDECLARED_ENTITY: + break + else: + error = 0 + + if not pctxt.wellFormed and pctxt.disableSAX and context._has_raised(): + # propagate Python exceptions immediately + recover = 0 + error = 1 + + if fixup_error or not recover and (error or not pctxt.wellFormed): + self._feed_parser_running = 0 + try: + context._handleParseResult(self, pctxt.myDoc, None) + finally: + context.cleanup() + + cpdef close(self): + """close(self) + + Terminates feeding data to this parser. This tells the parser to + process any remaining data in the feed buffer, and then returns the + root Element of the tree that was parsed. + + This method must be called after passing the last chunk of data into + the ``feed()`` method. It should only be called when using the feed + parser interface, all other usage is undefined. + """ + if not self._feed_parser_running: + raise XMLSyntaxError("no element found", + xmlerror.XML_ERR_INTERNAL_ERROR, 0, 0, + self._filename) + + context = self._getPushParserContext() + pctxt = context._c_ctxt + + self._feed_parser_running = 0 + if self._for_html: + htmlparser.htmlParseChunk(pctxt, NULL, 0, 1) + else: + xmlparser.xmlParseChunk(pctxt, NULL, 0, 1) + + if (pctxt.recovery and not pctxt.disableSAX and + isinstance(context, _SaxParserContext)): + # apply any left-over 'end' events + (<_SaxParserContext>context).flushEvents() + + try: + result = context._handleParseResult(self, pctxt.myDoc, None) + finally: + context.cleanup() + + if isinstance(result, _Document): + return (<_Document>result).getroot() + else: + return result + + +cdef (int, int) _parse_data_chunk(xmlparser.xmlParserCtxt* c_ctxt, + const char* char_data, int buffer_len): + fixup_error = 0 + with nogil: + if c_ctxt.html: + c_node = c_ctxt.node # last node where the parser stopped + orig_loader = _register_document_loader() + error = htmlparser.htmlParseChunk(c_ctxt, char_data, buffer_len, 0) + _reset_document_loader(orig_loader) + # and now for the fun part: move node names to the dict + if c_ctxt.myDoc: + fixup_error = _fixHtmlDictSubtreeNames( + c_ctxt.dict, c_ctxt.myDoc, c_node) + if c_ctxt.myDoc.dict and c_ctxt.myDoc.dict is not c_ctxt.dict: + xmlparser.xmlDictFree(c_ctxt.myDoc.dict) + c_ctxt.myDoc.dict = c_ctxt.dict + xmlparser.xmlDictReference(c_ctxt.dict) + else: + orig_loader = _register_document_loader() + error = xmlparser.xmlParseChunk(c_ctxt, char_data, buffer_len, 0) + _reset_document_loader(orig_loader) + return (error, fixup_error) + + +cdef int _htmlCtxtResetPush(xmlparser.xmlParserCtxt* c_ctxt, + const_char* c_data, int buffer_len, + const_char* c_filename, const_char* c_encoding, + int parse_options) except -1: + cdef xmlparser.xmlParserInput* c_input_stream + # libxml2 lacks an HTML push parser setup function + error = xmlparser.xmlCtxtResetPush( + c_ctxt, c_data, buffer_len, c_filename, c_encoding) + if error: + return error + + # fix libxml2 setup for HTML + 
c_ctxt.progressive = 1 + c_ctxt.html = 1 + htmlparser.htmlCtxtUseOptions(c_ctxt, parse_options) + + return 0 + + +############################################################ +## XML parser +############################################################ + +cdef int _XML_DEFAULT_PARSE_OPTIONS +_XML_DEFAULT_PARSE_OPTIONS = ( + xmlparser.XML_PARSE_NOENT | + xmlparser.XML_PARSE_NOCDATA | + xmlparser.XML_PARSE_NONET | + xmlparser.XML_PARSE_COMPACT | + xmlparser.XML_PARSE_BIG_LINES + ) + +cdef class XMLParser(_FeedParser): + """XMLParser(self, encoding=None, attribute_defaults=False, dtd_validation=False, load_dtd=False, no_network=True, ns_clean=False, recover=False, schema: XMLSchema =None, huge_tree=False, remove_blank_text=False, resolve_entities=True, remove_comments=False, remove_pis=False, strip_cdata=True, collect_ids=True, target=None, compact=True) + + The XML parser. + + Parsers can be supplied as additional argument to various parse + functions of the lxml API. A default parser is always available + and can be replaced by a call to the global function + 'set_default_parser'. New parsers can be created at any time + without a major run-time overhead. + + The keyword arguments in the constructor are mainly based on the + libxml2 parser configuration. A DTD will also be loaded if DTD + validation or attribute default values are requested (unless you + additionally provide an XMLSchema from which the default + attributes can be read). + + Available boolean keyword arguments: + + - attribute_defaults - inject default attributes from DTD or XMLSchema + - dtd_validation - validate against a DTD referenced by the document + - load_dtd - use DTD for parsing + - no_network - prevent network access for related files (default: True) + - ns_clean - clean up redundant namespace declarations + - recover - try hard to parse through broken XML + - remove_blank_text - discard blank text nodes that appear ignorable + - remove_comments - discard comments + - remove_pis - discard processing instructions + - strip_cdata - replace CDATA sections by normal text content (default: True) + - compact - save memory for short text content (default: True) + - collect_ids - use a hash table of XML IDs for fast access (default: True, always True with DTD validation) + - huge_tree - disable security restrictions and support very deep trees + and very long text content (only affects libxml2 2.7+) + + Other keyword arguments: + + - resolve_entities - replace entities by their text value: False for keeping the + entity references, True for resolving them, and 'internal' for resolving + internal definitions only (no external file/URL access). + The default used to be True and was changed to 'internal' in lxml 5.0. + - encoding - override the document encoding (note: libiconv encoding name) + - target - a parser target object that will receive the parse events + - schema - an XMLSchema to validate against + + Note that you should avoid sharing parsers between threads. While this is + not harmful, it is more efficient to use separate parsers. This does not + apply to the default parser. 
+ """ + def __init__(self, *, encoding=None, attribute_defaults=False, + dtd_validation=False, load_dtd=False, no_network=True, + ns_clean=False, recover=False, XMLSchema schema=None, + huge_tree=False, remove_blank_text=False, resolve_entities='internal', + remove_comments=False, remove_pis=False, strip_cdata=True, + collect_ids=True, target=None, compact=True): + cdef int parse_options + cdef bint resolve_external = True + parse_options = _XML_DEFAULT_PARSE_OPTIONS + if load_dtd: + parse_options = parse_options | xmlparser.XML_PARSE_DTDLOAD + if dtd_validation: + parse_options = parse_options | xmlparser.XML_PARSE_DTDVALID | \ + xmlparser.XML_PARSE_DTDLOAD + if attribute_defaults: + parse_options = parse_options | xmlparser.XML_PARSE_DTDATTR + if schema is None: + parse_options = parse_options | xmlparser.XML_PARSE_DTDLOAD + if ns_clean: + parse_options = parse_options | xmlparser.XML_PARSE_NSCLEAN + if recover: + parse_options = parse_options | xmlparser.XML_PARSE_RECOVER + if remove_blank_text: + parse_options = parse_options | xmlparser.XML_PARSE_NOBLANKS + if huge_tree: + parse_options = parse_options | xmlparser.XML_PARSE_HUGE + if not no_network: + parse_options = parse_options ^ xmlparser.XML_PARSE_NONET + if not compact: + parse_options = parse_options ^ xmlparser.XML_PARSE_COMPACT + if not resolve_entities: + parse_options = parse_options ^ xmlparser.XML_PARSE_NOENT + elif resolve_entities == 'internal': + resolve_external = False + if not strip_cdata: + parse_options = parse_options ^ xmlparser.XML_PARSE_NOCDATA + + _BaseParser.__init__(self, parse_options, False, schema, + remove_comments, remove_pis, strip_cdata, + collect_ids, target, encoding, resolve_external) + + +cdef class XMLPullParser(XMLParser): + """XMLPullParser(self, events=None, *, tag=None, **kwargs) + + XML parser that collects parse events in an iterator. + + The collected events are the same as for iterparse(), but the + parser itself is non-blocking in the sense that it receives + data chunks incrementally through its .feed() method, instead + of reading them directly from a file(-like) object all by itself. + + By default, it collects Element end events. To change that, + pass any subset of the available events into the ``events`` + argument: ``'start'``, ``'end'``, ``'start-ns'``, + ``'end-ns'``, ``'comment'``, ``'pi'``. + + To support loading external dependencies relative to the input + source, you can pass the ``base_url``. + """ + def __init__(self, events=None, *, tag=None, base_url=None, **kwargs): + XMLParser.__init__(self, **kwargs) + if events is None: + events = ('end',) + self._setBaseURL(base_url) + self._collectEvents(events, tag) + + def read_events(self): + return (<_SaxParserContext?>self._getPushParserContext()).events_iterator + + +cdef class ETCompatXMLParser(XMLParser): + """ETCompatXMLParser(self, encoding=None, attribute_defaults=False, \ + dtd_validation=False, load_dtd=False, no_network=True, \ + ns_clean=False, recover=False, schema=None, \ + huge_tree=False, remove_blank_text=False, resolve_entities=True, \ + remove_comments=True, remove_pis=True, strip_cdata=True, \ + target=None, compact=True) + + An XML parser with an ElementTree compatible default setup. + + See the XMLParser class for details. + + This parser has ``remove_comments`` and ``remove_pis`` enabled by default + and thus ignores comments and processing instructions. 
+ """ + def __init__(self, *, encoding=None, attribute_defaults=False, + dtd_validation=False, load_dtd=False, no_network=True, + ns_clean=False, recover=False, schema=None, + huge_tree=False, remove_blank_text=False, resolve_entities=True, + remove_comments=True, remove_pis=True, strip_cdata=True, + target=None, compact=True): + XMLParser.__init__(self, + attribute_defaults=attribute_defaults, + dtd_validation=dtd_validation, + load_dtd=load_dtd, + no_network=no_network, + ns_clean=ns_clean, + recover=recover, + remove_blank_text=remove_blank_text, + huge_tree=huge_tree, + compact=compact, + resolve_entities=resolve_entities, + remove_comments=remove_comments, + remove_pis=remove_pis, + strip_cdata=strip_cdata, + target=target, + encoding=encoding, + schema=schema) + +# ET 1.2 compatible name +XMLTreeBuilder = ETCompatXMLParser + + +cdef XMLParser __DEFAULT_XML_PARSER +__DEFAULT_XML_PARSER = XMLParser() + +__GLOBAL_PARSER_CONTEXT.setDefaultParser(__DEFAULT_XML_PARSER) + +def set_default_parser(_BaseParser parser=None): + """set_default_parser(parser=None) + + Set a default parser for the current thread. This parser is used + globally whenever no parser is supplied to the various parse functions of + the lxml API. If this function is called without a parser (or if it is + None), the default parser is reset to the original configuration. + + Note that the pre-installed default parser is not thread-safe. Avoid the + default parser in multi-threaded environments. You can create a separate + parser for each thread explicitly or use a parser pool. + """ + if parser is None: + parser = __DEFAULT_XML_PARSER + __GLOBAL_PARSER_CONTEXT.setDefaultParser(parser) + +def get_default_parser(): + "get_default_parser()" + return __GLOBAL_PARSER_CONTEXT.getDefaultParser() + +############################################################ +## HTML parser +############################################################ + +cdef int _HTML_DEFAULT_PARSE_OPTIONS +_HTML_DEFAULT_PARSE_OPTIONS = ( + htmlparser.HTML_PARSE_RECOVER | + htmlparser.HTML_PARSE_NONET | + htmlparser.HTML_PARSE_COMPACT + ) + +cdef class HTMLParser(_FeedParser): + """HTMLParser(self, encoding=None, remove_blank_text=False, \ + remove_comments=False, remove_pis=False, strip_cdata=True, \ + no_network=True, target=None, schema: XMLSchema =None, \ + recover=True, compact=True, collect_ids=True, huge_tree=False) + + The HTML parser. + + This parser allows reading HTML into a normal XML tree. By + default, it can read broken (non well-formed) HTML, depending on + the capabilities of libxml2. Use the 'recover' option to switch + this off. + + Available boolean keyword arguments: + + - recover - try hard to parse through broken HTML (default: True) + - no_network - prevent network access for related files (default: True) + - remove_blank_text - discard empty text nodes that are ignorable (i.e. 
not actual text content) + - remove_comments - discard comments + - remove_pis - discard processing instructions + - strip_cdata - replace CDATA sections by normal text content (default: True) + - compact - save memory for short text content (default: True) + - default_doctype - add a default doctype even if it is not found in the HTML (default: True) + - collect_ids - use a hash table of XML IDs for fast access (default: True) + - huge_tree - disable security restrictions and support very deep trees + and very long text content (only affects libxml2 2.7+) + + Other keyword arguments: + + - encoding - override the document encoding (note: libiconv encoding name) + - target - a parser target object that will receive the parse events + - schema - an XMLSchema to validate against + + Note that you should avoid sharing parsers between threads for performance + reasons. + """ + def __init__(self, *, encoding=None, remove_blank_text=False, + remove_comments=False, remove_pis=False, strip_cdata=True, + no_network=True, target=None, XMLSchema schema=None, + recover=True, compact=True, default_doctype=True, + collect_ids=True, huge_tree=False): + cdef int parse_options + parse_options = _HTML_DEFAULT_PARSE_OPTIONS + if remove_blank_text: + parse_options = parse_options | htmlparser.HTML_PARSE_NOBLANKS + if not recover: + parse_options = parse_options ^ htmlparser.HTML_PARSE_RECOVER + if not no_network: + parse_options = parse_options ^ htmlparser.HTML_PARSE_NONET + if not compact: + parse_options = parse_options ^ htmlparser.HTML_PARSE_COMPACT + if not default_doctype: + parse_options = parse_options ^ htmlparser.HTML_PARSE_NODEFDTD + if huge_tree: + parse_options = parse_options | xmlparser.XML_PARSE_HUGE + + _BaseParser.__init__(self, parse_options, True, schema, + remove_comments, remove_pis, strip_cdata, + collect_ids, target, encoding) + + +cdef HTMLParser __DEFAULT_HTML_PARSER +__DEFAULT_HTML_PARSER = HTMLParser() + + +cdef class HTMLPullParser(HTMLParser): + """HTMLPullParser(self, events=None, *, tag=None, base_url=None, **kwargs) + + HTML parser that collects parse events in an iterator. + + The collected events are the same as for iterparse(), but the + parser itself is non-blocking in the sense that it receives + data chunks incrementally through its .feed() method, instead + of reading them directly from a file(-like) object all by itself. + + By default, it collects Element end events. To change that, + pass any subset of the available events into the ``events`` + argument: ``'start'``, ``'end'``, ``'start-ns'``, + ``'end-ns'``, ``'comment'``, ``'pi'``. + + To support loading external dependencies relative to the input + source, you can pass the ``base_url``. 
+ """ + def __init__(self, events=None, *, tag=None, base_url=None, **kwargs): + HTMLParser.__init__(self, **kwargs) + if events is None: + events = ('end',) + self._setBaseURL(base_url) + self._collectEvents(events, tag) + + def read_events(self): + return (<_SaxParserContext?>self._getPushParserContext()).events_iterator + + +############################################################ +## helper functions for document creation +############################################################ + +cdef xmlDoc* _parseDoc(text, filename, _BaseParser parser) except NULL: + cdef char* c_filename + cdef char* c_text + cdef Py_ssize_t c_len + if parser is None: + parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser() + if not filename: + c_filename = NULL + else: + filename_utf = _encodeFilenameUTF8(filename) + c_filename = _cstr(filename_utf) + if isinstance(text, unicode): + if python.PyUnicode_IS_READY(text): + # PEP-393 Unicode string + c_len = python.PyUnicode_GET_LENGTH(text) * python.PyUnicode_KIND(text) + else: + # old Py_UNICODE string + c_len = python.PyUnicode_GET_DATA_SIZE(text) + if c_len > limits.INT_MAX: + return (<_BaseParser>parser)._parseDocFromFilelike( + StringIO(text), filename, None) + return (<_BaseParser>parser)._parseUnicodeDoc(text, c_filename) + else: + c_len = python.PyBytes_GET_SIZE(text) + if c_len > limits.INT_MAX: + return (<_BaseParser>parser)._parseDocFromFilelike( + BytesIO(text), filename, None) + c_text = _cstr(text) + return (<_BaseParser>parser)._parseDoc(c_text, c_len, c_filename) + +cdef xmlDoc* _parseDocFromFile(filename8, _BaseParser parser) except NULL: + if parser is None: + parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser() + return (<_BaseParser>parser)._parseDocFromFile(_cstr(filename8)) + +cdef xmlDoc* _parseDocFromFilelike(source, filename, + _BaseParser parser) except NULL: + if parser is None: + parser = __GLOBAL_PARSER_CONTEXT.getDefaultParser() + return (<_BaseParser>parser)._parseDocFromFilelike(source, filename, None) + +cdef xmlDoc* _newXMLDoc() except NULL: + cdef xmlDoc* result + result = tree.xmlNewDoc(NULL) + if result is NULL: + raise MemoryError() + if result.encoding is NULL: + result.encoding = tree.xmlStrdup("UTF-8") + __GLOBAL_PARSER_CONTEXT.initDocDict(result) + return result + +cdef xmlDoc* _newHTMLDoc() except NULL: + cdef xmlDoc* result + result = tree.htmlNewDoc(NULL, NULL) + if result is NULL: + raise MemoryError() + __GLOBAL_PARSER_CONTEXT.initDocDict(result) + return result + +cdef xmlDoc* _copyDoc(xmlDoc* c_doc, int recursive) except NULL: + cdef xmlDoc* result + if recursive: + with nogil: + result = tree.xmlCopyDoc(c_doc, recursive) + else: + result = tree.xmlCopyDoc(c_doc, 0) + if result is NULL: + raise MemoryError() + __GLOBAL_PARSER_CONTEXT.initDocDict(result) + return result + +cdef xmlDoc* _copyDocRoot(xmlDoc* c_doc, xmlNode* c_new_root) except NULL: + "Recursively copy the document and make c_new_root the new root node." + cdef xmlDoc* result + cdef xmlNode* c_node + result = tree.xmlCopyDoc(c_doc, 0) # non recursive + __GLOBAL_PARSER_CONTEXT.initDocDict(result) + with nogil: + c_node = tree.xmlDocCopyNode(c_new_root, result, 1) # recursive + if c_node is NULL: + raise MemoryError() + tree.xmlDocSetRootElement(result, c_node) + _copyTail(c_new_root.next, c_node) + return result + +cdef xmlNode* _copyNodeToDoc(xmlNode* c_node, xmlDoc* c_doc) except NULL: + "Recursively copy the element into the document. c_doc is not modified." 
+ cdef xmlNode* c_root + c_root = tree.xmlDocCopyNode(c_node, c_doc, 1) # recursive + if c_root is NULL: + raise MemoryError() + _copyTail(c_node.next, c_root) + return c_root + + +############################################################ +## API level helper functions for _Document creation +############################################################ + +cdef _Document _parseDocument(source, _BaseParser parser, base_url): + cdef _Document doc + source = _getFSPathOrObject(source) + if _isString(source): + # parse the file directly from the filesystem + doc = _parseDocumentFromURL(_encodeFilename(source), parser) + # fix base URL if requested + if base_url is not None: + base_url = _encodeFilenameUTF8(base_url) + if doc._c_doc.URL is not NULL: + tree.xmlFree(doc._c_doc.URL) + doc._c_doc.URL = tree.xmlStrdup(_xcstr(base_url)) + return doc + + if base_url is not None: + url = base_url + else: + url = _getFilenameForFile(source) + + if hasattr(source, 'getvalue') and hasattr(source, 'tell'): + # StringIO - reading from start? + if source.tell() == 0: + return _parseMemoryDocument(source.getvalue(), url, parser) + + # Support for file-like objects (urlgrabber.urlopen, ...) + if hasattr(source, 'read'): + return _parseFilelikeDocument(source, url, parser) + + raise TypeError, f"cannot parse from '{python._fqtypename(source).decode('UTF-8')}'" + +cdef _Document _parseDocumentFromURL(url, _BaseParser parser): + c_doc = _parseDocFromFile(url, parser) + return _documentFactory(c_doc, parser) + +cdef _Document _parseMemoryDocument(text, url, _BaseParser parser): + if isinstance(text, unicode): + if _hasEncodingDeclaration(text): + raise ValueError( + "Unicode strings with encoding declaration are not supported. " + "Please use bytes input or XML fragments without declaration.") + elif not isinstance(text, bytes): + raise ValueError, "can only parse strings" + c_doc = _parseDoc(text, url, parser) + return _documentFactory(c_doc, parser) + +cdef _Document _parseFilelikeDocument(source, url, _BaseParser parser): + c_doc = _parseDocFromFilelike(source, url, parser) + return _documentFactory(c_doc, parser) diff --git a/venv/lib/python3.10/site-packages/lxml/parsertarget.pxi b/venv/lib/python3.10/site-packages/lxml/parsertarget.pxi new file mode 100644 index 0000000000000000000000000000000000000000..37c29957dd12e7a685a6450c408baa68e2c3de02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/parsertarget.pxi @@ -0,0 +1,180 @@ +# Parser target context (ET target interface) + +cdef object inspect_getargspec +try: + from inspect import getfullargspec as inspect_getargspec +except ImportError: + from inspect import getargspec as inspect_getargspec + + +class _TargetParserResult(Exception): + # Admittedly, this is somewhat ugly, but it's the easiest way + # to push the Python level parser result through the parser + # machinery towards the API level functions + def __init__(self, result): + self.result = result + + +@cython.final +@cython.internal +cdef class _PythonSaxParserTarget(_SaxParserTarget): + cdef object _target_start + cdef object _target_end + cdef object _target_data + cdef object _target_start_ns + cdef object _target_end_ns + cdef object _target_doctype + cdef object _target_pi + cdef object _target_comment + cdef bint _start_takes_nsmap + + def __cinit__(self, target): + cdef int event_filter + event_filter = 0 + self._start_takes_nsmap = 0 + try: + self._target_start = target.start + if self._target_start is not None: + event_filter |= SAX_EVENT_START + except AttributeError: + pass + 
else: + try: + arguments = inspect_getargspec(self._target_start) + if len(arguments[0]) > 3 or arguments[1] is not None: + self._start_takes_nsmap = 1 + except TypeError: + pass + try: + self._target_end = target.end + if self._target_end is not None: + event_filter |= SAX_EVENT_END + except AttributeError: + pass + try: + self._target_start_ns = target.start_ns + if self._target_start_ns is not None: + event_filter |= SAX_EVENT_START_NS + except AttributeError: + pass + try: + self._target_end_ns = target.end_ns + if self._target_end_ns is not None: + event_filter |= SAX_EVENT_END_NS + except AttributeError: + pass + try: + self._target_data = target.data + if self._target_data is not None: + event_filter |= SAX_EVENT_DATA + except AttributeError: + pass + try: + self._target_doctype = target.doctype + if self._target_doctype is not None: + event_filter |= SAX_EVENT_DOCTYPE + except AttributeError: + pass + try: + self._target_pi = target.pi + if self._target_pi is not None: + event_filter |= SAX_EVENT_PI + except AttributeError: + pass + try: + self._target_comment = target.comment + if self._target_comment is not None: + event_filter |= SAX_EVENT_COMMENT + except AttributeError: + pass + self._sax_event_filter = event_filter + + cdef _handleSaxStart(self, tag, attrib, nsmap): + if self._start_takes_nsmap: + return self._target_start(tag, attrib, nsmap) + else: + return self._target_start(tag, attrib) + + cdef _handleSaxEnd(self, tag): + return self._target_end(tag) + + cdef _handleSaxStartNs(self, prefix, uri): + return self._target_start_ns(prefix, uri) + + cdef _handleSaxEndNs(self, prefix): + return self._target_end_ns(prefix) + + cdef int _handleSaxData(self, data) except -1: + self._target_data(data) + + cdef int _handleSaxDoctype(self, root_tag, public_id, system_id) except -1: + self._target_doctype(root_tag, public_id, system_id) + + cdef _handleSaxPi(self, target, data): + return self._target_pi(target, data) + + cdef _handleSaxComment(self, comment): + return self._target_comment(comment) + + +@cython.final +@cython.internal +@cython.no_gc_clear # Required because parent class uses it - Cython bug. +cdef class _TargetParserContext(_SaxParserContext): + """This class maps SAX2 events to the ET parser target interface. 
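A minimal sketch of the ET-style target protocol that the class above introspects; only the methods that actually exist on the target are hooked up. The EventCollector class below is hypothetical, the parser calls are public lxml.etree API:

from lxml import etree

class EventCollector:
    # start() may also accept a third nsmap argument; the introspection above
    # detects the extra parameter and then passes the namespace mapping too.
    def __init__(self):
        self.events = []
    def start(self, tag, attrib):
        self.events.append(("start", tag, dict(attrib)))
    def end(self, tag):
        self.events.append(("end", tag))
    def data(self, text):
        self.events.append(("data", text))
    def comment(self, text):
        self.events.append(("comment", text))
    def close(self):
        # Whatever close() returns becomes the result of the parse call.
        return self.events

parser = etree.XMLParser(target=EventCollector())
events = etree.fromstring(b"<root><a>x</a><!-- note --></root>", parser)
print(events)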
+ """ + cdef object _python_target + cdef int _setTarget(self, target) except -1: + self._python_target = target + if not isinstance(target, _SaxParserTarget) or \ + hasattr(target, '__dict__'): + target = _PythonSaxParserTarget(target) + self._setSaxParserTarget(target) + return 0 + + cdef _ParserContext _copy(self): + cdef _TargetParserContext context + context = _ParserContext._copy(self) + context._setTarget(self._python_target) + return context + + cdef void _cleanupTargetParserContext(self, xmlDoc* result) noexcept: + if self._c_ctxt.myDoc is not NULL: + if self._c_ctxt.myDoc is not result and \ + self._c_ctxt.myDoc._private is NULL: + # no _Document proxy => orphen + tree.xmlFreeDoc(self._c_ctxt.myDoc) + self._c_ctxt.myDoc = NULL + + cdef object _handleParseResult(self, _BaseParser parser, xmlDoc* result, + filename): + cdef bint recover + recover = parser._parse_options & xmlparser.XML_PARSE_RECOVER + try: + if self._has_raised(): + self._cleanupTargetParserContext(result) + self._raise_if_stored() + if not self._c_ctxt.wellFormed and not recover: + _raiseParseError(self._c_ctxt, filename, self._error_log) + except: + self._python_target.close() + raise + return self._python_target.close() + + cdef xmlDoc* _handleParseResultDoc(self, _BaseParser parser, + xmlDoc* result, filename) except NULL: + cdef bint recover + recover = parser._parse_options & xmlparser.XML_PARSE_RECOVER + if result is not NULL and result._private is NULL: + # no _Document proxy => orphen + tree.xmlFreeDoc(result) + try: + self._cleanupTargetParserContext(result) + self._raise_if_stored() + if not self._c_ctxt.wellFormed and not recover: + _raiseParseError(self._c_ctxt, filename, self._error_log) + except: + self._python_target.close() + raise + parse_result = self._python_target.close() + raise _TargetParserResult(parse_result) diff --git a/venv/lib/python3.10/site-packages/lxml/proxy.pxi b/venv/lib/python3.10/site-packages/lxml/proxy.pxi new file mode 100644 index 0000000000000000000000000000000000000000..f7b47a73a47d3f0792365796df1b80ffd60f6e34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/proxy.pxi @@ -0,0 +1,619 @@ +# Proxy functions and low level node allocation stuff + +# Proxies represent elements, their reference is stored in the C +# structure of the respective node to avoid multiple instantiation of +# the Python class. + +@cython.linetrace(False) +@cython.profile(False) +cdef inline _Element getProxy(xmlNode* c_node): + """Get a proxy for a given node. + """ + #print "getProxy for:", c_node + if c_node is not NULL and c_node._private is not NULL: + return <_Element>c_node._private + else: + return None + + +@cython.linetrace(False) +@cython.profile(False) +cdef inline bint hasProxy(xmlNode* c_node): + if c_node._private is NULL: + return False + return True + + +@cython.linetrace(False) +@cython.profile(False) +cdef inline int _registerProxy(_Element proxy, _Document doc, + xmlNode* c_node) except -1: + """Register a proxy and type for the node it's proxying for. + """ + #print "registering for:", proxy._c_node + assert not hasProxy(c_node), "double registering proxy!" + proxy._doc = doc + proxy._c_node = c_node + c_node._private = proxy + return 0 + + +@cython.linetrace(False) +@cython.profile(False) +cdef inline int _unregisterProxy(_Element proxy) except -1: + """Unregister a proxy for the node it's proxying for. 
+ """ + cdef xmlNode* c_node = proxy._c_node + assert c_node._private is proxy, "Tried to unregister unknown proxy" + c_node._private = NULL + return 0 + + +################################################################################ +# temporarily make a node the root node of its document + +cdef xmlDoc* _fakeRootDoc(xmlDoc* c_base_doc, xmlNode* c_node) except NULL: + return _plainFakeRootDoc(c_base_doc, c_node, 1) + +cdef xmlDoc* _plainFakeRootDoc(xmlDoc* c_base_doc, xmlNode* c_node, + bint with_siblings) except NULL: + # build a temporary document that has the given node as root node + # note that copy and original must not be modified during its lifetime!! + # always call _destroyFakeDoc() after use! + cdef xmlNode* c_child + cdef xmlNode* c_root + cdef xmlNode* c_new_root + cdef xmlDoc* c_doc + if with_siblings or (c_node.prev is NULL and c_node.next is NULL): + c_root = tree.xmlDocGetRootElement(c_base_doc) + if c_root is c_node: + # already the root node, no siblings + return c_base_doc + + c_doc = _copyDoc(c_base_doc, 0) # non recursive! + c_new_root = tree.xmlDocCopyNode(c_node, c_doc, 2) # non recursive! + tree.xmlDocSetRootElement(c_doc, c_new_root) + _copyParentNamespaces(c_node, c_new_root) + + c_new_root.children = c_node.children + c_new_root.last = c_node.last + c_new_root.next = c_new_root.prev = NULL + + # store original node + c_doc._private = c_node + + # divert parent pointers of children + c_child = c_new_root.children + while c_child is not NULL: + c_child.parent = c_new_root + c_child = c_child.next + + c_doc.children = c_new_root + return c_doc + +cdef void _destroyFakeDoc(xmlDoc* c_base_doc, xmlDoc* c_doc) noexcept: + # delete a temporary document + cdef xmlNode* c_child + cdef xmlNode* c_parent + cdef xmlNode* c_root + if c_doc is c_base_doc: + return + c_root = tree.xmlDocGetRootElement(c_doc) + + # restore parent pointers of children + c_parent = c_doc._private + c_child = c_root.children + while c_child is not NULL: + c_child.parent = c_parent + c_child = c_child.next + + # prevent recursive removal of children + c_root.children = c_root.last = NULL + tree.xmlFreeDoc(c_doc) + +cdef _Element _fakeDocElementFactory(_Document doc, xmlNode* c_element): + """Special element factory for cases where we need to create a fake + root document, but still need to instantiate arbitrary nodes from + it. If we instantiate the fake root node, things will turn bad + when it's destroyed. + + Instead, if we are asked to instantiate the fake root node, we + instantiate the original node instead. + """ + if c_element.doc is not doc._c_doc: + if c_element.doc._private is not NULL: + if c_element is c_element.doc.children: + c_element = c_element.doc._private + #assert c_element.type == tree.XML_ELEMENT_NODE + return _elementFactory(doc, c_element) + +################################################################################ +# support for freeing tree elements when proxy objects are destroyed + +cdef int attemptDeallocation(xmlNode* c_node) noexcept: + """Attempt deallocation of c_node (or higher up in tree). + """ + cdef xmlNode* c_top + # could be we actually aren't referring to the tree at all + if c_node is NULL: + #print "not freeing, node is NULL" + return 0 + c_top = getDeallocationTop(c_node) + if c_top is not NULL: + #print "freeing:", c_top.name + _removeText(c_top.next) # tail + tree.xmlFreeNode(c_top) + return 1 + return 0 + +cdef xmlNode* getDeallocationTop(xmlNode* c_node) noexcept: + """Return the top of the tree that can be deallocated, or NULL. 
+ """ + cdef xmlNode* c_next + #print "trying to do deallocating:", c_node.type + if hasProxy(c_node): + #print "Not freeing: proxies still exist" + return NULL + while c_node.parent is not NULL: + c_node = c_node.parent + #print "checking:", c_current.type + if c_node.type == tree.XML_DOCUMENT_NODE or \ + c_node.type == tree.XML_HTML_DOCUMENT_NODE: + #print "not freeing: still in doc" + return NULL + # if we're still attached to the document, don't deallocate + if hasProxy(c_node): + #print "Not freeing: proxies still exist" + return NULL + # see whether we have children to deallocate + if not canDeallocateChildNodes(c_node): + return NULL + # see whether we have siblings to deallocate + c_next = c_node.prev + while c_next: + if _isElement(c_next): + if hasProxy(c_next) or not canDeallocateChildNodes(c_next): + return NULL + c_next = c_next.prev + c_next = c_node.next + while c_next: + if _isElement(c_next): + if hasProxy(c_next) or not canDeallocateChildNodes(c_next): + return NULL + c_next = c_next.next + return c_node + +cdef int canDeallocateChildNodes(xmlNode* c_parent) noexcept: + cdef xmlNode* c_node + c_node = c_parent.children + tree.BEGIN_FOR_EACH_ELEMENT_FROM(c_parent, c_node, 1) + if hasProxy(c_node): + return 0 + tree.END_FOR_EACH_ELEMENT_FROM(c_node) + return 1 + +################################################################################ +# fix _Document references and namespaces when a node changes documents + +cdef void _copyParentNamespaces(xmlNode* c_from_node, xmlNode* c_to_node) noexcept nogil: + """Copy the namespaces of all ancestors of c_from_node to c_to_node. + """ + cdef xmlNode* c_parent + cdef xmlNs* c_ns + cdef xmlNs* c_new_ns + cdef int prefix_known + c_parent = c_from_node.parent + while c_parent and (tree._isElementOrXInclude(c_parent) or + c_parent.type == tree.XML_DOCUMENT_NODE): + c_new_ns = c_parent.nsDef + while c_new_ns: + # libxml2 will check if the prefix is already defined + tree.xmlNewNs(c_to_node, c_new_ns.href, c_new_ns.prefix) + c_new_ns = c_new_ns.next + c_parent = c_parent.parent + + +ctypedef struct _ns_update_map: + xmlNs* old + xmlNs* new + + +ctypedef struct _nscache: + _ns_update_map* ns_map + size_t size + size_t last + + +cdef int _growNsCache(_nscache* c_ns_cache) except -1: + cdef _ns_update_map* ns_map_ptr + if c_ns_cache.size == 0: + c_ns_cache.size = 20 + else: + c_ns_cache.size *= 2 + ns_map_ptr = <_ns_update_map*> python.lxml_realloc( + c_ns_cache.ns_map, c_ns_cache.size, sizeof(_ns_update_map)) + if not ns_map_ptr: + python.lxml_free(c_ns_cache.ns_map) + c_ns_cache.ns_map = NULL + raise MemoryError() + c_ns_cache.ns_map = ns_map_ptr + return 0 + + +cdef inline int _appendToNsCache(_nscache* c_ns_cache, + xmlNs* c_old_ns, xmlNs* c_new_ns) except -1: + if c_ns_cache.last >= c_ns_cache.size: + _growNsCache(c_ns_cache) + c_ns_cache.ns_map[c_ns_cache.last] = _ns_update_map(old=c_old_ns, new=c_new_ns) + c_ns_cache.last += 1 + + +cdef int _stripRedundantNamespaceDeclarations(xmlNode* c_element, _nscache* c_ns_cache, + xmlNs** c_del_ns_list) except -1: + """Removes namespace declarations from an element that are already + defined in its parents. Does not free the xmlNs's, just prepends + them to the c_del_ns_list. 
+ """ + cdef xmlNs* c_ns + cdef xmlNs* c_ns_next + cdef xmlNs** c_nsdef + # use a xmlNs** to handle assignments to "c_element.nsDef" correctly + c_nsdef = &c_element.nsDef + while c_nsdef[0] is not NULL: + c_ns = tree.xmlSearchNsByHref( + c_element.doc, c_element.parent, c_nsdef[0].href) + if c_ns is NULL: + # new namespace href => keep and cache the ns declaration + _appendToNsCache(c_ns_cache, c_nsdef[0], c_nsdef[0]) + c_nsdef = &c_nsdef[0].next + else: + # known namespace href => cache mapping and strip old ns + _appendToNsCache(c_ns_cache, c_nsdef[0], c_ns) + # cut out c_nsdef.next and prepend it to garbage chain + c_ns_next = c_nsdef[0].next + c_nsdef[0].next = c_del_ns_list[0] + c_del_ns_list[0] = c_nsdef[0] + c_nsdef[0] = c_ns_next + return 0 + + +cdef void _cleanUpFromNamespaceAdaptation(xmlNode* c_start_node, + _nscache* c_ns_cache, xmlNs* c_del_ns_list) noexcept: + # Try to recover from exceptions with really bad timing. We were in the middle + # of ripping out xmlNS-es and likely ran out of memory. Try to fix up the tree + # by re-adding the original xmlNs declarations (which might still be used in some + # places). + if c_ns_cache.ns_map: + python.lxml_free(c_ns_cache.ns_map) + if c_del_ns_list: + if not c_start_node.nsDef: + c_start_node.nsDef = c_del_ns_list + else: + c_ns = c_start_node.nsDef + while c_ns.next: + c_ns = c_ns.next + c_ns.next = c_del_ns_list + + +cdef int moveNodeToDocument(_Document doc, xmlDoc* c_source_doc, + xmlNode* c_element) except -1: + """Fix the xmlNs pointers of a node and its subtree that were moved. + + Originally copied from libxml2's xmlReconciliateNs(). Expects + libxml2 doc pointers of node to be correct already, but fixes + _Document references. + + For each node in the subtree, we do this: + + 1) Remove redundant declarations of namespace that are already + defined in its parents. + + 2) Replace namespaces that are *not* defined on the node or its + parents by the equivalent namespace declarations that *are* + defined on the node or its parents (possibly using a different + prefix). If a namespace is unknown, declare a new one on the + node. + + 3) Reassign the names of tags and attribute from the dict of the + target document *iff* it is different from the dict used in the + source subtree. + + 4) Set the Document reference to the new Document (if different). + This is done on backtracking to keep the original Document + alive as long as possible, until all its elements are updated. + + Note that the namespace declarations are removed from the tree in + step 1), but freed only after the complete subtree was traversed + and all occurrences were replaced by tree-internal pointers. + """ + cdef xmlNode* c_start_node + cdef xmlNode* c_node + cdef xmlDoc* c_doc = doc._c_doc + cdef tree.xmlAttr* c_attr + cdef char* c_name + cdef _nscache c_ns_cache = [NULL, 0, 0] + cdef xmlNs* c_del_ns_list = NULL + cdef proxy_count = 0 + + if not tree._isElementOrXInclude(c_element): + return 0 + + c_start_node = c_element + + tree.BEGIN_FOR_EACH_FROM(c_element, c_element, 1) + if tree._isElementOrXInclude(c_element): + if hasProxy(c_element): + proxy_count += 1 + + # 1) cut out namespaces defined here that are already known by + # the ancestors + if c_element.nsDef is not NULL: + try: + _stripRedundantNamespaceDeclarations(c_element, &c_ns_cache, &c_del_ns_list) + except: + _cleanUpFromNamespaceAdaptation(c_start_node, &c_ns_cache, c_del_ns_list) + raise + + # 2) make sure the namespaces of an element and its attributes + # are declared in this document (i.e. 
on the node or its parents) + if c_element.ns is not NULL: + _fixCNs(doc, c_start_node, c_element, &c_ns_cache, c_del_ns_list) + + c_node = c_element.properties + while c_node is not NULL: + if c_node.ns is not NULL: + _fixCNs(doc, c_start_node, c_node, &c_ns_cache, c_del_ns_list) + c_node = c_node.next + + tree.END_FOR_EACH_FROM(c_element) + + # free now unused namespace declarations + if c_del_ns_list is not NULL: + tree.xmlFreeNsList(c_del_ns_list) + + # cleanup + if c_ns_cache.ns_map is not NULL: + python.lxml_free(c_ns_cache.ns_map) + + # 3) fix the names in the tree if we moved it from a different thread + if doc._c_doc.dict is not c_source_doc.dict: + fixThreadDictNames(c_start_node, c_source_doc.dict, doc._c_doc.dict) + + # 4) fix _Document references + # (and potentially deallocate the source document) + if proxy_count > 0: + if proxy_count == 1 and c_start_node._private is not NULL: + proxy = getProxy(c_start_node) + if proxy is not None: + if proxy._doc is not doc: + proxy._doc = doc + else: + fixElementDocument(c_start_node, doc, proxy_count) + else: + fixElementDocument(c_start_node, doc, proxy_count) + + return 0 + + +cdef void _setTreeDoc(xmlNode* c_node, xmlDoc* c_doc) noexcept: + """Adaptation of 'xmlSetTreeDoc()' that deep-fixes the document links iteratively. + It avoids https://gitlab.gnome.org/GNOME/libxml2/issues/42 + """ + tree.BEGIN_FOR_EACH_FROM(c_node, c_node, 1) + if c_node.type == tree.XML_ELEMENT_NODE: + c_attr = c_node.properties + while c_attr: + if c_attr.atype == tree.XML_ATTRIBUTE_ID: + tree.xmlRemoveID(c_node.doc, c_attr) + c_attr.doc = c_doc + _fixDocChildren(c_attr.children, c_doc) + c_attr = c_attr.next + # Set doc link for all nodes, not only elements. + c_node.doc = c_doc + tree.END_FOR_EACH_FROM(c_node) + + +cdef inline void _fixDocChildren(xmlNode* c_child, xmlDoc* c_doc) noexcept: + while c_child: + c_child.doc = c_doc + if c_child.children: + _fixDocChildren(c_child.children, c_doc) + c_child = c_child.next + + +cdef int _fixCNs(_Document doc, xmlNode* c_start_node, xmlNode* c_node, + _nscache* c_ns_cache, xmlNs* c_del_ns_list) except -1: + cdef xmlNs* c_ns = NULL + cdef bint is_prefixed_attr = (c_node.type == tree.XML_ATTRIBUTE_NODE and c_node.ns.prefix) + + for ns_map in c_ns_cache.ns_map[:c_ns_cache.last]: + if c_node.ns is ns_map.old: + if is_prefixed_attr and not ns_map.new.prefix: + # avoid dropping prefix from attributes + continue + c_ns = ns_map.new + break + + if c_ns: + c_node.ns = c_ns + else: + # not in cache or not acceptable + # => find a replacement from this document + try: + c_ns = doc._findOrBuildNodeNs( + c_start_node, c_node.ns.href, c_node.ns.prefix, + c_node.type == tree.XML_ATTRIBUTE_NODE) + c_node.ns = c_ns + _appendToNsCache(c_ns_cache, c_node.ns, c_ns) + except: + _cleanUpFromNamespaceAdaptation(c_start_node, c_ns_cache, c_del_ns_list) + raise + return 0 + + +cdef int fixElementDocument(xmlNode* c_element, _Document doc, + size_t proxy_count) except -1: + cdef xmlNode* c_node = c_element + cdef _Element proxy = None # init-to-None required due to fake-loop below + tree.BEGIN_FOR_EACH_FROM(c_element, c_node, 1) + if c_node._private is not NULL: + proxy = getProxy(c_node) + if proxy is not None: + if proxy._doc is not doc: + proxy._doc = doc + proxy_count -= 1 + if proxy_count == 0: + return 0 + tree.END_FOR_EACH_FROM(c_node) + + +cdef void fixThreadDictNames(xmlNode* c_element, + tree.xmlDict* c_src_dict, + tree.xmlDict* c_dict) noexcept nogil: + # re-assign the names of tags and attributes + # + # this should only be 
called when the element is based on a + # different libxml2 tag name dictionary + if c_element.type == tree.XML_DOCUMENT_NODE or \ + c_element.type == tree.XML_HTML_DOCUMENT_NODE: + # may define "xml" namespace + fixThreadDictNsForNode(c_element, c_src_dict, c_dict) + if c_element.doc.extSubset: + fixThreadDictNamesForDtd(c_element.doc.extSubset, c_src_dict, c_dict) + if c_element.doc.intSubset: + fixThreadDictNamesForDtd(c_element.doc.intSubset, c_src_dict, c_dict) + c_element = c_element.children + while c_element is not NULL: + fixThreadDictNamesForNode(c_element, c_src_dict, c_dict) + c_element = c_element.next + elif tree._isElementOrXInclude(c_element): + fixThreadDictNamesForNode(c_element, c_src_dict, c_dict) + + +cdef inline void _fixThreadDictPtr(const_xmlChar** c_ptr, + tree.xmlDict* c_src_dict, + tree.xmlDict* c_dict) noexcept nogil: + c_str = c_ptr[0] + if c_str and c_src_dict and tree.xmlDictOwns(c_src_dict, c_str): + # return value can be NULL on memory error, but we don't handle that here + c_str = tree.xmlDictLookup(c_dict, c_str, -1) + if c_str: + c_ptr[0] = c_str + + +cdef void fixThreadDictNamesForNode(xmlNode* c_element, + tree.xmlDict* c_src_dict, + tree.xmlDict* c_dict) noexcept nogil: + cdef xmlNode* c_node = c_element + tree.BEGIN_FOR_EACH_FROM(c_element, c_node, 1) + if c_node.type in (tree.XML_ELEMENT_NODE, tree.XML_XINCLUDE_START): + fixThreadDictNamesForAttributes( + c_node.properties, c_src_dict, c_dict) + fixThreadDictNsForNode(c_node, c_src_dict, c_dict) + _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict) + elif c_node.type == tree.XML_TEXT_NODE: + # libxml2's SAX2 parser interns some indentation space + fixThreadDictContentForNode(c_node, c_src_dict, c_dict) + elif c_node.type == tree.XML_COMMENT_NODE: + pass # don't touch c_node.name + else: + _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict) + tree.END_FOR_EACH_FROM(c_node) + + +cdef inline void fixThreadDictNamesForAttributes(tree.xmlAttr* c_attr, + tree.xmlDict* c_src_dict, + tree.xmlDict* c_dict) noexcept nogil: + cdef xmlNode* c_child + cdef xmlNode* c_node = c_attr + while c_node is not NULL: + if c_node.type not in (tree.XML_TEXT_NODE, tree.XML_COMMENT_NODE): + _fixThreadDictPtr(&c_node.name, c_src_dict, c_dict) + # libxml2 keeps some (!) 
attribute values in the dict + c_child = c_node.children + while c_child is not NULL: + fixThreadDictContentForNode(c_child, c_src_dict, c_dict) + c_child = c_child.next + c_node = c_node.next + + +cdef inline void fixThreadDictContentForNode(xmlNode* c_node, + tree.xmlDict* c_src_dict, + tree.xmlDict* c_dict) noexcept nogil: + if c_node.content is not NULL and \ + c_node.content is not &c_node.properties: + if tree.xmlDictOwns(c_src_dict, c_node.content): + # result can be NULL on memory error, but we don't handle that here + c_node.content = tree.xmlDictLookup(c_dict, c_node.content, -1) + + +cdef inline void fixThreadDictNsForNode(xmlNode* c_node, + tree.xmlDict* c_src_dict, + tree.xmlDict* c_dict) noexcept nogil: + cdef xmlNs* c_ns = c_node.nsDef + while c_ns is not NULL: + _fixThreadDictPtr(&c_ns.href, c_src_dict, c_dict) + _fixThreadDictPtr(&c_ns.prefix, c_src_dict, c_dict) + c_ns = c_ns.next + + +cdef void fixThreadDictNamesForDtd(tree.xmlDtd* c_dtd, + tree.xmlDict* c_src_dict, + tree.xmlDict* c_dict) noexcept nogil: + cdef xmlNode* c_node + cdef tree.xmlElement* c_element + cdef tree.xmlAttribute* c_attribute + cdef tree.xmlEntity* c_entity + + c_node = c_dtd.children + while c_node: + if c_node.type == tree.XML_ELEMENT_DECL: + c_element = c_node + if c_element.content: + _fixThreadDictPtr(&c_element.content.name, c_src_dict, c_dict) + _fixThreadDictPtr(&c_element.content.prefix, c_src_dict, c_dict) + c_attribute = c_element.attributes + while c_attribute: + _fixThreadDictPtr(&c_attribute.defaultValue, c_src_dict, c_dict) + _fixThreadDictPtr(&c_attribute.name, c_src_dict, c_dict) + _fixThreadDictPtr(&c_attribute.prefix, c_src_dict, c_dict) + _fixThreadDictPtr(&c_attribute.elem, c_src_dict, c_dict) + c_attribute = c_attribute.nexth + elif c_node.type == tree.XML_ENTITY_DECL: + c_entity = c_node + _fixThreadDictPtr(&c_entity.name, c_src_dict, c_dict) + _fixThreadDictPtr(&c_entity.ExternalID, c_src_dict, c_dict) + _fixThreadDictPtr(&c_entity.SystemID, c_src_dict, c_dict) + _fixThreadDictPtr(&c_entity.content, c_src_dict, c_dict) + c_node = c_node.next + + +################################################################################ +# adopt an xmlDoc from an external libxml2 document source + +cdef _Document _adoptForeignDoc(xmlDoc* c_doc, _BaseParser parser=None, bint is_owned=True): + """Convert and wrap an externally produced xmlDoc for use in lxml. + Assures that all '_private' pointers are NULL to prevent accidental + dereference into lxml proxy objects. 
+ """ + if c_doc is NULL: + raise ValueError("Illegal document provided: NULL") + if c_doc.type not in (tree.XML_DOCUMENT_NODE, tree.XML_HTML_DOCUMENT_NODE): + doc_type = c_doc.type + if is_owned: + tree.xmlFreeDoc(c_doc) + raise ValueError(f"Illegal document provided: expected XML or HTML, found {doc_type}") + + cdef xmlNode* c_node = c_doc + + if is_owned: + tree.BEGIN_FOR_EACH_FROM(c_doc, c_node, 1) + c_node._private = NULL + tree.END_FOR_EACH_FROM(c_node) + else: + # create a fresh copy that lxml owns + c_doc = tree.xmlCopyDoc(c_doc, 1) + if c_doc is NULL: + raise MemoryError() + + return _documentFactory(c_doc, parser) diff --git a/venv/lib/python3.10/site-packages/lxml/public-api.pxi b/venv/lib/python3.10/site-packages/lxml/public-api.pxi new file mode 100644 index 0000000000000000000000000000000000000000..fb8b2a2ced120b69c311270adba08924d65980a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/public-api.pxi @@ -0,0 +1,178 @@ +# Public C API for lxml.etree + +cdef public api _Element deepcopyNodeToDocument(_Document doc, xmlNode* c_root): + "Recursively copy the element into the document. doc is not modified." + cdef xmlNode* c_node + c_node = _copyNodeToDoc(c_root, doc._c_doc) + return _elementFactory(doc, c_node) + +cdef public api _ElementTree elementTreeFactory(_Element context_node): + _assertValidNode(context_node) + return newElementTree(context_node, _ElementTree) + +cdef public api _ElementTree newElementTree(_Element context_node, + object subclass): + if context_node is NULL or context_node is None: + raise TypeError + _assertValidNode(context_node) + return _newElementTree(context_node._doc, context_node, subclass) + +cdef public api _ElementTree adoptExternalDocument(xmlDoc* c_doc, parser, bint is_owned): + if c_doc is NULL: + raise TypeError + doc = _adoptForeignDoc(c_doc, parser, is_owned) + return _elementTreeFactory(doc, None) + +cdef public api _Element elementFactory(_Document doc, xmlNode* c_node): + if c_node is NULL or doc is None: + raise TypeError + return _elementFactory(doc, c_node) + +cdef public api _Element makeElement(tag, _Document doc, parser, + text, tail, attrib, nsmap): + return _makeElement(tag, NULL, doc, parser, text, tail, attrib, nsmap, None) + +cdef public api _Element makeSubElement(_Element parent, tag, text, tail, + attrib, nsmap): + _assertValidNode(parent) + return _makeSubElement(parent, tag, text, tail, attrib, nsmap, None) + +cdef public api void setElementClassLookupFunction( + _element_class_lookup_function function, state): + _setElementClassLookupFunction(function, state) + +cdef public api object lookupDefaultElementClass(state, doc, xmlNode* c_node): + return _lookupDefaultElementClass(state, doc, c_node) + +cdef public api object lookupNamespaceElementClass(state, doc, xmlNode* c_node): + return _find_nselement_class(state, doc, c_node) + +cdef public api object callLookupFallback(FallbackElementClassLookup lookup, + _Document doc, xmlNode* c_node): + return _callLookupFallback(lookup, doc, c_node) + +cdef public api int tagMatches(xmlNode* c_node, const_xmlChar* c_href, const_xmlChar* c_name): + if c_node is NULL: + return -1 + return _tagMatches(c_node, c_href, c_name) + +cdef public api _Document documentOrRaise(object input): + return _documentOrRaise(input) + +cdef public api _Element rootNodeOrRaise(object input): + return _rootNodeOrRaise(input) + +cdef public api bint hasText(xmlNode* c_node): + return _hasText(c_node) + +cdef public api bint hasTail(xmlNode* c_node): + return _hasTail(c_node) + +cdef public 
api unicode textOf(xmlNode* c_node): + if c_node is NULL: + return None + return _collectText(c_node.children) + +cdef public api unicode tailOf(xmlNode* c_node): + if c_node is NULL: + return None + return _collectText(c_node.next) + +cdef public api int setNodeText(xmlNode* c_node, text) except -1: + if c_node is NULL: + raise ValueError + return _setNodeText(c_node, text) + +cdef public api int setTailText(xmlNode* c_node, text) except -1: + if c_node is NULL: + raise ValueError + return _setTailText(c_node, text) + +cdef public api unicode attributeValue(xmlNode* c_element, xmlAttr* c_attrib_node): + return _attributeValue(c_element, c_attrib_node) + +cdef public api unicode attributeValueFromNsName(xmlNode* c_element, + const_xmlChar* ns, const_xmlChar* name): + return _attributeValueFromNsName(c_element, ns, name) + +cdef public api object getAttributeValue(_Element element, key, default): + _assertValidNode(element) + return _getAttributeValue(element, key, default) + +cdef public api object iterattributes(_Element element, int keysvalues): + _assertValidNode(element) + return _attributeIteratorFactory(element, keysvalues) + +cdef public api list collectAttributes(xmlNode* c_element, int keysvalues): + return _collectAttributes(c_element, keysvalues) + +cdef public api int setAttributeValue(_Element element, key, value) except -1: + _assertValidNode(element) + return _setAttributeValue(element, key, value) + +cdef public api int delAttribute(_Element element, key) except -1: + _assertValidNode(element) + return _delAttribute(element, key) + +cdef public api int delAttributeFromNsName(tree.xmlNode* c_element, + const_xmlChar* c_href, const_xmlChar* c_name): + return _delAttributeFromNsName(c_element, c_href, c_name) + +cdef public api bint hasChild(xmlNode* c_node): + return _hasChild(c_node) + +cdef public api xmlNode* findChild(xmlNode* c_node, Py_ssize_t index): + return _findChild(c_node, index) + +cdef public api xmlNode* findChildForwards(xmlNode* c_node, Py_ssize_t index): + return _findChildForwards(c_node, index) + +cdef public api xmlNode* findChildBackwards(xmlNode* c_node, Py_ssize_t index): + return _findChildBackwards(c_node, index) + +cdef public api xmlNode* nextElement(xmlNode* c_node): + return _nextElement(c_node) + +cdef public api xmlNode* previousElement(xmlNode* c_node): + return _previousElement(c_node) + +cdef public api void appendChild(_Element parent, _Element child): + # deprecated, use appendChildToElement() instead! + _appendChild(parent, child) + +cdef public api int appendChildToElement(_Element parent, _Element child) except -1: + return _appendChild(parent, child) + +cdef public api unicode pyunicode(const_xmlChar* s): + if s is NULL: + raise TypeError + return funicode(s) + +cdef public api bytes utf8(object s): + return _utf8(s) + +cdef public api tuple getNsTag(object tag): + return _getNsTag(tag) + +cdef public api tuple getNsTagWithEmptyNs(object tag): + return _getNsTagWithEmptyNs(tag) + +cdef public api unicode namespacedName(xmlNode* c_node): + return _namespacedName(c_node) + +cdef public api unicode namespacedNameFromNsName(const_xmlChar* href, const_xmlChar* name): + return _namespacedNameFromNsName(href, name) + +cdef public api void iteratorStoreNext(_ElementIterator iterator, _Element node): + # deprecated! + iterator._storeNext(node) + +cdef public api void initTagMatch(_ElementTagMatcher matcher, tag): + # deprecated! 
+ matcher._initTagMatch(tag) + +cdef public api tree.xmlNs* findOrBuildNodeNsPrefix( + _Document doc, xmlNode* c_node, const_xmlChar* href, const_xmlChar* prefix) except NULL: + if doc is None: + raise TypeError + return doc._findOrBuildNodeNs(c_node, href, prefix, 0) diff --git a/venv/lib/python3.10/site-packages/lxml/pyclasslookup.py b/venv/lib/python3.10/site-packages/lxml/pyclasslookup.py new file mode 100644 index 0000000000000000000000000000000000000000..9e1496dfb762108154a0c6c321a5e8fcf73de909 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/pyclasslookup.py @@ -0,0 +1,3 @@ +# dummy module for backwards compatibility + +from lxml.etree import PythonElementClassLookup diff --git a/venv/lib/python3.10/site-packages/lxml/readonlytree.pxi b/venv/lib/python3.10/site-packages/lxml/readonlytree.pxi new file mode 100644 index 0000000000000000000000000000000000000000..9bc9a660731b8562a3d16609bc6aceebaf5f5eff --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/readonlytree.pxi @@ -0,0 +1,565 @@ +# read-only tree implementation + +@cython.internal +cdef class _ReadOnlyProxy: + "A read-only proxy class suitable for PIs/Comments (for internal use only!)." + cdef bint _free_after_use + cdef xmlNode* _c_node + cdef _ReadOnlyProxy _source_proxy + cdef list _dependent_proxies + def __cinit__(self): + self._c_node = NULL + self._free_after_use = 0 + + cdef int _assertNode(self) except -1: + """This is our way of saying: this proxy is invalid! + """ + if not self._c_node: + raise ReferenceError("Proxy invalidated!") + return 0 + + cdef int _raise_unsupported_type(self) except -1: + raise TypeError(f"Unsupported node type: {self._c_node.type}") + + cdef void free_after_use(self) noexcept: + """Should the xmlNode* be freed when releasing the proxy? + """ + self._free_after_use = 1 + + @property + def tag(self): + """Element tag + """ + self._assertNode() + if self._c_node.type == tree.XML_ELEMENT_NODE: + return _namespacedName(self._c_node) + elif self._c_node.type == tree.XML_PI_NODE: + return ProcessingInstruction + elif self._c_node.type == tree.XML_COMMENT_NODE: + return Comment + elif self._c_node.type == tree.XML_ENTITY_REF_NODE: + return Entity + else: + self._raise_unsupported_type() + + @property + def text(self): + """Text before the first subelement. This is either a string or + the value None, if there was no text. + """ + self._assertNode() + if self._c_node.type == tree.XML_ELEMENT_NODE: + return _collectText(self._c_node.children) + elif self._c_node.type in (tree.XML_PI_NODE, + tree.XML_COMMENT_NODE): + if self._c_node.content is NULL: + return '' + else: + return funicode(self._c_node.content) + elif self._c_node.type == tree.XML_ENTITY_REF_NODE: + return f'&{funicode(self._c_node.name)};' + else: + self._raise_unsupported_type() + + @property + def tail(self): + """Text after this element's end tag, but before the next sibling + element's start tag. This is either a string or the value None, if + there was no text. + """ + self._assertNode() + return _collectText(self._c_node.next) + + @property + def sourceline(self): + """Original line number as found by the parser or None if unknown. 
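Read-only proxies like the ones defined above are what user code receives inside a PythonElementClassLookup. A short sketch against the public API (the lookup and element classes below are illustrative):

from lxml import etree

class HonkElement(etree.ElementBase):
    @property
    def honking(self):
        return self.get("honking") == "true"

class HonkLookup(etree.PythonElementClassLookup):
    def lookup(self, document, element):
        # 'element' is a read-only proxy: tag, text, attributes and children
        # may be inspected here, but the tree must not be modified or kept.
        if element.tag == "honk":
            return HonkElement
        return None   # fall back to the default element class

parser = etree.XMLParser()
parser.set_element_class_lookup(HonkLookup())
root = etree.fromstring(b'<root><honk honking="true"/></root>', parser)
print(root[0].honking)   # True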
+ """ + cdef long line + self._assertNode() + line = tree.xmlGetLineNo(self._c_node) + if line > 0: + return line + else: + return None + + def __repr__(self): + self._assertNode() + if self._c_node.type == tree.XML_ELEMENT_NODE: + return "" % (self.tag, id(self)) + elif self._c_node.type == tree.XML_COMMENT_NODE: + return "" % self.text + elif self._c_node.type == tree.XML_ENTITY_NODE: + return "&%s;" % funicode(self._c_node.name) + elif self._c_node.type == tree.XML_PI_NODE: + text = self.text + if text: + return "" % (self.target, text) + else: + return "" % self.target + else: + self._raise_unsupported_type() + + def __getitem__(self, x): + """Returns the subelement at the given position or the requested + slice. + """ + cdef xmlNode* c_node = NULL + cdef Py_ssize_t step = 0, slicelength = 0 + cdef Py_ssize_t c, i + cdef _node_to_node_function next_element + cdef list result + self._assertNode() + if isinstance(x, slice): + # slicing + if _isFullSlice(x): + return _collectChildren(self) + _findChildSlice(x, self._c_node, &c_node, &step, &slicelength) + if c_node is NULL: + return [] + if step > 0: + next_element = _nextElement + else: + step = -step + next_element = _previousElement + result = [] + c = 0 + while c_node is not NULL and c < slicelength: + result.append(_newReadOnlyProxy(self._source_proxy, c_node)) + result.append(_elementFactory(self._doc, c_node)) + c = c + 1 + for i from 0 <= i < step: + c_node = next_element(c_node) + return result + else: + # indexing + c_node = _findChild(self._c_node, x) + if c_node is NULL: + raise IndexError, "list index out of range" + return _newReadOnlyProxy(self._source_proxy, c_node) + + def __len__(self): + """Returns the number of subelements. + """ + cdef Py_ssize_t c + cdef xmlNode* c_node + self._assertNode() + c = 0 + c_node = self._c_node.children + while c_node is not NULL: + if tree._isElement(c_node): + c = c + 1 + c_node = c_node.next + return c + + def __bool__(self): + cdef xmlNode* c_node + self._assertNode() + c_node = _findChildBackwards(self._c_node, 0) + return c_node != NULL + + def __deepcopy__(self, memo): + "__deepcopy__(self, memo)" + return self.__copy__() + + cpdef __copy__(self): + "__copy__(self)" + cdef xmlDoc* c_doc + cdef xmlNode* c_node + cdef _Document new_doc + if self._c_node is NULL: + return self + c_doc = _copyDocRoot(self._c_node.doc, self._c_node) # recursive + new_doc = _documentFactory(c_doc, None) + root = new_doc.getroot() + if root is not None: + return root + # Comment/PI + c_node = c_doc.children + while c_node is not NULL and c_node.type != self._c_node.type: + c_node = c_node.next + if c_node is NULL: + return None + return _elementFactory(new_doc, c_node) + + def __iter__(self): + return iter(self.getchildren()) + + def iterchildren(self, tag=None, *, reversed=False): + """iterchildren(self, tag=None, reversed=False) + + Iterate over the children of this element. + """ + children = self.getchildren() + if tag is not None and tag != '*': + children = [ el for el in children if el.tag == tag ] + if reversed: + children = children[::-1] + return iter(children) + + cpdef getchildren(self): + """Returns all subelements. The elements are returned in document + order. 
+ """ + cdef xmlNode* c_node + cdef list result + self._assertNode() + result = [] + c_node = self._c_node.children + while c_node is not NULL: + if tree._isElement(c_node): + result.append(_newReadOnlyProxy(self._source_proxy, c_node)) + c_node = c_node.next + return result + + def getparent(self): + """Returns the parent of this element or None for the root element. + """ + cdef xmlNode* c_parent + self._assertNode() + c_parent = self._c_node.parent + if c_parent is NULL or not tree._isElement(c_parent): + return None + else: + return _newReadOnlyProxy(self._source_proxy, c_parent) + + def getnext(self): + """Returns the following sibling of this element or None. + """ + cdef xmlNode* c_node + self._assertNode() + c_node = _nextElement(self._c_node) + if c_node is not NULL: + return _newReadOnlyProxy(self._source_proxy, c_node) + return None + + def getprevious(self): + """Returns the preceding sibling of this element or None. + """ + cdef xmlNode* c_node + self._assertNode() + c_node = _previousElement(self._c_node) + if c_node is not NULL: + return _newReadOnlyProxy(self._source_proxy, c_node) + return None + + +@cython.final +@cython.internal +cdef class _ReadOnlyPIProxy(_ReadOnlyProxy): + """A read-only proxy for processing instructions (for internal use only!)""" + @property + def target(self): + self._assertNode() + return funicode(self._c_node.name) + +@cython.final +@cython.internal +cdef class _ReadOnlyEntityProxy(_ReadOnlyProxy): + """A read-only proxy for entity references (for internal use only!)""" + property name: + def __get__(self): + return funicode(self._c_node.name) + + def __set__(self, value): + value_utf = _utf8(value) + if '&' in value or ';' in value: + raise ValueError(f"Invalid entity name '{value}'") + tree.xmlNodeSetName(self._c_node, _xcstr(value_utf)) + + @property + def text(self): + return f'&{funicode(self._c_node.name)};' + + +@cython.internal +cdef class _ReadOnlyElementProxy(_ReadOnlyProxy): + """The main read-only Element proxy class (for internal use only!).""" + + @property + def attrib(self): + self._assertNode() + return dict(_collectAttributes(self._c_node, 3)) + + @property + def prefix(self): + """Namespace prefix or None. + """ + self._assertNode() + if self._c_node.ns is not NULL: + if self._c_node.ns.prefix is not NULL: + return funicode(self._c_node.ns.prefix) + return None + + @property + def nsmap(self): + """Namespace prefix->URI mapping known in the context of this + Element. This includes all namespace declarations of the + parents. + + Note that changing the returned dict has no effect on the Element. + """ + self._assertNode() + return _build_nsmap(self._c_node) + + def get(self, key, default=None): + """Gets an element attribute. + """ + self._assertNode() + return _getNodeAttributeValue(self._c_node, key, default) + + def keys(self): + """Gets a list of attribute names. The names are returned in an + arbitrary order (just like for an ordinary Python dictionary). + """ + self._assertNode() + return _collectAttributes(self._c_node, 1) + + def values(self): + """Gets element attributes, as a sequence. The attributes are returned + in an arbitrary order. + """ + self._assertNode() + return _collectAttributes(self._c_node, 2) + + def items(self): + """Gets element attributes, as a sequence. The attributes are returned + in an arbitrary order. 
+ """ + self._assertNode() + return _collectAttributes(self._c_node, 3) + +cdef _ReadOnlyProxy _newReadOnlyProxy( + _ReadOnlyProxy source_proxy, xmlNode* c_node): + cdef _ReadOnlyProxy el + if c_node.type == tree.XML_ELEMENT_NODE: + el = _ReadOnlyElementProxy.__new__(_ReadOnlyElementProxy) + elif c_node.type == tree.XML_PI_NODE: + el = _ReadOnlyPIProxy.__new__(_ReadOnlyPIProxy) + elif c_node.type in (tree.XML_COMMENT_NODE, + tree.XML_ENTITY_REF_NODE): + el = _ReadOnlyProxy.__new__(_ReadOnlyProxy) + else: + raise TypeError(f"Unsupported element type: {c_node.type}") + el._c_node = c_node + _initReadOnlyProxy(el, source_proxy) + return el + +cdef inline _initReadOnlyProxy(_ReadOnlyProxy el, + _ReadOnlyProxy source_proxy): + if source_proxy is None: + el._source_proxy = el + el._dependent_proxies = [el] + else: + el._source_proxy = source_proxy + source_proxy._dependent_proxies.append(el) + +cdef _freeReadOnlyProxies(_ReadOnlyProxy sourceProxy): + cdef xmlNode* c_node + cdef _ReadOnlyProxy el + if sourceProxy is None: + return + if sourceProxy._dependent_proxies is None: + return + for el in sourceProxy._dependent_proxies: + c_node = el._c_node + el._c_node = NULL + if el._free_after_use: + tree.xmlFreeNode(c_node) + del sourceProxy._dependent_proxies[:] + +# opaque wrapper around non-element nodes, e.g. the document node +# +# This class does not imply any restrictions on modifiability or +# read-only status of the node, so use with caution. + +@cython.internal +cdef class _OpaqueNodeWrapper: + cdef tree.xmlNode* _c_node + def __init__(self): + raise TypeError, "This type cannot be instantiated from Python" + +@cython.final +@cython.internal +cdef class _OpaqueDocumentWrapper(_OpaqueNodeWrapper): + cdef int _assertNode(self) except -1: + """This is our way of saying: this proxy is invalid! + """ + assert self._c_node is not NULL, "Proxy invalidated!" + return 0 + + cpdef append(self, other_element): + """Append a copy of an Element to the list of children. + """ + cdef xmlNode* c_next + cdef xmlNode* c_node + self._assertNode() + c_node = _roNodeOf(other_element) + if c_node.type == tree.XML_ELEMENT_NODE: + if tree.xmlDocGetRootElement(self._c_node) is not NULL: + raise ValueError, "cannot append, document already has a root element" + elif c_node.type not in (tree.XML_PI_NODE, tree.XML_COMMENT_NODE): + raise TypeError, f"unsupported element type for top-level node: {c_node.type}" + c_node = _copyNodeToDoc(c_node, self._c_node) + c_next = c_node.next + tree.xmlAddChild(self._c_node, c_node) + _moveTail(c_next, c_node) + + def extend(self, elements): + """Append a copy of all Elements from a sequence to the list of + children. + """ + self._assertNode() + for element in elements: + self.append(element) + +cdef _OpaqueNodeWrapper _newOpaqueAppendOnlyNodeWrapper(xmlNode* c_node): + cdef _OpaqueNodeWrapper node + if c_node.type in (tree.XML_DOCUMENT_NODE, tree.XML_HTML_DOCUMENT_NODE): + node = _OpaqueDocumentWrapper.__new__(_OpaqueDocumentWrapper) + else: + node = _OpaqueNodeWrapper.__new__(_OpaqueNodeWrapper) + node._c_node = c_node + return node + +# element proxies that allow restricted modification + +@cython.internal +cdef class _ModifyContentOnlyProxy(_ReadOnlyProxy): + """A read-only proxy that allows changing the text content. 
+ """ + property text: + def __get__(self): + self._assertNode() + if self._c_node.content is NULL: + return '' + else: + return funicode(self._c_node.content) + + def __set__(self, value): + cdef tree.xmlDict* c_dict + self._assertNode() + if value is None: + c_text = NULL + else: + value = _utf8(value) + c_text = _xcstr(value) + tree.xmlNodeSetContent(self._c_node, c_text) + +@cython.final +@cython.internal +cdef class _ModifyContentOnlyPIProxy(_ModifyContentOnlyProxy): + """A read-only proxy that allows changing the text/target content of a + processing instruction. + """ + property target: + def __get__(self): + self._assertNode() + return funicode(self._c_node.name) + + def __set__(self, value): + self._assertNode() + value = _utf8(value) + c_text = _xcstr(value) + tree.xmlNodeSetName(self._c_node, c_text) + +@cython.final +@cython.internal +cdef class _ModifyContentOnlyEntityProxy(_ModifyContentOnlyProxy): + "A read-only proxy for entity references (for internal use only!)" + property name: + def __get__(self): + return funicode(self._c_node.name) + + def __set__(self, value): + value = _utf8(value) + assert '&' not in value and ';' not in value, \ + f"Invalid entity name '{value}'" + c_text = _xcstr(value) + tree.xmlNodeSetName(self._c_node, c_text) + + +@cython.final +@cython.internal +cdef class _AppendOnlyElementProxy(_ReadOnlyElementProxy): + """A read-only element that allows adding children and changing the + text content (i.e. everything that adds to the subtree). + """ + cpdef append(self, other_element): + """Append a copy of an Element to the list of children. + """ + cdef xmlNode* c_next + cdef xmlNode* c_node + self._assertNode() + c_node = _roNodeOf(other_element) + c_node = _copyNodeToDoc(c_node, self._c_node.doc) + c_next = c_node.next + tree.xmlAddChild(self._c_node, c_node) + _moveTail(c_next, c_node) + + def extend(self, elements): + """Append a copy of all Elements from a sequence to the list of + children. + """ + self._assertNode() + for element in elements: + self.append(element) + + property text: + """Text before the first subelement. This is either a string or the + value None, if there was no text. 
+ """ + def __get__(self): + self._assertNode() + return _collectText(self._c_node.children) + + def __set__(self, value): + self._assertNode() + if isinstance(value, QName): + value = _resolveQNameText(self, value).decode('utf8') + _setNodeText(self._c_node, value) + + +cdef _ReadOnlyProxy _newAppendOnlyProxy( + _ReadOnlyProxy source_proxy, xmlNode* c_node): + cdef _ReadOnlyProxy el + if c_node.type == tree.XML_ELEMENT_NODE: + el = _AppendOnlyElementProxy.__new__(_AppendOnlyElementProxy) + elif c_node.type == tree.XML_PI_NODE: + el = _ModifyContentOnlyPIProxy.__new__(_ModifyContentOnlyPIProxy) + elif c_node.type == tree.XML_COMMENT_NODE: + el = _ModifyContentOnlyProxy.__new__(_ModifyContentOnlyProxy) + else: + raise TypeError(f"Unsupported element type: {c_node.type}") + el._c_node = c_node + _initReadOnlyProxy(el, source_proxy) + return el + +cdef xmlNode* _roNodeOf(element) except NULL: + cdef xmlNode* c_node + if isinstance(element, _Element): + c_node = (<_Element>element)._c_node + elif isinstance(element, _ReadOnlyProxy): + c_node = (<_ReadOnlyProxy>element)._c_node + elif isinstance(element, _OpaqueNodeWrapper): + c_node = (<_OpaqueNodeWrapper>element)._c_node + else: + raise TypeError, f"invalid argument type {type(element)}" + + if c_node is NULL: + raise TypeError, "invalid element" + return c_node + +cdef xmlNode* _nonRoNodeOf(element) except NULL: + cdef xmlNode* c_node + if isinstance(element, _Element): + c_node = (<_Element>element)._c_node + elif isinstance(element, _AppendOnlyElementProxy): + c_node = (<_AppendOnlyElementProxy>element)._c_node + elif isinstance(element, _OpaqueNodeWrapper): + c_node = (<_OpaqueNodeWrapper>element)._c_node + else: + raise TypeError, f"invalid argument type {type(element)}" + + if c_node is NULL: + raise TypeError, "invalid element" + return c_node diff --git a/venv/lib/python3.10/site-packages/lxml/relaxng.pxi b/venv/lib/python3.10/site-packages/lxml/relaxng.pxi new file mode 100644 index 0000000000000000000000000000000000000000..35f875891f7e59a785518b8b70bd19ef3f0f6099 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/relaxng.pxi @@ -0,0 +1,165 @@ +# support for RelaxNG validation +from lxml.includes cimport relaxng + +cdef object _rnc2rng +try: + import rnc2rng as _rnc2rng +except ImportError: + _rnc2rng = None + + +cdef int _require_rnc2rng() except -1: + if _rnc2rng is None: + raise RelaxNGParseError( + 'compact syntax not supported (please install rnc2rng)') + return 0 + + +cdef class RelaxNGError(LxmlError): + """Base class for RelaxNG errors. + """ + +cdef class RelaxNGParseError(RelaxNGError): + """Error while parsing an XML document as RelaxNG. + """ + +cdef class RelaxNGValidateError(RelaxNGError): + """Error while validating an XML document with a RelaxNG schema. + """ + + +################################################################################ +# RelaxNG + +cdef class RelaxNG(_Validator): + """RelaxNG(self, etree=None, file=None) + Turn a document into a Relax NG validator. + + Either pass a schema as Element or ElementTree, or pass a file or + filename through the ``file`` keyword argument. 
+ """ + cdef relaxng.xmlRelaxNG* _c_schema + def __cinit__(self): + self._c_schema = NULL + + def __init__(self, etree=None, *, file=None): + cdef _Document doc + cdef _Element root_node + cdef xmlDoc* fake_c_doc = NULL + cdef relaxng.xmlRelaxNGParserCtxt* parser_ctxt = NULL + _Validator.__init__(self) + if etree is not None: + doc = _documentOrRaise(etree) + root_node = _rootNodeOrRaise(etree) + fake_c_doc = _fakeRootDoc(doc._c_doc, root_node._c_node) + parser_ctxt = relaxng.xmlRelaxNGNewDocParserCtxt(fake_c_doc) + elif file is not None: + if _isString(file): + if file[-4:].lower() == '.rnc': + _require_rnc2rng() + rng_data_utf8 = _utf8(_rnc2rng.dumps(_rnc2rng.load(file))) + doc = _parseMemoryDocument(rng_data_utf8, parser=None, url=file) + parser_ctxt = relaxng.xmlRelaxNGNewDocParserCtxt(doc._c_doc) + else: + doc = None + filename = _encodeFilename(file) + with self._error_log: + orig_loader = _register_document_loader() + parser_ctxt = relaxng.xmlRelaxNGNewParserCtxt(_cstr(filename)) + _reset_document_loader(orig_loader) + elif (_getFilenameForFile(file) or '')[-4:].lower() == '.rnc': + _require_rnc2rng() + rng_data_utf8 = _utf8(_rnc2rng.dumps(_rnc2rng.load(file))) + doc = _parseMemoryDocument( + rng_data_utf8, parser=None, url=_getFilenameForFile(file)) + parser_ctxt = relaxng.xmlRelaxNGNewDocParserCtxt(doc._c_doc) + else: + doc = _parseDocument(file, parser=None, base_url=None) + parser_ctxt = relaxng.xmlRelaxNGNewDocParserCtxt(doc._c_doc) + else: + raise RelaxNGParseError, "No tree or file given" + + if parser_ctxt is NULL: + if fake_c_doc is not NULL: + _destroyFakeDoc(doc._c_doc, fake_c_doc) + raise RelaxNGParseError( + self._error_log._buildExceptionMessage( + "Document is not parsable as Relax NG"), + self._error_log) + + # Need a cast here because older libxml2 releases do not use 'const' in the functype. + relaxng.xmlRelaxNGSetParserStructuredErrors( + parser_ctxt, _receiveError, self._error_log) + _connectGenericErrorLog(self._error_log, xmlerror.XML_FROM_RELAXNGP) + self._c_schema = relaxng.xmlRelaxNGParse(parser_ctxt) + _connectGenericErrorLog(None) + + relaxng.xmlRelaxNGFreeParserCtxt(parser_ctxt) + if self._c_schema is NULL: + if fake_c_doc is not NULL: + _destroyFakeDoc(doc._c_doc, fake_c_doc) + raise RelaxNGParseError( + self._error_log._buildExceptionMessage( + "Document is not valid Relax NG"), + self._error_log) + if fake_c_doc is not NULL: + _destroyFakeDoc(doc._c_doc, fake_c_doc) + + def __dealloc__(self): + relaxng.xmlRelaxNGFree(self._c_schema) + + def __call__(self, etree): + """__call__(self, etree) + + Validate doc using Relax NG. + + Returns true if document is valid, false if not.""" + cdef _Document doc + cdef _Element root_node + cdef xmlDoc* c_doc + cdef relaxng.xmlRelaxNGValidCtxt* valid_ctxt + cdef int ret + + assert self._c_schema is not NULL, "RelaxNG instance not initialised" + doc = _documentOrRaise(etree) + root_node = _rootNodeOrRaise(etree) + + valid_ctxt = relaxng.xmlRelaxNGNewValidCtxt(self._c_schema) + if valid_ctxt is NULL: + raise MemoryError() + + try: + self._error_log.clear() + # Need a cast here because older libxml2 releases do not use 'const' in the functype. 
+ relaxng.xmlRelaxNGSetValidStructuredErrors( + valid_ctxt, _receiveError, self._error_log) + _connectGenericErrorLog(self._error_log, xmlerror.XML_FROM_RELAXNGV) + c_doc = _fakeRootDoc(doc._c_doc, root_node._c_node) + with nogil: + ret = relaxng.xmlRelaxNGValidateDoc(valid_ctxt, c_doc) + _destroyFakeDoc(doc._c_doc, c_doc) + finally: + _connectGenericErrorLog(None) + relaxng.xmlRelaxNGFreeValidCtxt(valid_ctxt) + + if ret == -1: + raise RelaxNGValidateError( + "Internal error in Relax NG validation", + self._error_log) + if ret == 0: + return True + else: + return False + + @classmethod + def from_rnc_string(cls, src, base_url=None): + """Parse a RelaxNG schema in compact syntax from a text string + + Requires the rnc2rng package to be installed. + + Passing the source URL or file path of the source as 'base_url' + will enable resolving resource references relative to the source. + """ + _require_rnc2rng() + rng_str = utf8(_rnc2rng.dumps(_rnc2rng.loads(src))) + return cls(_parseMemoryDocument(rng_str, parser=None, url=base_url)) diff --git a/venv/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e01f7c9f85b11f983f2642a154b62a51b8f416ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/lxml/sax.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/lxml/sax.py b/venv/lib/python3.10/site-packages/lxml/sax.py new file mode 100644 index 0000000000000000000000000000000000000000..eee44226703c6c61f45807916b0a11984a3886ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/sax.py @@ -0,0 +1,275 @@ +# cython: language_level=2 + +""" +SAX-based adapter to copy trees from/to the Python standard library. + +Use the `ElementTreeContentHandler` class to build an ElementTree from +SAX events. + +Use the `ElementTreeProducer` class or the `saxify()` function to fire +the SAX events of an ElementTree against a SAX ContentHandler. + +See https://lxml.de/sax.html +""" + + +from xml.sax.handler import ContentHandler +from lxml import etree +from lxml.etree import ElementTree, SubElement +from lxml.etree import Comment, ProcessingInstruction + + +class SaxError(etree.LxmlError): + """General SAX error. + """ + + +def _getNsTag(tag): + if tag[0] == '{': + return tuple(tag[1:].split('}', 1)) + else: + return None, tag + + +class ElementTreeContentHandler(ContentHandler): + """Build an lxml ElementTree from SAX events. + """ + def __init__(self, makeelement=None): + ContentHandler.__init__(self) + self._root = None + self._root_siblings = [] + self._element_stack = [] + self._default_ns = None + self._ns_mapping = { None : [None] } + self._new_mappings = {} + if makeelement is None: + makeelement = etree.Element + self._makeelement = makeelement + + def _get_etree(self): + "Contains the generated ElementTree after parsing is finished." 
+ return ElementTree(self._root) + + etree = property(_get_etree, doc=_get_etree.__doc__) + + def setDocumentLocator(self, locator): + pass + + def startDocument(self): + pass + + def endDocument(self): + pass + + def startPrefixMapping(self, prefix, uri): + self._new_mappings[prefix] = uri + try: + self._ns_mapping[prefix].append(uri) + except KeyError: + self._ns_mapping[prefix] = [uri] + if prefix is None: + self._default_ns = uri + + def endPrefixMapping(self, prefix): + ns_uri_list = self._ns_mapping[prefix] + ns_uri_list.pop() + if prefix is None: + self._default_ns = ns_uri_list[-1] + + def _buildTag(self, ns_name_tuple): + ns_uri, local_name = ns_name_tuple + if ns_uri: + el_tag = "{%s}%s" % ns_name_tuple + elif self._default_ns: + el_tag = "{%s}%s" % (self._default_ns, local_name) + else: + el_tag = local_name + return el_tag + + def startElementNS(self, ns_name, qname, attributes=None): + el_name = self._buildTag(ns_name) + if attributes: + attrs = {} + try: + iter_attributes = attributes.iteritems() + except AttributeError: + iter_attributes = attributes.items() + + for name_tuple, value in iter_attributes: + if name_tuple[0]: + attr_name = "{%s}%s" % name_tuple + else: + attr_name = name_tuple[1] + attrs[attr_name] = value + else: + attrs = None + + element_stack = self._element_stack + if self._root is None: + element = self._root = \ + self._makeelement(el_name, attrs, self._new_mappings) + if self._root_siblings and hasattr(element, 'addprevious'): + for sibling in self._root_siblings: + element.addprevious(sibling) + del self._root_siblings[:] + else: + element = SubElement(element_stack[-1], el_name, + attrs, self._new_mappings) + element_stack.append(element) + + self._new_mappings.clear() + + def processingInstruction(self, target, data): + pi = ProcessingInstruction(target, data) + if self._root is None: + self._root_siblings.append(pi) + else: + self._element_stack[-1].append(pi) + + def endElementNS(self, ns_name, qname): + element = self._element_stack.pop() + el_tag = self._buildTag(ns_name) + if el_tag != element.tag: + raise SaxError("Unexpected element closed: " + el_tag) + + def startElement(self, name, attributes=None): + if attributes: + attributes = {(None, k): v for k, v in attributes.items()} + self.startElementNS((None, name), name, attributes) + + def endElement(self, name): + self.endElementNS((None, name), name) + + def characters(self, data): + last_element = self._element_stack[-1] + try: + # if there already is a child element, we must append to its tail + last_element = last_element[-1] + last_element.tail = (last_element.tail or '') + data + except IndexError: + # otherwise: append to the text + last_element.text = (last_element.text or '') + data + + ignorableWhitespace = characters + + +class ElementTreeProducer: + """Produces SAX events for an element and children. 
+ """ + def __init__(self, element_or_tree, content_handler): + try: + element = element_or_tree.getroot() + except AttributeError: + element = element_or_tree + self._element = element + self._content_handler = content_handler + from xml.sax.xmlreader import AttributesNSImpl as attr_class + self._attr_class = attr_class + self._empty_attributes = attr_class({}, {}) + + def saxify(self): + self._content_handler.startDocument() + + element = self._element + if hasattr(element, 'getprevious'): + siblings = [] + sibling = element.getprevious() + while getattr(sibling, 'tag', None) is ProcessingInstruction: + siblings.append(sibling) + sibling = sibling.getprevious() + for sibling in siblings[::-1]: + self._recursive_saxify(sibling, {}) + + self._recursive_saxify(element, {}) + + if hasattr(element, 'getnext'): + sibling = element.getnext() + while getattr(sibling, 'tag', None) is ProcessingInstruction: + self._recursive_saxify(sibling, {}) + sibling = sibling.getnext() + + self._content_handler.endDocument() + + def _recursive_saxify(self, element, parent_nsmap): + content_handler = self._content_handler + tag = element.tag + if tag is Comment or tag is ProcessingInstruction: + if tag is ProcessingInstruction: + content_handler.processingInstruction( + element.target, element.text) + tail = element.tail + if tail: + content_handler.characters(tail) + return + + element_nsmap = element.nsmap + new_prefixes = [] + if element_nsmap != parent_nsmap: + # There have been updates to the namespace + for prefix, ns_uri in element_nsmap.items(): + if parent_nsmap.get(prefix) != ns_uri: + new_prefixes.append( (prefix, ns_uri) ) + + attribs = element.items() + if attribs: + attr_values = {} + attr_qnames = {} + for attr_ns_name, value in attribs: + attr_ns_tuple = _getNsTag(attr_ns_name) + attr_values[attr_ns_tuple] = value + attr_qnames[attr_ns_tuple] = self._build_qname( + attr_ns_tuple[0], attr_ns_tuple[1], element_nsmap, + preferred_prefix=None, is_attribute=True) + sax_attributes = self._attr_class(attr_values, attr_qnames) + else: + sax_attributes = self._empty_attributes + + ns_uri, local_name = _getNsTag(tag) + qname = self._build_qname( + ns_uri, local_name, element_nsmap, element.prefix, is_attribute=False) + + for prefix, uri in new_prefixes: + content_handler.startPrefixMapping(prefix, uri) + content_handler.startElementNS( + (ns_uri, local_name), qname, sax_attributes) + text = element.text + if text: + content_handler.characters(text) + for child in element: + self._recursive_saxify(child, element_nsmap) + content_handler.endElementNS((ns_uri, local_name), qname) + for prefix, uri in new_prefixes: + content_handler.endPrefixMapping(prefix) + tail = element.tail + if tail: + content_handler.characters(tail) + + def _build_qname(self, ns_uri, local_name, nsmap, preferred_prefix, is_attribute): + if ns_uri is None: + return local_name + + if not is_attribute and nsmap.get(preferred_prefix) == ns_uri: + prefix = preferred_prefix + else: + # Pick the first matching prefix, in alphabetical order. + candidates = [ + pfx for (pfx, uri) in nsmap.items() + if pfx is not None and uri == ns_uri + ] + prefix = ( + candidates[0] if len(candidates) == 1 + else min(candidates) if candidates + else None + ) + + if prefix is None: + # Default namespace + return local_name + return prefix + ':' + local_name + + +def saxify(element_or_tree, content_handler): + """One-shot helper to generate SAX events from an XML tree and fire + them against a SAX ContentHandler. 
+ """ + return ElementTreeProducer(element_or_tree, content_handler).saxify() diff --git a/venv/lib/python3.10/site-packages/lxml/saxparser.pxi b/venv/lib/python3.10/site-packages/lxml/saxparser.pxi new file mode 100644 index 0000000000000000000000000000000000000000..dc03df9af112bf093b6027a52c1760b83ab1b5c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/saxparser.pxi @@ -0,0 +1,875 @@ +# SAX-like interfaces + +class XMLSyntaxAssertionError(XMLSyntaxError, AssertionError): + """ + An XMLSyntaxError that additionally inherits from AssertionError for + ElementTree / backwards compatibility reasons. + + This class may get replaced by a plain XMLSyntaxError in a future version. + """ + def __init__(self, message): + XMLSyntaxError.__init__(self, message, None, 0, 1) + + +ctypedef enum _SaxParserEvents: + SAX_EVENT_START = 1 << 0 + SAX_EVENT_END = 1 << 1 + SAX_EVENT_DATA = 1 << 2 + SAX_EVENT_DOCTYPE = 1 << 3 + SAX_EVENT_PI = 1 << 4 + SAX_EVENT_COMMENT = 1 << 5 + SAX_EVENT_START_NS = 1 << 6 + SAX_EVENT_END_NS = 1 << 7 + +ctypedef enum _ParseEventFilter: + PARSE_EVENT_FILTER_START = 1 << 0 + PARSE_EVENT_FILTER_END = 1 << 1 + PARSE_EVENT_FILTER_START_NS = 1 << 2 + PARSE_EVENT_FILTER_END_NS = 1 << 3 + PARSE_EVENT_FILTER_COMMENT = 1 << 4 + PARSE_EVENT_FILTER_PI = 1 << 5 + + +cdef int _buildParseEventFilter(events) except -1: + cdef int event_filter = 0 + for event in events: + if event == 'start': + event_filter |= PARSE_EVENT_FILTER_START + elif event == 'end': + event_filter |= PARSE_EVENT_FILTER_END + elif event == 'start-ns': + event_filter |= PARSE_EVENT_FILTER_START_NS + elif event == 'end-ns': + event_filter |= PARSE_EVENT_FILTER_END_NS + elif event == 'comment': + event_filter |= PARSE_EVENT_FILTER_COMMENT + elif event == 'pi': + event_filter |= PARSE_EVENT_FILTER_PI + else: + raise ValueError, f"invalid event name '{event}'" + return event_filter + + +cdef class _SaxParserTarget: + cdef int _sax_event_filter + + cdef _handleSaxStart(self, tag, attrib, nsmap): + return None + cdef _handleSaxEnd(self, tag): + return None + cdef int _handleSaxData(self, data) except -1: + return 0 + cdef int _handleSaxDoctype(self, root_tag, public_id, system_id) except -1: + return 0 + cdef _handleSaxPi(self, target, data): + return None + cdef _handleSaxComment(self, comment): + return None + cdef _handleSaxStartNs(self, prefix, uri): + return None + cdef _handleSaxEndNs(self, prefix): + return None + + +#@cython.final +@cython.internal +@cython.no_gc_clear # Required because parent class uses it - Cython bug. +cdef class _SaxParserContext(_ParserContext): + """This class maps SAX2 events to parser target events. 
+ """ + cdef _SaxParserTarget _target + cdef _BaseParser _parser + cdef xmlparser.startElementNsSAX2Func _origSaxStart + cdef xmlparser.endElementNsSAX2Func _origSaxEnd + cdef xmlparser.startElementSAXFunc _origSaxStartNoNs + cdef xmlparser.endElementSAXFunc _origSaxEndNoNs + cdef xmlparser.charactersSAXFunc _origSaxData + cdef xmlparser.cdataBlockSAXFunc _origSaxCData + cdef xmlparser.internalSubsetSAXFunc _origSaxDoctype + cdef xmlparser.commentSAXFunc _origSaxComment + cdef xmlparser.processingInstructionSAXFunc _origSaxPI + cdef xmlparser.startDocumentSAXFunc _origSaxStartDocument + + # for event collecting + cdef int _event_filter + cdef list _ns_stack + cdef list _node_stack + cdef _ParseEventsIterator events_iterator + + # for iterparse + cdef _Element _root + cdef _MultiTagMatcher _matcher + + def __cinit__(self, _BaseParser parser): + self._ns_stack = [] + self._node_stack = [] + self._parser = parser + self.events_iterator = _ParseEventsIterator() + + cdef void _setSaxParserTarget(self, _SaxParserTarget target) noexcept: + self._target = target + + cdef void _initParserContext(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept: + _ParserContext._initParserContext(self, c_ctxt) + if self._target is not None: + self._connectTarget(c_ctxt) + elif self._event_filter: + self._connectEvents(c_ctxt) + + cdef void _connectTarget(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept: + """Wrap original SAX2 callbacks to call into parser target. + """ + sax = c_ctxt.sax + self._origSaxStart = sax.startElementNs = NULL + self._origSaxStartNoNs = sax.startElement = NULL + if self._target._sax_event_filter & (SAX_EVENT_START | + SAX_EVENT_START_NS | + SAX_EVENT_END_NS): + # intercept => overwrite orig callback + # FIXME: also intercept on when collecting END events + if sax.initialized == xmlparser.XML_SAX2_MAGIC: + sax.startElementNs = _handleSaxTargetStart + if self._target._sax_event_filter & SAX_EVENT_START: + sax.startElement = _handleSaxTargetStartNoNs + + self._origSaxEnd = sax.endElementNs = NULL + self._origSaxEndNoNs = sax.endElement = NULL + if self._target._sax_event_filter & (SAX_EVENT_END | + SAX_EVENT_END_NS): + if sax.initialized == xmlparser.XML_SAX2_MAGIC: + sax.endElementNs = _handleSaxEnd + if self._target._sax_event_filter & SAX_EVENT_END: + sax.endElement = _handleSaxEndNoNs + + self._origSaxData = sax.characters = sax.cdataBlock = NULL + if self._target._sax_event_filter & SAX_EVENT_DATA: + sax.characters = sax.cdataBlock = _handleSaxData + + # doctype propagation is always required for entity replacement + self._origSaxDoctype = sax.internalSubset + if self._target._sax_event_filter & SAX_EVENT_DOCTYPE: + sax.internalSubset = _handleSaxTargetDoctype + + self._origSaxPI = sax.processingInstruction = NULL + if self._target._sax_event_filter & SAX_EVENT_PI: + sax.processingInstruction = _handleSaxTargetPI + + self._origSaxComment = sax.comment = NULL + if self._target._sax_event_filter & SAX_EVENT_COMMENT: + sax.comment = _handleSaxTargetComment + + # enforce entity replacement + sax.reference = NULL + c_ctxt.replaceEntities = 1 + + cdef void _connectEvents(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept: + """Wrap original SAX2 callbacks to collect parse events without parser target. 
+ """ + sax = c_ctxt.sax + self._origSaxStartDocument = sax.startDocument + sax.startDocument = _handleSaxStartDocument + + # only override "start" event handler if needed + self._origSaxStart = sax.startElementNs + if self._event_filter == 0 or c_ctxt.html or \ + self._event_filter & (PARSE_EVENT_FILTER_START | + PARSE_EVENT_FILTER_END | + PARSE_EVENT_FILTER_START_NS | + PARSE_EVENT_FILTER_END_NS): + sax.startElementNs = _handleSaxStart + + self._origSaxStartNoNs = sax.startElement + if self._event_filter == 0 or c_ctxt.html or \ + self._event_filter & (PARSE_EVENT_FILTER_START | + PARSE_EVENT_FILTER_END): + sax.startElement = _handleSaxStartNoNs + + # only override "end" event handler if needed + self._origSaxEnd = sax.endElementNs + if self._event_filter == 0 or \ + self._event_filter & (PARSE_EVENT_FILTER_END | + PARSE_EVENT_FILTER_END_NS): + sax.endElementNs = _handleSaxEnd + + self._origSaxEndNoNs = sax.endElement + if self._event_filter == 0 or \ + self._event_filter & PARSE_EVENT_FILTER_END: + sax.endElement = _handleSaxEndNoNs + + self._origSaxComment = sax.comment + if self._event_filter & PARSE_EVENT_FILTER_COMMENT: + sax.comment = _handleSaxComment + + self._origSaxPI = sax.processingInstruction + if self._event_filter & PARSE_EVENT_FILTER_PI: + sax.processingInstruction = _handleSaxPIEvent + + cdef _setEventFilter(self, events, tag): + self._event_filter = _buildParseEventFilter(events) + if not self._event_filter or tag is None or tag == '*': + self._matcher = None + else: + self._matcher = _MultiTagMatcher.__new__(_MultiTagMatcher, tag) + + cdef int startDocument(self, xmlDoc* c_doc) except -1: + try: + self._doc = _documentFactory(c_doc, self._parser) + finally: + self._parser = None # clear circular reference ASAP + if self._matcher is not None: + self._matcher.cacheTags(self._doc, True) # force entry in libxml2 dict + return 0 + + cdef int pushEvent(self, event, xmlNode* c_node) except -1: + cdef _Element root + if self._root is None: + root = self._doc.getroot() + if root is not None and root._c_node.type == tree.XML_ELEMENT_NODE: + self._root = root + node = _elementFactory(self._doc, c_node) + self.events_iterator._events.append( (event, node) ) + return 0 + + cdef int flushEvents(self) except -1: + events = self.events_iterator._events + while self._node_stack: + events.append( ('end', self._node_stack.pop()) ) + _pushSaxNsEndEvents(self) + while self._ns_stack: + _pushSaxNsEndEvents(self) + + cdef void _handleSaxException(self, xmlparser.xmlParserCtxt* c_ctxt) noexcept: + if c_ctxt.errNo == xmlerror.XML_ERR_OK: + c_ctxt.errNo = xmlerror.XML_ERR_INTERNAL_ERROR + # stop parsing immediately + c_ctxt.wellFormed = 0 + c_ctxt.disableSAX = 1 + c_ctxt.instate = xmlparser.XML_PARSER_EOF + self._store_raised() + + +@cython.final +@cython.internal +cdef class _ParseEventsIterator: + """A reusable parse events iterator""" + cdef list _events + cdef int _event_index + + def __cinit__(self): + self._events = [] + self._event_index = 0 + + def __iter__(self): + return self + + def __next__(self): + cdef int event_index = self._event_index + events = self._events + if event_index >= 2**10 or event_index * 2 >= len(events): + if event_index: + # clean up from time to time + del events[:event_index] + self._event_index = event_index = 0 + if event_index >= len(events): + raise StopIteration + item = events[event_index] + self._event_index = event_index + 1 + return item + + +cdef list _build_prefix_uri_list(_SaxParserContext context, int c_nb_namespaces, + const_xmlChar** 
c_namespaces): + "Build [(prefix, uri)] list of declared namespaces." + cdef int i + namespaces = [] + for i in xrange(c_nb_namespaces): + namespaces.append((funicodeOrEmpty(c_namespaces[0]), funicode(c_namespaces[1]))) + c_namespaces += 2 + return namespaces + + +cdef void _handleSaxStart( + void* ctxt, const_xmlChar* c_localname, const_xmlChar* c_prefix, + const_xmlChar* c_namespace, int c_nb_namespaces, + const_xmlChar** c_namespaces, + int c_nb_attributes, int c_nb_defaulted, + const_xmlChar** c_attributes) noexcept with gil: + cdef int i + cdef size_t c_len + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + cdef int event_filter = context._event_filter + try: + if (c_nb_namespaces and + event_filter & (PARSE_EVENT_FILTER_START_NS | + PARSE_EVENT_FILTER_END_NS)): + declared_namespaces = _build_prefix_uri_list( + context, c_nb_namespaces, c_namespaces) + if event_filter & PARSE_EVENT_FILTER_START_NS: + for prefix_uri_tuple in declared_namespaces: + context.events_iterator._events.append(("start-ns", prefix_uri_tuple)) + else: + declared_namespaces = None + + context._origSaxStart(c_ctxt, c_localname, c_prefix, c_namespace, + c_nb_namespaces, c_namespaces, c_nb_attributes, + c_nb_defaulted, c_attributes) + if c_ctxt.html: + _fixHtmlDictNodeNames(c_ctxt.dict, c_ctxt.node) + # The HTML parser in libxml2 reports the missing opening tags when it finds + # misplaced ones, but with tag names from C string constants that ignore the + # parser dict. Thus, we need to intern the name ourselves. + c_localname = tree.xmlDictLookup(c_ctxt.dict, c_localname, -1) + if c_localname is NULL: + raise MemoryError() + + if event_filter & PARSE_EVENT_FILTER_END_NS: + context._ns_stack.append(declared_namespaces) + if event_filter & (PARSE_EVENT_FILTER_END | + PARSE_EVENT_FILTER_START): + _pushSaxStartEvent(context, c_ctxt, c_namespace, c_localname, None) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxTargetStart( + void* ctxt, const_xmlChar* c_localname, const_xmlChar* c_prefix, + const_xmlChar* c_namespace, int c_nb_namespaces, + const_xmlChar** c_namespaces, + int c_nb_attributes, int c_nb_defaulted, + const_xmlChar** c_attributes) noexcept with gil: + cdef int i + cdef size_t c_len + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + + cdef int event_filter = context._event_filter + cdef int sax_event_filter = context._target._sax_event_filter + try: + if c_nb_namespaces: + declared_namespaces = _build_prefix_uri_list( + context, c_nb_namespaces, c_namespaces) + + if event_filter & PARSE_EVENT_FILTER_START_NS: + for prefix_uri_tuple in declared_namespaces: + context.events_iterator._events.append(("start-ns", prefix_uri_tuple)) + + if sax_event_filter & SAX_EVENT_START_NS: + for prefix, uri in declared_namespaces: + context._target._handleSaxStartNs(prefix, uri) + else: + declared_namespaces = None + + if sax_event_filter & SAX_EVENT_START: + if c_nb_defaulted > 0: + # only add default attributes if we asked for them + if c_ctxt.loadsubset & xmlparser.XML_COMPLETE_ATTRS == 0: + c_nb_attributes -= c_nb_defaulted + if c_nb_attributes == 0: + attrib = IMMUTABLE_EMPTY_MAPPING + else: + attrib = {} + for i in xrange(c_nb_attributes): + name = _namespacedNameFromNsName( + c_attributes[2], c_attributes[0]) + if c_attributes[3] is NULL: + value = '' + else: + c_len = c_attributes[4] - 
c_attributes[3] + value = c_attributes[3][:c_len].decode('utf8') + attrib[name] = value + c_attributes += 5 + + nsmap = dict(declared_namespaces) if c_nb_namespaces else IMMUTABLE_EMPTY_MAPPING + + element = _callTargetSaxStart( + context, c_ctxt, + _namespacedNameFromNsName(c_namespace, c_localname), + attrib, nsmap) + else: + element = None + + if (event_filter & PARSE_EVENT_FILTER_END_NS or + sax_event_filter & SAX_EVENT_END_NS): + context._ns_stack.append(declared_namespaces) + if event_filter & (PARSE_EVENT_FILTER_END | + PARSE_EVENT_FILTER_START): + _pushSaxStartEvent(context, c_ctxt, c_namespace, + c_localname, element) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxStartNoNs(void* ctxt, const_xmlChar* c_name, + const_xmlChar** c_attributes) noexcept with gil: + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + context._origSaxStartNoNs(c_ctxt, c_name, c_attributes) + if c_ctxt.html: + _fixHtmlDictNodeNames(c_ctxt.dict, c_ctxt.node) + # The HTML parser in libxml2 reports the missing opening tags when it finds + # misplaced ones, but with tag names from C string constants that ignore the + # parser dict. Thus, we need to intern the name ourselves. + c_name = tree.xmlDictLookup(c_ctxt.dict, c_name, -1) + if c_name is NULL: + raise MemoryError() + if context._event_filter & (PARSE_EVENT_FILTER_END | + PARSE_EVENT_FILTER_START): + _pushSaxStartEvent(context, c_ctxt, NULL, c_name, None) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxTargetStartNoNs(void* ctxt, const_xmlChar* c_name, + const_xmlChar** c_attributes) noexcept with gil: + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + if c_attributes is NULL: + attrib = IMMUTABLE_EMPTY_MAPPING + else: + attrib = {} + while c_attributes[0] is not NULL: + name = funicode(c_attributes[0]) + attrib[name] = funicodeOrEmpty(c_attributes[1]) + c_attributes += 2 + element = _callTargetSaxStart( + context, c_ctxt, funicode(c_name), + attrib, IMMUTABLE_EMPTY_MAPPING) + if context._event_filter & (PARSE_EVENT_FILTER_END | + PARSE_EVENT_FILTER_START): + _pushSaxStartEvent(context, c_ctxt, NULL, c_name, element) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef _callTargetSaxStart(_SaxParserContext context, + xmlparser.xmlParserCtxt* c_ctxt, + tag, attrib, nsmap): + element = context._target._handleSaxStart(tag, attrib, nsmap) + if element is not None and c_ctxt.input is not NULL: + if isinstance(element, _Element): + (<_Element>element)._c_node.line = ( + c_ctxt.input.line + if c_ctxt.input.line < 65535 else 65535) + return element + + +cdef int _pushSaxStartEvent(_SaxParserContext context, + xmlparser.xmlParserCtxt* c_ctxt, + const_xmlChar* c_href, + const_xmlChar* c_name, node) except -1: + if (context._matcher is None or + context._matcher.matchesNsTag(c_href, c_name)): + if node is None and context._target is None: + assert context._doc is not None + node = _elementFactory(context._doc, c_ctxt.node) + if context._event_filter & PARSE_EVENT_FILTER_START: + context.events_iterator._events.append(('start', node)) + if (context._target is None and + context._event_filter & PARSE_EVENT_FILTER_END): + context._node_stack.append(node) + return 0 + + +cdef void _handleSaxEnd(void* 
ctxt, const_xmlChar* c_localname, + const_xmlChar* c_prefix, + const_xmlChar* c_namespace) noexcept with gil: + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + if context._target is not None: + if context._target._sax_event_filter & SAX_EVENT_END: + node = context._target._handleSaxEnd( + _namespacedNameFromNsName(c_namespace, c_localname)) + else: + node = None + else: + context._origSaxEnd(c_ctxt, c_localname, c_prefix, c_namespace) + node = None + _pushSaxEndEvent(context, c_namespace, c_localname, node) + _pushSaxNsEndEvents(context) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxEndNoNs(void* ctxt, const_xmlChar* c_name) noexcept with gil: + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + if context._target is not None: + node = context._target._handleSaxEnd(funicode(c_name)) + else: + context._origSaxEndNoNs(c_ctxt, c_name) + node = None + _pushSaxEndEvent(context, NULL, c_name, node) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef int _pushSaxNsEndEvents(_SaxParserContext context) except -1: + cdef bint build_events = context._event_filter & PARSE_EVENT_FILTER_END_NS + cdef bint call_target = ( + context._target is not None + and context._target._sax_event_filter & SAX_EVENT_END_NS) + if not build_events and not call_target: + return 0 + + cdef list declared_namespaces = context._ns_stack.pop() + if declared_namespaces is None: + return 0 + + cdef tuple prefix_uri + for prefix_uri in reversed(declared_namespaces): + if call_target: + context._target._handleSaxEndNs(prefix_uri[0]) + if build_events: + context.events_iterator._events.append(('end-ns', None)) + + return 0 + + +cdef int _pushSaxEndEvent(_SaxParserContext context, + const_xmlChar* c_href, + const_xmlChar* c_name, node) except -1: + if context._event_filter & PARSE_EVENT_FILTER_END: + if (context._matcher is None or + context._matcher.matchesNsTag(c_href, c_name)): + if context._target is None: + node = context._node_stack.pop() + context.events_iterator._events.append(('end', node)) + return 0 + + +cdef void _handleSaxData(void* ctxt, const_xmlChar* c_data, int data_len) noexcept with gil: + # can only be called if parsing with a target + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + context._target._handleSaxData( + c_data[:data_len].decode('utf8')) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxTargetDoctype(void* ctxt, const_xmlChar* c_name, + const_xmlChar* c_public, + const_xmlChar* c_system) noexcept with gil: + # can only be called if parsing with a target + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + context._target._handleSaxDoctype( + funicodeOrNone(c_name), + funicodeOrNone(c_public), + funicodeOrNone(c_system)) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxStartDocument(void* ctxt) noexcept with gil: + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + context._origSaxStartDocument(ctxt) + c_doc = c_ctxt.myDoc + 
try: + context.startDocument(c_doc) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxTargetPI(void* ctxt, const_xmlChar* c_target, + const_xmlChar* c_data) noexcept with gil: + # can only be called if parsing with a target + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + pi = context._target._handleSaxPi( + funicodeOrNone(c_target), + funicodeOrEmpty(c_data)) + if context._event_filter & PARSE_EVENT_FILTER_PI: + context.events_iterator._events.append(('pi', pi)) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxPIEvent(void* ctxt, const_xmlChar* target, + const_xmlChar* data) noexcept with gil: + # can only be called when collecting pi events + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + context._origSaxPI(ctxt, target, data) + c_node = _findLastEventNode(c_ctxt) + if c_node is NULL: + return + try: + context.pushEvent('pi', c_node) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxTargetComment(void* ctxt, const_xmlChar* c_data) noexcept with gil: + # can only be called if parsing with a target + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + try: + comment = context._target._handleSaxComment(funicodeOrEmpty(c_data)) + if context._event_filter & PARSE_EVENT_FILTER_COMMENT: + context.events_iterator._events.append(('comment', comment)) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef void _handleSaxComment(void* ctxt, const_xmlChar* text) noexcept with gil: + # can only be called when collecting comment events + c_ctxt = ctxt + if c_ctxt._private is NULL or c_ctxt.disableSAX: + return + context = <_SaxParserContext>c_ctxt._private + context._origSaxComment(ctxt, text) + c_node = _findLastEventNode(c_ctxt) + if c_node is NULL: + return + try: + context.pushEvent('comment', c_node) + except: + context._handleSaxException(c_ctxt) + finally: + return # swallow any further exceptions + + +cdef inline xmlNode* _findLastEventNode(xmlparser.xmlParserCtxt* c_ctxt): + # this mimics what libxml2 creates for comments/PIs + if c_ctxt.inSubset == 1: + return c_ctxt.myDoc.intSubset.last + elif c_ctxt.inSubset == 2: + return c_ctxt.myDoc.extSubset.last + elif c_ctxt.node is NULL: + return c_ctxt.myDoc.last + elif c_ctxt.node.type == tree.XML_ELEMENT_NODE: + return c_ctxt.node.last + else: + return c_ctxt.node.next + + +############################################################ +## ET compatible XML tree builder +############################################################ + +cdef class TreeBuilder(_SaxParserTarget): + """TreeBuilder(self, element_factory=None, parser=None, + comment_factory=None, pi_factory=None, + insert_comments=True, insert_pis=True) + + Parser target that builds a tree from parse event callbacks. + + The factory arguments can be used to influence the creation of + elements, comments and processing instructions. + + By default, comments and processing instructions are inserted into + the tree, but they can be ignored by passing the respective flags. + + The final tree is returned by the ``close()`` method. 
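+
+    A minimal sketch of driving the builder directly (it is more commonly
+    passed as the ``target`` of a parser)::
+
+        >>> builder = TreeBuilder()
+        >>> _ = builder.start('root', {})
+        >>> builder.data('hello')
+        >>> _ = builder.end('root')
+        >>> root = builder.close()
+        >>> root.tag, root.text
+        ('root', 'hello')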
+ """ + cdef _BaseParser _parser + cdef object _factory + cdef object _comment_factory + cdef object _pi_factory + cdef list _data + cdef list _element_stack + cdef object _element_stack_pop + cdef _Element _last # may be None + cdef bint _in_tail + cdef bint _insert_comments + cdef bint _insert_pis + + def __init__(self, *, element_factory=None, parser=None, + comment_factory=None, pi_factory=None, + bint insert_comments=True, bint insert_pis=True): + self._sax_event_filter = \ + SAX_EVENT_START | SAX_EVENT_END | SAX_EVENT_DATA | \ + SAX_EVENT_PI | SAX_EVENT_COMMENT + self._data = [] # data collector + self._element_stack = [] # element stack + self._element_stack_pop = self._element_stack.pop + self._last = None # last element + self._in_tail = 0 # true if we're after an end tag + self._factory = element_factory + self._comment_factory = comment_factory if comment_factory is not None else Comment + self._pi_factory = pi_factory if pi_factory is not None else ProcessingInstruction + self._insert_comments = insert_comments + self._insert_pis = insert_pis + self._parser = parser + + @cython.final + cdef int _flush(self) except -1: + if self._data: + if self._last is not None: + text = "".join(self._data) + if self._in_tail: + assert self._last.tail is None, "internal error (tail)" + self._last.tail = text + else: + assert self._last.text is None, "internal error (text)" + self._last.text = text + del self._data[:] + return 0 + + # internal SAX event handlers + + @cython.final + cdef _handleSaxStart(self, tag, attrib, nsmap): + self._flush() + if self._factory is not None: + self._last = self._factory(tag, attrib) + if self._element_stack: + _appendChild(self._element_stack[-1], self._last) + elif self._element_stack: + self._last = _makeSubElement( + self._element_stack[-1], tag, None, None, attrib, nsmap, None) + else: + self._last = _makeElement( + tag, NULL, None, self._parser, None, None, attrib, nsmap, None) + self._element_stack.append(self._last) + self._in_tail = 0 + return self._last + + @cython.final + cdef _handleSaxEnd(self, tag): + self._flush() + self._last = self._element_stack_pop() + self._in_tail = 1 + return self._last + + @cython.final + cdef int _handleSaxData(self, data) except -1: + self._data.append(data) + + @cython.final + cdef _handleSaxPi(self, target, data): + elem = self._pi_factory(target, data) + if self._insert_pis: + self._flush() + self._last = elem + if self._element_stack: + _appendChild(self._element_stack[-1], self._last) + self._in_tail = 1 + return self._last + + @cython.final + cdef _handleSaxComment(self, comment): + elem = self._comment_factory(comment) + if self._insert_comments: + self._flush() + self._last = elem + if self._element_stack: + _appendChild(self._element_stack[-1], self._last) + self._in_tail = 1 + return elem + + # Python level event handlers + + def close(self): + """close(self) + + Flushes the builder buffers, and returns the toplevel document + element. Raises XMLSyntaxError on inconsistencies. + """ + if self._element_stack: + raise XMLSyntaxAssertionError("missing end tags") + # TODO: this does not necessarily seem like an error case. Why not just return None? + if self._last is None: + raise XMLSyntaxAssertionError("missing toplevel element") + return self._last + + def data(self, data): + """data(self, data) + + Adds text to the current element. The value should be either an + 8-bit string containing ASCII text, or a Unicode string. 
+ """ + self._handleSaxData(data) + + def start(self, tag, attrs, nsmap=None): + """start(self, tag, attrs, nsmap=None) + + Opens a new element. + """ + if nsmap is None: + nsmap = IMMUTABLE_EMPTY_MAPPING + return self._handleSaxStart(tag, attrs, nsmap) + + def end(self, tag): + """end(self, tag) + + Closes the current element. + """ + element = self._handleSaxEnd(tag) + assert self._last.tag == tag,\ + f"end tag mismatch (expected {self._last.tag}, got {tag})" + return element + + def pi(self, target, data=None): + """pi(self, target, data=None) + + Creates a processing instruction using the factory, appends it + (unless disabled) and returns it. + """ + return self._handleSaxPi(target, data) + + def comment(self, comment): + """comment(self, comment) + + Creates a comment using the factory, appends it (unless disabled) + and returns it. + """ + return self._handleSaxComment(comment) diff --git a/venv/lib/python3.10/site-packages/lxml/schematron.pxi b/venv/lib/python3.10/site-packages/lxml/schematron.pxi new file mode 100644 index 0000000000000000000000000000000000000000..ea0881fdf846b0c05f974292b30a4f7307e6848b --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/schematron.pxi @@ -0,0 +1,168 @@ +# support for Schematron validation +from lxml.includes cimport schematron + + +cdef class SchematronError(LxmlError): + """Base class of all Schematron errors. + """ + +cdef class SchematronParseError(SchematronError): + """Error while parsing an XML document as Schematron schema. + """ + +cdef class SchematronValidateError(SchematronError): + """Error while validating an XML document with a Schematron schema. + """ + + +################################################################################ +# Schematron + +cdef class Schematron(_Validator): + """Schematron(self, etree=None, file=None) + A Schematron validator. + + Pass a root Element or an ElementTree to turn it into a validator. + Alternatively, pass a filename as keyword argument 'file' to parse from + the file system. + + Schematron is a less well known, but very powerful schema language. The main + idea is to use the capabilities of XPath to put restrictions on the structure + and the content of XML documents. Here is a simple example:: + + >>> schematron = Schematron(XML(''' + ... + ... + ... + ... Attribute + ... is forbidden + ... + ... + ... + ... + ... ''')) + + >>> xml = XML(''' + ... + ... + ... + ... + ... ''') + + >>> schematron.validate(xml) + 0 + + >>> xml = XML(''' + ... + ... + ... + ... + ... ''') + + >>> schematron.validate(xml) + 1 + + Schematron was added to libxml2 in version 2.6.21. Before version 2.6.32, + however, Schematron lacked support for error reporting other than to stderr. + This version is therefore required to retrieve validation warnings and + errors in lxml. + """ + cdef schematron.xmlSchematron* _c_schema + cdef xmlDoc* _c_schema_doc + def __cinit__(self): + self._c_schema = NULL + self._c_schema_doc = NULL + + def __init__(self, etree=None, *, file=None): + cdef _Document doc + cdef _Element root_node + cdef xmlNode* c_node + cdef char* c_href + cdef schematron.xmlSchematronParserCtxt* parser_ctxt = NULL + _Validator.__init__(self) + if not config.ENABLE_SCHEMATRON: + raise SchematronError, \ + "lxml.etree was compiled without Schematron support." 
+ if etree is not None: + doc = _documentOrRaise(etree) + root_node = _rootNodeOrRaise(etree) + self._c_schema_doc = _copyDocRoot(doc._c_doc, root_node._c_node) + parser_ctxt = schematron.xmlSchematronNewDocParserCtxt(self._c_schema_doc) + elif file is not None: + filename = _getFilenameForFile(file) + if filename is None: + # XXX assume a string object + filename = file + filename = _encodeFilename(filename) + with self._error_log: + orig_loader = _register_document_loader() + parser_ctxt = schematron.xmlSchematronNewParserCtxt(_cstr(filename)) + _reset_document_loader(orig_loader) + else: + raise SchematronParseError, "No tree or file given" + + if parser_ctxt is NULL: + if self._c_schema_doc is not NULL: + tree.xmlFreeDoc(self._c_schema_doc) + self._c_schema_doc = NULL + raise MemoryError() + + try: + with self._error_log: + orig_loader = _register_document_loader() + self._c_schema = schematron.xmlSchematronParse(parser_ctxt) + _reset_document_loader(orig_loader) + finally: + schematron.xmlSchematronFreeParserCtxt(parser_ctxt) + + if self._c_schema is NULL: + raise SchematronParseError( + "Document is not a valid Schematron schema", + self._error_log) + + def __dealloc__(self): + schematron.xmlSchematronFree(self._c_schema) + if self._c_schema_doc is not NULL: + tree.xmlFreeDoc(self._c_schema_doc) + + def __call__(self, etree): + """__call__(self, etree) + + Validate doc using Schematron. + + Returns true if document is valid, false if not.""" + cdef _Document doc + cdef _Element root_node + cdef xmlDoc* c_doc + cdef schematron.xmlSchematronValidCtxt* valid_ctxt + cdef int ret + + assert self._c_schema is not NULL, "Schematron instance not initialised" + doc = _documentOrRaise(etree) + root_node = _rootNodeOrRaise(etree) + + valid_ctxt = schematron.xmlSchematronNewValidCtxt( + self._c_schema, schematron.XML_SCHEMATRON_OUT_ERROR) + if valid_ctxt is NULL: + raise MemoryError() + + try: + self._error_log.clear() + # Need a cast here because older libxml2 releases do not use 'const' in the functype. + schematron.xmlSchematronSetValidStructuredErrors( + valid_ctxt, _receiveError, self._error_log) + c_doc = _fakeRootDoc(doc._c_doc, root_node._c_node) + with nogil: + ret = schematron.xmlSchematronValidateDoc(valid_ctxt, c_doc) + _destroyFakeDoc(doc._c_doc, c_doc) + finally: + schematron.xmlSchematronFreeValidCtxt(valid_ctxt) + + if ret == -1: + raise SchematronValidateError( + "Internal error in Schematron validation", + self._error_log) + if ret == 0: + return True + else: + return False diff --git a/venv/lib/python3.10/site-packages/lxml/serializer.pxi b/venv/lib/python3.10/site-packages/lxml/serializer.pxi new file mode 100644 index 0000000000000000000000000000000000000000..0a7a1e43649912603e98105e4e248b9eba3af6da --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/serializer.pxi @@ -0,0 +1,1871 @@ +# XML serialization and output functions + +cdef object GzipFile +from gzip import GzipFile + + +cdef class SerialisationError(LxmlError): + """A libxml2 error that occurred during serialisation. 
+ """ + + +cdef enum _OutputMethods: + OUTPUT_METHOD_XML + OUTPUT_METHOD_HTML + OUTPUT_METHOD_TEXT + + +cdef int _findOutputMethod(method) except -1: + if method is None: + return OUTPUT_METHOD_XML + method = method.lower() + if method == "xml": + return OUTPUT_METHOD_XML + if method == "html": + return OUTPUT_METHOD_HTML + if method == "text": + return OUTPUT_METHOD_TEXT + raise ValueError(f"unknown output method {method!r}") + + +cdef _textToString(xmlNode* c_node, encoding, bint with_tail): + cdef bint needs_conversion + cdef const_xmlChar* c_text + cdef xmlNode* c_text_node + cdef tree.xmlBuffer* c_buffer + cdef int error_result + + c_buffer = tree.xmlBufferCreate() + if c_buffer is NULL: + raise MemoryError() + + with nogil: + error_result = tree.xmlNodeBufGetContent(c_buffer, c_node) + if with_tail: + c_text_node = _textNodeOrSkip(c_node.next) + while c_text_node is not NULL: + tree.xmlBufferWriteChar(c_buffer, c_text_node.content) + c_text_node = _textNodeOrSkip(c_text_node.next) + c_text = tree.xmlBufferContent(c_buffer) + + if error_result < 0 or c_text is NULL: + tree.xmlBufferFree(c_buffer) + raise SerialisationError, "Error during serialisation (out of memory?)" + + try: + needs_conversion = 0 + if encoding is unicode: + needs_conversion = 1 + elif encoding is not None: + # Python prefers lower case encoding names + encoding = encoding.lower() + if encoding not in ('utf8', 'utf-8'): + if encoding == 'ascii': + if isutf8l(c_text, tree.xmlBufferLength(c_buffer)): + # will raise a decode error below + needs_conversion = 1 + else: + needs_conversion = 1 + + if needs_conversion: + text = (c_text)[:tree.xmlBufferLength(c_buffer)].decode('utf8') + if encoding is not unicode: + encoding = _utf8(encoding) + text = python.PyUnicode_AsEncodedString( + text, encoding, 'strict') + else: + text = (c_text)[:tree.xmlBufferLength(c_buffer)] + finally: + tree.xmlBufferFree(c_buffer) + return text + + +cdef _tostring(_Element element, encoding, doctype, method, + bint write_xml_declaration, bint write_complete_document, + bint pretty_print, bint with_tail, int standalone): + """Serialize an element to an encoded string representation of its XML + tree. 
+ """ + cdef tree.xmlOutputBuffer* c_buffer + cdef tree.xmlBuf* c_result_buffer + cdef tree.xmlCharEncodingHandler* enchandler + cdef const_char* c_enc + cdef const_xmlChar* c_version + cdef const_xmlChar* c_doctype + cdef int c_method + cdef int error_result + if element is None: + return None + _assertValidNode(element) + c_method = _findOutputMethod(method) + if c_method == OUTPUT_METHOD_TEXT: + return _textToString(element._c_node, encoding, with_tail) + if encoding is None or encoding is unicode: + c_enc = NULL + else: + encoding = _utf8(encoding) + c_enc = _cstr(encoding) + if doctype is None: + c_doctype = NULL + else: + doctype = _utf8(doctype) + c_doctype = _xcstr(doctype) + # it is necessary to *and* find the encoding handler *and* use + # encoding during output + enchandler = tree.xmlFindCharEncodingHandler(c_enc) + if enchandler is NULL and c_enc is not NULL: + if encoding is not None: + encoding = encoding.decode('UTF-8') + raise LookupError, f"unknown encoding: '{encoding}'" + c_buffer = tree.xmlAllocOutputBuffer(enchandler) + if c_buffer is NULL: + tree.xmlCharEncCloseFunc(enchandler) + raise MemoryError() + + with nogil: + _writeNodeToBuffer(c_buffer, element._c_node, c_enc, c_doctype, c_method, + write_xml_declaration, write_complete_document, + pretty_print, with_tail, standalone) + tree.xmlOutputBufferFlush(c_buffer) + if c_buffer.conv is not NULL: + c_result_buffer = c_buffer.conv + else: + c_result_buffer = c_buffer.buffer + + error_result = c_buffer.error + if error_result != xmlerror.XML_ERR_OK: + tree.xmlOutputBufferClose(c_buffer) + _raiseSerialisationError(error_result) + + try: + if encoding is unicode: + result = (tree.xmlBufContent( + c_result_buffer))[:tree.xmlBufUse(c_result_buffer)].decode('UTF-8') + else: + result = (tree.xmlBufContent( + c_result_buffer))[:tree.xmlBufUse(c_result_buffer)] + finally: + error_result = tree.xmlOutputBufferClose(c_buffer) + if error_result == -1: + _raiseSerialisationError(error_result) + return result + +cdef bytes _tostringC14N(element_or_tree, bint exclusive, bint with_comments, inclusive_ns_prefixes): + cdef xmlDoc* c_doc + cdef xmlChar* c_buffer = NULL + cdef int byte_count = -1 + cdef bytes result + cdef _Document doc + cdef _Element element + cdef xmlChar **c_inclusive_ns_prefixes + + if isinstance(element_or_tree, _Element): + _assertValidNode(<_Element>element_or_tree) + doc = (<_Element>element_or_tree)._doc + c_doc = _plainFakeRootDoc(doc._c_doc, (<_Element>element_or_tree)._c_node, 0) + else: + doc = _documentOrRaise(element_or_tree) + _assertValidDoc(doc) + c_doc = doc._c_doc + + c_inclusive_ns_prefixes = _convert_ns_prefixes(c_doc.dict, inclusive_ns_prefixes) if inclusive_ns_prefixes else NULL + try: + with nogil: + byte_count = c14n.xmlC14NDocDumpMemory( + c_doc, NULL, exclusive, c_inclusive_ns_prefixes, with_comments, &c_buffer) + + finally: + _destroyFakeDoc(doc._c_doc, c_doc) + if c_inclusive_ns_prefixes is not NULL: + python.lxml_free(c_inclusive_ns_prefixes) + + if byte_count < 0 or c_buffer is NULL: + if c_buffer is not NULL: + tree.xmlFree(c_buffer) + raise C14NError, "C14N failed" + try: + result = c_buffer[:byte_count] + finally: + tree.xmlFree(c_buffer) + return result + +cdef _raiseSerialisationError(int error_result): + if error_result == xmlerror.XML_ERR_NO_MEMORY: + raise MemoryError() + message = ErrorTypes._getName(error_result) + if message is None: + message = f"unknown error {error_result}" + raise SerialisationError, message + +############################################################ +# 
low-level serialisation functions + +cdef void _writeDoctype(tree.xmlOutputBuffer* c_buffer, + const_xmlChar* c_doctype) noexcept nogil: + tree.xmlOutputBufferWrite(c_buffer, tree.xmlStrlen(c_doctype), + c_doctype) + tree.xmlOutputBufferWriteString(c_buffer, "\n") + +cdef void _writeNodeToBuffer(tree.xmlOutputBuffer* c_buffer, + xmlNode* c_node, const_char* encoding, const_xmlChar* c_doctype, + int c_method, bint write_xml_declaration, + bint write_complete_document, + bint pretty_print, bint with_tail, + int standalone) noexcept nogil: + cdef xmlNode* c_nsdecl_node + cdef xmlDoc* c_doc = c_node.doc + if write_xml_declaration and c_method == OUTPUT_METHOD_XML: + _writeDeclarationToBuffer(c_buffer, c_doc.version, encoding, standalone) + + # comments/processing instructions before doctype declaration + if write_complete_document and not c_buffer.error and c_doc.intSubset: + _writePrevSiblings(c_buffer, c_doc.intSubset, encoding, pretty_print) + + if c_doctype: + _writeDoctype(c_buffer, c_doctype) + # write internal DTD subset, preceding PIs/comments, etc. + if write_complete_document and not c_buffer.error: + if c_doctype is NULL: + _writeDtdToBuffer(c_buffer, c_doc, c_node.name, c_method, encoding) + _writePrevSiblings(c_buffer, c_node, encoding, pretty_print) + + c_nsdecl_node = c_node + if not c_node.parent or c_node.parent.type != tree.XML_DOCUMENT_NODE: + # copy the node and add namespaces from parents + # this is required to make libxml write them + c_nsdecl_node = tree.xmlCopyNode(c_node, 2) + if not c_nsdecl_node: + c_buffer.error = xmlerror.XML_ERR_NO_MEMORY + return + _copyParentNamespaces(c_node, c_nsdecl_node) + + c_nsdecl_node.parent = c_node.parent + c_nsdecl_node.children = c_node.children + c_nsdecl_node.last = c_node.last + + # write node + if c_method == OUTPUT_METHOD_HTML: + tree.htmlNodeDumpFormatOutput( + c_buffer, c_doc, c_nsdecl_node, encoding, pretty_print) + else: + tree.xmlNodeDumpOutput( + c_buffer, c_doc, c_nsdecl_node, 0, pretty_print, encoding) + + if c_nsdecl_node is not c_node: + # clean up + c_nsdecl_node.children = c_nsdecl_node.last = NULL + tree.xmlFreeNode(c_nsdecl_node) + + if c_buffer.error: + return + + # write tail, trailing comments, etc. + if with_tail: + _writeTail(c_buffer, c_node, encoding, c_method, pretty_print) + if write_complete_document: + _writeNextSiblings(c_buffer, c_node, encoding, pretty_print) + if pretty_print: + tree.xmlOutputBufferWrite(c_buffer, 1, "\n") + +cdef void _writeDeclarationToBuffer(tree.xmlOutputBuffer* c_buffer, + const_xmlChar* version, const_char* encoding, + int standalone) noexcept nogil: + if version is NULL: + version = "1.0" + tree.xmlOutputBufferWrite(c_buffer, 15, "version) + tree.xmlOutputBufferWrite(c_buffer, 12, "' encoding='") + tree.xmlOutputBufferWriteString(c_buffer, encoding) + if standalone == 0: + tree.xmlOutputBufferWrite(c_buffer, 20, "' standalone='no'?>\n") + elif standalone == 1: + tree.xmlOutputBufferWrite(c_buffer, 21, "' standalone='yes'?>\n") + else: + tree.xmlOutputBufferWrite(c_buffer, 4, "'?>\n") + +cdef void _writeDtdToBuffer(tree.xmlOutputBuffer* c_buffer, + xmlDoc* c_doc, const_xmlChar* c_root_name, + int c_method, const_char* encoding) noexcept nogil: + cdef tree.xmlDtd* c_dtd + cdef xmlNode* c_node + cdef char* quotechar + c_dtd = c_doc.intSubset + if not c_dtd or not c_dtd.name: + return + + # Name in document type declaration must match the root element tag. + # For XML, case sensitive match, for HTML insensitive. 
+ if c_method == OUTPUT_METHOD_HTML: + if tree.xmlStrcasecmp(c_root_name, c_dtd.name) != 0: + return + else: + if tree.xmlStrcmp(c_root_name, c_dtd.name) != 0: + return + + tree.xmlOutputBufferWrite(c_buffer, 10, "c_dtd.name) + + cdef const_xmlChar* public_id = c_dtd.ExternalID + cdef const_xmlChar* sys_url = c_dtd.SystemID + if public_id and public_id[0] == b'\0': + public_id = NULL + if sys_url and sys_url[0] == b'\0': + sys_url = NULL + + if public_id: + tree.xmlOutputBufferWrite(c_buffer, 9, ' PUBLIC "') + tree.xmlOutputBufferWriteString(c_buffer, public_id) + if sys_url: + tree.xmlOutputBufferWrite(c_buffer, 2, '" ') + else: + tree.xmlOutputBufferWrite(c_buffer, 1, '"') + elif sys_url: + tree.xmlOutputBufferWrite(c_buffer, 8, ' SYSTEM ') + + if sys_url: + if tree.xmlStrchr(sys_url, b'"'): + quotechar = '\'' + else: + quotechar = '"' + tree.xmlOutputBufferWrite(c_buffer, 1, quotechar) + tree.xmlOutputBufferWriteString(c_buffer, sys_url) + tree.xmlOutputBufferWrite(c_buffer, 1, quotechar) + + if (not c_dtd.entities and not c_dtd.elements and + not c_dtd.attributes and not c_dtd.notations and + not c_dtd.pentities): + tree.xmlOutputBufferWrite(c_buffer, 2, '>\n') + return + + tree.xmlOutputBufferWrite(c_buffer, 3, ' [\n') + if c_dtd.notations and not c_buffer.error: + c_buf = tree.xmlBufferCreate() + if not c_buf: + c_buffer.error = xmlerror.XML_ERR_NO_MEMORY + return + tree.xmlDumpNotationTable(c_buf, c_dtd.notations) + tree.xmlOutputBufferWrite( + c_buffer, tree.xmlBufferLength(c_buf), + tree.xmlBufferContent(c_buf)) + tree.xmlBufferFree(c_buf) + c_node = c_dtd.children + while c_node and not c_buffer.error: + tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_node, 0, 0, encoding) + c_node = c_node.next + tree.xmlOutputBufferWrite(c_buffer, 3, "]>\n") + +cdef void _writeTail(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node, + const_char* encoding, int c_method, bint pretty_print) noexcept nogil: + "Write the element tail." 
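+    # The "tail" is the text that follows an element's end tag up to the next
+    # sibling element, e.g. for <a><b/>tail</a> the string "tail" is the tail
+    # of <b/>.  The loop below also serialises trailing CDATA sibling nodes.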
+ c_node = c_node.next + while c_node and not c_buffer.error and c_node.type in ( + tree.XML_TEXT_NODE, tree.XML_CDATA_SECTION_NODE): + if c_method == OUTPUT_METHOD_HTML: + tree.htmlNodeDumpFormatOutput( + c_buffer, c_node.doc, c_node, encoding, pretty_print) + else: + tree.xmlNodeDumpOutput( + c_buffer, c_node.doc, c_node, 0, pretty_print, encoding) + c_node = c_node.next + +cdef void _writePrevSiblings(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node, + const_char* encoding, bint pretty_print) noexcept nogil: + cdef xmlNode* c_sibling + if c_node.parent and _isElement(c_node.parent): + return + # we are at a root node, so add PI and comment siblings + c_sibling = c_node + while c_sibling.prev and \ + (c_sibling.prev.type == tree.XML_PI_NODE or + c_sibling.prev.type == tree.XML_COMMENT_NODE): + c_sibling = c_sibling.prev + while c_sibling is not c_node and not c_buffer.error: + tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_sibling, 0, + pretty_print, encoding) + if pretty_print: + tree.xmlOutputBufferWriteString(c_buffer, "\n") + c_sibling = c_sibling.next + +cdef void _writeNextSiblings(tree.xmlOutputBuffer* c_buffer, xmlNode* c_node, + const_char* encoding, bint pretty_print) noexcept nogil: + cdef xmlNode* c_sibling + if c_node.parent and _isElement(c_node.parent): + return + # we are at a root node, so add PI and comment siblings + c_sibling = c_node.next + while not c_buffer.error and c_sibling and \ + (c_sibling.type == tree.XML_PI_NODE or + c_sibling.type == tree.XML_COMMENT_NODE): + if pretty_print: + tree.xmlOutputBufferWriteString(c_buffer, "\n") + tree.xmlNodeDumpOutput(c_buffer, c_node.doc, c_sibling, 0, + pretty_print, encoding) + c_sibling = c_sibling.next + + +# copied and adapted from libxml2 +cdef unsigned char *xmlSerializeHexCharRef(unsigned char *out, int val) noexcept: + cdef xmlChar *ptr + cdef const xmlChar* hexdigits = b"0123456789ABCDEF" + + out[0] = b'&' + out += 1 + out[0] = b'#' + out += 1 + out[0] = b'x' + out += 1 + + if val < 0x10: + ptr = out + elif val < 0x100: + ptr = out + 1 + elif val < 0x1000: + ptr = out + 2 + elif val < 0x10000: + ptr = out + 3 + elif val < 0x100000: + ptr = out + 4 + else: + ptr = out + 5 + + out = ptr + 1 + while val > 0: + ptr[0] = hexdigits[val & 0xF] + ptr -= 1 + val >>= 4 + + out[0] = b';' + out += 1 + out[0] = 0 + + return out + + +# copied and adapted from libxml2 (xmlBufAttrSerializeTxtContent()) +cdef _write_attr_string(tree.xmlOutputBuffer* buf, const char *string): + cdef const char *base + cdef const char *cur + cdef const unsigned char *ucur + + cdef unsigned char tmp[12] + cdef int val = 0 + cdef int l + + if string == NULL: + return + + base = cur = string + while cur[0] != 0: + if cur[0] == b'\n': + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + tree.xmlOutputBufferWrite(buf, 5, " ") + cur += 1 + base = cur + + elif cur[0] == b'\r': + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + tree.xmlOutputBufferWrite(buf, 5, " ") + cur += 1 + base = cur + + elif cur[0] == b'\t': + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + tree.xmlOutputBufferWrite(buf, 4, " ") + cur += 1 + base = cur + + elif cur[0] == b'"': + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + tree.xmlOutputBufferWrite(buf, 6, """) + cur += 1 + base = cur + + elif cur[0] == b'<': + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + tree.xmlOutputBufferWrite(buf, 4, "<") + cur += 1 + base = cur + + elif cur[0] == b'>': + if base != cur: + 
tree.xmlOutputBufferWrite(buf, cur - base, base) + + tree.xmlOutputBufferWrite(buf, 4, ">") + cur += 1 + base = cur + elif cur[0] == b'&': + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + tree.xmlOutputBufferWrite(buf, 5, "&") + cur += 1 + base = cur + + elif (cur[0] >= 0x80) and (cur[1] != 0): + + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + ucur = cur + + if ucur[0] < 0xC0: + # invalid UTF-8 sequence + val = ucur[0] + l = 1 + + elif ucur[0] < 0xE0: + val = (ucur[0]) & 0x1F + val <<= 6 + val |= (ucur[1]) & 0x3F + l = 2 + + elif (ucur[0] < 0xF0) and (ucur[2] != 0): + val = (ucur[0]) & 0x0F + val <<= 6 + val |= (ucur[1]) & 0x3F + val <<= 6 + val |= (ucur[2]) & 0x3F + l = 3 + + elif (ucur[0] < 0xF8) and (ucur[2] != 0) and (ucur[3] != 0): + val = (ucur[0]) & 0x07 + val <<= 6 + val |= (ucur[1]) & 0x3F + val <<= 6 + val |= (ucur[2]) & 0x3F + val <<= 6 + val |= (ucur[3]) & 0x3F + l = 4 + else: + # invalid UTF-8 sequence + val = ucur[0] + l = 1 + + if (l == 1) or (not tree.xmlIsCharQ(val)): + raise ValueError(f"Invalid character: {val:X}") + + # We could do multiple things here. Just save + # as a char ref + xmlSerializeHexCharRef(tmp, val) + tree.xmlOutputBufferWrite(buf, len(tmp), tmp) + cur += l + base = cur + + else: + cur += 1 + + if base != cur: + tree.xmlOutputBufferWrite(buf, cur - base, base) + + +############################################################ +# output to file-like objects + +cdef object io_open +from io import open + +cdef object gzip +import gzip + +cdef object getwriter +from codecs import getwriter +cdef object utf8_writer = getwriter('utf8') + +cdef object contextmanager +from contextlib import contextmanager + +cdef object _open_utf8_file + +@contextmanager +def _open_utf8_file(file, compression=0): + file = _getFSPathOrObject(file) + if _isString(file): + if compression: + with gzip.GzipFile(file, mode='wb', compresslevel=compression) as zf: + yield utf8_writer(zf) + else: + with io_open(file, 'w', encoding='utf8') as f: + yield f + else: + if compression: + with gzip.GzipFile(fileobj=file, mode='wb', compresslevel=compression) as zf: + yield utf8_writer(zf) + else: + yield utf8_writer(file) + + +@cython.final +@cython.internal +cdef class _FilelikeWriter: + cdef object _filelike + cdef object _close_filelike + cdef _ExceptionContext _exc_context + cdef _ErrorLog error_log + def __cinit__(self, filelike, exc_context=None, compression=None, close=False): + if compression is not None and compression > 0: + filelike = GzipFile( + fileobj=filelike, mode='wb', compresslevel=compression) + self._close_filelike = filelike.close + elif close: + self._close_filelike = filelike.close + self._filelike = filelike + if exc_context is None: + self._exc_context = _ExceptionContext() + else: + self._exc_context = exc_context + self.error_log = _ErrorLog() + + cdef tree.xmlOutputBuffer* _createOutputBuffer( + self, tree.xmlCharEncodingHandler* enchandler) except NULL: + cdef tree.xmlOutputBuffer* c_buffer + c_buffer = tree.xmlOutputBufferCreateIO( + _writeFilelikeWriter, _closeFilelikeWriter, + self, enchandler) + if c_buffer is NULL: + raise IOError, "Could not create I/O writer context." 
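For reference, the attribute serialisation above follows the standard XML escaping rules: ``&``, ``<``, ``>`` and ``"`` become ``&amp;``, ``&lt;``, ``&gt;`` and ``&quot;``, the whitespace characters tab, newline and carriage return become the character references ``&#9;``, ``&#10;`` and ``&#13;``, and multi-byte sequences are written out as hexadecimal character references. A rough pure-Python sketch of the same rules (illustrative only, not the code path lxml actually uses)::

    def escape_attr_value(value: str) -> str:
        # '&' must be replaced first so that later entities are not re-escaped.
        replacements = [('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                        ('"', '&quot;'), ('\t', '&#9;'),
                        ('\n', '&#10;'), ('\r', '&#13;')]
        for char, entity in replacements:
            value = value.replace(char, entity)
        return value

    assert escape_attr_value('a "b" &\nc') == 'a &quot;b&quot; &amp;&#10;c'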
+ return c_buffer + + cdef int write(self, char* c_buffer, int size) noexcept: + try: + if self._filelike is None: + raise IOError, "File is already closed" + py_buffer = c_buffer[:size] + self._filelike.write(py_buffer) + except: + size = -1 + self._exc_context._store_raised() + finally: + return size # and swallow any further exceptions + + cdef int close(self) noexcept: + retval = 0 + try: + if self._close_filelike is not None: + self._close_filelike() + # we should not close the file here as we didn't open it + self._filelike = None + except: + retval = -1 + self._exc_context._store_raised() + finally: + return retval # and swallow any further exceptions + +cdef int _writeFilelikeWriter(void* ctxt, char* c_buffer, int length) noexcept: + return (<_FilelikeWriter>ctxt).write(c_buffer, length) + +cdef int _closeFilelikeWriter(void* ctxt) noexcept: + return (<_FilelikeWriter>ctxt).close() + +cdef _tofilelike(f, _Element element, encoding, doctype, method, + bint write_xml_declaration, bint write_doctype, + bint pretty_print, bint with_tail, int standalone, + int compression): + cdef _FilelikeWriter writer = None + cdef tree.xmlOutputBuffer* c_buffer + cdef tree.xmlCharEncodingHandler* enchandler + cdef const_char* c_enc + cdef const_xmlChar* c_doctype + cdef int error_result + + c_method = _findOutputMethod(method) + if c_method == OUTPUT_METHOD_TEXT: + data = _textToString(element._c_node, encoding, with_tail) + if compression: + bytes_out = BytesIO() + with GzipFile(fileobj=bytes_out, mode='wb', compresslevel=compression) as gzip_file: + gzip_file.write(data) + data = bytes_out.getvalue() + f = _getFSPathOrObject(f) + if _isString(f): + filename8 = _encodeFilename(f) + with open(filename8, 'wb') as f: + f.write(data) + else: + f.write(data) + return + + if encoding is None: + c_enc = NULL + else: + encoding = _utf8(encoding) + c_enc = _cstr(encoding) + if doctype is None: + c_doctype = NULL + else: + doctype = _utf8(doctype) + c_doctype = _xcstr(doctype) + + writer = _create_output_buffer(f, c_enc, compression, &c_buffer, close=False) + if writer is None: + with nogil: + error_result = _serialise_node( + c_buffer, c_doctype, c_enc, element._c_node, c_method, + write_xml_declaration, write_doctype, pretty_print, with_tail, standalone) + else: + error_result = _serialise_node( + c_buffer, c_doctype, c_enc, element._c_node, c_method, + write_xml_declaration, write_doctype, pretty_print, with_tail, standalone) + + if writer is not None: + writer._exc_context._raise_if_stored() + if error_result != xmlerror.XML_ERR_OK: + _raiseSerialisationError(error_result) + + +cdef int _serialise_node(tree.xmlOutputBuffer* c_buffer, const_xmlChar* c_doctype, + const_char* c_enc, xmlNode* c_node, int c_method, + bint write_xml_declaration, bint write_doctype, bint pretty_print, + bint with_tail, int standalone) noexcept nogil: + _writeNodeToBuffer( + c_buffer, c_node, c_enc, c_doctype, c_method, + write_xml_declaration, write_doctype, pretty_print, with_tail, standalone) + error_result = c_buffer.error + if error_result == xmlerror.XML_ERR_OK: + error_result = tree.xmlOutputBufferClose(c_buffer) + if error_result != -1: + error_result = xmlerror.XML_ERR_OK + else: + tree.xmlOutputBufferClose(c_buffer) + return error_result + + +cdef _FilelikeWriter _create_output_buffer( + f, const_char* c_enc, int c_compression, + tree.xmlOutputBuffer** c_buffer_ret, bint close): + cdef tree.xmlOutputBuffer* c_buffer + cdef _FilelikeWriter writer + cdef bytes filename8 + enchandler = 
tree.xmlFindCharEncodingHandler(c_enc) + if enchandler is NULL: + raise LookupError( + f"unknown encoding: '{c_enc.decode('UTF-8') if c_enc is not NULL else u''}'") + try: + f = _getFSPathOrObject(f) + if _isString(f): + filename8 = _encodeFilename(f) + if b'%' in filename8 and ( + # Exclude absolute Windows paths and file:// URLs. + _isFilePath(filename8) not in (NO_FILE_PATH, ABS_WIN_FILE_PATH) + or filename8[:7].lower() == b'file://'): + # A file path (not a URL) containing the '%' URL escape character. + # libxml2 uses URL-unescaping on these, so escape the path before passing it in. + filename8 = filename8.replace(b'%', b'%25') + c_buffer = tree.xmlOutputBufferCreateFilename( + _cstr(filename8), enchandler, c_compression) + if c_buffer is NULL: + python.PyErr_SetFromErrno(IOError) # raises IOError + writer = None + elif hasattr(f, 'write'): + writer = _FilelikeWriter(f, compression=c_compression, close=close) + c_buffer = writer._createOutputBuffer(enchandler) + else: + raise TypeError( + f"File or filename expected, got '{python._fqtypename(f).decode('UTF-8')}'") + except: + tree.xmlCharEncCloseFunc(enchandler) + raise + c_buffer_ret[0] = c_buffer + return writer + +cdef xmlChar **_convert_ns_prefixes(tree.xmlDict* c_dict, ns_prefixes) except NULL: + cdef size_t i, num_ns_prefixes = len(ns_prefixes) + # Need to allocate one extra memory block to handle last NULL entry + c_ns_prefixes = python.lxml_malloc(num_ns_prefixes + 1, sizeof(xmlChar*)) + if not c_ns_prefixes: + raise MemoryError() + i = 0 + try: + for prefix in ns_prefixes: + prefix_utf = _utf8(prefix) + c_prefix = tree.xmlDictExists(c_dict, _xcstr(prefix_utf), len(prefix_utf)) + if c_prefix: + # unknown prefixes do not need to get serialised + c_ns_prefixes[i] = c_prefix + i += 1 + except: + python.lxml_free(c_ns_prefixes) + raise + + c_ns_prefixes[i] = NULL # append end marker + return c_ns_prefixes + +cdef _tofilelikeC14N(f, _Element element, bint exclusive, bint with_comments, + int compression, inclusive_ns_prefixes): + cdef _FilelikeWriter writer = None + cdef tree.xmlOutputBuffer* c_buffer + cdef xmlChar **c_inclusive_ns_prefixes = NULL + cdef char* c_filename + cdef xmlDoc* c_base_doc + cdef xmlDoc* c_doc + cdef int bytes_count, error = 0 + + c_base_doc = element._c_node.doc + c_doc = _fakeRootDoc(c_base_doc, element._c_node) + try: + c_inclusive_ns_prefixes = ( + _convert_ns_prefixes(c_doc.dict, inclusive_ns_prefixes) + if inclusive_ns_prefixes else NULL) + + f = _getFSPathOrObject(f) + if _isString(f): + filename8 = _encodeFilename(f) + c_filename = _cstr(filename8) + with nogil: + error = c14n.xmlC14NDocSave( + c_doc, NULL, exclusive, c_inclusive_ns_prefixes, + with_comments, c_filename, compression) + elif hasattr(f, 'write'): + writer = _FilelikeWriter(f, compression=compression) + c_buffer = writer._createOutputBuffer(NULL) + try: + with writer.error_log: + bytes_count = c14n.xmlC14NDocSaveTo( + c_doc, NULL, exclusive, c_inclusive_ns_prefixes, + with_comments, c_buffer) + finally: + error = tree.xmlOutputBufferClose(c_buffer) + if bytes_count < 0: + error = bytes_count + elif error != -1: + error = xmlerror.XML_ERR_OK + else: + raise TypeError(f"File or filename expected, got '{python._fqtypename(f).decode('UTF-8')}'") + finally: + _destroyFakeDoc(c_base_doc, c_doc) + if c_inclusive_ns_prefixes is not NULL: + python.lxml_free(c_inclusive_ns_prefixes) + + if writer is not None: + writer._exc_context._raise_if_stored() + + if error < 0: + message = "C14N failed" + if writer is not None: + errors = writer.error_log 
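The C14N 1.x writer above (``_tofilelikeC14N()``) corresponds to what lxml exposes publicly as ``ElementTree.write_c14n()``; a small usage sketch with a throwaway in-memory document and a placeholder namespace prefix::

    from io import BytesIO
    from lxml import etree

    tree = etree.parse(BytesIO(b'<root xmlns:x="urn:x"><x:a>text</x:a></root>'))
    out = BytesIO()
    # Exclusive C14N; 'x' is kept declared in the output even where exclusive
    # canonicalisation would otherwise drop it as unused.
    tree.write_c14n(out, exclusive=True, with_comments=True,
                    inclusive_ns_prefixes=['x'])
    print(out.getvalue().decode('utf-8'))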
+ if len(errors): + message = errors[0].message + raise C14NError(message) + + +# C14N 2.0 + +def canonicalize(xml_data=None, *, out=None, from_file=None, **options): + """Convert XML to its C14N 2.0 serialised form. + + If *out* is provided, it must be a file or file-like object that receives + the serialised canonical XML output (text, not bytes) through its ``.write()`` + method. To write to a file, open it in text mode with encoding "utf-8". + If *out* is not provided, this function returns the output as text string. + + Either *xml_data* (an XML string, tree or Element) or *file* + (a file path or file-like object) must be provided as input. + + The configuration options are the same as for the ``C14NWriterTarget``. + """ + if xml_data is None and from_file is None: + raise ValueError("Either 'xml_data' or 'from_file' must be provided as input") + + sio = None + if out is None: + sio = out = StringIO() + + target = C14NWriterTarget(out.write, **options) + + if xml_data is not None and not isinstance(xml_data, basestring): + _tree_to_target(xml_data, target) + return sio.getvalue() if sio is not None else None + + cdef _FeedParser parser = XMLParser( + target=target, + attribute_defaults=True, + collect_ids=False, + ) + + if xml_data is not None: + parser.feed(xml_data) + parser.close() + elif from_file is not None: + try: + _parseDocument(from_file, parser, base_url=None) + except _TargetParserResult: + pass + + return sio.getvalue() if sio is not None else None + + +cdef _tree_to_target(element, target): + for event, elem in iterwalk(element, events=('start', 'end', 'start-ns', 'comment', 'pi')): + text = None + if event == 'start': + target.start(elem.tag, elem.attrib) + text = elem.text + elif event == 'end': + target.end(elem.tag) + text = elem.tail + elif event == 'start-ns': + target.start_ns(*elem) + continue + elif event == 'comment': + target.comment(elem.text) + text = elem.tail + elif event == 'pi': + target.pi(elem.target, elem.text) + text = elem.tail + if text: + target.data(text) + return target.close() + + +cdef object _looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match + + +cdef class C14NWriterTarget: + """ + Canonicalization writer target for the XMLParser. + + Serialises parse events to XML C14N 2.0. 
+ + Configuration options: + + - *with_comments*: set to true to include comments + - *strip_text*: set to true to strip whitespace before and after text content + - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}" + - *qname_aware_tags*: a set of qname aware tag names in which prefixes + should be replaced in text content + - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes + should be replaced in text content + - *exclude_attrs*: a set of attribute names that should not be serialised + - *exclude_tags*: a set of tag names that should not be serialised + """ + cdef object _write + cdef list _data + cdef set _qname_aware_tags + cdef object _find_qname_aware_attrs + cdef list _declared_ns_stack + cdef list _ns_stack + cdef dict _prefix_map + cdef list _preserve_space + cdef tuple _pending_start + cdef set _exclude_tags + cdef set _exclude_attrs + cdef Py_ssize_t _ignored_depth + cdef bint _with_comments + cdef bint _strip_text + cdef bint _rewrite_prefixes + cdef bint _root_seen + cdef bint _root_done + + def __init__(self, write, *, + with_comments=False, strip_text=False, rewrite_prefixes=False, + qname_aware_tags=None, qname_aware_attrs=None, + exclude_attrs=None, exclude_tags=None): + self._write = write + self._data = [] + self._with_comments = with_comments + self._strip_text = strip_text + self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None + self._exclude_tags = set(exclude_tags) if exclude_tags else None + + self._rewrite_prefixes = rewrite_prefixes + if qname_aware_tags: + self._qname_aware_tags = set(qname_aware_tags) + else: + self._qname_aware_tags = None + if qname_aware_attrs: + self._find_qname_aware_attrs = set(qname_aware_attrs).intersection + else: + self._find_qname_aware_attrs = None + + # Stack with globally and newly declared namespaces as (uri, prefix) pairs. + self._declared_ns_stack = [[ + ("http://www.w3.org/XML/1998/namespace", "xml"), + ]] + # Stack with user declared namespace prefixes as (uri, prefix) pairs. + self._ns_stack = [] + if not rewrite_prefixes: + self._ns_stack.append(_DEFAULT_NAMESPACE_PREFIXES_ITEMS) + self._ns_stack.append([]) + self._prefix_map = {} + self._preserve_space = [False] + self._pending_start = None + self._ignored_depth = 0 + self._root_seen = False + self._root_done = False + + def _iter_namespaces(self, ns_stack): + for namespaces in reversed(ns_stack): + if namespaces: # almost no element declares new namespaces + yield from namespaces + + cdef _resolve_prefix_name(self, prefixed_name): + prefix, name = prefixed_name.split(':', 1) + for uri, p in self._iter_namespaces(self._ns_stack): + if p == prefix: + return f'{{{uri}}}{name}' + raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope') + + cdef _qname(self, qname, uri=None): + if uri is None: + uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname) + else: + tag = qname + + prefixes_seen = set() + for u, prefix in self._iter_namespaces(self._declared_ns_stack): + if u == uri and prefix not in prefixes_seen: + return f'{prefix}:{tag}' if prefix else tag, tag, uri + prefixes_seen.add(prefix) + + # Not declared yet => add new declaration. 
+ if self._rewrite_prefixes: + if uri in self._prefix_map: + prefix = self._prefix_map[uri] + else: + prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}' + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}', tag, uri + + if not uri and '' not in prefixes_seen: + # No default namespace declared => no prefix needed. + return tag, tag, uri + + for u, prefix in self._iter_namespaces(self._ns_stack): + if u == uri: + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}' if prefix else tag, tag, uri + + if not uri: + # As soon as a default namespace is defined, + # anything that has no namespace (and thus, no prefix) goes there. + return tag, tag, uri + + raise ValueError(f'Namespace "{uri}" of name "{tag}" is not declared in scope') + + def data(self, data): + if not self._ignored_depth: + self._data.append(data) + + cdef _flush(self): + cdef unicode data = ''.join(self._data) + del self._data[:] + if self._strip_text and not self._preserve_space[-1]: + data = data.strip() + if self._pending_start is not None: + (tag, attrs, new_namespaces), self._pending_start = self._pending_start, None + qname_text = data if ':' in data and _looks_like_prefix_name(data) else None + self._start(tag, attrs, new_namespaces, qname_text) + if qname_text is not None: + return + if data and self._root_seen: + self._write(_escape_cdata_c14n(data)) + + def start_ns(self, prefix, uri): + if self._ignored_depth: + return + # we may have to resolve qnames in text content + if self._data: + self._flush() + self._ns_stack[-1].append((uri, prefix)) + + def start(self, tag, attrs): + if self._exclude_tags is not None and ( + self._ignored_depth or tag in self._exclude_tags): + self._ignored_depth += 1 + return + if self._data: + self._flush() + + new_namespaces = [] + self._declared_ns_stack.append(new_namespaces) + + if self._qname_aware_tags is not None and tag in self._qname_aware_tags: + # Need to parse text first to see if it requires a prefix declaration. + self._pending_start = (tag, attrs, new_namespaces) + return + self._start(tag, attrs, new_namespaces) + + cdef _start(self, tag, attrs, new_namespaces, qname_text=None): + if self._exclude_attrs is not None and attrs: + attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs} + + qnames = {tag, *attrs} + resolved_names = {} + + # Resolve prefixes in attribute and tag text. + if qname_text is not None: + qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text) + qnames.add(qname) + if self._find_qname_aware_attrs is not None and attrs: + qattrs = self._find_qname_aware_attrs(attrs) + if qattrs: + for attr_name in qattrs: + value = attrs[attr_name] + if _looks_like_prefix_name(value): + qname = resolved_names[value] = self._resolve_prefix_name(value) + qnames.add(qname) + else: + qattrs = None + else: + qattrs = None + + # Assign prefixes in lexicographical order of used URIs. + parsed_qnames = {n: self._qname(n) for n in sorted( + qnames, key=lambda n: n.split('}', 1))} + + # Write namespace declarations in prefix order ... + if new_namespaces: + attr_list = [ + ('xmlns:' + prefix if prefix else 'xmlns', uri) + for uri, prefix in new_namespaces + ] + attr_list.sort() + else: + # almost always empty + attr_list = [] + + # ... 
followed by attributes in URI+name order + if attrs: + for k, v in sorted(attrs.items()): + if qattrs is not None and k in qattrs and v in resolved_names: + v = parsed_qnames[resolved_names[v]][0] + attr_qname, attr_name, uri = parsed_qnames[k] + # No prefix for attributes in default ('') namespace. + attr_list.append((attr_qname if uri else attr_name, v)) + + # Honour xml:space attributes. + space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space') + self._preserve_space.append( + space_behaviour == 'preserve' if space_behaviour + else self._preserve_space[-1]) + + # Write the tag. + write = self._write + write('<' + parsed_qnames[tag][0]) + if attr_list: + write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list])) + write('>') + + # Write the resolved qname text content. + if qname_text is not None: + write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0])) + + self._root_seen = True + self._ns_stack.append([]) + + def end(self, tag): + if self._ignored_depth: + self._ignored_depth -= 1 + return + if self._data: + self._flush() + self._write(f'') + self._preserve_space.pop() + self._root_done = len(self._preserve_space) == 1 + self._declared_ns_stack.pop() + self._ns_stack.pop() + + def comment(self, text): + if not self._with_comments: + return + if self._ignored_depth: + return + if self._root_done: + self._write('\n') + elif self._root_seen and self._data: + self._flush() + self._write(f'') + if not self._root_seen: + self._write('\n') + + def pi(self, target, data): + if self._ignored_depth: + return + if self._root_done: + self._write('\n') + elif self._root_seen and self._data: + self._flush() + self._write( + f'' if data else f'') + if not self._root_seen: + self._write('\n') + + def close(self): + return None + + +cdef _raise_serialization_error(text): + raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__)) + + +cdef unicode _escape_cdata_c14n(stext): + # escape character data + cdef unicode text + cdef Py_UCS4 ch + cdef Py_ssize_t start = 0, pos = 0 + cdef list substrings = None + try: + text = unicode(stext) + except (TypeError, AttributeError): + return _raise_serialization_error(stext) + + for pos, ch in enumerate(text): + if ch == '&': + escape = '&' + elif ch == '<': + escape = '<' + elif ch == '>': + escape = '>' + elif ch == '\r': + escape = ' ' + else: + continue + + if substrings is None: + substrings = [] + if pos > start: + substrings.append(text[start:pos]) + substrings.append(escape) + start = pos + 1 + + if substrings is None: + return text + if pos >= start: + substrings.append(text[start:pos+1]) + return ''.join(substrings) + + +cdef unicode _escape_attrib_c14n(stext): + # escape attribute value + cdef unicode text + cdef Py_UCS4 ch + cdef Py_ssize_t start = 0, pos = 0 + cdef list substrings = None + try: + text = unicode(stext) + except (TypeError, AttributeError): + return _raise_serialization_error(stext) + + for pos, ch in enumerate(text): + if ch == '&': + escape = '&' + elif ch == '<': + escape = '<' + elif ch == '"': + escape = '"' + elif ch == '\t': + escape = ' ' + elif ch == '\n': + escape = ' ' + elif ch == '\r': + escape = ' ' + else: + continue + + if substrings is None: + substrings = [] + if pos > start: + substrings.append(text[start:pos]) + substrings.append(escape) + start = pos + 1 + + if substrings is None: + return text + if pos >= start: + substrings.append(text[start:pos+1]) + return ''.join(substrings) + + +# incremental serialisation + +cdef class xmlfile: + 
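A short usage sketch for the ``canonicalize()`` function and the ``C14NWriterTarget`` options described above; the namespace URI is a placeholder and the commented output is indicative only::

    from lxml import etree

    xml = '<root xmlns:a="urn:a">  <a:item> text </a:item>  <!-- dropped -->  </root>'
    print(etree.canonicalize(xml, strip_text=True, rewrite_prefixes=True))
    # roughly: <root><n0:item xmlns:n0="urn:a">text</n0:item></root>
    # (comments are omitted unless with_comments=True; prefixes are renumbered)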
"""xmlfile(self, output_file, encoding=None, compression=None, close=False, buffered=True) + + A simple mechanism for incremental XML serialisation. + + Usage example:: + + with xmlfile("somefile.xml", encoding='utf-8') as xf: + xf.write_declaration(standalone=True) + xf.write_doctype('') + + # generate an element (the root element) + with xf.element('root'): + # write a complete Element into the open root element + xf.write(etree.Element('test')) + + # generate and write more Elements, e.g. through iterparse + for element in generate_some_elements(): + # serialise generated elements into the XML file + xf.write(element) + + # or write multiple Elements or strings at once + xf.write(etree.Element('start'), "text", etree.Element('end')) + + If 'output_file' is a file(-like) object, passing ``close=True`` will + close it when exiting the context manager. By default, it is left + to the owner to do that. When a file path is used, lxml will take care + of opening and closing the file itself. Also, when a compression level + is set, lxml will deliberately close the file to make sure all data gets + compressed and written. + + Setting ``buffered=False`` will flush the output after each operation, + such as opening or closing an ``xf.element()`` block or calling + ``xf.write()``. Alternatively, calling ``xf.flush()`` can be used to + explicitly flush any pending output when buffering is enabled. + """ + cdef object output_file + cdef bytes encoding + cdef _IncrementalFileWriter writer + cdef _AsyncIncrementalFileWriter async_writer + cdef int compresslevel + cdef bint close + cdef bint buffered + cdef int method + + def __init__(self, output_file not None, encoding=None, compression=None, + close=False, buffered=True): + self.output_file = output_file + self.encoding = _utf8orNone(encoding) + self.compresslevel = compression or 0 + self.close = close + self.buffered = buffered + self.method = OUTPUT_METHOD_XML + + def __enter__(self): + assert self.output_file is not None + self.writer = _IncrementalFileWriter( + self.output_file, self.encoding, self.compresslevel, + self.close, self.buffered, self.method) + return self.writer + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.writer is not None: + old_writer, self.writer = self.writer, None + raise_on_error = exc_type is None + old_writer._close(raise_on_error) + if self.close: + self.output_file = None + + async def __aenter__(self): + assert self.output_file is not None + if isinstance(self.output_file, basestring): + raise TypeError("Cannot asynchronously write to a plain file") + if not hasattr(self.output_file, 'write'): + raise TypeError("Output file needs an async .write() method") + self.async_writer = _AsyncIncrementalFileWriter( + self.output_file, self.encoding, self.compresslevel, + self.close, self.buffered, self.method) + return self.async_writer + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self.async_writer is not None: + old_writer, self.async_writer = self.async_writer, None + raise_on_error = exc_type is None + await old_writer._close(raise_on_error) + if self.close: + self.output_file = None + + +cdef class htmlfile(xmlfile): + """htmlfile(self, output_file, encoding=None, compression=None, close=False, buffered=True) + + A simple mechanism for incremental HTML serialisation. Works the same as + xmlfile. 
+ """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.method = OUTPUT_METHOD_HTML + + +cdef enum _IncrementalFileWriterStatus: + WRITER_STARTING = 0 + WRITER_DECL_WRITTEN = 1 + WRITER_DTD_WRITTEN = 2 + WRITER_IN_ELEMENT = 3 + WRITER_FINISHED = 4 + + +@cython.final +@cython.internal +cdef class _IncrementalFileWriter: + cdef tree.xmlOutputBuffer* _c_out + cdef bytes _encoding + cdef const_char* _c_encoding + cdef _FilelikeWriter _target + cdef list _element_stack + cdef int _status + cdef int _method + cdef bint _buffered + + def __cinit__(self, outfile, bytes encoding, int compresslevel, bint close, + bint buffered, int method): + self._status = WRITER_STARTING + self._element_stack = [] + if encoding is None: + encoding = b'ASCII' + self._encoding = encoding + self._c_encoding = _cstr(encoding) if encoding is not None else NULL + self._buffered = buffered + self._target = _create_output_buffer( + outfile, self._c_encoding, compresslevel, &self._c_out, close) + self._method = method + + def __dealloc__(self): + if self._c_out is not NULL: + tree.xmlOutputBufferClose(self._c_out) + + def write_declaration(self, version=None, standalone=None, doctype=None): + """write_declaration(self, version=None, standalone=None, doctype=None) + + Write an XML declaration and (optionally) a doctype into the file. + """ + assert self._c_out is not NULL + cdef const_xmlChar* c_version + cdef int c_standalone + if self._method != OUTPUT_METHOD_XML: + raise LxmlSyntaxError("only XML documents have declarations") + if self._status >= WRITER_DECL_WRITTEN: + raise LxmlSyntaxError("XML declaration already written") + version = _utf8orNone(version) + c_version = _xcstr(version) if version is not None else NULL + doctype = _utf8orNone(doctype) + if standalone is None: + c_standalone = -1 + else: + c_standalone = 1 if standalone else 0 + _writeDeclarationToBuffer(self._c_out, c_version, self._c_encoding, c_standalone) + if doctype is not None: + _writeDoctype(self._c_out, _xcstr(doctype)) + self._status = WRITER_DTD_WRITTEN + else: + self._status = WRITER_DECL_WRITTEN + if not self._buffered: + tree.xmlOutputBufferFlush(self._c_out) + self._handle_error(self._c_out.error) + + def write_doctype(self, doctype): + """write_doctype(self, doctype) + + Writes the given doctype declaration verbatimly into the file. + """ + assert self._c_out is not NULL + if doctype is None: + return + if self._status >= WRITER_DTD_WRITTEN: + raise LxmlSyntaxError("DOCTYPE already written or cannot write it here") + doctype = _utf8(doctype) + _writeDoctype(self._c_out, _xcstr(doctype)) + self._status = WRITER_DTD_WRITTEN + if not self._buffered: + tree.xmlOutputBufferFlush(self._c_out) + self._handle_error(self._c_out.error) + + def method(self, method): + """method(self, method) + + Returns a context manager that overrides and restores the output method. + method is one of (None, 'xml', 'html') where None means 'xml'. + """ + assert self._c_out is not NULL + c_method = self._method if method is None else _findOutputMethod(method) + return _MethodChanger(self, c_method) + + def element(self, tag, attrib=None, nsmap=None, method=None, **_extra): + """element(self, tag, attrib=None, nsmap=None, method, **_extra) + + Returns a context manager that writes an opening and closing tag. + method is one of (None, 'xml', 'html') where None means 'xml'. 
+ """ + assert self._c_out is not NULL + attributes = [] + if attrib is not None: + for name, value in _iter_attrib(attrib): + if name not in _extra: + ns, name = _getNsTag(name) + attributes.append((ns, name, _utf8(value))) + if _extra: + for name, value in _extra.iteritems(): + ns, name = _getNsTag(name) + attributes.append((ns, name, _utf8(value))) + reversed_nsmap = {} + if nsmap: + for prefix, ns in nsmap.items(): + if prefix is not None: + prefix = _utf8(prefix) + _prefixValidOrRaise(prefix) + reversed_nsmap[_utf8(ns)] = prefix + ns, name = _getNsTag(tag) + + c_method = self._method if method is None else _findOutputMethod(method) + + return _FileWriterElement(self, (ns, name, attributes, reversed_nsmap), c_method) + + cdef _write_qname(self, bytes name, bytes prefix): + if prefix: # empty bytes for no prefix (not None to allow sorting) + tree.xmlOutputBufferWrite(self._c_out, len(prefix), _cstr(prefix)) + tree.xmlOutputBufferWrite(self._c_out, 1, ':') + tree.xmlOutputBufferWrite(self._c_out, len(name), _cstr(name)) + + cdef _write_start_element(self, element_config): + if self._status > WRITER_IN_ELEMENT: + raise LxmlSyntaxError("cannot append trailing element to complete XML document") + ns, name, attributes, nsmap = element_config + flat_namespace_map, new_namespaces = self._collect_namespaces(nsmap) + prefix = self._find_prefix(ns, flat_namespace_map, new_namespaces) + tree.xmlOutputBufferWrite(self._c_out, 1, '<') + self._write_qname(name, prefix) + + self._write_attributes_and_namespaces( + attributes, flat_namespace_map, new_namespaces) + + tree.xmlOutputBufferWrite(self._c_out, 1, '>') + if not self._buffered: + tree.xmlOutputBufferFlush(self._c_out) + self._handle_error(self._c_out.error) + + self._element_stack.append((ns, name, prefix, flat_namespace_map)) + self._status = WRITER_IN_ELEMENT + + cdef _write_attributes_and_namespaces(self, list attributes, + dict flat_namespace_map, + list new_namespaces): + if attributes: + # _find_prefix() may append to new_namespaces => build them first + attributes = [ + (self._find_prefix(ns, flat_namespace_map, new_namespaces), name, value) + for ns, name, value in attributes ] + if new_namespaces: + new_namespaces.sort() + self._write_attributes_list(new_namespaces) + if attributes: + self._write_attributes_list(attributes) + + cdef _write_attributes_list(self, list attributes): + for prefix, name, value in attributes: + tree.xmlOutputBufferWrite(self._c_out, 1, ' ') + self._write_qname(name, prefix) + tree.xmlOutputBufferWrite(self._c_out, 2, '="') + _write_attr_string(self._c_out, _cstr(value)) + + tree.xmlOutputBufferWrite(self._c_out, 1, '"') + + cdef _write_end_element(self, element_config): + if self._status != WRITER_IN_ELEMENT: + raise LxmlSyntaxError("not in an element") + if not self._element_stack or self._element_stack[-1][:2] != element_config[:2]: + raise LxmlSyntaxError("inconsistent exit action in context manager") + + # If previous write operations failed, the context manager exit might still call us. + # That is ok, but we stop writing closing tags and handling errors in that case. + # For all non-I/O errors, we continue writing closing tags if we can. 
+ ok_to_write = self._c_out.error == xmlerror.XML_ERR_OK + + name, prefix = self._element_stack.pop()[1:3] + if ok_to_write: + tree.xmlOutputBufferWrite(self._c_out, 2, '') + + if not self._element_stack: + self._status = WRITER_FINISHED + if ok_to_write: + if not self._buffered: + tree.xmlOutputBufferFlush(self._c_out) + self._handle_error(self._c_out.error) + + cdef _find_prefix(self, bytes href, dict flat_namespaces_map, list new_namespaces): + if href is None: + return None + if href in flat_namespaces_map: + return flat_namespaces_map[href] + # need to create a new prefix + prefixes = flat_namespaces_map.values() + i = 0 + while True: + prefix = _utf8('ns%d' % i) + if prefix not in prefixes: + new_namespaces.append((b'xmlns', prefix, href)) + flat_namespaces_map[href] = prefix + return prefix + i += 1 + + cdef _collect_namespaces(self, dict nsmap): + new_namespaces = [] + flat_namespaces_map = {} + for ns, prefix in nsmap.iteritems(): + flat_namespaces_map[ns] = prefix + if prefix is None: + # use empty bytes rather than None to allow sorting + new_namespaces.append((b'', b'xmlns', ns)) + else: + new_namespaces.append((b'xmlns', prefix, ns)) + # merge in flat namespace map of parent + if self._element_stack: + for ns, prefix in (self._element_stack[-1][-1]).iteritems(): + if flat_namespaces_map.get(ns) is None: + # unknown or empty prefix => prefer a 'real' prefix + flat_namespaces_map[ns] = prefix + return flat_namespaces_map, new_namespaces + + def write(self, *args, bint with_tail=True, bint pretty_print=False, method=None): + """write(self, *args, with_tail=True, pretty_print=False, method=None) + + Write subtrees or strings into the file. + + If method is not None, it should be one of ('html', 'xml', 'text') + to temporarily override the output method. + """ + assert self._c_out is not NULL + c_method = self._method if method is None else _findOutputMethod(method) + + for content in args: + if _isString(content): + if self._status != WRITER_IN_ELEMENT: + if self._status > WRITER_IN_ELEMENT or content.strip(): + raise LxmlSyntaxError("not in an element") + bstring = _utf8(content) + if not bstring: + continue + + ns, name, _, _ = self._element_stack[-1] + if (c_method == OUTPUT_METHOD_HTML and + ns in (None, b'http://www.w3.org/1999/xhtml') and + name in (b'script', b'style')): + tree.xmlOutputBufferWrite(self._c_out, len(bstring), _cstr(bstring)) + + else: + tree.xmlOutputBufferWriteEscape(self._c_out, _xcstr(bstring), NULL) + + elif iselement(content): + if self._status > WRITER_IN_ELEMENT: + raise LxmlSyntaxError("cannot append trailing element to complete XML document") + _writeNodeToBuffer(self._c_out, (<_Element>content)._c_node, + self._c_encoding, NULL, c_method, + False, False, pretty_print, with_tail, False) + if (<_Element>content)._c_node.type == tree.XML_ELEMENT_NODE: + if not self._element_stack: + self._status = WRITER_FINISHED + + elif content is not None: + raise TypeError( + f"got invalid input value of type {type(content)}, expected string or Element") + self._handle_error(self._c_out.error) + if not self._buffered: + tree.xmlOutputBufferFlush(self._c_out) + self._handle_error(self._c_out.error) + + def flush(self): + """flush(self) + + Write any pending content of the current output buffer to the stream. 
+ """ + assert self._c_out is not NULL + tree.xmlOutputBufferFlush(self._c_out) + self._handle_error(self._c_out.error) + + cdef _close(self, bint raise_on_error): + if raise_on_error: + if self._status < WRITER_IN_ELEMENT: + raise LxmlSyntaxError("no content written") + if self._element_stack: + raise LxmlSyntaxError("pending open tags on close") + error_result = self._c_out.error + if error_result == xmlerror.XML_ERR_OK: + error_result = tree.xmlOutputBufferClose(self._c_out) + if error_result != -1: + error_result = xmlerror.XML_ERR_OK + else: + tree.xmlOutputBufferClose(self._c_out) + self._status = WRITER_FINISHED + self._c_out = NULL + del self._element_stack[:] + if raise_on_error: + self._handle_error(error_result) + + cdef _handle_error(self, int error_result): + if error_result != xmlerror.XML_ERR_OK: + if self._target is not None: + self._target._exc_context._raise_if_stored() + _raiseSerialisationError(error_result) + + +@cython.final +@cython.internal +cdef class _AsyncDataWriter: + cdef list _data + def __cinit__(self): + self._data = [] + + cdef bytes collect(self): + data = b''.join(self._data) + del self._data[:] + return data + + def write(self, data): + self._data.append(data) + + def close(self): + pass + + +@cython.final +@cython.internal +cdef class _AsyncIncrementalFileWriter: + cdef _IncrementalFileWriter _writer + cdef _AsyncDataWriter _buffer + cdef object _async_outfile + cdef int _flush_after_writes + cdef bint _should_close + cdef bint _buffered + + def __cinit__(self, async_outfile, bytes encoding, int compresslevel, bint close, + bint buffered, int method): + self._flush_after_writes = 20 + self._async_outfile = async_outfile + self._should_close = close + self._buffered = buffered + self._buffer = _AsyncDataWriter() + self._writer = _IncrementalFileWriter( + self._buffer, encoding, compresslevel, close=True, buffered=False, method=method) + + cdef bytes _flush(self): + if not self._buffered or len(self._buffer._data) > self._flush_after_writes: + return self._buffer.collect() + return None + + async def flush(self): + self._writer.flush() + data = self._buffer.collect() + if data: + await self._async_outfile.write(data) + + async def write_declaration(self, version=None, standalone=None, doctype=None): + self._writer.write_declaration(version, standalone, doctype) + data = self._flush() + if data: + await self._async_outfile.write(data) + + async def write_doctype(self, doctype): + self._writer.write_doctype(doctype) + data = self._flush() + if data: + await self._async_outfile.write(data) + + async def write(self, *args, with_tail=True, pretty_print=False, method=None): + self._writer.write(*args, with_tail=with_tail, pretty_print=pretty_print, method=method) + data = self._flush() + if data: + await self._async_outfile.write(data) + + def method(self, method): + return self._writer.method(method) + + def element(self, tag, attrib=None, nsmap=None, method=None, **_extra): + element_writer = self._writer.element(tag, attrib, nsmap, method, **_extra) + return _AsyncFileWriterElement(element_writer, self) + + async def _close(self, bint raise_on_error): + self._writer._close(raise_on_error) + data = self._buffer.collect() + if data: + await self._async_outfile.write(data) + if self._should_close: + await self._async_outfile.close() + + +@cython.final +@cython.internal +cdef class _AsyncFileWriterElement: + cdef _FileWriterElement _element_writer + cdef _AsyncIncrementalFileWriter _writer + + def __cinit__(self, _FileWriterElement element_writer not None, + 
_AsyncIncrementalFileWriter writer not None): + self._element_writer = element_writer + self._writer = writer + + async def __aenter__(self): + self._element_writer.__enter__() + data = self._writer._flush() + if data: + await self._writer._async_outfile.write(data) + + async def __aexit__(self, *args): + self._element_writer.__exit__(*args) + data = self._writer._flush() + if data: + await self._writer._async_outfile.write(data) + + +@cython.final +@cython.internal +@cython.freelist(8) +cdef class _FileWriterElement: + cdef _IncrementalFileWriter _writer + cdef object _element + cdef int _new_method + cdef int _old_method + + def __cinit__(self, _IncrementalFileWriter writer not None, element_config, int method): + self._writer = writer + self._element = element_config + self._new_method = method + self._old_method = writer._method + + def __enter__(self): + self._writer._method = self._new_method + self._writer._write_start_element(self._element) + + def __exit__(self, exc_type, exc_val, exc_tb): + self._writer._write_end_element(self._element) + self._writer._method = self._old_method + + +@cython.final +@cython.internal +@cython.freelist(8) +cdef class _MethodChanger: + cdef _IncrementalFileWriter _writer + cdef int _new_method + cdef int _old_method + cdef bint _entered + cdef bint _exited + + def __cinit__(self, _IncrementalFileWriter writer not None, int method): + self._writer = writer + self._new_method = method + self._old_method = writer._method + self._entered = False + self._exited = False + + def __enter__(self): + if self._entered: + raise LxmlSyntaxError("Inconsistent enter action in context manager") + self._writer._method = self._new_method + self._entered = True + + def __exit__(self, exc_type, exc_val, exc_tb): + if self._exited: + raise LxmlSyntaxError("Inconsistent exit action in context manager") + if self._writer._method != self._new_method: + raise LxmlSyntaxError("Method changed outside of context manager") + self._writer._method = self._old_method + self._exited = True + + async def __aenter__(self): + # for your async convenience + return self.__enter__() + + async def __aexit__(self, *args): + # for your async convenience + return self.__exit__(*args) diff --git a/venv/lib/python3.10/site-packages/lxml/usedoctest.py b/venv/lib/python3.10/site-packages/lxml/usedoctest.py new file mode 100644 index 0000000000000000000000000000000000000000..f1da8cadfe710e215330c8ae3d28dfb7190dfd6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/usedoctest.py @@ -0,0 +1,13 @@ +"""Doctest module for XML comparison. + +Usage:: + + >>> import lxml.usedoctest + >>> # now do your XML doctests ... + +See `lxml.doctestcompare` +""" + +from lxml import doctestcompare + +doctestcompare.temp_install(del_module=__name__) diff --git a/venv/lib/python3.10/site-packages/lxml/xinclude.pxi b/venv/lib/python3.10/site-packages/lxml/xinclude.pxi new file mode 100644 index 0000000000000000000000000000000000000000..5c9ac45096efb2250e268dd2eed9ade07c2ca998 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/xinclude.pxi @@ -0,0 +1,67 @@ +# XInclude processing + +from lxml.includes cimport xinclude + + +cdef class XIncludeError(LxmlError): + """Error during XInclude processing. + """ + + +cdef class XInclude: + """XInclude(self) + XInclude processor. + + Create an instance and call it on an Element to run XInclude + processing. 
+ """ + cdef _ErrorLog _error_log + def __init__(self): + self._error_log = _ErrorLog() + + @property + def error_log(self): + assert self._error_log is not None, "XInclude instance not initialised" + return self._error_log.copy() + + def __call__(self, _Element node not None): + "__call__(self, node)" + # We cannot pass the XML_PARSE_NOXINCNODE option as this would free + # the XInclude nodes - there may still be Python references to them! + # Therefore, we allow XInclude nodes to be converted to + # XML_XINCLUDE_START nodes. XML_XINCLUDE_END nodes are added as + # siblings. Tree traversal will simply ignore them as they are not + # typed as elements. The included fragment is added between the two, + # i.e. as a sibling, which does not conflict with traversal. + cdef int result + _assertValidNode(node) + assert self._error_log is not None, "XInclude processor not initialised" + if node._doc._parser is not None: + parse_options = node._doc._parser._parse_options + context = node._doc._parser._getParserContext() + c_context = context + else: + parse_options = 0 + context = None + c_context = NULL + + self._error_log.connect() + if tree.LIBXML_VERSION < 20704 or not c_context: + __GLOBAL_PARSER_CONTEXT.pushImpliedContext(context) + with nogil: + orig_loader = _register_document_loader() + if c_context: + result = xinclude.xmlXIncludeProcessTreeFlagsData( + node._c_node, parse_options, c_context) + else: + result = xinclude.xmlXIncludeProcessTree(node._c_node) + _reset_document_loader(orig_loader) + if tree.LIBXML_VERSION < 20704 or not c_context: + __GLOBAL_PARSER_CONTEXT.popImpliedContext() + self._error_log.disconnect() + + if result == -1: + raise XIncludeError( + self._error_log._buildExceptionMessage( + "XInclude processing failed"), + self._error_log) diff --git a/venv/lib/python3.10/site-packages/lxml/xmlerror.pxi b/venv/lib/python3.10/site-packages/lxml/xmlerror.pxi new file mode 100644 index 0000000000000000000000000000000000000000..79442a8b40deb52d60cdeb0c168ee17809e239f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/xmlerror.pxi @@ -0,0 +1,1654 @@ +# DEBUG and error logging + +from lxml.includes cimport xmlerror +from lxml cimport cvarargs + +DEF GLOBAL_ERROR_LOG = "_GlobalErrorLog" +DEF XSLT_ERROR_LOG = "_XSLTErrorLog" + +# module level API functions + +def clear_error_log(): + """clear_error_log() + + Clear the global error log. Note that this log is already bound to a + fixed size. + + Note: since lxml 2.2, the global error log is local to a thread + and this function will only clear the global error log of the + current thread. + """ + _getThreadErrorLog(GLOBAL_ERROR_LOG).clear() + + +# setup for global log: + +cdef void _initThreadLogging() noexcept: + # Disable generic error lines from libxml2. + _connectGenericErrorLog(None) + + # Divert XSLT error messages to the global XSLT error log instead of stderr. + xslt.xsltSetGenericErrorFunc(NULL, _receiveXSLTError) + + +# Logging classes + +@cython.final +@cython.freelist(16) +cdef class _LogEntry: + """A log message entry from an error log. 
+ + Attributes: + + - message: the message text + - domain: the domain ID (see lxml.etree.ErrorDomains) + - type: the message type ID (see lxml.etree.ErrorTypes) + - level: the log level ID (see lxml.etree.ErrorLevels) + - line: the line at which the message originated (if applicable) + - column: the character column at which the message originated (if applicable) + - filename: the name of the file in which the message originated (if applicable) + - path: the location in which the error was found (if available) + """ + cdef readonly int domain + cdef readonly int type + cdef readonly int level + cdef readonly long line + cdef readonly int column + cdef basestring _message + cdef basestring _filename + cdef char* _c_message + cdef xmlChar* _c_filename + cdef xmlChar* _c_path + + def __dealloc__(self): + tree.xmlFree(self._c_message) + tree.xmlFree(self._c_filename) + tree.xmlFree(self._c_path) + + @cython.final + cdef int _setError(self, const xmlerror.xmlError* error) except -1: + self.domain = error.domain + self.type = error.code + self.level = error.level + self.line = error.line + self.column = error.int2 + self._c_message = NULL + self._c_filename = NULL + self._c_path = NULL + if (error.message is NULL or + error.message[0] == b'\0' or + error.message[0] == b'\n' and error.message[1] == b'\0'): + self._message = "unknown error" + else: + self._message = None + self._c_message = tree.xmlStrdup( + error.message) + if not self._c_message: + raise MemoryError() + if error.file is NULL: + self._filename = '' + else: + self._filename = None + self._c_filename = tree.xmlStrdup( error.file) + if not self._c_filename: + raise MemoryError() + if error.node is not NULL: + self._c_path = tree.xmlGetNodePath( error.node) + c_line = tree.xmlGetLineNo( error.node) + if c_line > limits.INT_MAX: + self.line = c_line + + @cython.final + cdef _setGeneric(self, int domain, int type, int level, long line, + message, filename): + self.domain = domain + self.type = type + self.level = level + self.line = line + self.column = 0 + self._message = message + self._filename = filename + self._c_path = NULL + + def __repr__(self): + return "%s:%d:%d:%s:%s:%s: %s" % ( + self.filename, self.line, self.column, self.level_name, + self.domain_name, self.type_name, self.message) + + @property + def domain_name(self): + """The name of the error domain. See lxml.etree.ErrorDomains + """ + return ErrorDomains._getName(self.domain, "unknown") + + @property + def type_name(self): + """The name of the error type. See lxml.etree.ErrorTypes + """ + if self.domain == ErrorDomains.RELAXNGV: + getName = RelaxNGErrorTypes._getName + else: + getName = ErrorTypes._getName + return getName(self.type, "unknown") + + @property + def level_name(self): + """The name of the error level. See lxml.etree.ErrorLevels + """ + return ErrorLevels._getName(self.level, "unknown") + + @property + def message(self): + """The log message string. + """ + cdef size_t size + if self._message is not None: + return self._message + if self._c_message is NULL: + return None + size = cstring_h.strlen(self._c_message) + if size > 0 and self._c_message[size-1] == b'\n': + size -= 1 # strip EOL + # cannot use funicode() here because the message may contain + # byte encoded file paths etc. 
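The per-entry fields collected above (level, domain, type, line, column, message, filename) are what user code sees when inspecting an error log, for example after a failed parse::

    from lxml import etree

    try:
        etree.fromstring('<root><a></root>')   # deliberately malformed
    except etree.XMLSyntaxError as exc:
        for entry in exc.error_log:
            print(entry.level_name, entry.domain_name, entry.type_name,
                  entry.line, entry.column, entry.message)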
+ try: + self._message = self._c_message[:size].decode('utf8') + except UnicodeDecodeError: + try: + self._message = self._c_message[:size].decode( + 'ascii', 'backslashreplace') + except UnicodeDecodeError: + self._message = '' + if self._c_message: + # clean up early + tree.xmlFree(self._c_message) + self._c_message = NULL + return self._message + + @property + def filename(self): + """The file path where the report originated, if any. + """ + if self._filename is None: + if self._c_filename is not NULL: + self._filename = _decodeFilename(self._c_filename) + # clean up early + tree.xmlFree(self._c_filename) + self._c_filename = NULL + return self._filename + + @property + def path(self): + """The XPath for the node where the error was detected. + """ + return funicode(self._c_path) if self._c_path is not NULL else None + + +cdef class _BaseErrorLog: + cdef _LogEntry _first_error + cdef readonly object last_error + def __init__(self, first_error, last_error): + self._first_error = first_error + self.last_error = last_error + + cpdef copy(self): + return _BaseErrorLog(self._first_error, self.last_error) + + def __repr__(self): + return '' + + cpdef receive(self, _LogEntry entry): + pass + + @cython.final + cdef int _receive(self, const xmlerror.xmlError* error) except -1: + cdef bint is_error + cdef _LogEntry entry + cdef _BaseErrorLog global_log + entry = _LogEntry.__new__(_LogEntry) + entry._setError(error) + is_error = error.level == xmlerror.XML_ERR_ERROR or \ + error.level == xmlerror.XML_ERR_FATAL + global_log = _getThreadErrorLog(GLOBAL_ERROR_LOG) + if global_log is not self: + global_log.receive(entry) + if is_error: + global_log.last_error = entry + self.receive(entry) + if is_error: + self.last_error = entry + + @cython.final + cdef int _receiveGeneric(self, int domain, int type, int level, long line, + message, filename) except -1: + cdef bint is_error + cdef _LogEntry entry + cdef _BaseErrorLog global_log + entry = _LogEntry.__new__(_LogEntry) + entry._setGeneric(domain, type, level, line, message, filename) + is_error = level == xmlerror.XML_ERR_ERROR or \ + level == xmlerror.XML_ERR_FATAL + global_log = _getThreadErrorLog(GLOBAL_ERROR_LOG) + if global_log is not self: + global_log.receive(entry) + if is_error: + global_log.last_error = entry + self.receive(entry) + if is_error: + self.last_error = entry + + @cython.final + cdef _buildParseException(self, exctype, default_message): + code = xmlerror.XML_ERR_INTERNAL_ERROR + if self._first_error is None: + return exctype(default_message, code, 0, 0) + message = self._first_error.message + if message: + code = self._first_error.type + else: + message = default_message + line = self._first_error.line + column = self._first_error.column + filename = self._first_error.filename + if line > 0: + if column > 0: + message = f"{message}, line {line}, column {column}" + else: + message = f"{message}, line {line}" + return exctype(message, code, line, column, filename) + + @cython.final + cdef _buildExceptionMessage(self, default_message): + if self._first_error is None: + return default_message + if self._first_error.message: + message = self._first_error.message + elif default_message is None: + return None + else: + message = default_message + if self._first_error.line > 0: + if self._first_error.column > 0: + message = f"{message}, line {self._first_error.line}, column {self._first_error.column}" + else: + message = f"{message}, line {self._first_error.line}" + return message + +cdef class _ListErrorLog(_BaseErrorLog): + "Immutable 
base version of a list based error log." + cdef list _entries + cdef int _offset + def __init__(self, entries, first_error, last_error): + if entries: + if first_error is None: + first_error = entries[0] + if last_error is None: + last_error = entries[-1] + _BaseErrorLog.__init__(self, first_error, last_error) + self._entries = entries + + cpdef copy(self): + """Creates a shallow copy of this error log. Reuses the list of + entries. + """ + cdef _ListErrorLog log = _ListErrorLog( + self._entries, self._first_error, self.last_error) + log._offset = self._offset + return log + + def __iter__(self): + entries = self._entries + if self._offset: + entries = islice(entries, self._offset) + return iter(entries) + + def __repr__(self): + return '\n'.join([repr(entry) for entry in self]) + + def __getitem__(self, index): + if self._offset: + index += self._offset + return self._entries[index] + + def __len__(self): + return len(self._entries) - self._offset + + def __contains__(self, error_type): + cdef Py_ssize_t i + for i, entry in enumerate(self._entries): + if i < self._offset: + continue + if entry.type == error_type: + return True + return False + + def __bool__(self): + return len(self._entries) > self._offset + + def filter_domains(self, domains): + """Filter the errors by the given domains and return a new error log + containing the matches. + """ + cdef _LogEntry entry + if isinstance(domains, int): + domains = (domains,) + filtered = [entry for entry in self if entry.domain in domains] + return _ListErrorLog(filtered, None, None) + + def filter_types(self, types): + """filter_types(self, types) + + Filter the errors by the given types and return a new error + log containing the matches. + """ + cdef _LogEntry entry + if isinstance(types, int): + types = (types,) + filtered = [entry for entry in self if entry.type in types] + return _ListErrorLog(filtered, None, None) + + def filter_levels(self, levels): + """filter_levels(self, levels) + + Filter the errors by the given error levels and return a new + error log containing the matches. + """ + cdef _LogEntry entry + if isinstance(levels, int): + levels = (levels,) + filtered = [entry for entry in self if entry.level in levels] + return _ListErrorLog(filtered, None, None) + + def filter_from_level(self, level): + """filter_from_level(self, level) + + Return a log with all messages of the requested level of worse. + """ + cdef _LogEntry entry + filtered = [entry for entry in self if entry.level >= level] + return _ListErrorLog(filtered, None, None) + + def filter_from_fatals(self): + """filter_from_fatals(self) + + Convenience method to get all fatal error messages. + """ + return self.filter_from_level(ErrorLevels.FATAL) + + def filter_from_errors(self): + """filter_from_errors(self) + + Convenience method to get all error messages or worse. + """ + return self.filter_from_level(ErrorLevels.ERROR) + + def filter_from_warnings(self): + """filter_from_warnings(self) + + Convenience method to get all warnings or worse. + """ + return self.filter_from_level(ErrorLevels.WARNING) + + +@cython.final +@cython.internal +cdef class _ErrorLogContext: + """ + Error log context for the 'with' statement. + Stores a reference to the current callbacks to allow for + recursively stacked log contexts. 
+ """ + cdef xmlerror.xmlStructuredErrorFunc old_error_func + cdef void* old_error_context + cdef xmlerror.xmlGenericErrorFunc old_xslt_error_func + cdef void* old_xslt_error_context + cdef _BaseErrorLog old_xslt_error_log + + cdef int push_error_log(self, _BaseErrorLog log) except -1: + self.old_error_func = xmlerror.xmlStructuredError + self.old_error_context = xmlerror.xmlStructuredErrorContext + xmlerror.xmlSetStructuredErrorFunc( + log, _receiveError) + + # xslt.xsltSetGenericErrorFunc() is not thread-local => keep error log in TLS + self.old_xslt_error_func = xslt.xsltGenericError + self.old_xslt_error_context = xslt.xsltGenericErrorContext + self.old_xslt_error_log = _getThreadErrorLog(XSLT_ERROR_LOG) + _setThreadErrorLog(XSLT_ERROR_LOG, log) + xslt.xsltSetGenericErrorFunc( + NULL, _receiveXSLTError) + return 0 + + cdef int pop_error_log(self) except -1: + xmlerror.xmlSetStructuredErrorFunc( + self.old_error_context, self.old_error_func) + xslt.xsltSetGenericErrorFunc( + self.old_xslt_error_context, self.old_xslt_error_func) + _setThreadErrorLog(XSLT_ERROR_LOG, self.old_xslt_error_log) + self.old_xslt_error_log= None + return 0 + + +cdef class _ErrorLog(_ListErrorLog): + cdef list _logContexts + def __cinit__(self): + self._logContexts = [] + + def __init__(self): + _ListErrorLog.__init__(self, [], None, None) + + @cython.final + cdef int __enter__(self) except -1: + return self.connect() + + def __exit__(self, *args): + # TODO: make this a cdef function when Cython supports it + self.disconnect() + + @cython.final + cdef int connect(self) except -1: + self._first_error = None + del self._entries[:] + + cdef _ErrorLogContext context = _ErrorLogContext.__new__(_ErrorLogContext) + context.push_error_log(self) + self._logContexts.append(context) + return 0 + + @cython.final + cdef int disconnect(self) except -1: + cdef _ErrorLogContext context = self._logContexts.pop() + context.pop_error_log() + return 0 + + cpdef clear(self): + self._first_error = None + self.last_error = None + self._offset = 0 + del self._entries[:] + + cpdef copy(self): + """Creates a shallow copy of this error log and the list of entries. + """ + return _ListErrorLog( + self._entries[self._offset:], + self._first_error, self.last_error) + + def __iter__(self): + return iter(self._entries[self._offset:]) + + cpdef receive(self, _LogEntry entry): + if self._first_error is None and entry.level >= xmlerror.XML_ERR_ERROR: + self._first_error = entry + self._entries.append(entry) + +cdef class _DomainErrorLog(_ErrorLog): + def __init__(self, domains): + _ErrorLog.__init__(self) + self._accepted_domains = tuple(domains) + + cpdef receive(self, _LogEntry entry): + if entry.domain in self._accepted_domains: + _ErrorLog.receive(self, entry) + +cdef class _RotatingErrorLog(_ErrorLog): + cdef int _max_len + def __init__(self, max_len): + _ErrorLog.__init__(self) + self._max_len = max_len + + cpdef receive(self, _LogEntry entry): + if self._first_error is None and entry.level >= xmlerror.XML_ERR_ERROR: + self._first_error = entry + self._entries.append(entry) + + if len(self._entries) > self._max_len: + self._offset += 1 + if self._offset > self._max_len // 3: + offset = self._offset + self._offset = 0 + del self._entries[:offset] + +cdef class PyErrorLog(_BaseErrorLog): + """PyErrorLog(self, logger_name=None, logger=None) + A global error log that connects to the Python stdlib logging package. + + The constructor accepts an optional logger name or a readily + instantiated logger instance. 
+ + If you want to change the mapping between libxml2's ErrorLevels and Python + logging levels, you can modify the level_map dictionary from a subclass. + + The default mapping is:: + + ErrorLevels.WARNING = logging.WARNING + ErrorLevels.ERROR = logging.ERROR + ErrorLevels.FATAL = logging.CRITICAL + + You can also override the method ``receive()`` that takes a LogEntry + object and calls ``self.log(log_entry, format_string, arg1, arg2, ...)`` + with appropriate data. + """ + cdef readonly dict level_map + cdef object _map_level + cdef object _log + def __init__(self, logger_name=None, logger=None): + _BaseErrorLog.__init__(self, None, None) + import logging + self.level_map = { + ErrorLevels.WARNING : logging.WARNING, + ErrorLevels.ERROR : logging.ERROR, + ErrorLevels.FATAL : logging.CRITICAL + } + self._map_level = self.level_map.get + if logger is None: + if logger_name: + logger = logging.getLogger(logger_name) + else: + logger = logging.getLogger() + self._log = logger.log + + cpdef copy(self): + """Dummy method that returns an empty error log. + """ + return _ListErrorLog([], None, None) + + def log(self, log_entry, message, *args): + """log(self, log_entry, message, *args) + + Called by the .receive() method to log a _LogEntry instance to + the Python logging system. This handles the error level + mapping. + + In the default implementation, the ``message`` argument + receives a complete log line, and there are no further + ``args``. To change the message format, it is best to + override the .receive() method instead of this one. + """ + self._log( + self._map_level(log_entry.level, 0), + message, *args + ) + + cpdef receive(self, _LogEntry log_entry): + """receive(self, log_entry) + + Receive a _LogEntry instance from the logging system. Calls + the .log() method with appropriate parameters:: + + self.log(log_entry, repr(log_entry)) + + You can override this method to provide your own log output + format. + """ + self.log(log_entry, repr(log_entry)) + +# thread-local, global list log to collect error output messages from +# libxml2/libxslt + +cdef _BaseErrorLog __GLOBAL_ERROR_LOG = _RotatingErrorLog(__MAX_LOG_SIZE) + + +cdef _BaseErrorLog _getThreadErrorLog(name): + """Retrieve the current error log with name 'name' of this thread.""" + cdef python.PyObject* thread_dict + thread_dict = python.PyThreadState_GetDict() + if thread_dict is NULL: + return __GLOBAL_ERROR_LOG + try: + return (thread_dict)[name] + except KeyError: + log = (thread_dict)[name] = \ + _RotatingErrorLog(__MAX_LOG_SIZE) + return log + + +cdef _setThreadErrorLog(name, _BaseErrorLog log): + """Set the global error log of this thread.""" + cdef python.PyObject* thread_dict + thread_dict = python.PyThreadState_GetDict() + if thread_dict is NULL: + if name == GLOBAL_ERROR_LOG: + global __GLOBAL_ERROR_LOG + __GLOBAL_ERROR_LOG = log + else: + (thread_dict)[name] = log + + +cdef __copyGlobalErrorLog(): + "Helper function for properties in exceptions." + return _getThreadErrorLog(GLOBAL_ERROR_LOG).copy() + + +def use_global_python_log(PyErrorLog log not None): + """use_global_python_log(log) + + Replace the global error log by an etree.PyErrorLog that uses the + standard Python logging package. + + Note that this disables access to the global error log from exceptions. + Parsers, XSLT etc. will continue to provide their normal local error log. + + Note: prior to lxml 2.2, this changed the error log globally. 
+ Since lxml 2.2, the global error log is local to a thread and this + function will only set the global error log of the current thread. + """ + _setThreadErrorLog(GLOBAL_ERROR_LOG, log) + + +# local log functions: forward error to logger object +cdef void _forwardError(void* c_log_handler, const xmlerror.xmlError* error) noexcept with gil: + cdef _BaseErrorLog log_handler + if c_log_handler is not NULL: + log_handler = <_BaseErrorLog>c_log_handler + elif error.domain == xmlerror.XML_FROM_XSLT: + log_handler = _getThreadErrorLog(XSLT_ERROR_LOG) + else: + log_handler = _getThreadErrorLog(GLOBAL_ERROR_LOG) + log_handler._receive(error) + + +cdef void _receiveError(void* c_log_handler, const xmlerror.xmlError* error) noexcept nogil: + # no Python objects here, may be called without thread context ! + if __DEBUG: + _forwardError(c_log_handler, error) + + +cdef void _receiveXSLTError(void* c_log_handler, char* msg, ...) noexcept nogil: + # no Python objects here, may be called without thread context ! + cdef cvarargs.va_list args + cvarargs.va_start(args, msg) + _receiveGenericError(c_log_handler, xmlerror.XML_FROM_XSLT, msg, args) + cvarargs.va_end(args) + +cdef void _receiveRelaxNGParseError(void* c_log_handler, char* msg, ...) noexcept nogil: + # no Python objects here, may be called without thread context ! + cdef cvarargs.va_list args + cvarargs.va_start(args, msg) + _receiveGenericError(c_log_handler, xmlerror.XML_FROM_RELAXNGP, msg, args) + cvarargs.va_end(args) + +cdef void _receiveRelaxNGValidationError(void* c_log_handler, char* msg, ...) noexcept nogil: + # no Python objects here, may be called without thread context ! + cdef cvarargs.va_list args + cvarargs.va_start(args, msg) + _receiveGenericError(c_log_handler, xmlerror.XML_FROM_RELAXNGV, msg, args) + cvarargs.va_end(args) + +# dummy function: no log output at all +cdef void _nullGenericErrorFunc(void* ctxt, char* msg, ...) noexcept nogil: + pass + + +cdef void _connectGenericErrorLog(log, int c_domain=-1) noexcept: + cdef xmlerror.xmlGenericErrorFunc error_func = NULL + c_log = log + if c_domain == xmlerror.XML_FROM_XSLT: + error_func = _receiveXSLTError + elif c_domain == xmlerror.XML_FROM_RELAXNGP: + error_func = _receiveRelaxNGParseError + elif c_domain == xmlerror.XML_FROM_RELAXNGV: + error_func = _receiveRelaxNGValidationError + + if log is None or error_func is NULL: + c_log = NULL + error_func = _nullGenericErrorFunc + xmlerror.xmlSetGenericErrorFunc(c_log, error_func) + + +cdef void _receiveGenericError(void* c_log_handler, int c_domain, + char* msg, cvarargs.va_list args) noexcept nogil: + # no Python objects here, may be called without thread context ! + cdef xmlerror.xmlError c_error + cdef char* c_text + cdef char* c_message + cdef char* c_element + cdef char* c_pos + cdef char* c_name_pos + cdef char* c_str + cdef int text_size, element_size, format_count, c_int + if not __DEBUG or msg is NULL: + return + if msg[0] in b'\n\0': + return + + c_text = c_element = c_error.file = c_error.node = NULL + c_error.line = 0 + + # parse "NAME %s" chunks from the format string + c_name_pos = c_pos = msg + format_count = 0 + while c_pos[0]: + if c_pos[0] == b'%': + c_pos += 1 + if c_pos[0] == b's': # "%s" + format_count += 1 + c_str = cvarargs.va_charptr(args) + if c_pos == msg + 1: + c_text = c_str # msg == "%s..." 
+ elif c_name_pos[0] == b'e': + if cstring_h.strncmp(c_name_pos, 'element %s', 10) == 0: + c_element = c_str + elif c_name_pos[0] == b'f': + if cstring_h.strncmp(c_name_pos, 'file %s', 7) == 0: + if cstring_h.strncmp('string://__STRING__XSLT', + c_str, 23) == 0: + c_str = '' + c_error.file = c_str + elif c_pos[0] == b'd': # "%d" + format_count += 1 + c_int = cvarargs.va_int(args) + if cstring_h.strncmp(c_name_pos, 'line %d', 7) == 0: + c_error.line = c_int + elif c_pos[0] != b'%': # "%%" == "%" + format_count += 1 + break # unexpected format or end of string => abort + elif c_pos[0] == b' ': + if c_pos[1] != b'%': + c_name_pos = c_pos + 1 + c_pos += 1 + + c_message = NULL + if c_text is NULL: + if c_element is not NULL and format_count == 1: + # special case: a single occurrence of 'element %s' + text_size = cstring_h.strlen(msg) + element_size = cstring_h.strlen(c_element) + c_message = stdlib.malloc( + (text_size + element_size + 1) * sizeof(char)) + stdio.sprintf(c_message, msg, c_element) + c_error.message = c_message + else: + c_error.message = '' + elif c_element is NULL: + c_error.message = c_text + else: + text_size = cstring_h.strlen(c_text) + element_size = cstring_h.strlen(c_element) + c_message = stdlib.malloc( + (text_size + 12 + element_size + 1) * sizeof(char)) + if c_message is NULL: + c_error.message = c_text + else: + stdio.sprintf(c_message, "%s, element '%s'", c_text, c_element) + c_error.message = c_message + + c_error.domain = c_domain + c_error.code = xmlerror.XML_ERR_OK # what else? + c_error.level = xmlerror.XML_ERR_ERROR # what else? + c_error.int2 = 0 + + _forwardError(c_log_handler, &c_error) + + if c_message is not NULL: + stdlib.free(c_message) + +################################################################################ +## CONSTANTS FROM "xmlerror.h" (or rather libxml-xmlerror.html) +################################################################################ + +cdef __initErrorConstants(): + "Called at setup time to parse the constants and build the classes below." + global __ERROR_LEVELS, __ERROR_DOMAINS, __PARSER_ERROR_TYPES, __RELAXNG_ERROR_TYPES + const_defs = ((ErrorLevels, __ERROR_LEVELS), + (ErrorDomains, __ERROR_DOMAINS), + (ErrorTypes, __PARSER_ERROR_TYPES), + (RelaxNGErrorTypes, __RELAXNG_ERROR_TYPES)) + + for cls, constants in const_defs: + reverse_dict = {} + cls._names = reverse_dict + cls._getName = reverse_dict.get + for line in constants.splitlines(): + if not line: + continue + name, value = line.split('=') + value = int(value) + setattr(cls, name, value) + reverse_dict[value] = name + + # discard the global string references after use + __ERROR_LEVELS = __ERROR_DOMAINS = __PARSER_ERROR_TYPES = __RELAXNG_ERROR_TYPES = None + + +class ErrorLevels(object): + """Libxml2 error levels""" + +class ErrorDomains(object): + """Libxml2 error domains""" + +class ErrorTypes(object): + """Libxml2 error types""" + +class RelaxNGErrorTypes(object): + """Libxml2 RelaxNG error types""" + + +# --- BEGIN: GENERATED CONSTANTS --- + +# This section is generated by the script 'update-error-constants.py'. 
+ +cdef object __ERROR_LEVELS = """\ +NONE=0 +WARNING=1 +ERROR=2 +FATAL=3 +""" + +cdef object __ERROR_DOMAINS = """\ +NONE=0 +PARSER=1 +TREE=2 +NAMESPACE=3 +DTD=4 +HTML=5 +MEMORY=6 +OUTPUT=7 +IO=8 +FTP=9 +HTTP=10 +XINCLUDE=11 +XPATH=12 +XPOINTER=13 +REGEXP=14 +DATATYPE=15 +SCHEMASP=16 +SCHEMASV=17 +RELAXNGP=18 +RELAXNGV=19 +CATALOG=20 +C14N=21 +XSLT=22 +VALID=23 +CHECK=24 +WRITER=25 +MODULE=26 +I18N=27 +SCHEMATRONV=28 +BUFFER=29 +URI=30 +""" + +cdef object __PARSER_ERROR_TYPES = """\ +ERR_OK=0 +ERR_INTERNAL_ERROR=1 +ERR_NO_MEMORY=2 +ERR_DOCUMENT_START=3 +ERR_DOCUMENT_EMPTY=4 +ERR_DOCUMENT_END=5 +ERR_INVALID_HEX_CHARREF=6 +ERR_INVALID_DEC_CHARREF=7 +ERR_INVALID_CHARREF=8 +ERR_INVALID_CHAR=9 +ERR_CHARREF_AT_EOF=10 +ERR_CHARREF_IN_PROLOG=11 +ERR_CHARREF_IN_EPILOG=12 +ERR_CHARREF_IN_DTD=13 +ERR_ENTITYREF_AT_EOF=14 +ERR_ENTITYREF_IN_PROLOG=15 +ERR_ENTITYREF_IN_EPILOG=16 +ERR_ENTITYREF_IN_DTD=17 +ERR_PEREF_AT_EOF=18 +ERR_PEREF_IN_PROLOG=19 +ERR_PEREF_IN_EPILOG=20 +ERR_PEREF_IN_INT_SUBSET=21 +ERR_ENTITYREF_NO_NAME=22 +ERR_ENTITYREF_SEMICOL_MISSING=23 +ERR_PEREF_NO_NAME=24 +ERR_PEREF_SEMICOL_MISSING=25 +ERR_UNDECLARED_ENTITY=26 +WAR_UNDECLARED_ENTITY=27 +ERR_UNPARSED_ENTITY=28 +ERR_ENTITY_IS_EXTERNAL=29 +ERR_ENTITY_IS_PARAMETER=30 +ERR_UNKNOWN_ENCODING=31 +ERR_UNSUPPORTED_ENCODING=32 +ERR_STRING_NOT_STARTED=33 +ERR_STRING_NOT_CLOSED=34 +ERR_NS_DECL_ERROR=35 +ERR_ENTITY_NOT_STARTED=36 +ERR_ENTITY_NOT_FINISHED=37 +ERR_LT_IN_ATTRIBUTE=38 +ERR_ATTRIBUTE_NOT_STARTED=39 +ERR_ATTRIBUTE_NOT_FINISHED=40 +ERR_ATTRIBUTE_WITHOUT_VALUE=41 +ERR_ATTRIBUTE_REDEFINED=42 +ERR_LITERAL_NOT_STARTED=43 +ERR_LITERAL_NOT_FINISHED=44 +ERR_COMMENT_NOT_FINISHED=45 +ERR_PI_NOT_STARTED=46 +ERR_PI_NOT_FINISHED=47 +ERR_NOTATION_NOT_STARTED=48 +ERR_NOTATION_NOT_FINISHED=49 +ERR_ATTLIST_NOT_STARTED=50 +ERR_ATTLIST_NOT_FINISHED=51 +ERR_MIXED_NOT_STARTED=52 +ERR_MIXED_NOT_FINISHED=53 +ERR_ELEMCONTENT_NOT_STARTED=54 +ERR_ELEMCONTENT_NOT_FINISHED=55 +ERR_XMLDECL_NOT_STARTED=56 +ERR_XMLDECL_NOT_FINISHED=57 +ERR_CONDSEC_NOT_STARTED=58 +ERR_CONDSEC_NOT_FINISHED=59 +ERR_EXT_SUBSET_NOT_FINISHED=60 +ERR_DOCTYPE_NOT_FINISHED=61 +ERR_MISPLACED_CDATA_END=62 +ERR_CDATA_NOT_FINISHED=63 +ERR_RESERVED_XML_NAME=64 +ERR_SPACE_REQUIRED=65 +ERR_SEPARATOR_REQUIRED=66 +ERR_NMTOKEN_REQUIRED=67 +ERR_NAME_REQUIRED=68 +ERR_PCDATA_REQUIRED=69 +ERR_URI_REQUIRED=70 +ERR_PUBID_REQUIRED=71 +ERR_LT_REQUIRED=72 +ERR_GT_REQUIRED=73 +ERR_LTSLASH_REQUIRED=74 +ERR_EQUAL_REQUIRED=75 +ERR_TAG_NAME_MISMATCH=76 +ERR_TAG_NOT_FINISHED=77 +ERR_STANDALONE_VALUE=78 +ERR_ENCODING_NAME=79 +ERR_HYPHEN_IN_COMMENT=80 +ERR_INVALID_ENCODING=81 +ERR_EXT_ENTITY_STANDALONE=82 +ERR_CONDSEC_INVALID=83 +ERR_VALUE_REQUIRED=84 +ERR_NOT_WELL_BALANCED=85 +ERR_EXTRA_CONTENT=86 +ERR_ENTITY_CHAR_ERROR=87 +ERR_ENTITY_PE_INTERNAL=88 +ERR_ENTITY_LOOP=89 +ERR_ENTITY_BOUNDARY=90 +ERR_INVALID_URI=91 +ERR_URI_FRAGMENT=92 +WAR_CATALOG_PI=93 +ERR_NO_DTD=94 +ERR_CONDSEC_INVALID_KEYWORD=95 +ERR_VERSION_MISSING=96 +WAR_UNKNOWN_VERSION=97 +WAR_LANG_VALUE=98 +WAR_NS_URI=99 +WAR_NS_URI_RELATIVE=100 +ERR_MISSING_ENCODING=101 +WAR_SPACE_VALUE=102 +ERR_NOT_STANDALONE=103 +ERR_ENTITY_PROCESSING=104 +ERR_NOTATION_PROCESSING=105 +WAR_NS_COLUMN=106 +WAR_ENTITY_REDEFINED=107 +ERR_UNKNOWN_VERSION=108 +ERR_VERSION_MISMATCH=109 +ERR_NAME_TOO_LONG=110 +ERR_USER_STOP=111 +ERR_COMMENT_ABRUPTLY_ENDED=112 +NS_ERR_XML_NAMESPACE=200 +NS_ERR_UNDEFINED_NAMESPACE=201 +NS_ERR_QNAME=202 +NS_ERR_ATTRIBUTE_REDEFINED=203 +NS_ERR_EMPTY=204 +NS_ERR_COLON=205 +DTD_ATTRIBUTE_DEFAULT=500 +DTD_ATTRIBUTE_REDEFINED=501 +DTD_ATTRIBUTE_VALUE=502 
+DTD_CONTENT_ERROR=503 +DTD_CONTENT_MODEL=504 +DTD_CONTENT_NOT_DETERMINIST=505 +DTD_DIFFERENT_PREFIX=506 +DTD_ELEM_DEFAULT_NAMESPACE=507 +DTD_ELEM_NAMESPACE=508 +DTD_ELEM_REDEFINED=509 +DTD_EMPTY_NOTATION=510 +DTD_ENTITY_TYPE=511 +DTD_ID_FIXED=512 +DTD_ID_REDEFINED=513 +DTD_ID_SUBSET=514 +DTD_INVALID_CHILD=515 +DTD_INVALID_DEFAULT=516 +DTD_LOAD_ERROR=517 +DTD_MISSING_ATTRIBUTE=518 +DTD_MIXED_CORRUPT=519 +DTD_MULTIPLE_ID=520 +DTD_NO_DOC=521 +DTD_NO_DTD=522 +DTD_NO_ELEM_NAME=523 +DTD_NO_PREFIX=524 +DTD_NO_ROOT=525 +DTD_NOTATION_REDEFINED=526 +DTD_NOTATION_VALUE=527 +DTD_NOT_EMPTY=528 +DTD_NOT_PCDATA=529 +DTD_NOT_STANDALONE=530 +DTD_ROOT_NAME=531 +DTD_STANDALONE_WHITE_SPACE=532 +DTD_UNKNOWN_ATTRIBUTE=533 +DTD_UNKNOWN_ELEM=534 +DTD_UNKNOWN_ENTITY=535 +DTD_UNKNOWN_ID=536 +DTD_UNKNOWN_NOTATION=537 +DTD_STANDALONE_DEFAULTED=538 +DTD_XMLID_VALUE=539 +DTD_XMLID_TYPE=540 +DTD_DUP_TOKEN=541 +HTML_STRUCURE_ERROR=800 +HTML_UNKNOWN_TAG=801 +RNGP_ANYNAME_ATTR_ANCESTOR=1000 +RNGP_ATTR_CONFLICT=1001 +RNGP_ATTRIBUTE_CHILDREN=1002 +RNGP_ATTRIBUTE_CONTENT=1003 +RNGP_ATTRIBUTE_EMPTY=1004 +RNGP_ATTRIBUTE_NOOP=1005 +RNGP_CHOICE_CONTENT=1006 +RNGP_CHOICE_EMPTY=1007 +RNGP_CREATE_FAILURE=1008 +RNGP_DATA_CONTENT=1009 +RNGP_DEF_CHOICE_AND_INTERLEAVE=1010 +RNGP_DEFINE_CREATE_FAILED=1011 +RNGP_DEFINE_EMPTY=1012 +RNGP_DEFINE_MISSING=1013 +RNGP_DEFINE_NAME_MISSING=1014 +RNGP_ELEM_CONTENT_EMPTY=1015 +RNGP_ELEM_CONTENT_ERROR=1016 +RNGP_ELEMENT_EMPTY=1017 +RNGP_ELEMENT_CONTENT=1018 +RNGP_ELEMENT_NAME=1019 +RNGP_ELEMENT_NO_CONTENT=1020 +RNGP_ELEM_TEXT_CONFLICT=1021 +RNGP_EMPTY=1022 +RNGP_EMPTY_CONSTRUCT=1023 +RNGP_EMPTY_CONTENT=1024 +RNGP_EMPTY_NOT_EMPTY=1025 +RNGP_ERROR_TYPE_LIB=1026 +RNGP_EXCEPT_EMPTY=1027 +RNGP_EXCEPT_MISSING=1028 +RNGP_EXCEPT_MULTIPLE=1029 +RNGP_EXCEPT_NO_CONTENT=1030 +RNGP_EXTERNALREF_EMTPY=1031 +RNGP_EXTERNAL_REF_FAILURE=1032 +RNGP_EXTERNALREF_RECURSE=1033 +RNGP_FORBIDDEN_ATTRIBUTE=1034 +RNGP_FOREIGN_ELEMENT=1035 +RNGP_GRAMMAR_CONTENT=1036 +RNGP_GRAMMAR_EMPTY=1037 +RNGP_GRAMMAR_MISSING=1038 +RNGP_GRAMMAR_NO_START=1039 +RNGP_GROUP_ATTR_CONFLICT=1040 +RNGP_HREF_ERROR=1041 +RNGP_INCLUDE_EMPTY=1042 +RNGP_INCLUDE_FAILURE=1043 +RNGP_INCLUDE_RECURSE=1044 +RNGP_INTERLEAVE_ADD=1045 +RNGP_INTERLEAVE_CREATE_FAILED=1046 +RNGP_INTERLEAVE_EMPTY=1047 +RNGP_INTERLEAVE_NO_CONTENT=1048 +RNGP_INVALID_DEFINE_NAME=1049 +RNGP_INVALID_URI=1050 +RNGP_INVALID_VALUE=1051 +RNGP_MISSING_HREF=1052 +RNGP_NAME_MISSING=1053 +RNGP_NEED_COMBINE=1054 +RNGP_NOTALLOWED_NOT_EMPTY=1055 +RNGP_NSNAME_ATTR_ANCESTOR=1056 +RNGP_NSNAME_NO_NS=1057 +RNGP_PARAM_FORBIDDEN=1058 +RNGP_PARAM_NAME_MISSING=1059 +RNGP_PARENTREF_CREATE_FAILED=1060 +RNGP_PARENTREF_NAME_INVALID=1061 +RNGP_PARENTREF_NO_NAME=1062 +RNGP_PARENTREF_NO_PARENT=1063 +RNGP_PARENTREF_NOT_EMPTY=1064 +RNGP_PARSE_ERROR=1065 +RNGP_PAT_ANYNAME_EXCEPT_ANYNAME=1066 +RNGP_PAT_ATTR_ATTR=1067 +RNGP_PAT_ATTR_ELEM=1068 +RNGP_PAT_DATA_EXCEPT_ATTR=1069 +RNGP_PAT_DATA_EXCEPT_ELEM=1070 +RNGP_PAT_DATA_EXCEPT_EMPTY=1071 +RNGP_PAT_DATA_EXCEPT_GROUP=1072 +RNGP_PAT_DATA_EXCEPT_INTERLEAVE=1073 +RNGP_PAT_DATA_EXCEPT_LIST=1074 +RNGP_PAT_DATA_EXCEPT_ONEMORE=1075 +RNGP_PAT_DATA_EXCEPT_REF=1076 +RNGP_PAT_DATA_EXCEPT_TEXT=1077 +RNGP_PAT_LIST_ATTR=1078 +RNGP_PAT_LIST_ELEM=1079 +RNGP_PAT_LIST_INTERLEAVE=1080 +RNGP_PAT_LIST_LIST=1081 +RNGP_PAT_LIST_REF=1082 +RNGP_PAT_LIST_TEXT=1083 +RNGP_PAT_NSNAME_EXCEPT_ANYNAME=1084 +RNGP_PAT_NSNAME_EXCEPT_NSNAME=1085 +RNGP_PAT_ONEMORE_GROUP_ATTR=1086 +RNGP_PAT_ONEMORE_INTERLEAVE_ATTR=1087 +RNGP_PAT_START_ATTR=1088 +RNGP_PAT_START_DATA=1089 +RNGP_PAT_START_EMPTY=1090 
+RNGP_PAT_START_GROUP=1091 +RNGP_PAT_START_INTERLEAVE=1092 +RNGP_PAT_START_LIST=1093 +RNGP_PAT_START_ONEMORE=1094 +RNGP_PAT_START_TEXT=1095 +RNGP_PAT_START_VALUE=1096 +RNGP_PREFIX_UNDEFINED=1097 +RNGP_REF_CREATE_FAILED=1098 +RNGP_REF_CYCLE=1099 +RNGP_REF_NAME_INVALID=1100 +RNGP_REF_NO_DEF=1101 +RNGP_REF_NO_NAME=1102 +RNGP_REF_NOT_EMPTY=1103 +RNGP_START_CHOICE_AND_INTERLEAVE=1104 +RNGP_START_CONTENT=1105 +RNGP_START_EMPTY=1106 +RNGP_START_MISSING=1107 +RNGP_TEXT_EXPECTED=1108 +RNGP_TEXT_HAS_CHILD=1109 +RNGP_TYPE_MISSING=1110 +RNGP_TYPE_NOT_FOUND=1111 +RNGP_TYPE_VALUE=1112 +RNGP_UNKNOWN_ATTRIBUTE=1113 +RNGP_UNKNOWN_COMBINE=1114 +RNGP_UNKNOWN_CONSTRUCT=1115 +RNGP_UNKNOWN_TYPE_LIB=1116 +RNGP_URI_FRAGMENT=1117 +RNGP_URI_NOT_ABSOLUTE=1118 +RNGP_VALUE_EMPTY=1119 +RNGP_VALUE_NO_CONTENT=1120 +RNGP_XMLNS_NAME=1121 +RNGP_XML_NS=1122 +XPATH_EXPRESSION_OK=1200 +XPATH_NUMBER_ERROR=1201 +XPATH_UNFINISHED_LITERAL_ERROR=1202 +XPATH_START_LITERAL_ERROR=1203 +XPATH_VARIABLE_REF_ERROR=1204 +XPATH_UNDEF_VARIABLE_ERROR=1205 +XPATH_INVALID_PREDICATE_ERROR=1206 +XPATH_EXPR_ERROR=1207 +XPATH_UNCLOSED_ERROR=1208 +XPATH_UNKNOWN_FUNC_ERROR=1209 +XPATH_INVALID_OPERAND=1210 +XPATH_INVALID_TYPE=1211 +XPATH_INVALID_ARITY=1212 +XPATH_INVALID_CTXT_SIZE=1213 +XPATH_INVALID_CTXT_POSITION=1214 +XPATH_MEMORY_ERROR=1215 +XPTR_SYNTAX_ERROR=1216 +XPTR_RESOURCE_ERROR=1217 +XPTR_SUB_RESOURCE_ERROR=1218 +XPATH_UNDEF_PREFIX_ERROR=1219 +XPATH_ENCODING_ERROR=1220 +XPATH_INVALID_CHAR_ERROR=1221 +TREE_INVALID_HEX=1300 +TREE_INVALID_DEC=1301 +TREE_UNTERMINATED_ENTITY=1302 +TREE_NOT_UTF8=1303 +SAVE_NOT_UTF8=1400 +SAVE_CHAR_INVALID=1401 +SAVE_NO_DOCTYPE=1402 +SAVE_UNKNOWN_ENCODING=1403 +REGEXP_COMPILE_ERROR=1450 +IO_UNKNOWN=1500 +IO_EACCES=1501 +IO_EAGAIN=1502 +IO_EBADF=1503 +IO_EBADMSG=1504 +IO_EBUSY=1505 +IO_ECANCELED=1506 +IO_ECHILD=1507 +IO_EDEADLK=1508 +IO_EDOM=1509 +IO_EEXIST=1510 +IO_EFAULT=1511 +IO_EFBIG=1512 +IO_EINPROGRESS=1513 +IO_EINTR=1514 +IO_EINVAL=1515 +IO_EIO=1516 +IO_EISDIR=1517 +IO_EMFILE=1518 +IO_EMLINK=1519 +IO_EMSGSIZE=1520 +IO_ENAMETOOLONG=1521 +IO_ENFILE=1522 +IO_ENODEV=1523 +IO_ENOENT=1524 +IO_ENOEXEC=1525 +IO_ENOLCK=1526 +IO_ENOMEM=1527 +IO_ENOSPC=1528 +IO_ENOSYS=1529 +IO_ENOTDIR=1530 +IO_ENOTEMPTY=1531 +IO_ENOTSUP=1532 +IO_ENOTTY=1533 +IO_ENXIO=1534 +IO_EPERM=1535 +IO_EPIPE=1536 +IO_ERANGE=1537 +IO_EROFS=1538 +IO_ESPIPE=1539 +IO_ESRCH=1540 +IO_ETIMEDOUT=1541 +IO_EXDEV=1542 +IO_NETWORK_ATTEMPT=1543 +IO_ENCODER=1544 +IO_FLUSH=1545 +IO_WRITE=1546 +IO_NO_INPUT=1547 +IO_BUFFER_FULL=1548 +IO_LOAD_ERROR=1549 +IO_ENOTSOCK=1550 +IO_EISCONN=1551 +IO_ECONNREFUSED=1552 +IO_ENETUNREACH=1553 +IO_EADDRINUSE=1554 +IO_EALREADY=1555 +IO_EAFNOSUPPORT=1556 +XINCLUDE_RECURSION=1600 +XINCLUDE_PARSE_VALUE=1601 +XINCLUDE_ENTITY_DEF_MISMATCH=1602 +XINCLUDE_NO_HREF=1603 +XINCLUDE_NO_FALLBACK=1604 +XINCLUDE_HREF_URI=1605 +XINCLUDE_TEXT_FRAGMENT=1606 +XINCLUDE_TEXT_DOCUMENT=1607 +XINCLUDE_INVALID_CHAR=1608 +XINCLUDE_BUILD_FAILED=1609 +XINCLUDE_UNKNOWN_ENCODING=1610 +XINCLUDE_MULTIPLE_ROOT=1611 +XINCLUDE_XPTR_FAILED=1612 +XINCLUDE_XPTR_RESULT=1613 +XINCLUDE_INCLUDE_IN_INCLUDE=1614 +XINCLUDE_FALLBACKS_IN_INCLUDE=1615 +XINCLUDE_FALLBACK_NOT_IN_INCLUDE=1616 +XINCLUDE_DEPRECATED_NS=1617 +XINCLUDE_FRAGMENT_ID=1618 +CATALOG_MISSING_ATTR=1650 +CATALOG_ENTRY_BROKEN=1651 +CATALOG_PREFER_VALUE=1652 +CATALOG_NOT_CATALOG=1653 +CATALOG_RECURSION=1654 +SCHEMAP_PREFIX_UNDEFINED=1700 +SCHEMAP_ATTRFORMDEFAULT_VALUE=1701 +SCHEMAP_ATTRGRP_NONAME_NOREF=1702 +SCHEMAP_ATTR_NONAME_NOREF=1703 +SCHEMAP_COMPLEXTYPE_NONAME_NOREF=1704 +SCHEMAP_ELEMFORMDEFAULT_VALUE=1705 
+SCHEMAP_ELEM_NONAME_NOREF=1706 +SCHEMAP_EXTENSION_NO_BASE=1707 +SCHEMAP_FACET_NO_VALUE=1708 +SCHEMAP_FAILED_BUILD_IMPORT=1709 +SCHEMAP_GROUP_NONAME_NOREF=1710 +SCHEMAP_IMPORT_NAMESPACE_NOT_URI=1711 +SCHEMAP_IMPORT_REDEFINE_NSNAME=1712 +SCHEMAP_IMPORT_SCHEMA_NOT_URI=1713 +SCHEMAP_INVALID_BOOLEAN=1714 +SCHEMAP_INVALID_ENUM=1715 +SCHEMAP_INVALID_FACET=1716 +SCHEMAP_INVALID_FACET_VALUE=1717 +SCHEMAP_INVALID_MAXOCCURS=1718 +SCHEMAP_INVALID_MINOCCURS=1719 +SCHEMAP_INVALID_REF_AND_SUBTYPE=1720 +SCHEMAP_INVALID_WHITE_SPACE=1721 +SCHEMAP_NOATTR_NOREF=1722 +SCHEMAP_NOTATION_NO_NAME=1723 +SCHEMAP_NOTYPE_NOREF=1724 +SCHEMAP_REF_AND_SUBTYPE=1725 +SCHEMAP_RESTRICTION_NONAME_NOREF=1726 +SCHEMAP_SIMPLETYPE_NONAME=1727 +SCHEMAP_TYPE_AND_SUBTYPE=1728 +SCHEMAP_UNKNOWN_ALL_CHILD=1729 +SCHEMAP_UNKNOWN_ANYATTRIBUTE_CHILD=1730 +SCHEMAP_UNKNOWN_ATTR_CHILD=1731 +SCHEMAP_UNKNOWN_ATTRGRP_CHILD=1732 +SCHEMAP_UNKNOWN_ATTRIBUTE_GROUP=1733 +SCHEMAP_UNKNOWN_BASE_TYPE=1734 +SCHEMAP_UNKNOWN_CHOICE_CHILD=1735 +SCHEMAP_UNKNOWN_COMPLEXCONTENT_CHILD=1736 +SCHEMAP_UNKNOWN_COMPLEXTYPE_CHILD=1737 +SCHEMAP_UNKNOWN_ELEM_CHILD=1738 +SCHEMAP_UNKNOWN_EXTENSION_CHILD=1739 +SCHEMAP_UNKNOWN_FACET_CHILD=1740 +SCHEMAP_UNKNOWN_FACET_TYPE=1741 +SCHEMAP_UNKNOWN_GROUP_CHILD=1742 +SCHEMAP_UNKNOWN_IMPORT_CHILD=1743 +SCHEMAP_UNKNOWN_LIST_CHILD=1744 +SCHEMAP_UNKNOWN_NOTATION_CHILD=1745 +SCHEMAP_UNKNOWN_PROCESSCONTENT_CHILD=1746 +SCHEMAP_UNKNOWN_REF=1747 +SCHEMAP_UNKNOWN_RESTRICTION_CHILD=1748 +SCHEMAP_UNKNOWN_SCHEMAS_CHILD=1749 +SCHEMAP_UNKNOWN_SEQUENCE_CHILD=1750 +SCHEMAP_UNKNOWN_SIMPLECONTENT_CHILD=1751 +SCHEMAP_UNKNOWN_SIMPLETYPE_CHILD=1752 +SCHEMAP_UNKNOWN_TYPE=1753 +SCHEMAP_UNKNOWN_UNION_CHILD=1754 +SCHEMAP_ELEM_DEFAULT_FIXED=1755 +SCHEMAP_REGEXP_INVALID=1756 +SCHEMAP_FAILED_LOAD=1757 +SCHEMAP_NOTHING_TO_PARSE=1758 +SCHEMAP_NOROOT=1759 +SCHEMAP_REDEFINED_GROUP=1760 +SCHEMAP_REDEFINED_TYPE=1761 +SCHEMAP_REDEFINED_ELEMENT=1762 +SCHEMAP_REDEFINED_ATTRGROUP=1763 +SCHEMAP_REDEFINED_ATTR=1764 +SCHEMAP_REDEFINED_NOTATION=1765 +SCHEMAP_FAILED_PARSE=1766 +SCHEMAP_UNKNOWN_PREFIX=1767 +SCHEMAP_DEF_AND_PREFIX=1768 +SCHEMAP_UNKNOWN_INCLUDE_CHILD=1769 +SCHEMAP_INCLUDE_SCHEMA_NOT_URI=1770 +SCHEMAP_INCLUDE_SCHEMA_NO_URI=1771 +SCHEMAP_NOT_SCHEMA=1772 +SCHEMAP_UNKNOWN_MEMBER_TYPE=1773 +SCHEMAP_INVALID_ATTR_USE=1774 +SCHEMAP_RECURSIVE=1775 +SCHEMAP_SUPERNUMEROUS_LIST_ITEM_TYPE=1776 +SCHEMAP_INVALID_ATTR_COMBINATION=1777 +SCHEMAP_INVALID_ATTR_INLINE_COMBINATION=1778 +SCHEMAP_MISSING_SIMPLETYPE_CHILD=1779 +SCHEMAP_INVALID_ATTR_NAME=1780 +SCHEMAP_REF_AND_CONTENT=1781 +SCHEMAP_CT_PROPS_CORRECT_1=1782 +SCHEMAP_CT_PROPS_CORRECT_2=1783 +SCHEMAP_CT_PROPS_CORRECT_3=1784 +SCHEMAP_CT_PROPS_CORRECT_4=1785 +SCHEMAP_CT_PROPS_CORRECT_5=1786 +SCHEMAP_DERIVATION_OK_RESTRICTION_1=1787 +SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_1=1788 +SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_2=1789 +SCHEMAP_DERIVATION_OK_RESTRICTION_2_2=1790 +SCHEMAP_DERIVATION_OK_RESTRICTION_3=1791 +SCHEMAP_WILDCARD_INVALID_NS_MEMBER=1792 +SCHEMAP_INTERSECTION_NOT_EXPRESSIBLE=1793 +SCHEMAP_UNION_NOT_EXPRESSIBLE=1794 +SCHEMAP_SRC_IMPORT_3_1=1795 +SCHEMAP_SRC_IMPORT_3_2=1796 +SCHEMAP_DERIVATION_OK_RESTRICTION_4_1=1797 +SCHEMAP_DERIVATION_OK_RESTRICTION_4_2=1798 +SCHEMAP_DERIVATION_OK_RESTRICTION_4_3=1799 +SCHEMAP_COS_CT_EXTENDS_1_3=1800 +SCHEMAV_NOROOT=1801 +SCHEMAV_UNDECLAREDELEM=1802 +SCHEMAV_NOTTOPLEVEL=1803 +SCHEMAV_MISSING=1804 +SCHEMAV_WRONGELEM=1805 +SCHEMAV_NOTYPE=1806 +SCHEMAV_NOROLLBACK=1807 +SCHEMAV_ISABSTRACT=1808 +SCHEMAV_NOTEMPTY=1809 +SCHEMAV_ELEMCONT=1810 +SCHEMAV_HAVEDEFAULT=1811 
+SCHEMAV_NOTNILLABLE=1812 +SCHEMAV_EXTRACONTENT=1813 +SCHEMAV_INVALIDATTR=1814 +SCHEMAV_INVALIDELEM=1815 +SCHEMAV_NOTDETERMINIST=1816 +SCHEMAV_CONSTRUCT=1817 +SCHEMAV_INTERNAL=1818 +SCHEMAV_NOTSIMPLE=1819 +SCHEMAV_ATTRUNKNOWN=1820 +SCHEMAV_ATTRINVALID=1821 +SCHEMAV_VALUE=1822 +SCHEMAV_FACET=1823 +SCHEMAV_CVC_DATATYPE_VALID_1_2_1=1824 +SCHEMAV_CVC_DATATYPE_VALID_1_2_2=1825 +SCHEMAV_CVC_DATATYPE_VALID_1_2_3=1826 +SCHEMAV_CVC_TYPE_3_1_1=1827 +SCHEMAV_CVC_TYPE_3_1_2=1828 +SCHEMAV_CVC_FACET_VALID=1829 +SCHEMAV_CVC_LENGTH_VALID=1830 +SCHEMAV_CVC_MINLENGTH_VALID=1831 +SCHEMAV_CVC_MAXLENGTH_VALID=1832 +SCHEMAV_CVC_MININCLUSIVE_VALID=1833 +SCHEMAV_CVC_MAXINCLUSIVE_VALID=1834 +SCHEMAV_CVC_MINEXCLUSIVE_VALID=1835 +SCHEMAV_CVC_MAXEXCLUSIVE_VALID=1836 +SCHEMAV_CVC_TOTALDIGITS_VALID=1837 +SCHEMAV_CVC_FRACTIONDIGITS_VALID=1838 +SCHEMAV_CVC_PATTERN_VALID=1839 +SCHEMAV_CVC_ENUMERATION_VALID=1840 +SCHEMAV_CVC_COMPLEX_TYPE_2_1=1841 +SCHEMAV_CVC_COMPLEX_TYPE_2_2=1842 +SCHEMAV_CVC_COMPLEX_TYPE_2_3=1843 +SCHEMAV_CVC_COMPLEX_TYPE_2_4=1844 +SCHEMAV_CVC_ELT_1=1845 +SCHEMAV_CVC_ELT_2=1846 +SCHEMAV_CVC_ELT_3_1=1847 +SCHEMAV_CVC_ELT_3_2_1=1848 +SCHEMAV_CVC_ELT_3_2_2=1849 +SCHEMAV_CVC_ELT_4_1=1850 +SCHEMAV_CVC_ELT_4_2=1851 +SCHEMAV_CVC_ELT_4_3=1852 +SCHEMAV_CVC_ELT_5_1_1=1853 +SCHEMAV_CVC_ELT_5_1_2=1854 +SCHEMAV_CVC_ELT_5_2_1=1855 +SCHEMAV_CVC_ELT_5_2_2_1=1856 +SCHEMAV_CVC_ELT_5_2_2_2_1=1857 +SCHEMAV_CVC_ELT_5_2_2_2_2=1858 +SCHEMAV_CVC_ELT_6=1859 +SCHEMAV_CVC_ELT_7=1860 +SCHEMAV_CVC_ATTRIBUTE_1=1861 +SCHEMAV_CVC_ATTRIBUTE_2=1862 +SCHEMAV_CVC_ATTRIBUTE_3=1863 +SCHEMAV_CVC_ATTRIBUTE_4=1864 +SCHEMAV_CVC_COMPLEX_TYPE_3_1=1865 +SCHEMAV_CVC_COMPLEX_TYPE_3_2_1=1866 +SCHEMAV_CVC_COMPLEX_TYPE_3_2_2=1867 +SCHEMAV_CVC_COMPLEX_TYPE_4=1868 +SCHEMAV_CVC_COMPLEX_TYPE_5_1=1869 +SCHEMAV_CVC_COMPLEX_TYPE_5_2=1870 +SCHEMAV_ELEMENT_CONTENT=1871 +SCHEMAV_DOCUMENT_ELEMENT_MISSING=1872 +SCHEMAV_CVC_COMPLEX_TYPE_1=1873 +SCHEMAV_CVC_AU=1874 +SCHEMAV_CVC_TYPE_1=1875 +SCHEMAV_CVC_TYPE_2=1876 +SCHEMAV_CVC_IDC=1877 +SCHEMAV_CVC_WILDCARD=1878 +SCHEMAV_MISC=1879 +XPTR_UNKNOWN_SCHEME=1900 +XPTR_CHILDSEQ_START=1901 +XPTR_EVAL_FAILED=1902 +XPTR_EXTRA_OBJECTS=1903 +C14N_CREATE_CTXT=1950 +C14N_REQUIRES_UTF8=1951 +C14N_CREATE_STACK=1952 +C14N_INVALID_NODE=1953 +C14N_UNKNOW_NODE=1954 +C14N_RELATIVE_NAMESPACE=1955 +FTP_PASV_ANSWER=2000 +FTP_EPSV_ANSWER=2001 +FTP_ACCNT=2002 +FTP_URL_SYNTAX=2003 +HTTP_URL_SYNTAX=2020 +HTTP_USE_IP=2021 +HTTP_UNKNOWN_HOST=2022 +SCHEMAP_SRC_SIMPLE_TYPE_1=3000 +SCHEMAP_SRC_SIMPLE_TYPE_2=3001 +SCHEMAP_SRC_SIMPLE_TYPE_3=3002 +SCHEMAP_SRC_SIMPLE_TYPE_4=3003 +SCHEMAP_SRC_RESOLVE=3004 +SCHEMAP_SRC_RESTRICTION_BASE_OR_SIMPLETYPE=3005 +SCHEMAP_SRC_LIST_ITEMTYPE_OR_SIMPLETYPE=3006 +SCHEMAP_SRC_UNION_MEMBERTYPES_OR_SIMPLETYPES=3007 +SCHEMAP_ST_PROPS_CORRECT_1=3008 +SCHEMAP_ST_PROPS_CORRECT_2=3009 +SCHEMAP_ST_PROPS_CORRECT_3=3010 +SCHEMAP_COS_ST_RESTRICTS_1_1=3011 +SCHEMAP_COS_ST_RESTRICTS_1_2=3012 +SCHEMAP_COS_ST_RESTRICTS_1_3_1=3013 +SCHEMAP_COS_ST_RESTRICTS_1_3_2=3014 +SCHEMAP_COS_ST_RESTRICTS_2_1=3015 +SCHEMAP_COS_ST_RESTRICTS_2_3_1_1=3016 +SCHEMAP_COS_ST_RESTRICTS_2_3_1_2=3017 +SCHEMAP_COS_ST_RESTRICTS_2_3_2_1=3018 +SCHEMAP_COS_ST_RESTRICTS_2_3_2_2=3019 +SCHEMAP_COS_ST_RESTRICTS_2_3_2_3=3020 +SCHEMAP_COS_ST_RESTRICTS_2_3_2_4=3021 +SCHEMAP_COS_ST_RESTRICTS_2_3_2_5=3022 +SCHEMAP_COS_ST_RESTRICTS_3_1=3023 +SCHEMAP_COS_ST_RESTRICTS_3_3_1=3024 +SCHEMAP_COS_ST_RESTRICTS_3_3_1_2=3025 +SCHEMAP_COS_ST_RESTRICTS_3_3_2_2=3026 +SCHEMAP_COS_ST_RESTRICTS_3_3_2_1=3027 +SCHEMAP_COS_ST_RESTRICTS_3_3_2_3=3028 +SCHEMAP_COS_ST_RESTRICTS_3_3_2_4=3029 
+SCHEMAP_COS_ST_RESTRICTS_3_3_2_5=3030 +SCHEMAP_COS_ST_DERIVED_OK_2_1=3031 +SCHEMAP_COS_ST_DERIVED_OK_2_2=3032 +SCHEMAP_S4S_ELEM_NOT_ALLOWED=3033 +SCHEMAP_S4S_ELEM_MISSING=3034 +SCHEMAP_S4S_ATTR_NOT_ALLOWED=3035 +SCHEMAP_S4S_ATTR_MISSING=3036 +SCHEMAP_S4S_ATTR_INVALID_VALUE=3037 +SCHEMAP_SRC_ELEMENT_1=3038 +SCHEMAP_SRC_ELEMENT_2_1=3039 +SCHEMAP_SRC_ELEMENT_2_2=3040 +SCHEMAP_SRC_ELEMENT_3=3041 +SCHEMAP_P_PROPS_CORRECT_1=3042 +SCHEMAP_P_PROPS_CORRECT_2_1=3043 +SCHEMAP_P_PROPS_CORRECT_2_2=3044 +SCHEMAP_E_PROPS_CORRECT_2=3045 +SCHEMAP_E_PROPS_CORRECT_3=3046 +SCHEMAP_E_PROPS_CORRECT_4=3047 +SCHEMAP_E_PROPS_CORRECT_5=3048 +SCHEMAP_E_PROPS_CORRECT_6=3049 +SCHEMAP_SRC_INCLUDE=3050 +SCHEMAP_SRC_ATTRIBUTE_1=3051 +SCHEMAP_SRC_ATTRIBUTE_2=3052 +SCHEMAP_SRC_ATTRIBUTE_3_1=3053 +SCHEMAP_SRC_ATTRIBUTE_3_2=3054 +SCHEMAP_SRC_ATTRIBUTE_4=3055 +SCHEMAP_NO_XMLNS=3056 +SCHEMAP_NO_XSI=3057 +SCHEMAP_COS_VALID_DEFAULT_1=3058 +SCHEMAP_COS_VALID_DEFAULT_2_1=3059 +SCHEMAP_COS_VALID_DEFAULT_2_2_1=3060 +SCHEMAP_COS_VALID_DEFAULT_2_2_2=3061 +SCHEMAP_CVC_SIMPLE_TYPE=3062 +SCHEMAP_COS_CT_EXTENDS_1_1=3063 +SCHEMAP_SRC_IMPORT_1_1=3064 +SCHEMAP_SRC_IMPORT_1_2=3065 +SCHEMAP_SRC_IMPORT_2=3066 +SCHEMAP_SRC_IMPORT_2_1=3067 +SCHEMAP_SRC_IMPORT_2_2=3068 +SCHEMAP_INTERNAL=3069 +SCHEMAP_NOT_DETERMINISTIC=3070 +SCHEMAP_SRC_ATTRIBUTE_GROUP_1=3071 +SCHEMAP_SRC_ATTRIBUTE_GROUP_2=3072 +SCHEMAP_SRC_ATTRIBUTE_GROUP_3=3073 +SCHEMAP_MG_PROPS_CORRECT_1=3074 +SCHEMAP_MG_PROPS_CORRECT_2=3075 +SCHEMAP_SRC_CT_1=3076 +SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_3=3077 +SCHEMAP_AU_PROPS_CORRECT_2=3078 +SCHEMAP_A_PROPS_CORRECT_2=3079 +SCHEMAP_C_PROPS_CORRECT=3080 +SCHEMAP_SRC_REDEFINE=3081 +SCHEMAP_SRC_IMPORT=3082 +SCHEMAP_WARN_SKIP_SCHEMA=3083 +SCHEMAP_WARN_UNLOCATED_SCHEMA=3084 +SCHEMAP_WARN_ATTR_REDECL_PROH=3085 +SCHEMAP_WARN_ATTR_POINTLESS_PROH=3086 +SCHEMAP_AG_PROPS_CORRECT=3087 +SCHEMAP_COS_CT_EXTENDS_1_2=3088 +SCHEMAP_AU_PROPS_CORRECT=3089 +SCHEMAP_A_PROPS_CORRECT_3=3090 +SCHEMAP_COS_ALL_LIMITED=3091 +SCHEMATRONV_ASSERT=4000 +SCHEMATRONV_REPORT=4001 +MODULE_OPEN=4900 +MODULE_CLOSE=4901 +CHECK_FOUND_ELEMENT=5000 +CHECK_FOUND_ATTRIBUTE=5001 +CHECK_FOUND_TEXT=5002 +CHECK_FOUND_CDATA=5003 +CHECK_FOUND_ENTITYREF=5004 +CHECK_FOUND_ENTITY=5005 +CHECK_FOUND_PI=5006 +CHECK_FOUND_COMMENT=5007 +CHECK_FOUND_DOCTYPE=5008 +CHECK_FOUND_FRAGMENT=5009 +CHECK_FOUND_NOTATION=5010 +CHECK_UNKNOWN_NODE=5011 +CHECK_ENTITY_TYPE=5012 +CHECK_NO_PARENT=5013 +CHECK_NO_DOC=5014 +CHECK_NO_NAME=5015 +CHECK_NO_ELEM=5016 +CHECK_WRONG_DOC=5017 +CHECK_NO_PREV=5018 +CHECK_WRONG_PREV=5019 +CHECK_NO_NEXT=5020 +CHECK_WRONG_NEXT=5021 +CHECK_NOT_DTD=5022 +CHECK_NOT_ATTR=5023 +CHECK_NOT_ATTR_DECL=5024 +CHECK_NOT_ELEM_DECL=5025 +CHECK_NOT_ENTITY_DECL=5026 +CHECK_NOT_NS_DECL=5027 +CHECK_NO_HREF=5028 +CHECK_WRONG_PARENT=5029 +CHECK_NS_SCOPE=5030 +CHECK_NS_ANCESTOR=5031 +CHECK_NOT_UTF8=5032 +CHECK_NO_DICT=5033 +CHECK_NOT_NCNAME=5034 +CHECK_OUTSIDE_DICT=5035 +CHECK_WRONG_NAME=5036 +CHECK_NAME_NOT_NULL=5037 +I18N_NO_NAME=6000 +I18N_NO_HANDLER=6001 +I18N_EXCESS_HANDLER=6002 +I18N_CONV_FAILED=6003 +I18N_NO_OUTPUT=6004 +BUF_OVERFLOW=7000 +""" + +cdef object __RELAXNG_ERROR_TYPES = """\ +RELAXNG_OK=0 +RELAXNG_ERR_MEMORY=1 +RELAXNG_ERR_TYPE=2 +RELAXNG_ERR_TYPEVAL=3 +RELAXNG_ERR_DUPID=4 +RELAXNG_ERR_TYPECMP=5 +RELAXNG_ERR_NOSTATE=6 +RELAXNG_ERR_NODEFINE=7 +RELAXNG_ERR_LISTEXTRA=8 +RELAXNG_ERR_LISTEMPTY=9 +RELAXNG_ERR_INTERNODATA=10 +RELAXNG_ERR_INTERSEQ=11 +RELAXNG_ERR_INTEREXTRA=12 +RELAXNG_ERR_ELEMNAME=13 +RELAXNG_ERR_ATTRNAME=14 +RELAXNG_ERR_ELEMNONS=15 +RELAXNG_ERR_ATTRNONS=16 
+RELAXNG_ERR_ELEMWRONGNS=17 +RELAXNG_ERR_ATTRWRONGNS=18 +RELAXNG_ERR_ELEMEXTRANS=19 +RELAXNG_ERR_ATTREXTRANS=20 +RELAXNG_ERR_ELEMNOTEMPTY=21 +RELAXNG_ERR_NOELEM=22 +RELAXNG_ERR_NOTELEM=23 +RELAXNG_ERR_ATTRVALID=24 +RELAXNG_ERR_CONTENTVALID=25 +RELAXNG_ERR_EXTRACONTENT=26 +RELAXNG_ERR_INVALIDATTR=27 +RELAXNG_ERR_DATAELEM=28 +RELAXNG_ERR_VALELEM=29 +RELAXNG_ERR_LISTELEM=30 +RELAXNG_ERR_DATATYPE=31 +RELAXNG_ERR_VALUE=32 +RELAXNG_ERR_LIST=33 +RELAXNG_ERR_NOGRAMMAR=34 +RELAXNG_ERR_EXTRADATA=35 +RELAXNG_ERR_LACKDATA=36 +RELAXNG_ERR_INTERNAL=37 +RELAXNG_ERR_ELEMWRONG=38 +RELAXNG_ERR_TEXTWRONG=39 +""" +# --- END: GENERATED CONSTANTS --- + +__initErrorConstants() diff --git a/venv/lib/python3.10/site-packages/lxml/xmlid.pxi b/venv/lib/python3.10/site-packages/lxml/xmlid.pxi new file mode 100644 index 0000000000000000000000000000000000000000..1531f6d98fdde3319e453826e12e375a80fafcc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/xmlid.pxi @@ -0,0 +1,179 @@ +cdef object _find_id_attributes + +def XMLID(text, parser=None, *, base_url=None): + """XMLID(text, parser=None, base_url=None) + + Parse the text and return a tuple (root node, ID dictionary). The root + node is the same as returned by the XML() function. The dictionary + contains string-element pairs. The dictionary keys are the values of 'id' + attributes. The elements referenced by the ID are stored as dictionary + values. + """ + cdef dict dic + global _find_id_attributes + if _find_id_attributes is None: + _find_id_attributes = XPath('//*[string(@id)]') + + # ElementTree compatible implementation: parse and look for 'id' attributes + root = XML(text, parser, base_url=base_url) + dic = {} + for elem in _find_id_attributes(root): + dic[elem.get('id')] = elem + return root, dic + +def XMLDTDID(text, parser=None, *, base_url=None): + """XMLDTDID(text, parser=None, base_url=None) + + Parse the text and return a tuple (root node, ID dictionary). The root + node is the same as returned by the XML() function. The dictionary + contains string-element pairs. The dictionary keys are the values of ID + attributes as defined by the DTD. The elements referenced by the ID are + stored as dictionary values. + + Note that you must not modify the XML tree if you use the ID dictionary. + The results are undefined. + """ + cdef _Element root + root = XML(text, parser, base_url=base_url) + # xml:id spec compatible implementation: use DTD ID attributes from libxml2 + if root._doc._c_doc.ids is NULL: + return root, {} + else: + return root, _IDDict(root) + +def parseid(source, parser=None, *, base_url=None): + """parseid(source, parser=None) + + Parses the source into a tuple containing an ElementTree object and an + ID dictionary. If no parser is provided as second argument, the default + parser is used. + + Note that you must not modify the XML tree if you use the ID dictionary. + The results are undefined. + """ + cdef _Document doc + doc = _parseDocument(source, parser, base_url) + return _elementTreeFactory(doc, None), _IDDict(doc) + +cdef class _IDDict: + """IDDict(self, etree) + A dictionary-like proxy class that mapps ID attributes to elements. + + The dictionary must be instantiated with the root element of a parsed XML + document, otherwise the behaviour is undefined. Elements and XML trees + that were created or modified 'by hand' are not supported. 
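+
+    A minimal usage sketch (assuming ``xml_with_dtd`` is a document whose
+    DTD declares an ID attribute with the value ``'chapter1'``)::
+
+        root, ids = XMLDTDID(xml_with_dtd)
+        if 'chapter1' in ids:
+            element = ids['chapter1']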
+ """ + cdef _Document _doc + cdef object _keys + cdef object _items + def __cinit__(self, etree): + cdef _Document doc + doc = _documentOrRaise(etree) + if doc._c_doc.ids is NULL: + raise ValueError, "No ID dictionary available." + self._doc = doc + self._keys = None + self._items = None + + def copy(self): + return _IDDict(self._doc) + + def __getitem__(self, id_name): + cdef tree.xmlHashTable* c_ids + cdef tree.xmlID* c_id + cdef xmlAttr* c_attr + c_ids = self._doc._c_doc.ids + id_utf = _utf8(id_name) + c_id = tree.xmlHashLookup(c_ids, _xcstr(id_utf)) + if c_id is NULL: + raise KeyError, "key not found." + c_attr = c_id.attr + if c_attr is NULL or c_attr.parent is NULL: + raise KeyError, "ID attribute not found." + return _elementFactory(self._doc, c_attr.parent) + + def get(self, id_name): + return self[id_name] + + def __contains__(self, id_name): + cdef tree.xmlID* c_id + id_utf = _utf8(id_name) + c_id = tree.xmlHashLookup( + self._doc._c_doc.ids, _xcstr(id_utf)) + return c_id is not NULL + + def has_key(self, id_name): + return id_name in self + + def __repr__(self): + return repr(dict(self)) + + def keys(self): + if self._keys is None: + self._keys = self._build_keys() + return self._keys[:] + + def __iter__(self): + if self._keys is None: + self._keys = self._build_keys() + return iter(self._keys) + + def iterkeys(self): + return self + + def __len__(self): + if self._keys is None: + self._keys = self._build_keys() + return len(self._keys) + + def items(self): + if self._items is None: + self._items = self._build_items() + return self._items[:] + + def iteritems(self): + if self._items is None: + self._items = self._build_items() + return iter(self._items) + + def values(self): + cdef list values = [] + if self._items is None: + self._items = self._build_items() + for item in self._items: + value = python.PyTuple_GET_ITEM(item, 1) + python.Py_INCREF(value) + values.append(value) + return values + + def itervalues(self): + return iter(self.values()) + + cdef object _build_keys(self): + keys = [] + tree.xmlHashScan(self._doc._c_doc.ids, + _collectIdHashKeys, keys) + return keys + + cdef object _build_items(self): + items = [] + context = (items, self._doc) + tree.xmlHashScan(self._doc._c_doc.ids, + _collectIdHashItemList, context) + return items + +cdef void _collectIdHashItemList(void* payload, void* context, xmlChar* name) noexcept: + # collect elements from ID attribute hash table + cdef list lst + c_id = payload + if c_id is NULL or c_id.attr is NULL or c_id.attr.parent is NULL: + return + lst, doc = context + element = _elementFactory(doc, c_id.attr.parent) + lst.append( (funicode(name), element) ) + +cdef void _collectIdHashKeys(void* payload, void* collect_list, xmlChar* name) noexcept: + c_id = payload + if c_id is NULL or c_id.attr is NULL or c_id.attr.parent is NULL: + return + (collect_list).append(funicode(name)) diff --git a/venv/lib/python3.10/site-packages/lxml/xmlschema.pxi b/venv/lib/python3.10/site-packages/lxml/xmlschema.pxi new file mode 100644 index 0000000000000000000000000000000000000000..ac5f95876e06147f481ca3f52a3185c6478428d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/xmlschema.pxi @@ -0,0 +1,215 @@ +# support for XMLSchema validation +from lxml.includes cimport xmlschema + + +cdef class XMLSchemaError(LxmlError): + """Base class of all XML Schema errors + """ + +cdef class XMLSchemaParseError(XMLSchemaError): + """Error while parsing an XML document as XML Schema. 
+ """ + +cdef class XMLSchemaValidateError(XMLSchemaError): + """Error while validating an XML document with an XML Schema. + """ + + +################################################################################ +# XMLSchema + +cdef XPath _check_for_default_attributes = XPath( + "boolean(//xs:attribute[@default or @fixed][1])", + namespaces={'xs': 'http://www.w3.org/2001/XMLSchema'}) + + +cdef class XMLSchema(_Validator): + """XMLSchema(self, etree=None, file=None) + Turn a document into an XML Schema validator. + + Either pass a schema as Element or ElementTree, or pass a file or + filename through the ``file`` keyword argument. + + Passing the ``attribute_defaults`` boolean option will make the + schema insert default/fixed attributes into validated documents. + """ + cdef xmlschema.xmlSchema* _c_schema + cdef _Document _doc + cdef bint _has_default_attributes + cdef bint _add_attribute_defaults + + def __cinit__(self): + self._has_default_attributes = True # play it safe + self._add_attribute_defaults = False + + def __init__(self, etree=None, *, file=None, bint attribute_defaults=False): + cdef xmlschema.xmlSchemaParserCtxt* parser_ctxt + cdef xmlDoc* c_doc + + self._add_attribute_defaults = attribute_defaults + _Validator.__init__(self) + c_doc = NULL + if etree is not None: + doc = _documentOrRaise(etree) + root_node = _rootNodeOrRaise(etree) + c_doc = _copyDocRoot(doc._c_doc, root_node._c_node) + self._doc = _documentFactory(c_doc, doc._parser) + parser_ctxt = xmlschema.xmlSchemaNewDocParserCtxt(c_doc) + elif file is not None: + file = _getFSPathOrObject(file) + if _isString(file): + filename = _encodeFilename(file) + parser_ctxt = xmlschema.xmlSchemaNewParserCtxt(_cstr(filename)) + else: + self._doc = _parseDocument(file, None, None) + parser_ctxt = xmlschema.xmlSchemaNewDocParserCtxt(self._doc._c_doc) + else: + raise XMLSchemaParseError, "No tree or file given" + + if parser_ctxt is NULL: + raise MemoryError() + + # Need a cast here because older libxml2 releases do not use 'const' in the functype. + xmlschema.xmlSchemaSetParserStructuredErrors( + parser_ctxt, _receiveError, self._error_log) + if self._doc is not None: + # calling xmlSchemaParse on a schema with imports or + # includes will cause libxml2 to create an internal + # context for parsing, so push an implied context to route + # resolve requests to the document's parser + __GLOBAL_PARSER_CONTEXT.pushImpliedContextFromParser(self._doc._parser) + with nogil: + orig_loader = _register_document_loader() + self._c_schema = xmlschema.xmlSchemaParse(parser_ctxt) + _reset_document_loader(orig_loader) + if self._doc is not None: + __GLOBAL_PARSER_CONTEXT.popImpliedContext() + xmlschema.xmlSchemaFreeParserCtxt(parser_ctxt) + + if self._c_schema is NULL: + raise XMLSchemaParseError( + self._error_log._buildExceptionMessage( + "Document is not valid XML Schema"), + self._error_log) + + if self._doc is not None: + self._has_default_attributes = _check_for_default_attributes(self._doc) + self._add_attribute_defaults = attribute_defaults and self._has_default_attributes + + def __dealloc__(self): + xmlschema.xmlSchemaFree(self._c_schema) + + def __call__(self, etree): + """__call__(self, etree) + + Validate doc using XML Schema. + + Returns true if document is valid, false if not. 
+ """ + cdef xmlschema.xmlSchemaValidCtxt* valid_ctxt + cdef _Document doc + cdef _Element root_node + cdef xmlDoc* c_doc + cdef int ret + + assert self._c_schema is not NULL, "Schema instance not initialised" + doc = _documentOrRaise(etree) + root_node = _rootNodeOrRaise(etree) + + valid_ctxt = xmlschema.xmlSchemaNewValidCtxt(self._c_schema) + if valid_ctxt is NULL: + raise MemoryError() + + try: + if self._add_attribute_defaults: + xmlschema.xmlSchemaSetValidOptions( + valid_ctxt, xmlschema.XML_SCHEMA_VAL_VC_I_CREATE) + + self._error_log.clear() + # Need a cast here because older libxml2 releases do not use 'const' in the functype. + xmlschema.xmlSchemaSetValidStructuredErrors( + valid_ctxt, _receiveError, self._error_log) + + c_doc = _fakeRootDoc(doc._c_doc, root_node._c_node) + with nogil: + ret = xmlschema.xmlSchemaValidateDoc(valid_ctxt, c_doc) + _destroyFakeDoc(doc._c_doc, c_doc) + finally: + xmlschema.xmlSchemaFreeValidCtxt(valid_ctxt) + + if ret == -1: + raise XMLSchemaValidateError( + "Internal error in XML Schema validation.", + self._error_log) + if ret == 0: + return True + else: + return False + + cdef _ParserSchemaValidationContext _newSaxValidator( + self, bint add_default_attributes): + cdef _ParserSchemaValidationContext context + context = _ParserSchemaValidationContext.__new__(_ParserSchemaValidationContext) + context._schema = self + context._add_default_attributes = (self._has_default_attributes and ( + add_default_attributes or self._add_attribute_defaults)) + return context + +@cython.final +@cython.internal +cdef class _ParserSchemaValidationContext: + cdef XMLSchema _schema + cdef xmlschema.xmlSchemaValidCtxt* _valid_ctxt + cdef xmlschema.xmlSchemaSAXPlugStruct* _sax_plug + cdef bint _add_default_attributes + def __cinit__(self): + self._valid_ctxt = NULL + self._sax_plug = NULL + self._add_default_attributes = False + + def __dealloc__(self): + self.disconnect() + if self._valid_ctxt: + xmlschema.xmlSchemaFreeValidCtxt(self._valid_ctxt) + + cdef _ParserSchemaValidationContext copy(self): + assert self._schema is not None, "_ParserSchemaValidationContext not initialised" + return self._schema._newSaxValidator( + self._add_default_attributes) + + cdef void inject_default_attributes(self, xmlDoc* c_doc) noexcept: + # we currently need to insert default attributes manually + # after parsing, as libxml2 does not support this at parse + # time + if self._add_default_attributes: + with nogil: + xmlschema.xmlSchemaValidateDoc(self._valid_ctxt, c_doc) + + cdef int connect(self, xmlparser.xmlParserCtxt* c_ctxt, _BaseErrorLog error_log) except -1: + if self._valid_ctxt is NULL: + self._valid_ctxt = xmlschema.xmlSchemaNewValidCtxt( + self._schema._c_schema) + if self._valid_ctxt is NULL: + raise MemoryError() + if self._add_default_attributes: + xmlschema.xmlSchemaSetValidOptions( + self._valid_ctxt, xmlschema.XML_SCHEMA_VAL_VC_I_CREATE) + if error_log is not None: + # Need a cast here because older libxml2 releases do not use 'const' in the functype. 
+ xmlschema.xmlSchemaSetValidStructuredErrors( + self._valid_ctxt, _receiveError, error_log) + self._sax_plug = xmlschema.xmlSchemaSAXPlug( + self._valid_ctxt, &c_ctxt.sax, &c_ctxt.userData) + + cdef void disconnect(self) noexcept: + if self._sax_plug is not NULL: + xmlschema.xmlSchemaSAXUnplug(self._sax_plug) + self._sax_plug = NULL + if self._valid_ctxt is not NULL: + xmlschema.xmlSchemaSetValidStructuredErrors( + self._valid_ctxt, NULL, NULL) + + cdef bint isvalid(self) noexcept: + if self._valid_ctxt is NULL: + return 1 # valid + return xmlschema.xmlSchemaIsValid(self._valid_ctxt) diff --git a/venv/lib/python3.10/site-packages/lxml/xpath.pxi b/venv/lib/python3.10/site-packages/lxml/xpath.pxi new file mode 100644 index 0000000000000000000000000000000000000000..352f63134734780e5a9c869ccb59b4cb4e4ade40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/xpath.pxi @@ -0,0 +1,487 @@ +# XPath evaluation + +class XPathSyntaxError(LxmlSyntaxError, XPathError): + pass + +################################################################################ +# XPath + +cdef object _XPATH_SYNTAX_ERRORS = ( + xmlerror.XML_XPATH_NUMBER_ERROR, + xmlerror.XML_XPATH_UNFINISHED_LITERAL_ERROR, + xmlerror.XML_XPATH_VARIABLE_REF_ERROR, + xmlerror.XML_XPATH_INVALID_PREDICATE_ERROR, + xmlerror.XML_XPATH_UNCLOSED_ERROR, + xmlerror.XML_XPATH_INVALID_CHAR_ERROR +) + +cdef object _XPATH_EVAL_ERRORS = ( + xmlerror.XML_XPATH_UNDEF_VARIABLE_ERROR, + xmlerror.XML_XPATH_UNDEF_PREFIX_ERROR, + xmlerror.XML_XPATH_UNKNOWN_FUNC_ERROR, + xmlerror.XML_XPATH_INVALID_OPERAND, + xmlerror.XML_XPATH_INVALID_TYPE, + xmlerror.XML_XPATH_INVALID_ARITY, + xmlerror.XML_XPATH_INVALID_CTXT_SIZE, + xmlerror.XML_XPATH_INVALID_CTXT_POSITION +) + +cdef int _register_xpath_function(void* ctxt, name_utf, ns_utf) noexcept: + if ns_utf is None: + return xpath.xmlXPathRegisterFunc( + ctxt, _xcstr(name_utf), + _xpath_function_call) + else: + return xpath.xmlXPathRegisterFuncNS( + ctxt, _xcstr(name_utf), _xcstr(ns_utf), + _xpath_function_call) + +cdef int _unregister_xpath_function(void* ctxt, name_utf, ns_utf) noexcept: + if ns_utf is None: + return xpath.xmlXPathRegisterFunc( + ctxt, _xcstr(name_utf), NULL) + else: + return xpath.xmlXPathRegisterFuncNS( + ctxt, _xcstr(name_utf), _xcstr(ns_utf), NULL) + + +@cython.final +@cython.internal +cdef class _XPathContext(_BaseContext): + cdef object _variables + def __init__(self, namespaces, extensions, error_log, enable_regexp, variables, + build_smart_strings): + self._variables = variables + _BaseContext.__init__(self, namespaces, extensions, error_log, enable_regexp, + build_smart_strings) + + cdef set_context(self, xpath.xmlXPathContext* xpathCtxt): + self._set_xpath_context(xpathCtxt) + # This would be a good place to set up the XPath parser dict, but + # we cannot use the current thread dict as we do not know which + # thread will execute the XPath evaluator - so, no dict for now. 
+ self.registerLocalNamespaces() + self.registerLocalFunctions(xpathCtxt, _register_xpath_function) + + cdef register_context(self, _Document doc): + self._register_context(doc) + self.registerGlobalNamespaces() + self.registerGlobalFunctions(self._xpathCtxt, _register_xpath_function) + self.registerExsltFunctions() + if self._variables is not None: + self.registerVariables(self._variables) + + cdef unregister_context(self): + self.unregisterGlobalFunctions( + self._xpathCtxt, _unregister_xpath_function) + self.unregisterGlobalNamespaces() + xpath.xmlXPathRegisteredVariablesCleanup(self._xpathCtxt) + self._cleanup_context() + + cdef void registerExsltFunctions(self) noexcept: + if xslt.LIBXSLT_VERSION < 10125: + # we'd only execute dummy functions anyway + return + tree.xmlHashScan( + self._xpathCtxt.nsHash, _registerExsltFunctionsForNamespaces, + self._xpathCtxt) + + cdef registerVariables(self, variable_dict): + for name, value in variable_dict.items(): + name_utf = self._to_utf(name) + xpath.xmlXPathRegisterVariable( + self._xpathCtxt, _xcstr(name_utf), _wrapXPathObject(value, None, None)) + + cdef registerVariable(self, name, value): + name_utf = self._to_utf(name) + xpath.xmlXPathRegisterVariable( + self._xpathCtxt, _xcstr(name_utf), _wrapXPathObject(value, None, None)) + + +cdef void _registerExsltFunctionsForNamespaces( + void* _c_href, void* _ctxt, const_xmlChar* c_prefix) noexcept: + c_href = _c_href + ctxt = _ctxt + + if tree.xmlStrcmp(c_href, xslt.EXSLT_DATE_NAMESPACE) == 0: + xslt.exsltDateXpathCtxtRegister(ctxt, c_prefix) + elif tree.xmlStrcmp(c_href, xslt.EXSLT_SETS_NAMESPACE) == 0: + xslt.exsltSetsXpathCtxtRegister(ctxt, c_prefix) + elif tree.xmlStrcmp(c_href, xslt.EXSLT_MATH_NAMESPACE) == 0: + xslt.exsltMathXpathCtxtRegister(ctxt, c_prefix) + elif tree.xmlStrcmp(c_href, xslt.EXSLT_STRINGS_NAMESPACE) == 0: + xslt.exsltStrXpathCtxtRegister(ctxt, c_prefix) + + +cdef class _XPathEvaluatorBase: + cdef xpath.xmlXPathContext* _xpathCtxt + cdef _XPathContext _context + cdef python.PyThread_type_lock _eval_lock + cdef _ErrorLog _error_log + def __cinit__(self): + self._xpathCtxt = NULL + if config.ENABLE_THREADING: + self._eval_lock = python.PyThread_allocate_lock() + if self._eval_lock is NULL: + raise MemoryError() + self._error_log = _ErrorLog() + + def __init__(self, namespaces, extensions, enable_regexp, + smart_strings): + self._context = _XPathContext(namespaces, extensions, self._error_log, + enable_regexp, None, smart_strings) + + @property + def error_log(self): + assert self._error_log is not None, "XPath evaluator not initialised" + return self._error_log.copy() + + def __dealloc__(self): + if self._xpathCtxt is not NULL: + xpath.xmlXPathFreeContext(self._xpathCtxt) + if config.ENABLE_THREADING: + if self._eval_lock is not NULL: + python.PyThread_free_lock(self._eval_lock) + + cdef set_context(self, xpath.xmlXPathContext* xpathCtxt): + self._xpathCtxt = xpathCtxt + self._context.set_context(xpathCtxt) + + cdef bint _checkAbsolutePath(self, char* path) noexcept: + cdef char c + if path is NULL: + return 0 + c = path[0] + while c == c' ' or c == c'\t': + path = path + 1 + c = path[0] + return c == c'/' + + @cython.final + cdef int _lock(self) except -1: + cdef int result + if config.ENABLE_THREADING and self._eval_lock != NULL: + with nogil: + result = python.PyThread_acquire_lock( + self._eval_lock, python.WAIT_LOCK) + if result == 0: + raise XPathError, "XPath evaluator locking failed" + return 0 + + @cython.final + cdef void _unlock(self) noexcept: + if 
config.ENABLE_THREADING and self._eval_lock != NULL: + python.PyThread_release_lock(self._eval_lock) + + cdef _build_parse_error(self): + cdef _BaseErrorLog entries + entries = self._error_log.filter_types(_XPATH_SYNTAX_ERRORS) + if entries: + message = entries._buildExceptionMessage(None) + if message is not None: + return XPathSyntaxError(message, self._error_log) + return XPathSyntaxError( + self._error_log._buildExceptionMessage("Error in xpath expression"), + self._error_log) + + cdef _build_eval_error(self): + cdef _BaseErrorLog entries + entries = self._error_log.filter_types(_XPATH_EVAL_ERRORS) + if not entries: + entries = self._error_log.filter_types(_XPATH_SYNTAX_ERRORS) + if entries: + message = entries._buildExceptionMessage(None) + if message is not None: + return XPathEvalError(message, self._error_log) + return XPathEvalError( + self._error_log._buildExceptionMessage("Error in xpath expression"), + self._error_log) + + cdef object _handle_result(self, xpath.xmlXPathObject* xpathObj, _Document doc): + if self._context._exc._has_raised(): + if xpathObj is not NULL: + _freeXPathObject(xpathObj) + xpathObj = NULL + self._context._release_temp_refs() + self._context._exc._raise_if_stored() + + if xpathObj is NULL: + self._context._release_temp_refs() + raise self._build_eval_error() + + try: + result = _unwrapXPathObject(xpathObj, doc, self._context) + finally: + _freeXPathObject(xpathObj) + self._context._release_temp_refs() + + return result + + +cdef class XPathElementEvaluator(_XPathEvaluatorBase): + """XPathElementEvaluator(self, element, namespaces=None, extensions=None, regexp=True, smart_strings=True) + Create an XPath evaluator for an element. + + Absolute XPath expressions (starting with '/') will be evaluated against + the ElementTree as returned by getroottree(). + + Additional namespace declarations can be passed with the + 'namespace' keyword argument. EXSLT regular expression support + can be disabled with the 'regexp' boolean keyword (defaults to + True). Smart strings will be returned for string results unless + you pass ``smart_strings=False``. + """ + cdef _Element _element + def __init__(self, _Element element not None, *, namespaces=None, + extensions=None, regexp=True, smart_strings=True): + cdef xpath.xmlXPathContext* xpathCtxt + cdef int ns_register_status + cdef _Document doc + _assertValidNode(element) + _assertValidDoc(element._doc) + self._element = element + doc = element._doc + _XPathEvaluatorBase.__init__(self, namespaces, extensions, + regexp, smart_strings) + xpathCtxt = xpath.xmlXPathNewContext(doc._c_doc) + if xpathCtxt is NULL: + raise MemoryError() + self.set_context(xpathCtxt) + + def register_namespace(self, prefix, uri): + """Register a namespace with the XPath context. + """ + assert self._xpathCtxt is not NULL, "XPath context not initialised" + self._context.addNamespace(prefix, uri) + + def register_namespaces(self, namespaces): + """Register a prefix -> uri dict. + """ + assert self._xpathCtxt is not NULL, "XPath context not initialised" + for prefix, uri in namespaces.items(): + self._context.addNamespace(prefix, uri) + + def __call__(self, _path, **_variables): + """__call__(self, _path, **_variables) + + Evaluate an XPath expression on the document. + + Variables may be provided as keyword arguments. Note that namespaces + are currently not supported for variables. + + Absolute XPath expressions (starting with '/') will be evaluated + against the ElementTree as returned by getroottree(). 
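+
+        A minimal usage sketch (element and expression are only examples)::
+
+            evaluate = XPathElementEvaluator(root_element)
+            books = evaluate('.//book[@lang = $lang]', lang='en')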
+ """ + cdef xpath.xmlXPathObject* xpathObj + cdef _Document doc + assert self._xpathCtxt is not NULL, "XPath context not initialised" + path = _utf8(_path) + doc = self._element._doc + + self._lock() + self._xpathCtxt.node = self._element._c_node + try: + self._context.register_context(doc) + self._context.registerVariables(_variables) + c_path = _xcstr(path) + with nogil: + xpathObj = xpath.xmlXPathEvalExpression( + c_path, self._xpathCtxt) + result = self._handle_result(xpathObj, doc) + finally: + self._context.unregister_context() + self._unlock() + + return result + + +cdef class XPathDocumentEvaluator(XPathElementEvaluator): + """XPathDocumentEvaluator(self, etree, namespaces=None, extensions=None, regexp=True, smart_strings=True) + Create an XPath evaluator for an ElementTree. + + Additional namespace declarations can be passed with the + 'namespace' keyword argument. EXSLT regular expression support + can be disabled with the 'regexp' boolean keyword (defaults to + True). Smart strings will be returned for string results unless + you pass ``smart_strings=False``. + """ + def __init__(self, _ElementTree etree not None, *, namespaces=None, + extensions=None, regexp=True, smart_strings=True): + XPathElementEvaluator.__init__( + self, etree._context_node, namespaces=namespaces, + extensions=extensions, regexp=regexp, + smart_strings=smart_strings) + + def __call__(self, _path, **_variables): + """__call__(self, _path, **_variables) + + Evaluate an XPath expression on the document. + + Variables may be provided as keyword arguments. Note that namespaces + are currently not supported for variables. + """ + cdef xpath.xmlXPathObject* xpathObj + cdef xmlDoc* c_doc + cdef _Document doc + assert self._xpathCtxt is not NULL, "XPath context not initialised" + path = _utf8(_path) + doc = self._element._doc + + self._lock() + try: + self._context.register_context(doc) + c_doc = _fakeRootDoc(doc._c_doc, self._element._c_node) + try: + self._context.registerVariables(_variables) + c_path = _xcstr(path) + with nogil: + self._xpathCtxt.doc = c_doc + self._xpathCtxt.node = tree.xmlDocGetRootElement(c_doc) + xpathObj = xpath.xmlXPathEvalExpression( + c_path, self._xpathCtxt) + result = self._handle_result(xpathObj, doc) + finally: + _destroyFakeDoc(doc._c_doc, c_doc) + self._context.unregister_context() + finally: + self._unlock() + + return result + + +def XPathEvaluator(etree_or_element, *, namespaces=None, extensions=None, + regexp=True, smart_strings=True): + """XPathEvaluator(etree_or_element, namespaces=None, extensions=None, regexp=True, smart_strings=True) + + Creates an XPath evaluator for an ElementTree or an Element. + + The resulting object can be called with an XPath expression as argument + and XPath variables provided as keyword arguments. + + Additional namespace declarations can be passed with the + 'namespace' keyword argument. EXSLT regular expression support + can be disabled with the 'regexp' boolean keyword (defaults to + True). Smart strings will be returned for string results unless + you pass ``smart_strings=False``. 
+ """ + if isinstance(etree_or_element, _ElementTree): + return XPathDocumentEvaluator( + etree_or_element, namespaces=namespaces, + extensions=extensions, regexp=regexp, smart_strings=smart_strings) + else: + return XPathElementEvaluator( + etree_or_element, namespaces=namespaces, + extensions=extensions, regexp=regexp, smart_strings=smart_strings) + + +cdef class XPath(_XPathEvaluatorBase): + """XPath(self, path, namespaces=None, extensions=None, regexp=True, smart_strings=True) + A compiled XPath expression that can be called on Elements and ElementTrees. + + Besides the XPath expression, you can pass prefix-namespace + mappings and extension functions to the constructor through the + keyword arguments ``namespaces`` and ``extensions``. EXSLT + regular expression support can be disabled with the 'regexp' + boolean keyword (defaults to True). Smart strings will be + returned for string results unless you pass + ``smart_strings=False``. + """ + cdef xpath.xmlXPathCompExpr* _xpath + cdef bytes _path + def __cinit__(self): + self._xpath = NULL + + def __init__(self, path, *, namespaces=None, extensions=None, + regexp=True, smart_strings=True): + cdef xpath.xmlXPathContext* xpathCtxt + _XPathEvaluatorBase.__init__(self, namespaces, extensions, + regexp, smart_strings) + self._path = _utf8(path) + xpathCtxt = xpath.xmlXPathNewContext(NULL) + if xpathCtxt is NULL: + raise MemoryError() + self.set_context(xpathCtxt) + self._xpath = xpath.xmlXPathCtxtCompile(xpathCtxt, _xcstr(self._path)) + if self._xpath is NULL: + raise self._build_parse_error() + + def __call__(self, _etree_or_element, **_variables): + "__call__(self, _etree_or_element, **_variables)" + cdef xpath.xmlXPathObject* xpathObj + cdef _Document document + cdef _Element element + + assert self._xpathCtxt is not NULL, "XPath context not initialised" + document = _documentOrRaise(_etree_or_element) + element = _rootNodeOrRaise(_etree_or_element) + + self._lock() + self._xpathCtxt.doc = document._c_doc + self._xpathCtxt.node = element._c_node + + try: + self._context.register_context(document) + self._context.registerVariables(_variables) + with nogil: + xpathObj = xpath.xmlXPathCompiledEval( + self._xpath, self._xpathCtxt) + result = self._handle_result(xpathObj, document) + finally: + self._context.unregister_context() + self._unlock() + return result + + @property + def path(self): + """The literal XPath expression. + """ + return self._path.decode('UTF-8') + + def __dealloc__(self): + if self._xpath is not NULL: + xpath.xmlXPathFreeCompExpr(self._xpath) + + def __repr__(self): + return self.path + + +cdef object _replace_strings = re.compile(b'("[^"]*")|(\'[^\']*\')').sub +cdef object _find_namespaces = re.compile(b'({[^}]+})').findall + +cdef class ETXPath(XPath): + """ETXPath(self, path, extensions=None, regexp=True, smart_strings=True) + Special XPath class that supports the ElementTree {uri} notation for namespaces. + + Note that this class does not accept the ``namespace`` keyword + argument. All namespaces must be passed as part of the path + string. Smart strings will be returned for string results unless + you pass ``smart_strings=False``. 
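+
+    A small sketch using the {uri} notation (the namespace URI is a
+    placeholder)::
+
+        from lxml import etree
+
+        root = etree.XML('<root xmlns="http://example.org/ns"><item/></root>')
+        find = etree.ETXPath('//{http://example.org/ns}item')
+        find(root)    # list containing the single item element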
+ """ + def __init__(self, path, *, extensions=None, regexp=True, + smart_strings=True): + path, namespaces = self._nsextract_path(path) + XPath.__init__(self, path, namespaces=namespaces, + extensions=extensions, regexp=regexp, + smart_strings=smart_strings) + + cdef _nsextract_path(self, path): + # replace {namespaces} by new prefixes + cdef dict namespaces = {} + cdef list namespace_defs = [] + cdef int i + path_utf = _utf8(path) + stripped_path = _replace_strings(b'', path_utf) # remove string literals + i = 1 + for namespace_def in _find_namespaces(stripped_path): + if namespace_def not in namespace_defs: + prefix = python.PyBytes_FromFormat("__xpp%02d", i) + i += 1 + namespace_defs.append(namespace_def) + namespace = namespace_def[1:-1] # remove '{}' + namespace = (namespace).decode('utf8') + namespaces[prefix.decode('utf8')] = namespace + prefix_str = prefix + b':' + # FIXME: this also replaces {namespaces} within strings! + path_utf = path_utf.replace(namespace_def, prefix_str) + path = path_utf.decode('utf8') + return path, namespaces diff --git a/venv/lib/python3.10/site-packages/lxml/xslt.pxi b/venv/lib/python3.10/site-packages/lxml/xslt.pxi new file mode 100644 index 0000000000000000000000000000000000000000..f7a7be294edf2ae69f4e37895e1f7a143b5b79cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/xslt.pxi @@ -0,0 +1,950 @@ +# XSLT +from lxml.includes cimport xslt + + +cdef class XSLTError(LxmlError): + """Base class of all XSLT errors. + """ + +cdef class XSLTParseError(XSLTError): + """Error parsing a stylesheet document. + """ + +cdef class XSLTApplyError(XSLTError): + """Error running an XSL transformation. + """ + +class XSLTSaveError(XSLTError, SerialisationError): + """Error serialising an XSLT result. + """ + +cdef class XSLTExtensionError(XSLTError): + """Error registering an XSLT extension. + """ + + +# version information +LIBXSLT_COMPILED_VERSION = __unpackIntVersion(xslt.LIBXSLT_VERSION) +LIBXSLT_VERSION = __unpackIntVersion(xslt.xsltLibxsltVersion) + + +################################################################################ +# Where do we store what? 
+# +# xsltStylesheet->doc->_private +# == _XSLTResolverContext for XSL stylesheet +# +# xsltTransformContext->_private +# == _XSLTResolverContext for transformed document +# +################################################################################ + + +################################################################################ +# XSLT document loaders + +@cython.final +@cython.internal +cdef class _XSLTResolverContext(_ResolverContext): + cdef xmlDoc* _c_style_doc + cdef _BaseParser _parser + + cdef _XSLTResolverContext _copy(self): + cdef _XSLTResolverContext context + context = _XSLTResolverContext() + _initXSLTResolverContext(context, self._parser) + context._c_style_doc = self._c_style_doc + return context + +cdef _initXSLTResolverContext(_XSLTResolverContext context, + _BaseParser parser): + _initResolverContext(context, parser.resolvers) + context._parser = parser + context._c_style_doc = NULL + +cdef xmlDoc* _xslt_resolve_from_python(const_xmlChar* c_uri, void* c_context, + int parse_options, int* error) with gil: + # call the Python document loaders + cdef _XSLTResolverContext context + cdef _ResolverRegistry resolvers + cdef _InputDocument doc_ref + cdef xmlDoc* c_doc + cdef xmlDoc* c_return_doc = NULL + + error[0] = 0 + context = <_XSLTResolverContext>c_context + + # shortcut if we resolve the stylesheet itself + c_doc = context._c_style_doc + try: + if c_doc is not NULL and c_doc.URL is not NULL: + if tree.xmlStrcmp(c_uri, c_doc.URL) == 0: + c_return_doc = _copyDoc(c_doc, 1) + return c_return_doc # 'goto', see 'finally' below + + # delegate to the Python resolvers + resolvers = context._resolvers + if tree.xmlStrncmp('string://__STRING__XSLT__/', c_uri, 26) == 0: + c_uri += 26 + uri = _decodeFilename(c_uri) + doc_ref = resolvers.resolve(uri, None, context) + + if doc_ref is not None: + if doc_ref._type == PARSER_DATA_STRING: + c_return_doc = _parseDoc( + doc_ref._data_bytes, doc_ref._filename, context._parser) + elif doc_ref._type == PARSER_DATA_FILENAME: + c_return_doc = _parseDocFromFile( + doc_ref._filename, context._parser) + elif doc_ref._type == PARSER_DATA_FILE: + c_return_doc = _parseDocFromFilelike( + doc_ref._file, doc_ref._filename, context._parser) + elif doc_ref._type == PARSER_DATA_EMPTY: + c_return_doc = _newXMLDoc() + if c_return_doc is not NULL and c_return_doc.URL is NULL: + c_return_doc.URL = tree.xmlStrdup(c_uri) + except: + error[0] = 1 + context._store_raised() + finally: + return c_return_doc # and swallow any further exceptions + + +cdef void _xslt_store_resolver_exception(const_xmlChar* c_uri, void* context, + xslt.xsltLoadType c_type) noexcept with gil: + try: + message = f"Cannot resolve URI {_decodeFilename(c_uri)}" + if c_type == xslt.XSLT_LOAD_DOCUMENT: + exception = XSLTApplyError(message) + else: + exception = XSLTParseError(message) + (<_XSLTResolverContext>context)._store_exception(exception) + except BaseException as e: + (<_XSLTResolverContext>context)._store_exception(e) + finally: + return # and swallow any further exceptions + + +cdef xmlDoc* _xslt_doc_loader(const_xmlChar* c_uri, tree.xmlDict* c_dict, + int parse_options, void* c_ctxt, + xslt.xsltLoadType c_type) noexcept nogil: + # nogil => no Python objects here, may be called without thread context ! 
+ cdef xmlDoc* c_doc + cdef xmlDoc* result + cdef void* c_pcontext + cdef int error = 0 + # find resolver contexts of stylesheet and transformed doc + if c_type == xslt.XSLT_LOAD_DOCUMENT: + # transformation time + c_pcontext = (c_ctxt)._private + elif c_type == xslt.XSLT_LOAD_STYLESHEET: + # include/import resolution while parsing + c_pcontext = (c_ctxt).doc._private + else: + c_pcontext = NULL + + if c_pcontext is NULL: + # can't call Python without context, fall back to default loader + return XSLT_DOC_DEFAULT_LOADER( + c_uri, c_dict, parse_options, c_ctxt, c_type) + + c_doc = _xslt_resolve_from_python(c_uri, c_pcontext, parse_options, &error) + if c_doc is NULL and not error: + c_doc = XSLT_DOC_DEFAULT_LOADER( + c_uri, c_dict, parse_options, c_ctxt, c_type) + if c_doc is NULL: + _xslt_store_resolver_exception(c_uri, c_pcontext, c_type) + + if c_doc is not NULL and c_type == xslt.XSLT_LOAD_STYLESHEET: + c_doc._private = c_pcontext + return c_doc + +cdef xslt.xsltDocLoaderFunc XSLT_DOC_DEFAULT_LOADER = xslt.xsltDocDefaultLoader +xslt.xsltSetLoaderFunc(_xslt_doc_loader) + +################################################################################ +# XSLT file/network access control + +cdef class XSLTAccessControl: + """XSLTAccessControl(self, read_file=True, write_file=True, create_dir=True, read_network=True, write_network=True) + + Access control for XSLT: reading/writing files, directories and + network I/O. Access to a type of resource is granted or denied by + passing any of the following boolean keyword arguments. All of + them default to True to allow access. + + - read_file + - write_file + - create_dir + - read_network + - write_network + + For convenience, there is also a class member `DENY_ALL` that + provides an XSLTAccessControl instance that is readily configured + to deny everything, and a `DENY_WRITE` member that denies all + write access but allows read access. + + See `XSLT`. 
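+
+    A usage sketch (the stylesheet file name is illustrative only)::
+
+        from lxml import etree
+
+        # forbid network access during the transformation
+        ac = etree.XSLTAccessControl(read_network=False, write_network=False)
+        transform = etree.XSLT(etree.parse('style.xsl'), access_control=ac)
+
+        # or use one of the predefined instances
+        locked_down = etree.XSLT(etree.parse('style.xsl'),
+                                 access_control=etree.XSLTAccessControl.DENY_ALL)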
+ """ + cdef xslt.xsltSecurityPrefs* _prefs + def __cinit__(self): + self._prefs = xslt.xsltNewSecurityPrefs() + if self._prefs is NULL: + raise MemoryError() + + def __init__(self, *, bint read_file=True, bint write_file=True, bint create_dir=True, + bint read_network=True, bint write_network=True): + self._setAccess(xslt.XSLT_SECPREF_READ_FILE, read_file) + self._setAccess(xslt.XSLT_SECPREF_WRITE_FILE, write_file) + self._setAccess(xslt.XSLT_SECPREF_CREATE_DIRECTORY, create_dir) + self._setAccess(xslt.XSLT_SECPREF_READ_NETWORK, read_network) + self._setAccess(xslt.XSLT_SECPREF_WRITE_NETWORK, write_network) + + DENY_ALL = XSLTAccessControl( + read_file=False, write_file=False, create_dir=False, + read_network=False, write_network=False) + + DENY_WRITE = XSLTAccessControl( + read_file=True, write_file=False, create_dir=False, + read_network=True, write_network=False) + + def __dealloc__(self): + if self._prefs is not NULL: + xslt.xsltFreeSecurityPrefs(self._prefs) + + @cython.final + cdef _setAccess(self, xslt.xsltSecurityOption option, bint allow): + cdef xslt.xsltSecurityCheck function + if allow: + function = xslt.xsltSecurityAllow + else: + function = xslt.xsltSecurityForbid + xslt.xsltSetSecurityPrefs(self._prefs, option, function) + + @cython.final + cdef void _register_in_context(self, xslt.xsltTransformContext* ctxt) noexcept: + xslt.xsltSetCtxtSecurityPrefs(self._prefs, ctxt) + + @property + def options(self): + """The access control configuration as a map of options.""" + return { + 'read_file': self._optval(xslt.XSLT_SECPREF_READ_FILE), + 'write_file': self._optval(xslt.XSLT_SECPREF_WRITE_FILE), + 'create_dir': self._optval(xslt.XSLT_SECPREF_CREATE_DIRECTORY), + 'read_network': self._optval(xslt.XSLT_SECPREF_READ_NETWORK), + 'write_network': self._optval(xslt.XSLT_SECPREF_WRITE_NETWORK), + } + + @cython.final + cdef _optval(self, xslt.xsltSecurityOption option): + cdef xslt.xsltSecurityCheck function + function = xslt.xsltGetSecurityPrefs(self._prefs, option) + if function is xslt.xsltSecurityAllow: + return True + elif function is xslt.xsltSecurityForbid: + return False + else: + return None + + def __repr__(self): + items = sorted(self.options.items()) + return "%s(%s)" % ( + python._fqtypename(self).decode('UTF-8').split('.')[-1], + ', '.join(["%s=%r" % item for item in items])) + +################################################################################ +# XSLT + +cdef int _register_xslt_function(void* ctxt, name_utf, ns_utf) noexcept: + if ns_utf is None: + return 0 + # libxml2 internalises the strings if ctxt has a dict + return xslt.xsltRegisterExtFunction( + ctxt, _xcstr(name_utf), _xcstr(ns_utf), + _xpath_function_call) + +cdef dict EMPTY_DICT = {} + +@cython.final +@cython.internal +cdef class _XSLTContext(_BaseContext): + cdef xslt.xsltTransformContext* _xsltCtxt + cdef _ReadOnlyElementProxy _extension_element_proxy + cdef dict _extension_elements + def __cinit__(self): + self._xsltCtxt = NULL + self._extension_elements = EMPTY_DICT + + def __init__(self, namespaces, extensions, error_log, enable_regexp, + build_smart_strings): + if extensions is not None and extensions: + for ns_name_tuple, extension in extensions.items(): + if ns_name_tuple[0] is None: + raise XSLTExtensionError, \ + "extensions must not have empty namespaces" + if isinstance(extension, XSLTExtension): + if self._extension_elements is EMPTY_DICT: + self._extension_elements = {} + extensions = extensions.copy() + ns_utf = _utf8(ns_name_tuple[0]) + name_utf = _utf8(ns_name_tuple[1]) + 
self._extension_elements[(ns_utf, name_utf)] = extension + del extensions[ns_name_tuple] + _BaseContext.__init__(self, namespaces, extensions, error_log, enable_regexp, + build_smart_strings) + + cdef _BaseContext _copy(self): + cdef _XSLTContext context + context = <_XSLTContext>_BaseContext._copy(self) + context._extension_elements = self._extension_elements + return context + + cdef register_context(self, xslt.xsltTransformContext* xsltCtxt, + _Document doc): + self._xsltCtxt = xsltCtxt + self._set_xpath_context(xsltCtxt.xpathCtxt) + self._register_context(doc) + self.registerLocalFunctions(xsltCtxt, _register_xslt_function) + self.registerGlobalFunctions(xsltCtxt, _register_xslt_function) + _registerXSLTExtensions(xsltCtxt, self._extension_elements) + + cdef free_context(self): + self._cleanup_context() + self._release_context() + if self._xsltCtxt is not NULL: + xslt.xsltFreeTransformContext(self._xsltCtxt) + self._xsltCtxt = NULL + self._release_temp_refs() + + +@cython.final +@cython.internal +@cython.freelist(8) +cdef class _XSLTQuotedStringParam: + """A wrapper class for literal XSLT string parameters that require + quote escaping. + """ + cdef bytes strval + def __cinit__(self, strval): + self.strval = _utf8(strval) + + +@cython.no_gc_clear +cdef class XSLT: + """XSLT(self, xslt_input, extensions=None, regexp=True, access_control=None) + + Turn an XSL document into an XSLT object. + + Calling this object on a tree or Element will execute the XSLT:: + + transform = etree.XSLT(xsl_tree) + result = transform(xml_tree) + + Keyword arguments of the constructor: + + - extensions: a dict mapping ``(namespace, name)`` pairs to + extension functions or extension elements + - regexp: enable exslt regular expression support in XPath + (default: True) + - access_control: access restrictions for network or file + system (see `XSLTAccessControl`) + + Keyword arguments of the XSLT call: + + - profile_run: enable XSLT profiling and make the profile available + as XML document in ``result.xslt_profile`` (default: False) + + Other keyword arguments of the call are passed to the stylesheet + as parameters. 
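+
+    A sketch of parameter passing (parameter names are illustrative); plain
+    keyword values are evaluated as XPath expressions, so literal strings
+    should be wrapped with ``XSLT.strparam()``::
+
+        transform = etree.XSLT(xsl_tree)
+        result = transform(xml_tree, count="3",
+                           title=etree.XSLT.strparam('My "quoted" title'))
+        print(str(result))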
+ """ + cdef _XSLTContext _context + cdef xslt.xsltStylesheet* _c_style + cdef _XSLTResolverContext _xslt_resolver_context + cdef XSLTAccessControl _access_control + cdef _ErrorLog _error_log + + def __cinit__(self): + self._c_style = NULL + + def __init__(self, xslt_input, *, extensions=None, regexp=True, + access_control=None): + cdef xslt.xsltStylesheet* c_style = NULL + cdef xmlDoc* c_doc + cdef _Document doc + cdef _Element root_node + + doc = _documentOrRaise(xslt_input) + root_node = _rootNodeOrRaise(xslt_input) + + # set access control or raise TypeError + self._access_control = access_control + + # make a copy of the document as stylesheet parsing modifies it + c_doc = _copyDocRoot(doc._c_doc, root_node._c_node) + + # make sure we always have a stylesheet URL + if c_doc.URL is NULL: + doc_url_utf = python.PyUnicode_AsASCIIString( + f"string://__STRING__XSLT__/{id(self)}.xslt") + c_doc.URL = tree.xmlStrdup(_xcstr(doc_url_utf)) + + self._error_log = _ErrorLog() + self._xslt_resolver_context = _XSLTResolverContext() + _initXSLTResolverContext(self._xslt_resolver_context, doc._parser) + # keep a copy in case we need to access the stylesheet via 'document()' + self._xslt_resolver_context._c_style_doc = _copyDoc(c_doc, 1) + c_doc._private = self._xslt_resolver_context + + with self._error_log: + orig_loader = _register_document_loader() + c_style = xslt.xsltParseStylesheetDoc(c_doc) + _reset_document_loader(orig_loader) + + if c_style is NULL or c_style.errors: + tree.xmlFreeDoc(c_doc) + if c_style is not NULL: + xslt.xsltFreeStylesheet(c_style) + self._xslt_resolver_context._raise_if_stored() + # last error seems to be the most accurate here + if self._error_log.last_error is not None and \ + self._error_log.last_error.message: + raise XSLTParseError(self._error_log.last_error.message, + self._error_log) + else: + raise XSLTParseError( + self._error_log._buildExceptionMessage( + "Cannot parse stylesheet"), + self._error_log) + + c_doc._private = NULL # no longer used! + self._c_style = c_style + self._context = _XSLTContext(None, extensions, self._error_log, regexp, True) + + def __dealloc__(self): + if self._xslt_resolver_context is not None and \ + self._xslt_resolver_context._c_style_doc is not NULL: + tree.xmlFreeDoc(self._xslt_resolver_context._c_style_doc) + # this cleans up the doc copy as well + if self._c_style is not NULL: + xslt.xsltFreeStylesheet(self._c_style) + + @property + def error_log(self): + """The log of errors and warnings of an XSLT execution.""" + return self._error_log.copy() + + @staticmethod + def strparam(strval): + """strparam(strval) + + Mark an XSLT string parameter that requires quote escaping + before passing it into the transformation. Use it like this:: + + result = transform(doc, some_strval = XSLT.strparam( + '''it's \"Monty Python's\" ...''')) + + Escaped string parameters can be reused without restriction. + """ + return _XSLTQuotedStringParam(strval) + + @staticmethod + def set_global_max_depth(int max_depth): + """set_global_max_depth(max_depth) + + The maximum traversal depth that the stylesheet engine will allow. + This does not only count the template recursion depth but also takes + the number of variables/parameters into account. The required setting + for a run depends on both the stylesheet and the input data. + + Example:: + + XSLT.set_global_max_depth(5000) + + Note that this is currently a global, module-wide setting because + libxslt does not support it at a per-stylesheet level. 
+ """ + if max_depth < 0: + raise ValueError("cannot set a maximum stylesheet traversal depth < 0") + xslt.xsltMaxDepth = max_depth + + def tostring(self, _ElementTree result_tree): + """tostring(self, result_tree) + + Save result doc to string based on stylesheet output method. + + :deprecated: use str(result_tree) instead. + """ + return str(result_tree) + + def __deepcopy__(self, memo): + return self.__copy__() + + def __copy__(self): + return _copyXSLT(self) + + def __call__(self, _input, *, profile_run=False, **kw): + """__call__(self, _input, profile_run=False, **kw) + + Execute the XSL transformation on a tree or Element. + + Pass the ``profile_run`` option to get profile information + about the XSLT. The result of the XSLT will have a property + xslt_profile that holds an XML tree with profiling data. + """ + cdef _XSLTContext context = None + cdef _XSLTResolverContext resolver_context + cdef _Document input_doc + cdef _Element root_node + cdef _Document result_doc + cdef _Document profile_doc = None + cdef xmlDoc* c_profile_doc + cdef xslt.xsltTransformContext* transform_ctxt + cdef xmlDoc* c_result = NULL + cdef xmlDoc* c_doc + cdef tree.xmlDict* c_dict + cdef const_char** params = NULL + + assert self._c_style is not NULL, "XSLT stylesheet not initialised" + input_doc = _documentOrRaise(_input) + root_node = _rootNodeOrRaise(_input) + + c_doc = _fakeRootDoc(input_doc._c_doc, root_node._c_node) + + transform_ctxt = xslt.xsltNewTransformContext(self._c_style, c_doc) + if transform_ctxt is NULL: + _destroyFakeDoc(input_doc._c_doc, c_doc) + raise MemoryError() + + # using the stylesheet dict is safer than using a possibly + # unrelated dict from the current thread. Almost all + # non-input tag/attr names will come from the stylesheet + # anyway. 
+ if transform_ctxt.dict is not NULL: + xmlparser.xmlDictFree(transform_ctxt.dict) + if kw: + # parameter values are stored in the dict + # => avoid unnecessarily cluttering the global dict + transform_ctxt.dict = xmlparser.xmlDictCreateSub(self._c_style.doc.dict) + if transform_ctxt.dict is NULL: + xslt.xsltFreeTransformContext(transform_ctxt) + raise MemoryError() + else: + transform_ctxt.dict = self._c_style.doc.dict + xmlparser.xmlDictReference(transform_ctxt.dict) + + xslt.xsltSetCtxtParseOptions( + transform_ctxt, input_doc._parser._parse_options) + + if profile_run: + transform_ctxt.profile = 1 + + try: + context = self._context._copy() + context.register_context(transform_ctxt, input_doc) + + resolver_context = self._xslt_resolver_context._copy() + transform_ctxt._private = resolver_context + + _convert_xslt_parameters(transform_ctxt, kw, ¶ms) + c_result = self._run_transform( + c_doc, params, context, transform_ctxt) + if params is not NULL: + # deallocate space for parameters + python.lxml_free(params) + + if transform_ctxt.state != xslt.XSLT_STATE_OK: + if c_result is not NULL: + tree.xmlFreeDoc(c_result) + c_result = NULL + + if transform_ctxt.profile: + c_profile_doc = xslt.xsltGetProfileInformation(transform_ctxt) + if c_profile_doc is not NULL: + profile_doc = _documentFactory( + c_profile_doc, input_doc._parser) + finally: + if context is not None: + context.free_context() + _destroyFakeDoc(input_doc._c_doc, c_doc) + + try: + if resolver_context is not None and resolver_context._has_raised(): + if c_result is not NULL: + tree.xmlFreeDoc(c_result) + c_result = NULL + resolver_context._raise_if_stored() + + if context._exc._has_raised(): + if c_result is not NULL: + tree.xmlFreeDoc(c_result) + c_result = NULL + context._exc._raise_if_stored() + + if c_result is NULL: + # last error seems to be the most accurate here + error = self._error_log.last_error + if error is not None and error.message: + if error.line > 0: + message = f"{error.message}, line {error.line}" + else: + message = error.message + elif error is not None and error.line > 0: + message = f"Error applying stylesheet, line {error.line}" + else: + message = "Error applying stylesheet" + raise XSLTApplyError(message, self._error_log) + finally: + if resolver_context is not None: + resolver_context.clear() + + result_doc = _documentFactory(c_result, input_doc._parser) + + c_dict = c_result.dict + xmlparser.xmlDictReference(c_dict) + __GLOBAL_PARSER_CONTEXT.initThreadDictRef(&c_result.dict) + if c_dict is not c_result.dict or \ + self._c_style.doc.dict is not c_result.dict or \ + input_doc._c_doc.dict is not c_result.dict: + with nogil: + if c_dict is not c_result.dict: + fixThreadDictNames(c_result, + c_dict, c_result.dict) + if self._c_style.doc.dict is not c_result.dict: + fixThreadDictNames(c_result, + self._c_style.doc.dict, c_result.dict) + if input_doc._c_doc.dict is not c_result.dict: + fixThreadDictNames(c_result, + input_doc._c_doc.dict, c_result.dict) + xmlparser.xmlDictFree(c_dict) + + return _xsltResultTreeFactory(result_doc, self, profile_doc) + + cdef xmlDoc* _run_transform(self, xmlDoc* c_input_doc, + const_char** params, _XSLTContext context, + xslt.xsltTransformContext* transform_ctxt): + cdef xmlDoc* c_result + xslt.xsltSetTransformErrorFunc(transform_ctxt, self._error_log, + _receiveXSLTError) + if self._access_control is not None: + self._access_control._register_in_context(transform_ctxt) + with self._error_log, nogil: + orig_loader = _register_document_loader() + c_result = 
xslt.xsltApplyStylesheetUser( + self._c_style, c_input_doc, params, NULL, NULL, transform_ctxt) + _reset_document_loader(orig_loader) + return c_result + + +cdef _convert_xslt_parameters(xslt.xsltTransformContext* transform_ctxt, + dict parameters, const_char*** params_ptr): + cdef Py_ssize_t i, parameter_count + cdef const_char** params + cdef tree.xmlDict* c_dict = transform_ctxt.dict + params_ptr[0] = NULL + parameter_count = len(parameters) + if parameter_count == 0: + return + # allocate space for parameters + # * 2 as we want an entry for both key and value, + # and + 1 as array is NULL terminated + params = python.lxml_malloc(parameter_count * 2 + 1, sizeof(const_char*)) + if not params: + raise MemoryError() + try: + i = 0 + for key, value in parameters.iteritems(): + k = _utf8(key) + if isinstance(value, _XSLTQuotedStringParam): + v = (<_XSLTQuotedStringParam>value).strval + xslt.xsltQuoteOneUserParam( + transform_ctxt, _xcstr(k), _xcstr(v)) + else: + if isinstance(value, XPath): + v = (value)._path + else: + v = _utf8(value) + params[i] = tree.xmlDictLookup(c_dict, _xcstr(k), len(k)) + i += 1 + params[i] = tree.xmlDictLookup(c_dict, _xcstr(v), len(v)) + i += 1 + except: + python.lxml_free(params) + raise + params[i] = NULL + params_ptr[0] = params + +cdef XSLT _copyXSLT(XSLT stylesheet): + cdef XSLT new_xslt + cdef xmlDoc* c_doc + assert stylesheet._c_style is not NULL, "XSLT stylesheet not initialised" + new_xslt = XSLT.__new__(XSLT) + new_xslt._access_control = stylesheet._access_control + new_xslt._error_log = _ErrorLog() + new_xslt._context = stylesheet._context._copy() + + new_xslt._xslt_resolver_context = stylesheet._xslt_resolver_context._copy() + new_xslt._xslt_resolver_context._c_style_doc = _copyDoc( + stylesheet._xslt_resolver_context._c_style_doc, 1) + + c_doc = _copyDoc(stylesheet._c_style.doc, 1) + new_xslt._c_style = xslt.xsltParseStylesheetDoc(c_doc) + if new_xslt._c_style is NULL: + tree.xmlFreeDoc(c_doc) + raise MemoryError() + + return new_xslt + +@cython.final +cdef class _XSLTResultTree(_ElementTree): + """The result of an XSLT evaluation. + + Use ``str()`` or ``bytes()`` (or ``unicode()`` in Python 2.x) to serialise to a string, + and the ``.write_output()`` method to write serialise to a file. + """ + cdef XSLT _xslt + cdef _Document _profile + cdef xmlChar* _buffer + cdef Py_ssize_t _buffer_len + cdef Py_ssize_t _buffer_refcnt + + def write_output(self, file, *, compression=0): + """write_output(self, file, *, compression=0) + + Serialise the XSLT output to a file or file-like object. + + As opposed to the generic ``.write()`` method, ``.write_output()`` serialises + the result as defined by the ```` tag. 
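+
+        A usage sketch (file names are illustrative only)::
+
+            result = transform(doc)
+            result.write_output('output.xml')       # plain file name
+            with open('output.xml', 'wb') as f:     # or a binary file-like object
+                result.write_output(f)
+            result.write_output('output.xml.gz', compression=9)  # gzip-compressed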
+ """ + cdef _FilelikeWriter writer = None + cdef _Document doc + cdef int r, rclose, c_compression + cdef const_xmlChar* c_encoding = NULL + cdef tree.xmlOutputBuffer* c_buffer + + if self._context_node is not None: + doc = self._context_node._doc + else: + doc = None + if doc is None: + doc = self._doc + if doc is None: + raise XSLTSaveError("No document to serialise") + c_compression = compression or 0 + xslt.LXML_GET_XSLT_ENCODING(c_encoding, self._xslt._c_style) + writer = _create_output_buffer(file, c_encoding, compression, &c_buffer, close=False) + if writer is None: + with nogil: + r = xslt.xsltSaveResultTo(c_buffer, doc._c_doc, self._xslt._c_style) + rclose = tree.xmlOutputBufferClose(c_buffer) + else: + r = xslt.xsltSaveResultTo(c_buffer, doc._c_doc, self._xslt._c_style) + rclose = tree.xmlOutputBufferClose(c_buffer) + if writer is not None: + writer._exc_context._raise_if_stored() + if r < 0 or rclose == -1: + python.PyErr_SetFromErrno(IOError) # raises IOError + + cdef _saveToStringAndSize(self, xmlChar** s, int* l): + cdef _Document doc + cdef int r + if self._context_node is not None: + doc = self._context_node._doc + else: + doc = None + if doc is None: + doc = self._doc + if doc is None: + s[0] = NULL + return + with nogil: + r = xslt.xsltSaveResultToString(s, l, doc._c_doc, + self._xslt._c_style) + if r == -1: + raise MemoryError() + + def __str__(self): + cdef xmlChar* encoding + cdef xmlChar* s = NULL + cdef int l = 0 + self._saveToStringAndSize(&s, &l) + if s is NULL: + return '' + encoding = self._xslt._c_style.encoding + try: + if encoding is NULL: + result = s[:l].decode('UTF-8') + else: + result = s[:l].decode(encoding) + finally: + tree.xmlFree(s) + return _stripEncodingDeclaration(result) + + def __getbuffer__(self, Py_buffer* buffer, int flags): + cdef int l = 0 + if buffer is NULL: + return + if self._buffer is NULL or flags & python.PyBUF_WRITABLE: + self._saveToStringAndSize(&buffer.buf, &l) + buffer.len = l + if self._buffer is NULL and not flags & python.PyBUF_WRITABLE: + self._buffer = buffer.buf + self._buffer_len = l + self._buffer_refcnt = 1 + else: + buffer.buf = self._buffer + buffer.len = self._buffer_len + self._buffer_refcnt += 1 + if flags & python.PyBUF_WRITABLE: + buffer.readonly = 0 + else: + buffer.readonly = 1 + if flags & python.PyBUF_FORMAT: + buffer.format = "B" + else: + buffer.format = NULL + buffer.ndim = 0 + buffer.shape = NULL + buffer.strides = NULL + buffer.suboffsets = NULL + buffer.itemsize = 1 + buffer.internal = NULL + if buffer.obj is not self: # set by Cython? + buffer.obj = self + + def __releasebuffer__(self, Py_buffer* buffer): + if buffer is NULL: + return + if buffer.buf is self._buffer: + self._buffer_refcnt -= 1 + if self._buffer_refcnt == 0: + tree.xmlFree(self._buffer) + self._buffer = NULL + else: + tree.xmlFree(buffer.buf) + buffer.buf = NULL + + property xslt_profile: + """Return an ElementTree with profiling data for the stylesheet run. 
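+
+        A usage sketch; the profile is only available when the
+        transformation was run with ``profile_run=True``::
+
+            result = transform(doc, profile_run=True)
+            profile = result.xslt_profile   # ElementTree, or None without profiling
+            if profile is not None:
+                print(etree.tostring(profile, pretty_print=True))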
+ """ + def __get__(self): + cdef object root + if self._profile is None: + return None + root = self._profile.getroot() + if root is None: + return None + return ElementTree(root) + + def __del__(self): + self._profile = None + +cdef _xsltResultTreeFactory(_Document doc, XSLT xslt, _Document profile): + cdef _XSLTResultTree result + result = <_XSLTResultTree>_newElementTree(doc, None, _XSLTResultTree) + result._xslt = xslt + result._profile = profile + return result + +# functions like "output" and "write" are a potential security risk, but we +# rely on the user to configure XSLTAccessControl as needed +xslt.xsltRegisterAllExtras() + +# enable EXSLT support for XSLT +xslt.exsltRegisterAll() + + +################################################################################ +# XSLT PI support + +cdef object _RE_PI_HREF = re.compile(r'\s+href\s*=\s*(?:\'([^\']*)\'|"([^"]*)")') +cdef object _FIND_PI_HREF = _RE_PI_HREF.findall +cdef object _REPLACE_PI_HREF = _RE_PI_HREF.sub +cdef XPath __findStylesheetByID = None + +cdef _findStylesheetByID(_Document doc, id): + global __findStylesheetByID + if __findStylesheetByID is None: + __findStylesheetByID = XPath( + "//xsl:stylesheet[@xml:id = $id]", + namespaces={"xsl" : "http://www.w3.org/1999/XSL/Transform"}) + return __findStylesheetByID(doc, id=id) + +cdef class _XSLTProcessingInstruction(PIBase): + def parseXSL(self, parser=None): + """parseXSL(self, parser=None) + + Try to parse the stylesheet referenced by this PI and return + an ElementTree for it. If the stylesheet is embedded in the + same document (referenced via xml:id), find and return an + ElementTree for the stylesheet Element. + + The optional ``parser`` keyword argument can be passed to specify the + parser used to read from external stylesheet URLs. + """ + cdef _Document result_doc + cdef _Element result_node + cdef bytes href_utf + cdef const_xmlChar* c_href + cdef xmlAttr* c_attr + _assertValidNode(self) + if self._c_node.content is NULL: + raise ValueError, "PI lacks content" + hrefs = _FIND_PI_HREF(' ' + (self._c_node.content).decode('UTF-8')) + if len(hrefs) != 1: + raise ValueError, "malformed PI attributes" + hrefs = hrefs[0] + href_utf = utf8(hrefs[0] or hrefs[1]) + c_href = _xcstr(href_utf) + + if c_href[0] != c'#': + # normal URL, try to parse from it + c_href = tree.xmlBuildURI( + c_href, + tree.xmlNodeGetBase(self._c_node.doc, self._c_node)) + if c_href is not NULL: + try: + href_utf = c_href + finally: + tree.xmlFree(c_href) + result_doc = _parseDocumentFromURL(href_utf, parser) + return _elementTreeFactory(result_doc, None) + + # ID reference to embedded stylesheet + # try XML:ID lookup + _assertValidDoc(self._doc) + c_href += 1 # skip leading '#' + c_attr = tree.xmlGetID(self._c_node.doc, c_href) + if c_attr is not NULL and c_attr.doc is self._c_node.doc: + result_node = _elementFactory(self._doc, c_attr.parent) + return _elementTreeFactory(result_node._doc, result_node) + + # try XPath search + root = _findStylesheetByID(self._doc, funicode(c_href)) + if not root: + raise ValueError, "reference to non-existing embedded stylesheet" + elif len(root) > 1: + raise ValueError, "ambiguous reference to embedded stylesheet" + result_node = root[0] + return _elementTreeFactory(result_node._doc, result_node) + + def set(self, key, value): + """set(self, key, value) + + Supports setting the 'href' pseudo-attribute in the text of + the processing instruction. 
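+
+        A rough sketch, assuming ``tree`` was parsed from a document that
+        starts with an ``<?xml-stylesheet ?>`` PI (the file name is
+        illustrative)::
+
+            pi = tree.getroot().getprevious()   # the xml-stylesheet PI node
+            pi.set('href', 'style.xsl')
+            stylesheet = pi.parseXSL()          # parse the referenced stylesheet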
+ """ + if key != "href": + raise AttributeError, \ + "only setting the 'href' attribute is supported on XSLT-PIs" + if value is None: + attrib = "" + elif '"' in value or '>' in value: + raise ValueError, "Invalid URL, must not contain '\"' or '>'" + else: + attrib = f' href="{value}"' + text = ' ' + self.text + if _FIND_PI_HREF(text): + self.text = _REPLACE_PI_HREF(attrib, text) + else: + self.text = text + attrib diff --git a/venv/lib/python3.10/site-packages/lxml/xsltext.pxi b/venv/lib/python3.10/site-packages/lxml/xsltext.pxi new file mode 100644 index 0000000000000000000000000000000000000000..21894b9ef5859a455fd2f9f4443e805818b94517 --- /dev/null +++ b/venv/lib/python3.10/site-packages/lxml/xsltext.pxi @@ -0,0 +1,242 @@ +# XSLT extension elements + +cdef class XSLTExtension: + """Base class of an XSLT extension element. + """ + def execute(self, context, self_node, input_node, output_parent): + """execute(self, context, self_node, input_node, output_parent) + Execute this extension element. + + Subclasses must override this method. They may append + elements to the `output_parent` element here, or set its text + content. To this end, the `input_node` provides read-only + access to the current node in the input document, and the + `self_node` points to the extension element in the stylesheet. + + Note that the `output_parent` parameter may be `None` if there + is no parent element in the current context (e.g. no content + was added to the output tree yet). + """ + pass + + def apply_templates(self, _XSLTContext context not None, node, output_parent=None, + *, elements_only=False, remove_blank_text=False): + """apply_templates(self, context, node, output_parent=None, elements_only=False, remove_blank_text=False) + + Call this method to retrieve the result of applying templates + to an element. + + The return value is a list of elements or text strings that + were generated by the XSLT processor. If you pass + ``elements_only=True``, strings will be discarded from the result + list. The option ``remove_blank_text=True`` will only discard + strings that consist entirely of whitespace (e.g. formatting). + These options do not apply to Elements, only to bare string results. + + If you pass an Element as `output_parent` parameter, the result + will instead be appended to the element (including attributes + etc.) and the return value will be `None`. This is a safe way + to generate content into the output document directly, without + having to take care of special values like text or attributes. + Note that the string discarding options will be ignored in this + case. 
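+
+        A rough sketch of an extension element that delegates to the
+        stylesheet's own templates (the namespace URI and element name are
+        placeholders; the stylesheet must declare that namespace as an
+        extension namespace)::
+
+            class ApplyIntoOutput(etree.XSLTExtension):
+                def execute(self, context, self_node, input_node, output_parent):
+                    # write the template results straight into the output tree
+                    self.apply_templates(context, input_node, output_parent)
+
+            extensions = {('http://example.org/myext', 'apply'): ApplyIntoOutput()}
+            transform = etree.XSLT(xsl_tree, extensions=extensions)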
+ """ + cdef xmlNode* c_parent + cdef xmlNode* c_node + cdef xmlNode* c_context_node + assert context._xsltCtxt is not NULL, "XSLT context not initialised" + c_context_node = _roNodeOf(node) + #assert c_context_node.doc is context._xsltContext.node.doc, \ + # "switching input documents during transformation is not currently supported" + + if output_parent is not None: + c_parent = _nonRoNodeOf(output_parent) + else: + c_parent = tree.xmlNewDocNode( + context._xsltCtxt.output, NULL, "fake-parent", NULL) + + c_node = context._xsltCtxt.insert + context._xsltCtxt.insert = c_parent + xslt.xsltProcessOneNode( + context._xsltCtxt, c_context_node, NULL) + context._xsltCtxt.insert = c_node + + if output_parent is not None: + return None + + try: + return self._collectXSLTResultContent( + context, c_parent, elements_only, remove_blank_text) + finally: + # free all intermediate nodes that will not be freed by proxies + tree.xmlFreeNode(c_parent) + + def process_children(self, _XSLTContext context not None, output_parent=None, + *, elements_only=False, remove_blank_text=False): + """process_children(self, context, output_parent=None, elements_only=False, remove_blank_text=False) + + Call this method to process the XSLT content of the extension + element itself. + + The return value is a list of elements or text strings that + were generated by the XSLT processor. If you pass + ``elements_only=True``, strings will be discarded from the result + list. The option ``remove_blank_text=True`` will only discard + strings that consist entirely of whitespace (e.g. formatting). + These options do not apply to Elements, only to bare string results. + + If you pass an Element as `output_parent` parameter, the result + will instead be appended to the element (including attributes + etc.) and the return value will be `None`. This is a safe way + to generate content into the output document directly, without + having to take care of special values like text or attributes. + Note that the string discarding options will be ignored in this + case. + """ + cdef xmlNode* c_parent + cdef xslt.xsltTransformContext* c_ctxt = context._xsltCtxt + cdef xmlNode* c_old_output_parent = c_ctxt.insert + assert context._xsltCtxt is not NULL, "XSLT context not initialised" + + # output_parent node is used for adding results instead of + # elements list used in apply_templates, that's easier and allows to + # use attributes added to extension element with . + + if output_parent is not None: + c_parent = _nonRoNodeOf(output_parent) + else: + c_parent = tree.xmlNewDocNode( + context._xsltCtxt.output, NULL, "fake-parent", NULL) + + c_ctxt.insert = c_parent + xslt.xsltApplyOneTemplate(c_ctxt, + c_ctxt.node, c_ctxt.inst.children, NULL, NULL) + c_ctxt.insert = c_old_output_parent + + if output_parent is not None: + return None + + try: + return self._collectXSLTResultContent( + context, c_parent, elements_only, remove_blank_text) + finally: + # free all intermediate nodes that will not be freed by proxies + tree.xmlFreeNode(c_parent) + + cdef _collectXSLTResultContent(self, _XSLTContext context, xmlNode* c_parent, + bint elements_only, bint remove_blank_text): + cdef xmlNode* c_node + cdef xmlNode* c_next + cdef _ReadOnlyProxy proxy + cdef list results = [] # or maybe _collectAttributes(c_parent, 2) ? 
+ c_node = c_parent.children + while c_node is not NULL: + c_next = c_node.next + if c_node.type == tree.XML_TEXT_NODE: + if not elements_only: + s = funicode(c_node.content) + if not remove_blank_text or s.strip(): + results.append(s) + s = None + elif c_node.type == tree.XML_ELEMENT_NODE: + proxy = _newReadOnlyProxy( + context._extension_element_proxy, c_node) + results.append(proxy) + # unlink node and make sure it will be freed later on + tree.xmlUnlinkNode(c_node) + proxy.free_after_use() + else: + raise TypeError, \ + f"unsupported XSLT result type: {c_node.type}" + c_node = c_next + return results + + +cdef _registerXSLTExtensions(xslt.xsltTransformContext* c_ctxt, + extension_dict): + for ns_utf, name_utf in extension_dict: + xslt.xsltRegisterExtElement( + c_ctxt, _xcstr(name_utf), _xcstr(ns_utf), + _callExtensionElement) + +cdef void _callExtensionElement(xslt.xsltTransformContext* c_ctxt, + xmlNode* c_context_node, + xmlNode* c_inst_node, + void* dummy) noexcept with gil: + cdef _XSLTContext context + cdef XSLTExtension extension + cdef python.PyObject* dict_result + cdef xmlNode* c_node + cdef _ReadOnlyProxy context_node = None, self_node = None + cdef object output_parent # not restricted to ro-nodes + c_uri = _getNs(c_inst_node) + if c_uri is NULL: + # not allowed, and should never happen + return + if c_ctxt.xpathCtxt.userData is NULL: + # just for safety, should never happen + return + context = <_XSLTContext>c_ctxt.xpathCtxt.userData + try: + try: + dict_result = python.PyDict_GetItem( + context._extension_elements, (c_uri, c_inst_node.name)) + if dict_result is NULL: + raise KeyError, f"extension element {funicode(c_inst_node.name)} not found" + extension = dict_result + + try: + # build the context proxy nodes + self_node = _newReadOnlyProxy(None, c_inst_node) + if _isElement(c_ctxt.insert): + output_parent = _newAppendOnlyProxy(self_node, c_ctxt.insert) + else: + # may be the document node or other stuff + output_parent = _newOpaqueAppendOnlyNodeWrapper(c_ctxt.insert) + if c_context_node.type in (tree.XML_DOCUMENT_NODE, + tree.XML_HTML_DOCUMENT_NODE): + c_node = tree.xmlDocGetRootElement(c_context_node) + if c_node is not NULL: + context_node = _newReadOnlyProxy(self_node, c_node) + else: + context_node = None + elif c_context_node.type in (tree.XML_ATTRIBUTE_NODE, + tree.XML_TEXT_NODE, + tree.XML_CDATA_SECTION_NODE): + # this isn't easy to support using read-only + # nodes, as the smart-string factory must + # instantiate the parent proxy somehow... + raise TypeError(f"Unsupported element type: {c_context_node.type}") + else: + context_node = _newReadOnlyProxy(self_node, c_context_node) + + # run the XSLT extension + context._extension_element_proxy = self_node + extension.execute(context, self_node, context_node, output_parent) + finally: + context._extension_element_proxy = None + if self_node is not None: + _freeReadOnlyProxies(self_node) + except Exception as e: + try: + e = unicode(e).encode("UTF-8") + except: + e = repr(e).encode("UTF-8") + message = python.PyBytes_FromFormat( + "Error executing extension element '%s': %s", + c_inst_node.name, _cstr(e)) + xslt.xsltTransformError(c_ctxt, NULL, c_inst_node, "%s", message) + context._exc._store_raised() + except: + # just in case + message = python.PyBytes_FromFormat( + "Error executing extension element '%s'", c_inst_node.name) + xslt.xsltTransformError(c_ctxt, NULL, c_inst_node, "%s", message) + context._exc._store_raised() + except: + # no Python functions here - everything can fail... 
+ xslt.xsltTransformError(c_ctxt, NULL, c_inst_node, + "Error during XSLT extension element evaluation") + context._exc._store_raised() + finally: + return # swallow any further exceptions