text
stringlengths 145
7.65M
|
---|
==============================================================================================================
SOURCE CODE FILE: poolmanager.py
LINES: 1
SIZE: 22.38 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\poolmanager.py
ENCODING: utf-8
```py
from __future__ import annotations
import functools
import logging
import typing
import warnings
from types import TracebackType
from urllib.parse import urljoin
from ._collections import HTTPHeaderDict, RecentlyUsedContainer
from ._request_methods import RequestMethods
from .connection import ProxyConfig
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
LocationValueError,
MaxRetryError,
ProxySchemeUnknown,
URLSchemeUnknown,
)
from .response import BaseHTTPResponse
from .util.connection import _TYPE_SOCKET_OPTIONS
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import Url, parse_url
if typing.TYPE_CHECKING:
import ssl
from typing_extensions import Self
__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
log = logging.getLogger(__name__)
# Keyword arguments that are only meaningful for TLS/SSL connections.
# _new_pool() strips these from the request context when building a plain
# "http" pool, since HTTPConnectionPool would reject them.
SSL_KEYWORDS = (
    "key_file",
    "cert_file",
    "cert_reqs",
    "ca_certs",
    "ca_cert_data",
    "ssl_version",
    "ssl_minimum_version",
    "ssl_maximum_version",
    "ca_cert_dir",
    "ssl_context",
    "key_password",
    "server_hostname",
)
# Default value for `blocksize` - a new parameter introduced to
# http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7
_DEFAULT_BLOCKSIZE = 16384
class PoolKey(typing.NamedTuple):
    """
    All known keyword arguments that could be provided to the pool manager, its
    pools, or the underlying connections.

    All custom key schemes should include the fields in this key at a minimum.
    """

    # NOTE: every field carries a "key_" prefix because namedtuple fields may
    # not begin with an underscore, yet some context keys (e.g. "_proxy") do.
    # _default_key_normalizer() performs the renaming.
    key_scheme: str
    key_host: str
    key_port: int | None
    key_timeout: Timeout | float | int | None
    key_retries: Retry | bool | int | None
    key_block: bool | None
    key_source_address: tuple[str, int] | None
    key_key_file: str | None
    key_key_password: str | None
    key_cert_file: str | None
    key_cert_reqs: str | None
    key_ca_certs: str | None
    key_ca_cert_data: str | bytes | None
    key_ssl_version: int | str | None
    key_ssl_minimum_version: ssl.TLSVersion | None
    key_ssl_maximum_version: ssl.TLSVersion | None
    key_ca_cert_dir: str | None
    key_ssl_context: ssl.SSLContext | None
    key_maxsize: int | None
    key_headers: frozenset[tuple[str, str]] | None
    key__proxy: Url | None
    key__proxy_headers: frozenset[tuple[str, str]] | None
    key__proxy_config: ProxyConfig | None
    key_socket_options: _TYPE_SOCKET_OPTIONS | None
    key__socks_options: frozenset[tuple[str, str]] | None
    key_assert_hostname: bool | str | None
    key_assert_fingerprint: str | None
    key_server_hostname: str | None
    key_blocksize: int | None
def _default_key_normalizer(
    key_class: type[PoolKey], request_context: dict[str, typing.Any]
) -> PoolKey:
    """
    Create a pool key out of a request context dictionary.

    According to RFC 3986, both the scheme and host are case-insensitive.
    Therefore, this function normalizes both before constructing the pool
    key for an HTTPS request. If you wish to change this behaviour, provide
    alternate callables to ``key_fn_by_scheme``.

    :param key_class:
        The class to use when constructing the key. This should be a namedtuple
        with the ``scheme`` and ``host`` keys at a minimum.
    :type key_class: namedtuple
    :param request_context:
        A dictionary-like object that contain the context for a request.
    :type request_context: dict

    :return: A namedtuple that can be used as a connection pool key.
    :rtype: PoolKey
    """
    # Work on a copy so the caller's context dictionary stays untouched.
    context = dict(request_context)

    # Scheme and host are case-insensitive per RFC 3986.
    context["scheme"] = context["scheme"].lower()
    context["host"] = context["host"].lower()

    # Plain dicts are unhashable; represent them as frozensets of items so
    # the resulting key can live in a dictionary.
    for mapping_key in ("headers", "_proxy_headers", "_socks_options"):
        mapping = context.get(mapping_key)
        if mapping is not None:
            context[mapping_key] = frozenset(mapping.items())

    # socket_options may arrive as a (mutable) list; freeze it as a tuple.
    if context.get("socket_options") is not None:
        context["socket_options"] = tuple(context["socket_options"])

    # Rename every key with a "key_" prefix to match the namedtuple fields,
    # which cannot start with an underscore (e.g. "_proxy" -> "key__proxy").
    context = {"key_" + name: value for name, value in context.items()}

    # Any field the context does not mention defaults to None...
    for field in key_class._fields:
        context.setdefault(field, None)

    # ...except blocksize, which falls back to the library-wide default.
    if context.get("key_blocksize") is None:
        context["key_blocksize"] = _DEFAULT_BLOCKSIZE

    return key_class(**context)
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
    "http": functools.partial(_default_key_normalizer, PoolKey),
    "https": functools.partial(_default_key_normalizer, PoolKey),
}

#: Maps a URL scheme to the connection-pool class used for that scheme.
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param \\**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example:

    .. code-block:: python

        import urllib3

        http = urllib3.PoolManager(num_pools=2)

        resp1 = http.request("GET", "https://google.com/")
        resp2 = http.request("GET", "https://google.com/mail")
        resp3 = http.request("GET", "https://yahoo.com/")

        print(len(http.pools))
        # 2
    """

    # Set by ProxyManager; None means requests go directly to the origin.
    proxy: Url | None = None
    proxy_config: ProxyConfig | None = None

    def __init__(
        self,
        num_pools: int = 10,
        headers: typing.Mapping[str, str] | None = None,
        **connection_pool_kw: typing.Any,
    ) -> None:
        super().__init__(headers)
        self.connection_pool_kw = connection_pool_kw

        # LRU container of live pools, keyed by PoolKey; exceeding num_pools
        # evicts the least recently used pool.
        self.pools: RecentlyUsedContainer[PoolKey, HTTPConnectionPool]
        self.pools = RecentlyUsedContainer(num_pools)

        # Locally set the pool classes and keys so other PoolManagers can
        # override them.
        self.pool_classes_by_scheme = pool_classes_by_scheme
        self.key_fn_by_scheme = key_fn_by_scheme.copy()

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> typing.Literal[False]:
        self.clear()
        # Return False to re-raise any potential exceptions
        return False

    def _new_pool(
        self,
        scheme: str,
        host: str,
        port: int,
        request_context: dict[str, typing.Any] | None = None,
    ) -> HTTPConnectionPool:
        """
        Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
        any additional pool keyword arguments.

        If ``request_context`` is provided, it is provided as keyword arguments
        to the pool class used. This method is used to actually create the
        connection pools handed out by :meth:`connection_from_url` and
        companion methods. It is intended to be overridden for customization.
        """
        pool_cls: type[HTTPConnectionPool] = self.pool_classes_by_scheme[scheme]
        if request_context is None:
            request_context = self.connection_pool_kw.copy()

        # Default blocksize to _DEFAULT_BLOCKSIZE if missing or explicitly
        # set to 'None' in the request_context.
        if request_context.get("blocksize") is None:
            request_context["blocksize"] = _DEFAULT_BLOCKSIZE

        # Although the context has everything necessary to create the pool,
        # this function has historically only used the scheme, host, and port
        # in the positional args. When an API change is acceptable these can
        # be removed.
        for key in ("scheme", "host", "port"):
            request_context.pop(key, None)

        if scheme == "http":
            # TLS-only keyword arguments would be rejected by the plain
            # HTTP pool class, so drop them.
            for kw in SSL_KEYWORDS:
                request_context.pop(kw, None)

        return pool_cls(host, port, **request_context)

    def clear(self) -> None:
        """
        Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(
        self,
        host: str | None,
        port: int | None = None,
        scheme: str | None = "http",
        pool_kwargs: dict[str, typing.Any] | None = None,
    ) -> HTTPConnectionPool:
        """
        Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
        provided, it is merged with the instance's ``connection_pool_kw``
        variable and used to create the new connection pool, if one is
        needed.
        """
        if not host:
            raise LocationValueError("No host specified.")

        request_context = self._merge_pool_kwargs(pool_kwargs)
        request_context["scheme"] = scheme or "http"
        if not port:
            # Fall back to the scheme's well-known port (80 when unknown).
            port = port_by_scheme.get(request_context["scheme"].lower(), 80)
        request_context["port"] = port
        request_context["host"] = host

        return self.connection_from_context(request_context)

    def connection_from_context(
        self, request_context: dict[str, typing.Any]
    ) -> HTTPConnectionPool:
        """
        Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.

        ``request_context`` must at least contain the ``scheme`` key and its
        value must be a key in ``key_fn_by_scheme`` instance variable.
        """
        if "strict" in request_context:
            warnings.warn(
                "The 'strict' parameter is no longer needed on Python 3+. "
                "This will raise an error in urllib3 v2.1.0.",
                DeprecationWarning,
            )
            request_context.pop("strict")

        scheme = request_context["scheme"].lower()
        pool_key_constructor = self.key_fn_by_scheme.get(scheme)
        if not pool_key_constructor:
            raise URLSchemeUnknown(scheme)
        pool_key = pool_key_constructor(request_context)

        return self.connection_from_pool_key(pool_key, request_context=request_context)

    def connection_from_pool_key(
        self, pool_key: PoolKey, request_context: dict[str, typing.Any]
    ) -> HTTPConnectionPool:
        """
        Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.

        ``pool_key`` should be a namedtuple that only contains immutable
        objects. At a minimum it must have the ``scheme``, ``host``, and
        ``port`` fields.
        """
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool

            # Make a fresh ConnectionPool of the desired type
            scheme = request_context["scheme"]
            host = request_context["host"]
            port = request_context["port"]
            pool = self._new_pool(scheme, host, port, request_context=request_context)
            self.pools[pool_key] = pool

        return pool

    def connection_from_url(
        self, url: str, pool_kwargs: dict[str, typing.Any] | None = None
    ) -> HTTPConnectionPool:
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url`.

        If ``pool_kwargs`` is not provided and a new pool needs to be
        constructed, ``self.connection_pool_kw`` is used to initialize
        the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
        is provided, it is used instead. Note that if a new pool does not
        need to be created for the request, the provided ``pool_kwargs`` are
        not used.
        """
        u = parse_url(url)
        return self.connection_from_host(
            u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
        )

    def _merge_pool_kwargs(
        self, override: dict[str, typing.Any] | None
    ) -> dict[str, typing.Any]:
        """
        Merge a dictionary of override values for self.connection_pool_kw.

        This does not modify self.connection_pool_kw and returns a new dict.
        Any keys in the override dictionary with a value of ``None`` are
        removed from the merged dictionary.
        """
        base_pool_kwargs = self.connection_pool_kw.copy()
        if override:
            for key, value in override.items():
                if value is None:
                    # A None override deletes the key entirely rather than
                    # storing None as a value.
                    try:
                        del base_pool_kwargs[key]
                    except KeyError:
                        pass
                else:
                    base_pool_kwargs[key] = value
        return base_pool_kwargs

    def _proxy_requires_url_absolute_form(self, parsed_url: Url) -> bool:
        """
        Indicates if the proxy requires the complete destination URL in the
        request.  Normally this is only needed when not using an HTTP CONNECT
        tunnel.
        """
        if self.proxy is None:
            return False

        return not connection_requires_http_tunnel(
            self.proxy, self.proxy_config, parsed_url.scheme
        )

    def urlopen(  # type: ignore[override]
        self, method: str, url: str, redirect: bool = True, **kw: typing.Any
    ) -> BaseHTTPResponse:
        """
        Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)

        if u.scheme is None:
            warnings.warn(
                "URLs without a scheme (ie 'https://') are deprecated and will raise an error "
                "in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs "
                "start with 'https://' or 'http://'. Read more in this issue: "
                "https://github.com/urllib3/urllib3/issues/2920",
                category=DeprecationWarning,
                stacklevel=2,
            )

        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

        # The pool handles one host, so cross-host checks and redirects are
        # managed here rather than inside the pool.
        kw["assert_same_host"] = False
        kw["redirect"] = False

        if "headers" not in kw:
            kw["headers"] = self.headers

        if self._proxy_requires_url_absolute_form(u):
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)

        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response

        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)

        if response.status == 303:
            # Change the method according to RFC 9110, Section 15.4.4.
            method = "GET"
            # And lose the body not to transfer anything sensitive.
            kw["body"] = None
            kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change()

        retries = kw.get("retries")
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect)

        # Strip headers marked as unsafe to forward to the redirected location.
        # Check remove_headers_on_redirect to avoid a potential network call within
        # conn.is_same_host() which may use socket.gethostbyname() in the future.
        if retries.remove_headers_on_redirect and not conn.is_same_host(
            redirect_location
        ):
            new_headers = kw["headers"].copy()
            for header in kw["headers"]:
                if header.lower() in retries.remove_headers_on_redirect:
                    new_headers.pop(header, None)
            kw["headers"] = new_headers

        try:
            retries = retries.increment(method, url, response=response, _pool=conn)
        except MaxRetryError:
            if retries.raise_on_redirect:
                # Drain and release the connection before propagating.
                response.drain_conn()
                raise
            return response

        kw["retries"] = retries
        kw["redirect"] = redirect

        log.info("Redirecting %s -> %s", url, redirect_location)

        response.drain_conn()
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    :param proxy_ssl_context:
        The proxy SSL context is used to establish the TLS connection to the
        proxy when using HTTPS proxies.

    :param use_forwarding_for_https:
        (Defaults to False) If set to True will forward requests to the HTTPS
        proxy to be made on behalf of the client instead of creating a TLS
        tunnel via the CONNECT method. **Enabling this flag means that request
        and response headers and content will be visible from the HTTPS proxy**
        whereas tunneling keeps request and response headers and content
        private. IP address, target hostname, SNI, and port are always visible
        to an HTTPS proxy even when this flag is disabled.

    :param proxy_assert_hostname:
        The hostname of the certificate to verify against.

    :param proxy_assert_fingerprint:
        The fingerprint of the certificate to verify against.

    Example:

    .. code-block:: python

        import urllib3

        proxy = urllib3.ProxyManager("https://localhost:3128/")

        resp1 = proxy.request("GET", "https://google.com/")
        resp2 = proxy.request("GET", "https://httpbin.org/")

        print(len(proxy.pools))
        # 1

        resp3 = proxy.request("GET", "https://httpbin.org/")
        resp4 = proxy.request("GET", "https://twitter.com/")

        print(len(proxy.pools))
        # 3
    """

    def __init__(
        self,
        proxy_url: str,
        num_pools: int = 10,
        headers: typing.Mapping[str, str] | None = None,
        proxy_headers: typing.Mapping[str, str] | None = None,
        proxy_ssl_context: ssl.SSLContext | None = None,
        use_forwarding_for_https: bool = False,
        proxy_assert_hostname: None | str | typing.Literal[False] = None,
        proxy_assert_fingerprint: str | None = None,
        **connection_pool_kw: typing.Any,
    ) -> None:
        # An HTTPConnectionPool is also accepted here; rebuild its URL string.
        if isinstance(proxy_url, HTTPConnectionPool):
            str_proxy_url = f"{proxy_url.scheme}://{proxy_url.host}:{proxy_url.port}"
        else:
            str_proxy_url = proxy_url
        proxy = parse_url(str_proxy_url)

        if proxy.scheme not in ("http", "https"):
            raise ProxySchemeUnknown(proxy.scheme)

        if not proxy.port:
            # Default to the scheme's well-known port when none was given.
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}
        self.proxy_ssl_context = proxy_ssl_context
        self.proxy_config = ProxyConfig(
            proxy_ssl_context,
            use_forwarding_for_https,
            proxy_assert_hostname,
            proxy_assert_fingerprint,
        )

        # Thread the proxy settings through to every pool this manager builds.
        connection_pool_kw["_proxy"] = self.proxy
        connection_pool_kw["_proxy_headers"] = self.proxy_headers
        connection_pool_kw["_proxy_config"] = self.proxy_config

        super().__init__(num_pools, headers, **connection_pool_kw)

    def connection_from_host(
        self,
        host: str | None,
        port: int | None = None,
        scheme: str | None = "http",
        pool_kwargs: dict[str, typing.Any] | None = None,
    ) -> HTTPConnectionPool:
        # HTTPS requests get a pool keyed on the destination host; plain
        # HTTP requests are forwarded and share the proxy's own pool.
        if scheme == "https":
            return super().connection_from_host(
                host, port, scheme, pool_kwargs=pool_kwargs
            )

        return super().connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs  # type: ignore[union-attr]
        )

    def _set_proxy_headers(
        self, url: str, headers: typing.Mapping[str, str] | None = None
    ) -> typing.Mapping[str, str]:
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {"Accept": "*/*"}

        netloc = parse_url(url).netloc
        if netloc:
            headers_["Host"] = netloc

        if headers:
            # User-supplied headers win over the defaults above.
            headers_.update(headers)
        return headers_

    def urlopen(  # type: ignore[override]
        self, method: str, url: str, redirect: bool = True, **kw: typing.Any
    ) -> BaseHTTPResponse:
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)
        if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
            # For connections using HTTP CONNECT, httplib sets the necessary
            # headers on the CONNECT to the proxy. If we're not using CONNECT,
            # we'll definitely need to set 'Host' at the very least.
            headers = kw.get("headers", self.headers)
            kw["headers"] = self._set_proxy_headers(url, headers)

        return super().urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url: str, **kw: typing.Any) -> ProxyManager:
    """Return a :class:`ProxyManager` that routes requests through *url*.

    All additional keyword arguments are forwarded unchanged to the
    :class:`ProxyManager` constructor.
    """
    manager = ProxyManager(proxy_url=url, **kw)
    return manager
```
|
===========================================================================================================
SOURCE CODE FILE: response.py
LINES: 8
SIZE: 44.13 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\response.py
ENCODING: utf-8
```py
from __future__ import annotations
import collections
import io
import json as _json
import logging
import re
import socket
import sys
import typing
import warnings
import zlib
from contextlib import contextmanager
from http.client import HTTPMessage as _HttplibHTTPMessage
from http.client import HTTPResponse as _HttplibHTTPResponse
from socket import timeout as SocketTimeout
if typing.TYPE_CHECKING:
from ._base_connection import BaseHTTPConnection
try:
try:
import brotlicffi as brotli # type: ignore[import-not-found]
except ImportError:
import brotli # type: ignore[import-not-found]
except ImportError:
brotli = None
try:
import zstandard as zstd
except (AttributeError, ImportError, ValueError): # Defensive:
HAS_ZSTD = False
else:
# The package 'zstandard' added the 'eof' property starting
# in v0.18.0 which we require to ensure a complete and
# valid zstd stream was fed into the ZstdDecoder.
# See: https://github.com/urllib3/urllib3/pull/2624
_zstd_version = tuple(
map(int, re.search(r"^([0-9]+)\.([0-9]+)", zstd.__version__).groups()) # type: ignore[union-attr]
)
if _zstd_version < (0, 18): # Defensive:
HAS_ZSTD = False
else:
HAS_ZSTD = True
from . import util
from ._base_connection import _TYPE_BODY
from ._collections import HTTPHeaderDict
from .connection import BaseSSLError, HTTPConnection, HTTPException
from .exceptions import (
BodyNotHttplibCompatible,
DecodeError,
HTTPError,
IncompleteRead,
InvalidChunkLength,
InvalidHeader,
ProtocolError,
ReadTimeoutError,
ResponseNotChunked,
SSLError,
)
from .util.response import is_fp_closed, is_response_to_head
from .util.retry import Retry
if typing.TYPE_CHECKING:
from .connectionpool import HTTPConnectionPool
log = logging.getLogger(__name__)
class ContentDecoder:
    """Abstract interface for streaming content-encoding decoders.

    Subclasses feed compressed chunks to :meth:`decompress` and finish the
    stream with :meth:`flush`.
    """

    def decompress(self, data: bytes) -> bytes:
        raise NotImplementedError()

    def flush(self) -> bytes:
        raise NotImplementedError()
class DeflateDecoder(ContentDecoder):
    """Decoder for ``Content-Encoding: deflate``.

    The first decode attempt assumes a zlib-wrapped (RFC 1950) stream; if
    that fails, the buffered input is replayed as a raw deflate (RFC 1951)
    stream, which some servers send instead.
    """

    def __init__(self) -> None:
        self._first_try = True  # still deciding between zlib-wrapped and raw
        self._data = b""  # input buffered until the format is settled
        self._obj = zlib.decompressobj()

    def decompress(self, data: bytes) -> bytes:
        if not data:
            return data

        if not self._first_try:
            return self._obj.decompress(data)

        # Buffer the raw input so it can be replayed if the zlib-wrapped
        # attempt below turns out to be wrong.
        self._data += data
        try:
            decompressed = self._obj.decompress(data)
            if decompressed:
                # Successfully produced output: the format is zlib-wrapped.
                self._first_try = False
                self._data = None  # type: ignore[assignment]
            return decompressed
        except zlib.error:
            # Retry everything seen so far as a raw deflate stream
            # (negative wbits disables the zlib header/trailer).
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None  # type: ignore[assignment]

    def flush(self) -> bytes:
        return self._obj.flush()
class GzipDecoderState:
    """States used by :class:`GzipDecoder` for multi-member gzip streams."""

    FIRST_MEMBER = 0  # decoding the first gzip member
    OTHER_MEMBERS = 1  # a member finished; trailing garbage is tolerated now
    SWALLOW_DATA = 2  # an error occurred; ignore all further input
class GzipDecoder(ContentDecoder):
    """Decoder for ``Content-Encoding: gzip`` bodies, including streams
    made of multiple concatenated gzip members."""

    def __init__(self) -> None:
        # wbits = 16 + MAX_WBITS makes zlib expect a gzip header/trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
        self._state = GzipDecoderState.FIRST_MEMBER

    def decompress(self, data: bytes) -> bytes:
        ret = bytearray()
        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
            return bytes(ret)
        while True:
            try:
                ret += self._obj.decompress(data)
            except zlib.error:
                previous_state = self._state
                # Ignore data after the first error
                self._state = GzipDecoderState.SWALLOW_DATA
                if previous_state == GzipDecoderState.OTHER_MEMBERS:
                    # Allow trailing garbage acceptable in other gzip clients
                    return bytes(ret)
                raise
            data = self._obj.unused_data
            if not data:
                return bytes(ret)
            # Leftover input means another gzip member follows: reset the
            # decompressor and keep going.
            self._state = GzipDecoderState.OTHER_MEMBERS
            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def flush(self) -> bytes:
        return self._obj.flush()
if brotli is not None:

    class BrotliDecoder(ContentDecoder):
        # Supports both 'brotlipy' and 'Brotli' packages
        # since they share an import name. The top branches
        # are for 'brotlipy' and bottom branches for 'Brotli'
        def __init__(self) -> None:
            self._obj = brotli.Decompressor()
            # Bind whichever per-package decompress method exists directly
            # onto this instance, shadowing the class attribute.
            if hasattr(self._obj, "decompress"):
                setattr(self, "decompress", self._obj.decompress)
            else:
                setattr(self, "decompress", self._obj.process)

        def flush(self) -> bytes:
            # Only 'brotlipy' exposes flush(); 'Brotli' needs no finalizer.
            if hasattr(self._obj, "flush"):
                return self._obj.flush()  # type: ignore[no-any-return]
            return b""
if HAS_ZSTD:

    class ZstdDecoder(ContentDecoder):
        def __init__(self) -> None:
            self._obj = zstd.ZstdDecompressor().decompressobj()

        def decompress(self, data: bytes) -> bytes:
            if not data:
                return b""
            data_parts = [self._obj.decompress(data)]
            # A body may contain several concatenated zstd frames; each
            # completed frame needs a fresh decompressobj for the leftovers.
            while self._obj.eof and self._obj.unused_data:
                unused_data = self._obj.unused_data
                self._obj = zstd.ZstdDecompressor().decompressobj()
                data_parts.append(self._obj.decompress(unused_data))
            return b"".join(data_parts)

        def flush(self) -> bytes:
            ret = self._obj.flush()  # note: this is a no-op
            # eof is only True once a complete frame was consumed; anything
            # else means the stream was truncated.
            if not self._obj.eof:
                raise DecodeError("Zstandard data is incomplete")
            return ret
class MultiDecoder(ContentDecoder):
    """
    From RFC7231:
        If one or more encodings have been applied to a representation, the
        sender that applied the encodings MUST generate a Content-Encoding
        header field that lists the content codings in the order in which
        they were applied.
    """

    def __init__(self, modes: str) -> None:
        # One decoder per comma-separated encoding token, in header order.
        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]

    def flush(self) -> bytes:
        # The first decoder in header order produces the final output layer.
        return self._decoders[0].flush()

    def decompress(self, data: bytes) -> bytes:
        # Encodings were applied left-to-right, so decode right-to-left.
        for d in reversed(self._decoders):
            data = d.decompress(data)
        return data
def _get_decoder(mode: str) -> ContentDecoder:
    """Return a :class:`ContentDecoder` for a ``Content-Encoding`` value."""
    # A comma means multiple encodings were applied in sequence.
    if "," in mode:
        return MultiDecoder(mode)

    # According to RFC 9110 section 8.4.1.3, recipients should
    # consider x-gzip equivalent to gzip
    if mode in ("gzip", "x-gzip"):
        return GzipDecoder()

    if mode == "br" and brotli is not None:
        return BrotliDecoder()

    if mode == "zstd" and HAS_ZSTD:
        return ZstdDecoder()

    # Everything else is treated as deflate.
    return DeflateDecoder()
class BytesQueueBuffer:
    """Memory-efficient bytes buffer

    To return decoded data in read() and still follow the BufferedIOBase API, we need a
    buffer to always return the correct amount of bytes.

    This buffer should be filled using calls to put()

    Our maximum memory usage is determined by the sum of the size of:

     * self.buffer, which contains the full data
     * the largest chunk that we will copy in get()

    The worst case scenario is a single chunk, in which case we'll make a full copy of
    the data inside get().
    """

    def __init__(self) -> None:
        # Queue of chunks in arrival order plus a running byte count.
        self.buffer: typing.Deque[bytes] = collections.deque()
        self._size: int = 0

    def __len__(self) -> int:
        return self._size

    def put(self, data: bytes) -> None:
        """Append a chunk to the end of the buffer."""
        self.buffer.append(data)
        self._size += len(data)

    def get(self, n: int) -> bytes:
        """Remove and return up to ``n`` bytes from the front of the buffer."""
        if n == 0:
            return b""
        elif not self.buffer:
            raise RuntimeError("buffer is empty")
        elif n < 0:
            raise ValueError("n should be > 0")

        out = io.BytesIO()
        remaining = n
        while remaining > 0 and self.buffer:
            segment = self.buffer.popleft()
            if len(segment) > remaining:
                # Split the chunk: emit the front, requeue the tail.
                out.write(segment[:remaining])
                self.buffer.appendleft(segment[remaining:])
                self._size -= remaining
                remaining = 0
            else:
                out.write(segment)
                self._size -= len(segment)
                remaining -= len(segment)
        return out.getvalue()

    def get_all(self) -> bytes:
        """Remove and return the entire buffer contents as one bytes object."""
        if not self.buffer:
            assert self._size == 0
            return b""
        if len(self.buffer) == 1:
            # Single chunk: hand it back without copying.
            payload = self.buffer.pop()
        else:
            payload = b"".join(self.buffer)
            self.buffer.clear()
        self._size = 0
        return payload
class BaseHTTPResponse(io.IOBase):
CONTENT_DECODERS = ["gzip", "x-gzip", "deflate"]
if brotli is not None:
CONTENT_DECODERS += ["br"]
if HAS_ZSTD:
CONTENT_DECODERS += ["zstd"]
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
DECODER_ERROR_CLASSES: tuple[type[Exception], ...] = (IOError, zlib.error)
if brotli is not None:
DECODER_ERROR_CLASSES += (brotli.error,)
if HAS_ZSTD:
DECODER_ERROR_CLASSES += (zstd.ZstdError,)
def __init__(
self,
*,
headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,
status: int,
version: int,
version_string: str,
reason: str | None,
decode_content: bool,
request_url: str | None,
retries: Retry | None = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]
self.status = status
self.version = version
self.version_string = version_string
self.reason = reason
self.decode_content = decode_content
self._has_decoded_content = False
self._request_url: str | None = request_url
self.retries = retries
self.chunked = False
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: ContentDecoder | None = None
self.length_remaining: int | None
def get_redirect_location(self) -> str | None | typing.Literal[False]:
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get("location")
return False
@property
def data(self) -> bytes:
raise NotImplementedError()
def json(self) -> typing.Any:
"""
Deserializes the body of the HTTP response as a Python object.
The body of the HTTP response must be encoded using UTF-8, as per
`RFC 8529 Section 8.1 <https://www.rfc-editor.org/rfc/rfc8259#section-8.1>`_.
To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to
your custom decoder instead.
If the body of the HTTP response is not decodable to UTF-8, a
`UnicodeDecodeError` will be raised. If the body of the HTTP response is not a
valid JSON document, a `json.JSONDecodeError` will be raised.
Read more :ref:`here <json_content>`.
:returns: The body of the HTTP response as a Python object.
"""
data = self.data.decode("utf-8")
return _json.loads(data)
@property
def url(self) -> str | None:
raise NotImplementedError()
@url.setter
def url(self, url: str | None) -> None:
raise NotImplementedError()
@property
def connection(self) -> BaseHTTPConnection | None:
raise NotImplementedError()
@property
def retries(self) -> Retry | None:
return self._retries
@retries.setter
def retries(self, retries: Retry | None) -> None:
# Override the request_url if retries has a redirect location.
if retries is not None and retries.history:
self.url = retries.history[-1].redirect_location
self._retries = retries
def stream(
self, amt: int | None = 2**16, decode_content: bool | None = None
) -> typing.Iterator[bytes]:
raise NotImplementedError()
def read(
self,
amt: int | None = None,
decode_content: bool | None = None,
cache_content: bool = False,
) -> bytes:
raise NotImplementedError()
def read1(
self,
amt: int | None = None,
decode_content: bool | None = None,
) -> bytes:
raise NotImplementedError()
def read_chunked(
self,
amt: int | None = None,
decode_content: bool | None = None,
) -> typing.Iterator[bytes]:
raise NotImplementedError()
def release_conn(self) -> None:
raise NotImplementedError()
def drain_conn(self) -> None:
raise NotImplementedError()
def shutdown(self) -> None:
raise NotImplementedError()
def close(self) -> None:
raise NotImplementedError()
def _init_decoder(self) -> None:
"""
Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get("content-encoding", "").lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
elif "," in content_encoding:
encodings = [
e.strip()
for e in content_encoding.split(",")
if e.strip() in self.CONTENT_DECODERS
]
if encodings:
self._decoder = _get_decoder(content_encoding)
def _decode(
self, data: bytes, decode_content: bool | None, flush_decoder: bool
) -> bytes:
"""
Decode the data passed in and potentially flush the decoder.
"""
if not decode_content:
if self._has_decoded_content:
raise RuntimeError(
"Calling read(decode_content=False) is not supported after "
"read(decode_content=True) was called."
)
return data
try:
if self._decoder:
data = self._decoder.decompress(data)
self._has_decoded_content = True
except self.DECODER_ERROR_CLASSES as e:
content_encoding = self.headers.get("content-encoding", "").lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e,
) from e
if flush_decoder:
data += self._flush_decoder()
return data
def _flush_decoder(self) -> bytes:
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
return self._decoder.decompress(b"") + self._decoder.flush()
return b""
# Compatibility methods for `io` module
def readinto(self, b: bytearray) -> int:
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[: len(temp)] = temp
return len(temp)
    # Compatibility methods for http.client.HTTPResponse
    def getheaders(self) -> HTTPHeaderDict:
        """Deprecated shim: return all headers; use ``.headers`` instead."""
        warnings.warn(
            "HTTPResponse.getheaders() is deprecated and will be removed "
            "in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        return self.headers

    def getheader(self, name: str, default: str | None = None) -> str | None:
        """Deprecated shim: return a single header; use ``.headers.get()`` instead."""
        warnings.warn(
            "HTTPResponse.getheader() is deprecated and will be removed "
            "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).",
            category=DeprecationWarning,
            stacklevel=2,
        )
        return self.headers.get(name, default)

    # Compatibility method for http.cookiejar
    def info(self) -> HTTPHeaderDict:
        # http.cookiejar expects a message-like object with .info().
        return self.headers

    def geturl(self) -> str | None:
        # http.cookiejar/legacy compat: final URL of the response.
        return self.url
class HTTPResponse(BaseHTTPResponse):
    """
    HTTP Response container.

    Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.

    Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.

    :param original_response:
        When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.

    :param retries:
        The retries contains the last :class:`~urllib3.util.retry.Retry` that
        was used during the request.

    :param enforce_content_length:
        Enforce content length checking. Body returned by server must match
        value of Content-Length header, if present. Otherwise, raise error.
    """

    def __init__(
        self,
        body: _TYPE_BODY = "",
        headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,
        status: int = 0,
        version: int = 0,
        version_string: str = "HTTP/?",
        reason: str | None = None,
        preload_content: bool = True,
        decode_content: bool = True,
        original_response: _HttplibHTTPResponse | None = None,
        pool: HTTPConnectionPool | None = None,
        connection: HTTPConnection | None = None,
        msg: _HttplibHTTPMessage | None = None,
        retries: Retry | None = None,
        enforce_content_length: bool = True,
        request_method: str | None = None,
        request_url: str | None = None,
        auto_close: bool = True,
        sock_shutdown: typing.Callable[[int], None] | None = None,
    ) -> None:
        super().__init__(
            headers=headers,
            status=status,
            version=version,
            version_string=version_string,
            reason=reason,
            decode_content=decode_content,
            request_url=request_url,
            retries=retries,
        )
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close

        self._body = None
        self._fp: _HttplibHTTPResponse | None = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        self.msg = msg

        if body and isinstance(body, (str, bytes)):
            # In-memory body: store directly, no streaming needed.
            self._body = body

        self._pool = pool
        self._connection = connection

        if hasattr(body, "read"):
            # File-like body: stream it lazily through self._fp.
            self._fp = body  # type: ignore[assignment]
        self._sock_shutdown = sock_shutdown

        # Are we using the chunked-style of transfer encoding?
        self.chunk_left: int | None = None

        # Determine length of response
        self.length_remaining = self._init_length(request_method)

        # Used to return the correct amount of bytes for partial read()s
        self._decoded_buffer = BytesQueueBuffer()

        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
def release_conn(self) -> None:
if not self._pool or not self._connection:
return None
self._pool._put_conn(self._connection)
self._connection = None
    def drain_conn(self) -> None:
        """
        Read and discard any remaining HTTP response data in the response connection.

        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
            self.read()
        except (HTTPError, OSError, BaseSSLError, HTTPException):
            # Best-effort: a failed drain just means this connection
            # won't be reused.
            pass

    @property
    def data(self) -> bytes:
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body  # type: ignore[return-value]

        if self._fp:
            # cache_content=True stores the body so repeated `.data`
            # accesses return the same bytes.
            return self.read(cache_content=True)

        return None  # type: ignore[return-value]
    @property
    def connection(self) -> HTTPConnection | None:
        # The connection this response is being read from; None once released.
        return self._connection

    def isclosed(self) -> bool:
        # Delegates to the wrapped file-like object.
        return is_fp_closed(self._fp)

    def tell(self) -> int:
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:`urllib3.response.HTTPResponse.read`
        if bytes are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read
    def _init_length(self, request_method: str | None) -> int | None:
        """
        Set initial length value for Response content if available.

        Returns the expected body length in bytes, or ``None`` when unknown
        (missing/invalid header, or chunked transfer-encoding).
        """
        length: int | None
        content_length: str | None = self.headers.get("content-length")

        if content_length is not None:
            if self.chunked:
                # This Response will fail with an IncompleteRead if it can't be
                # received as chunked. This method falls back to attempt reading
                # the response before raising an exception.
                log.warning(
                    "Received response with both Content-Length and "
                    "Transfer-Encoding set. This is expressly forbidden "
                    "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                    "attempting to process response as Transfer-Encoding: "
                    "chunked."
                )
                return None

            try:
                # RFC 7230 section 3.3.2 specifies multiple content lengths can
                # be sent in a single Content-Length header
                # (e.g. Content-Length: 42, 42). This line ensures the values
                # are all valid ints and that as long as the `set` length is 1,
                # all values are the same. Otherwise, the header is invalid.
                lengths = {int(val) for val in content_length.split(",")}
                if len(lengths) > 1:
                    raise InvalidHeader(
                        "Content-Length contained multiple "
                        "unmatching values (%s)" % content_length
                    )
                length = lengths.pop()
            except ValueError:
                # Non-integer header value: treat the length as unknown.
                length = None
            else:
                if length < 0:
                    length = None

        else:  # if content_length is None
            length = None

        # Convert status to int for comparison
        # In some cases, httplib returns a status of "_UNKNOWN"
        try:
            status = int(self.status)
        except ValueError:
            status = 0

        # Check for responses that shouldn't include a body
        if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
            length = 0

        return length
    @contextmanager
    def _error_catcher(self) -> typing.Generator[None]:
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.

        On exit, release the connection back to the pool.
        """
        clean_exit = False

        try:
            try:
                yield

            except SocketTimeout as e:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]

            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if "read operation timed out" not in str(e):
                    # SSL errors related to framing/MAC get wrapped and reraised here
                    raise SSLError(e) from e

                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]

            except IncompleteRead as e:
                if (
                    e.expected is not None
                    and e.partial is not None
                    and e.expected == -e.partial
                ):
                    arg = "Response may not contain content."
                else:
                    arg = f"Connection broken: {e!r}"
                raise ProtocolError(arg, e) from e

            except (HTTPException, OSError) as e:
                raise ProtocolError(f"Connection broken: {e!r}", e) from e

            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()

                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()

            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def _fp_read(
        self,
        amt: int | None = None,
        *,
        read1: bool = False,
    ) -> bytes:
        """
        Read a response with the thought that reading the number of bytes
        larger than can fit in a 32-bit int at a time via SSL in some
        known cases leads to an overflow error that has to be prevented
        if `amt` or `self.length_remaining` indicate that a problem may
        happen.

        The known cases:
          * CPython < 3.9.7 because of a bug
            https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
          * urllib3 injected with pyOpenSSL-backed SSL-support.
          * CPython < 3.10 only when `amt` does not fit 32-bit int.
        """
        assert self._fp
        c_int_max = 2**31 - 1
        if (
            (amt and amt > c_int_max)
            or (
                amt is None
                and self.length_remaining
                and self.length_remaining > c_int_max
            )
        ) and (util.IS_PYOPENSSL or sys.version_info < (3, 10)):
            if read1:
                # read1 never returns more than one buffer's worth, so capping
                # at c_int_max is sufficient.
                return self._fp.read1(c_int_max)
            buffer = io.BytesIO()
            # Besides `max_chunk_amt` being a maximum chunk size, it
            # affects memory overhead of reading a response by this
            # method in CPython.
            # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum
            # chunk size that does not lead to an overflow error, but
            # 256 MiB is a compromise.
            max_chunk_amt = 2**28
            while amt is None or amt != 0:
                if amt is not None:
                    chunk_amt = min(amt, max_chunk_amt)
                    amt -= chunk_amt
                else:
                    chunk_amt = max_chunk_amt
                data = self._fp.read(chunk_amt)
                if not data:
                    break
                buffer.write(data)
                del data  # to reduce peak memory usage by `max_chunk_amt`.
            return buffer.getvalue()
        elif read1:
            return self._fp.read1(amt) if amt is not None else self._fp.read1()
        else:
            # StringIO doesn't like amt=None
            return self._fp.read(amt) if amt is not None else self._fp.read()
    def _raw_read(
        self,
        amt: int | None = None,
        *,
        read1: bool = False,
    ) -> bytes:
        """
        Reads `amt` of bytes from the socket.

        Raw (pre-decoding) bytes; also updates byte counters and enforces
        Content-Length when configured.
        """
        if self._fp is None:
            return None  # type: ignore[return-value]

        fp_closed = getattr(self._fp, "closed", False)

        with self._error_catcher():
            data = self._fp_read(amt, read1=read1) if not fp_closed else b""
            if amt is not None and amt != 0 and not data:
                # Platform-specific: Buggy versions of Python.
                # Close the connection when no data is returned
                #
                # This is redundant to what httplib/http.client _should_
                # already do. However, versions of python released before
                # December 15, 2012 (http://bugs.python.org/issue16298) do
                # not properly close the connection in all cases. There is
                # no harm in redundantly calling close.
                self._fp.close()
                if (
                    self.enforce_content_length
                    and self.length_remaining is not None
                    and self.length_remaining != 0
                ):
                    # This is an edge case that httplib failed to cover due
                    # to concerns of backward compatibility. We're
                    # addressing it here to make sure IncompleteRead is
                    # raised during streaming, so all calls with incorrect
                    # Content-Length are caught.
                    raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
            elif read1 and (
                (amt != 0 and not data) or self.length_remaining == len(data)
            ):
                # All data has been read, but `self._fp.read1` in
                # CPython 3.12 and older doesn't always close
                # `http.client.HTTPResponse`, so we close it here.
                # See https://github.com/python/cpython/issues/113199
                self._fp.close()

        if data:
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)

        return data
    def read(
        self,
        amt: int | None = None,
        decode_content: bool | None = None,
        cache_content: bool = False,
    ) -> bytes:
        """
        Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content

        if amt and amt < 0:
            # Negative numbers and `None` should be treated the same.
            amt = None
        elif amt is not None:
            cache_content = False

            if len(self._decoded_buffer) >= amt:
                # Enough decoded bytes are already buffered: no socket read.
                return self._decoded_buffer.get(amt)

        data = self._raw_read(amt)

        # Flush when the whole body was requested, or the stream hit EOF.
        flush_decoder = amt is None or (amt != 0 and not data)

        if not data and len(self._decoded_buffer) == 0:
            return data

        if amt is None:
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
        else:
            # do not waste memory on buffer when not decoding
            if not decode_content:
                if self._has_decoded_content:
                    raise RuntimeError(
                        "Calling read(decode_content=False) is not supported after "
                        "read(decode_content=True) was called."
                    )
                return data

            decoded_data = self._decode(data, decode_content, flush_decoder)
            self._decoded_buffer.put(decoded_data)

            while len(self._decoded_buffer) < amt and data:
                # TODO make sure to initially read enough data to get past the headers
                # For example, the GZ file header takes 10 bytes, we don't want to read
                # it one byte at a time
                data = self._raw_read(amt)
                decoded_data = self._decode(data, decode_content, flush_decoder)
                self._decoded_buffer.put(decoded_data)
            data = self._decoded_buffer.get(amt)

        return data
    def read1(
        self,
        amt: int | None = None,
        decode_content: bool | None = None,
    ) -> bytes:
        """
        Similar to ``http.client.HTTPResponse.read1`` and documented
        in :meth:`io.BufferedReader.read1`, but with an additional parameter:
        ``decode_content``.

        :param amt:
            How much of the content to read.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if decode_content is None:
            decode_content = self.decode_content
        if amt and amt < 0:
            # Negative numbers and `None` should be treated the same.
            amt = None
        # try and respond without going to the network
        if self._has_decoded_content:
            if not decode_content:
                raise RuntimeError(
                    "Calling read1(decode_content=False) is not supported after "
                    "read1(decode_content=True) was called."
                )
            if len(self._decoded_buffer) > 0:
                if amt is None:
                    return self._decoded_buffer.get_all()
                return self._decoded_buffer.get(amt)
        if amt == 0:
            return b""

        # FIXME, this method's type doesn't say returning None is possible
        data = self._raw_read(amt, read1=True)
        if not decode_content or data is None:
            return data

        self._init_decoder()
        while True:
            flush_decoder = not data
            decoded_data = self._decode(data, decode_content, flush_decoder)
            self._decoded_buffer.put(decoded_data)
            if decoded_data or flush_decoder:
                break
            # Compressed stream produced no output yet (e.g. mid-header):
            # keep reading in modest chunks until something decodes.
            data = self._raw_read(8192, read1=True)

        if amt is None:
            return self._decoded_buffer.get_all()
        return self._decoded_buffer.get(amt)
    def stream(
        self, amt: int | None = 2**16, decode_content: bool | None = None
    ) -> typing.Generator[bytes]:
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if self.chunked and self.supports_chunked_reads():
            # Chunked transfer-encoding: delegate to the chunk-aware reader.
            yield from self.read_chunked(amt, decode_content=decode_content)
        else:
            while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0:
                data = self.read(amt=amt, decode_content=decode_content)

                if data:
                    yield data
    # Overrides from io.IOBase
    def readable(self) -> bool:
        # Response bodies are always readable streams.
        return True

    def shutdown(self) -> None:
        # Shut down the read side of the underlying socket, unblocking a
        # pending recv(); only available when a shutdown callback was provided.
        if not self._sock_shutdown:
            raise ValueError("Cannot shutdown socket as self._sock_shutdown is not set")
        self._sock_shutdown(socket.SHUT_RD)

    def close(self) -> None:
        # Drop the shutdown callback first so a later shutdown() can't act on
        # a closed socket.
        self._sock_shutdown = None

        if not self.closed and self._fp:
            self._fp.close()

        if self._connection:
            self._connection.close()

        if not self.auto_close:
            # With auto_close disabled, closed-ness is tracked via io.IOBase's
            # own flag (see the `closed` property).
            io.IOBase.close(self)
    @property
    def closed(self) -> bool:
        # True when no more body data can be read from this response.
        if not self.auto_close:
            # io.IOBase tracks closed-ness itself when auto_close is disabled.
            return io.IOBase.closed.__get__(self)  # type: ignore[no-any-return]
        elif self._fp is None:
            return True
        elif hasattr(self._fp, "isclosed"):
            return self._fp.isclosed()
        elif hasattr(self._fp, "closed"):
            return self._fp.closed
        else:
            return True

    def fileno(self) -> int:
        """Return the file descriptor of the wrapped file-like object.

        :raises OSError: if no file object is held or it has no descriptor.
        """
        if self._fp is None:
            raise OSError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise OSError(
                "The file-like object this HTTPResponse is wrapped "
                "around has no file descriptor"
            )

    def flush(self) -> None:
        # No-op unless the wrapped object is open and supports flushing.
        if (
            self._fp is not None
            and hasattr(self._fp, "flush")
            and not getattr(self._fp, "closed", False)
        ):
            return self._fp.flush()
    def supports_chunked_reads(self) -> bool:
        """
        Checks if the underlying file-like object looks like a
        :class:`http.client.HTTPResponse` object. We do this by testing for
        the fp attribute. If it is present we assume it returns raw chunks as
        processed by read_chunked().
        """
        return hasattr(self._fp, "fp")
    def _update_chunk_length(self) -> None:
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            # Still inside the current chunk; nothing to parse yet.
            return None
        line = self._fp.fp.readline()  # type: ignore[union-attr]
        # Discard any chunk extensions (everything after ';').
        line = line.split(b";", 1)[0]
        try:
            # Chunk sizes are hexadecimal per RFC 7230 sec 4.1.
            self.chunk_left = int(line, 16)
        except ValueError:
            self.close()
            if line:
                # Invalid chunked protocol response, abort.
                raise InvalidChunkLength(self, line) from None
            else:
                # Truncated at start of next chunk
                raise ProtocolError("Response ended prematurely") from None
    def _handle_chunk(self, amt: int | None) -> bytes:
        """Read up to ``amt`` bytes from the current chunk (all of it if None),
        consuming the trailing CRLF whenever the chunk is fully read."""
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            returned_chunk = chunk
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif self.chunk_left is not None and amt < self.chunk_left:
            # Partial read: chunk continues, CRLF not reached yet.
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk  # type: ignore[no-any-return]
    def read_chunked(
        self, amt: int | None = None, decode_content: bool | None = None
    ) -> typing.Generator[bytes]:
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing."
            )
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be http.client.HTTPResponse like. "
                "It should have have an fp attribute which returns raw chunks."
            )

        with self._error_catcher():
            # Don't bother reading the body of a HEAD request.
            if self._original_response and is_response_to_head(self._original_response):
                self._original_response.close()
                return None

            # If a response is already read and closed
            # then return immediately.
            if self._fp.fp is None:  # type: ignore[union-attr]
                return None

            if amt and amt < 0:
                # Negative numbers and `None` should be treated the same,
                # but httplib handles only `None` correctly.
                amt = None

            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    # Zero-length chunk signals end of the body.
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(
                    chunk, decode_content=decode_content, flush_decoder=False
                )
                if decoded:
                    yield decoded

            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # lets defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded

            # Chunk content ends with \r\n: discard it.
            while self._fp is not None:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b"\r\n":
                    break

            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
    @property
    def url(self) -> str | None:
        """
        Returns the URL that was the source of this response.
        If the request that generated this response redirected, this method
        will return the final redirect location.
        """
        return self._request_url

    @url.setter
    def url(self, url: str) -> None:
        # Allows redirect-following code to record the final URL.
        self._request_url = url
    def __iter__(self) -> typing.Iterator[bytes]:
        # Iterate the decoded body line-by-line; yielded lines keep their
        # trailing b"\n" except possibly the last one.
        buffer: list[bytes] = []
        for chunk in self.stream(decode_content=True):
            if b"\n" in chunk:
                chunks = chunk.split(b"\n")
                # First piece completes whatever was buffered from prior chunks.
                yield b"".join(buffer) + chunks[0] + b"\n"
                for x in chunks[1:-1]:
                    yield x + b"\n"
                if chunks[-1]:
                    # Trailing partial line: keep it for the next chunk.
                    buffer = [chunks[-1]]
                else:
                    buffer = []
            else:
                buffer.append(chunk)
        if buffer:
            # Final line without a trailing newline.
            yield b"".join(buffer)
```
|
================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.98 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\__init__.py
ENCODING: utf-8
```py
# For backwards compatibility, provide imports that used to be here.
from __future__ import annotations
from .connection import is_connection_dropped
from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
from .response import is_fp_closed
from .retry import Retry
from .ssl_ import (
ALPN_PROTOCOLS,
IS_PYOPENSSL,
SSLContext,
assert_fingerprint,
create_urllib3_context,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import Timeout
from .url import Url, parse_url
from .wait import wait_for_read, wait_for_write
# Explicit public API of urllib3.util; names re-exported above for
# backwards compatibility.
__all__ = (
    "IS_PYOPENSSL",
    "SSLContext",
    "ALPN_PROTOCOLS",
    "Retry",
    "Timeout",
    "Url",
    "assert_fingerprint",
    "create_urllib3_context",
    "is_connection_dropped",
    "is_fp_closed",
    "parse_url",
    "make_headers",
    "resolve_cert_reqs",
    "resolve_ssl_version",
    "ssl_wrap_socket",
    "wait_for_read",
    "wait_for_write",
    "SKIP_HEADER",
    "SKIPPABLE_HEADERS",
)
```
|
==================================================================================================================
SOURCE CODE FILE: connection.py
LINES: 1
SIZE: 4.34 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\connection.py
ENCODING: utf-8
```py
from __future__ import annotations
import socket
import typing
from ..exceptions import LocationParseError
from .timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT
_TYPE_SOCKET_OPTIONS = list[tuple[int, int, typing.Union[int, bytes]]]
if typing.TYPE_CHECKING:
from .._base_connection import BaseHTTPConnection
def is_connection_dropped(conn: BaseHTTPConnection) -> bool:  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn: :class:`urllib3.connection.HTTPConnection` object.
    """
    if conn.is_connected:
        return False
    return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(
    address: tuple[str, int],
    timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
    source_address: tuple[str, int] | None = None,
    socket_options: _TYPE_SOCKET_OPTIONS | None = None,
) -> socket.socket:
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`socket.getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """

    host, port = address
    if host.startswith("["):
        # Strip brackets from IPv6 literals like "[::1]".
        host = host.strip("[]")
    err = None

    # Using the value from allowed_gai_family() in the context of getaddrinfo lets
    # us select whether to work with IPv4 DNS records, IPv6 records, or both.
    # The original create_connection function always returns all records.
    family = allowed_gai_family()

    try:
        host.encode("idna")
    except UnicodeError:
        raise LocationParseError(f"'{host}', label empty or too long") from None

    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)

            # If provided, set socket level options before connecting.
            _set_socket_options(sock, socket_options)

            if timeout is not _DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            # Break explicitly a reference cycle
            err = None
            return sock

        except OSError as _:
            # Remember the failure and try the next resolved address.
            err = _
            if sock is not None:
                sock.close()

    if err is not None:
        try:
            raise err
        finally:
            # Break explicitly a reference cycle
            err = None
    else:
        raise OSError("getaddrinfo returns an empty list")
def _set_socket_options(
sock: socket.socket, options: _TYPE_SOCKET_OPTIONS | None
) -> None:
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family() -> socket.AddressFamily:
    """This function is designed to work in the context of
    getaddrinfo, where family=socket.AF_UNSPEC is the default and
    will perform a DNS search for both IPv6 and IPv4 records."""
    # AF_UNSPEC lets getaddrinfo return both A and AAAA records; fall back to
    # IPv4-only when the system lacks working IPv6.
    return socket.AF_UNSPEC if HAS_IPV6 else socket.AF_INET
def _has_ipv6(host: str) -> bool:
"""Returns True if the system can bind an IPv6 address."""
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/urllib3/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6("::1")
```
|
=============================================================================================================
SOURCE CODE FILE: proxy.py
LINES: 1
SIZE: 1.12 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\proxy.py
ENCODING: utf-8
```py
from __future__ import annotations
import typing
from .url import Url
if typing.TYPE_CHECKING:
from ..connection import ProxyConfig
def connection_requires_http_tunnel(
    proxy_url: Url | None = None,
    proxy_config: ProxyConfig | None = None,
    destination_scheme: str | None = None,
) -> bool:
    """
    Returns True if the connection requires an HTTP CONNECT through the proxy.

    :param URL proxy_url:
        URL of the proxy.
    :param ProxyConfig proxy_config:
        Proxy configuration from poolmanager.py
    :param str destination_scheme:
        The scheme of the destination. (i.e https, http, etc)
    """
    # No proxy at all: nothing to tunnel through.
    if proxy_url is None:
        return False

    # Plain-HTTP destinations are always forwarded, never tunneled.
    if destination_scheme == "http":
        return False

    # An HTTPS proxy explicitly configured to forward HTTPS traffic also
    # skips the tunnel; every remaining case requires CONNECT.
    uses_https_forwarding = (
        proxy_url.scheme == "https"
        and proxy_config
        and proxy_config.use_forwarding_for_https
    )
    return not uses_https_forwarding
```
|
===============================================================================================================
SOURCE CODE FILE: request.py
LINES: 1
SIZE: 8.03 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\request.py
ENCODING: utf-8
```py
from __future__ import annotations
import io
import typing
from base64 import b64encode
from enum import Enum
from ..exceptions import UnrewindableBodyError
from .util import to_bytes
if typing.TYPE_CHECKING:
from typing import Final
# Pass as a value within ``headers`` to skip
# emitting some HTTP headers that are added automatically.
# The only headers that are supported are ``Accept-Encoding``,
# ``Host``, and ``User-Agent``.
SKIP_HEADER = "@@@SKIP_HEADER@@@"
SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])

# Base Accept-Encoding value; optional codecs are appended below when their
# packages are importable.
ACCEPT_ENCODING = "gzip,deflate"
try:
    try:
        import brotlicffi as _unused_module_brotli  # type: ignore[import-not-found] # noqa: F401
    except ImportError:
        import brotli as _unused_module_brotli  # type: ignore[import-not-found] # noqa: F401
except ImportError:
    pass
else:
    ACCEPT_ENCODING += ",br"
try:
    import zstandard as _unused_module_zstd  # noqa: F401
except ImportError:
    pass
else:
    ACCEPT_ENCODING += ",zstd"


class _TYPE_FAILEDTELL(Enum):
    # Single-member enum used as a unique, type-checkable sentinel meaning
    # "body.tell() raised, position unknown".
    token = 0


_FAILEDTELL: Final[_TYPE_FAILEDTELL] = _TYPE_FAILEDTELL.token

# A recorded body position is either a byte offset or the failed-tell sentinel.
_TYPE_BODY_POSITION = typing.Union[int, _TYPE_FAILEDTELL]

# When sending a request with these methods we aren't expecting
# a body so don't need to set an explicit 'Content-Length: 0'
# The reason we do this in the negative instead of tracking methods
# which 'should' have a body is because unknown methods should be
# treated as if they were 'POST' which *does* expect a body.
_METHODS_NOT_EXPECTING_BODY = {"GET", "HEAD", "DELETE", "TRACE", "OPTIONS", "CONNECT"}
def make_headers(
    keep_alive: bool | None = None,
    accept_encoding: bool | list[str] | str | None = None,
    user_agent: str | None = None,
    basic_auth: str | None = None,
    proxy_basic_auth: str | None = None,
    disable_cache: bool | None = None,
) -> dict[str, str]:
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'. If the dependencies for
        Brotli (either the ``brotli`` or ``brotlicffi`` package) and/or Zstandard
        (the ``zstandard`` package) algorithms are installed, then their encodings are
        included in the string ('br' and 'zstd', respectively).
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    :param proxy_basic_auth:
        Colon-separated username:password string for 'proxy-authorization: basic ...'
        auth header.

    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.

    Example:

    .. code-block:: python

        import urllib3

        print(urllib3.util.make_headers(keep_alive=True, user_agent="Batman/1.0"))
        # {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        print(urllib3.util.make_headers(accept_encoding=True))
        # {'accept-encoding': 'gzip,deflate'}
    """
    result: dict[str, str] = {}

    if accept_encoding:
        if isinstance(accept_encoding, str):
            encoding_value = accept_encoding
        elif isinstance(accept_encoding, list):
            encoding_value = ",".join(accept_encoding)
        else:
            # Truthy non-string (e.g. True): advertise everything we support.
            encoding_value = ACCEPT_ENCODING
        result["accept-encoding"] = encoding_value

    if user_agent:
        result["user-agent"] = user_agent

    if keep_alive:
        result["connection"] = "keep-alive"

    if basic_auth:
        credentials = b64encode(basic_auth.encode("latin-1")).decode()
        result["authorization"] = f"Basic {credentials}"

    if proxy_basic_auth:
        credentials = b64encode(proxy_basic_auth.encode("latin-1")).decode()
        result["proxy-authorization"] = f"Basic {credentials}"

    if disable_cache:
        result["cache-control"] = "no-cache"

    return result
def set_file_position(
    body: typing.Any, pos: _TYPE_BODY_POSITION | None
) -> _TYPE_BODY_POSITION | None:
    """
    Record or restore the read position of a request body.
    When ``pos`` is given the body is rewound to it; otherwise we try to
    capture the current position via ``tell()`` so the body can be rewound
    later (e.g. on a redirect or retry).
    :returns: the position the body is known to start at, ``_FAILEDTELL``
        when ``tell()`` raised, or ``None`` when no position is available.
    """
    if pos is not None:
        rewind_body(body, pos)
        return pos
    tell = getattr(body, "tell", None)
    if tell is None:
        # Not a seekable/tellable object; nothing to record.
        return None
    try:
        return tell()
    except OSError:
        # A sentinel distinct from None: `tell()` exists but failed, which
        # lets a later rewind attempt raise a precise error.
        return _FAILEDTELL
def rewind_body(body: typing.IO[typing.AnyStr], body_pos: _TYPE_BODY_POSITION) -> None:
    """
    Seek a file-like request body back to a recorded position.
    Primarily used when a request is redirected or retried and the body
    must be re-sent from the start.
    :param body:
        File-like object that supports seek.
    :param int pos:
        Position to seek to in file.
    :raises UnrewindableBodyError: when seeking fails or no usable
        position was recorded.
    """
    seek = getattr(body, "seek", None)
    if seek is not None and isinstance(body_pos, int):
        try:
            seek(body_pos)
        except OSError as exc:
            raise UnrewindableBodyError(
                "An error occurred when rewinding request body for redirect/retry."
            ) from exc
        return
    if body_pos is _FAILEDTELL:
        # tell() failed earlier, so we never captured a starting position.
        raise UnrewindableBodyError(
            "Unable to record file position for rewinding "
            "request body during a redirect/retry."
        )
    raise ValueError(
        f"body_pos must be of type integer, instead it was {type(body_pos)}."
    )
class ChunksAndContentLength(typing.NamedTuple):
    """Result of :func:`body_to_chunks`: the body framed for sending."""

    # Iterable of byte chunks ready to pass to socket.sendall();
    # None when the request has no body.
    chunks: typing.Iterable[bytes] | None
    # Value for the 'Content-Length' header; None means the length can't be
    # determined and 'Transfer-Encoding: chunked' should be used instead.
    content_length: int | None
def body_to_chunks(
    body: typing.Any | None, method: str, blocksize: int
) -> ChunksAndContentLength:
    """Frame a request body for the wire.
    Given the HTTP request method, body, and blocksize, produce an iterable
    of byte chunks suitable for socket.sendall() plus an optional
    'Content-Length' value. A 'Content-Length' of 'None' means the body's
    length can't be determined, so 'Transfer-Encoding: chunked' should be
    used for framing instead.
    """
    chunks: typing.Iterable[bytes] | None
    content_length: int | None
    if body is None:
        # No body. Recommend 'Content-Length: 0' only for methods that are
        # normally expected to carry a body.
        chunks = None
        content_length = 0 if method.upper() not in _METHODS_NOT_EXPECTING_BODY else None
    elif isinstance(body, (str, bytes)):
        # Strings are encoded; bytes pass through. Length is exact.
        data = to_bytes(body)
        chunks = (data,)
        content_length = len(data)
    elif hasattr(body, "read"):
        # File-like object: stream it lazily in blocksize-sized reads.
        # TODO: use seek() and tell() for length?
        def _read_chunks(
            fp: typing.Any = body, size: int = blocksize
        ) -> typing.Iterable[bytes]:
            # Text streams yield str; encode on the fly.
            needs_encode = isinstance(fp, io.TextIOBase)
            while True:
                block = fp.read(size)
                if not block:
                    return
                yield block.encode("utf-8") if needs_encode else block

        chunks = _read_chunks()
        content_length = None
    else:
        # Fall back to duck-typing the remaining possibilities.
        try:
            # Buffer API? Then it can go straight to socket.sendall().
            mv = memoryview(body)
        except TypeError:
            try:
                # Maybe it's a plain iterable of chunks.
                chunks = iter(body)
                content_length = None
            except TypeError:
                raise TypeError(
                    f"'body' must be a bytes-like object, file-like "
                    f"object, or iterable. Instead was {body!r}"
                ) from None
        else:
            chunks = (body,)
            content_length = mv.nbytes
    return ChunksAndContentLength(chunks=chunks, content_length=content_length)
```
|
================================================================================================================
SOURCE CODE FILE: response.py
LINES: 1
SIZE: 3.29 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\response.py
ENCODING: utf-8
```py
from __future__ import annotations
import http.client as httplib
from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
from ..exceptions import HeaderParsingError
def is_fp_closed(obj: object) -> bool:
    """
    Determine whether a given file-like object is closed.
    :param obj:
        The file-like object to check.
    :raises ValueError: if the object exposes none of the known interfaces.
    """
    probes = (
        # Try `isclosed()` first, in case Python3 doesn't set `closed`.
        # GH Issue #928
        lambda fp: fp.isclosed(),
        # The official file-like-object attribute.
        lambda fp: fp.closed,
        # Containers for another file-like object that is released on
        # exhaustion (e.g. HTTPResponse) set their `fp` to None.
        lambda fp: fp.fp is None,
    )
    # An AttributeError means the object doesn't support that interface;
    # fall through to the next probe.
    for probe in probes:
        try:
            return probe(obj)  # type: ignore[no-any-return]
        except AttributeError:
            continue
    raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers: httplib.HTTPMessage) -> None:
    """
    Verify that a header block was parsed without errors.
    Inspects the parse result for leftover unparsed data and for parsing
    defects. Only works on Python 3.
    :param http.client.HTTPMessage headers: Headers to verify.
    :raises urllib3.exceptions.HeaderParsingError:
        If parsing errors are found.
    """
    # Passing the wrong kind of object would fail silently further down,
    # so check the type up front to make debugging easier.
    if not isinstance(headers, httplib.HTTPMessage):
        raise TypeError(f"expected httplib.Message, got {type(headers)}.")

    # get_payload is actually email.message.Message.get_payload; a str/bytes
    # payload on a non-multipart message is data the parser couldn't consume.
    unparsed_data = None
    if not headers.is_multipart():
        payload = headers.get_payload()
        if isinstance(payload, (bytes, str)):
            unparsed_data = payload

    # httplib feeds only header data to parse_headers() while the parser
    # assumes a body is present, which manufactures bogus multipart defects.
    # See: https://github.com/urllib3/urllib3/issues/800
    # Ignore:
    # - StartBoundaryNotFoundDefect:
    #     The claimed start boundary was never found.
    # - MultipartInvariantViolationDefect:
    #     A message claimed to be a multipart but no subparts were found.
    harmless = (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
    defects = [d for d in headers.defects if not isinstance(d, harmless)]

    if defects or unparsed_data:
        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response: httplib.HTTPResponse) -> bool:
    """
    Tell whether the request that produced ``response`` was a HEAD request.
    :param http.client.HTTPResponse response:
        Response to check if the originating request
        used 'HEAD' as a method.
    """
    # FIXME: Can we do this somehow without accessing private httplib _method?
    request_method = response._method  # type: str # type: ignore[attr-defined]
    return request_method.upper() == "HEAD"
```
|
=============================================================================================================
SOURCE CODE FILE: retry.py
LINES: 1
SIZE: 18.03 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\retry.py
ENCODING: utf-8
```py
from __future__ import annotations
import email
import logging
import random
import re
import time
import typing
from itertools import takewhile
from types import TracebackType
from ..exceptions import (
ConnectTimeoutError,
InvalidHeader,
MaxRetryError,
ProtocolError,
ProxyError,
ReadTimeoutError,
ResponseError,
)
from .util import reraise
if typing.TYPE_CHECKING:
from typing_extensions import Self
from ..connectionpool import ConnectionPool
from ..response import BaseHTTPResponse
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
class RequestHistory(typing.NamedTuple):
    """Metadata of a single request attempt that resulted in a retry."""

    # HTTP method of the attempt (None when unknown).
    method: str | None
    # URL that was requested.
    url: str | None
    # Exception raised by the attempt, if any.
    error: Exception | None
    # HTTP status code received, if a response arrived.
    status: int | None
    # Target of the redirect that triggered the retry, if any.
    redirect_location: str | None
class Retry:
    """Retry configuration.
    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.
    Retries can be defined as a default for a pool:
    .. code-block:: python
        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request("GET", "https://example.com/")
    Or per-request (which overrides the default for the pool):
    .. code-block:: python
        response = http.request("GET", "https://example.com/", retries=Retry(10))
    Retries can be disabled by passing ``False``:
    .. code-block:: python
        response = http.request("GET", "https://example.com/", retries=False)
    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.
    :param int total:
        Total number of retries to allow. Takes precedence over other counts.
        Set to ``None`` to remove this constraint and fall back on other
        counts.
        Set to ``0`` to fail on the first retry.
        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
    :param int connect:
        How many connection-related errors to retry on.
        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.
        Set to ``0`` to fail on the first retry of this type.
    :param int read:
        How many times to retry on read errors.
        These errors are raised after the request was sent to the server, so the
        request may have side-effects.
        Set to ``0`` to fail on the first retry of this type.
    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.
        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.
        Set to ``0`` to fail on the first retry of this type.
        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
    :param int status:
        How many times to retry on bad status codes.
        These are retries made on responses, where status code matches
        ``status_forcelist``.
        Set to ``0`` to fail on the first retry of this type.
    :param int other:
        How many times to retry on other errors.
        Other errors are errors that are not connect, read, redirect or status errors.
        These errors might be raised after the request was sent to the server, so the
        request might have side-effects.
        Set to ``0`` to fail on the first retry of this type.
        If ``total`` is not set, it's a good idea to set this to 0 to account
        for unexpected edge cases and avoid infinite retry loops.
    :param Collection allowed_methods:
        Set of uppercased HTTP method verbs that we should retry on.
        By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
        Set to a ``None`` value to retry on any verb.
    :param Collection status_forcelist:
        A set of integer HTTP status codes that we should force a retry on.
        A retry is initiated if the request method is in ``allowed_methods``
        and the response status code is in ``status_forcelist``.
        By default, this is disabled with ``None``.
    :param float backoff_factor:
        A backoff factor to apply between attempts after the second try
        (most errors are resolved immediately by a second try without a
        delay). urllib3 will sleep for::
            {backoff factor} * (2 ** ({number of previous retries}))
        seconds. If `backoff_jitter` is non-zero, this sleep is extended by::
            random.uniform(0, {backoff jitter})
        seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will
        sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever
        be longer than `backoff_max`.
        By default, backoff is disabled (factor set to 0).
    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.
    :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
        whether we should raise an exception, or return a response,
        if status falls in ``status_forcelist`` range and retries have
        been exhausted.
    :param tuple history: The history of the request encountered during
        each call to :meth:`~Retry.increment`. The list is in the order
        the requests occurred. Each list item is of class :class:`RequestHistory`.
    :param bool respect_retry_after_header:
        Whether to respect Retry-After header on status codes defined as
        :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
    :param Collection remove_headers_on_redirect:
        Sequence of headers to remove from the request when a response
        indicating a redirect is returned before firing off the redirected
        request.
    """

    #: Default methods to be used for ``allowed_methods``
    DEFAULT_ALLOWED_METHODS = frozenset(
        ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
    )
    #: Status codes for which a ``Retry-After`` header is respected by default
    #: (see ``respect_retry_after_header`` and :meth:`Retry.is_retry`).
    RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
    #: Default headers to be used for ``remove_headers_on_redirect``
    DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
        ["Cookie", "Authorization", "Proxy-Authorization"]
    )
    #: Default maximum backoff time.
    DEFAULT_BACKOFF_MAX = 120
    # Backward compatibility; assigned outside of the class.
    DEFAULT: typing.ClassVar[Retry]

    def __init__(
        self,
        total: bool | int | None = 10,
        connect: int | None = None,
        read: int | None = None,
        redirect: bool | int | None = None,
        status: int | None = None,
        other: int | None = None,
        allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,
        status_forcelist: typing.Collection[int] | None = None,
        backoff_factor: float = 0,
        backoff_max: float = DEFAULT_BACKOFF_MAX,
        raise_on_redirect: bool = True,
        raise_on_status: bool = True,
        history: tuple[RequestHistory, ...] | None = None,
        respect_retry_after_header: bool = True,
        remove_headers_on_redirect: typing.Collection[
            str
        ] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,
        backoff_jitter: float = 0.0,
    ) -> None:
        self.total = total
        self.connect = connect
        self.read = read
        self.status = status
        self.other = other
        # ``redirect=False`` or ``total=False`` disables redirect handling:
        # zero redirects are followed and exhaustion doesn't raise.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False
        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.allowed_methods = allowed_methods
        self.backoff_factor = backoff_factor
        self.backoff_max = backoff_max
        self.raise_on_redirect = raise_on_redirect
        self.raise_on_status = raise_on_status
        self.history = history or ()
        self.respect_retry_after_header = respect_retry_after_header
        # Header name comparison is case-insensitive; normalize once here.
        self.remove_headers_on_redirect = frozenset(
            h.lower() for h in remove_headers_on_redirect
        )
        self.backoff_jitter = backoff_jitter

    def new(self, **kw: typing.Any) -> Self:
        """Return a copy of this Retry with the fields in ``kw`` replaced."""
        params = dict(
            total=self.total,
            connect=self.connect,
            read=self.read,
            redirect=self.redirect,
            status=self.status,
            other=self.other,
            allowed_methods=self.allowed_methods,
            status_forcelist=self.status_forcelist,
            backoff_factor=self.backoff_factor,
            backoff_max=self.backoff_max,
            raise_on_redirect=self.raise_on_redirect,
            raise_on_status=self.raise_on_status,
            history=self.history,
            remove_headers_on_redirect=self.remove_headers_on_redirect,
            respect_retry_after_header=self.respect_retry_after_header,
            backoff_jitter=self.backoff_jitter,
        )
        params.update(kw)
        return type(self)(**params)  # type: ignore[arg-type]

    @classmethod
    def from_int(
        cls,
        retries: Retry | bool | int | None,
        redirect: bool | int | None = True,
        default: Retry | bool | int | None = None,
    ) -> Retry:
        """Backwards-compatibility for the old retries format."""
        if retries is None:
            retries = default if default is not None else cls.DEFAULT
        if isinstance(retries, Retry):
            return retries
        # Truthy ``redirect`` becomes None (no separate redirect cap; fall
        # back on ``total``); falsy disables redirects via __init__.
        redirect = bool(redirect) and None
        new_retries = cls(retries, redirect=redirect)
        log.debug("Converted retries value: %r -> %r", retries, new_retries)
        return new_retries

    def get_backoff_time(self) -> float:
        """Formula for computing the current backoff
        :rtype: float
        """
        # We want to consider only the last consecutive errors sequence (Ignore redirects).
        consecutive_errors_len = len(
            list(
                takewhile(lambda x: x.redirect_location is None, reversed(self.history))
            )
        )
        # No backoff before the second consecutive error.
        if consecutive_errors_len <= 1:
            return 0
        backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
        if self.backoff_jitter != 0.0:
            backoff_value += random.random() * self.backoff_jitter
        return float(max(0, min(self.backoff_max, backoff_value)))

    def parse_retry_after(self, retry_after: str) -> float:
        """Parse a ``Retry-After`` header value (delta-seconds or HTTP-date)
        into a non-negative number of seconds to wait.
        :raises urllib3.exceptions.InvalidHeader: if the value is malformed.
        """
        seconds: float
        # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
        if re.match(r"^\s*[0-9]+\s*$", retry_after):
            seconds = int(retry_after)
        else:
            retry_date_tuple = email.utils.parsedate_tz(retry_after)
            if retry_date_tuple is None:
                raise InvalidHeader(f"Invalid Retry-After header: {retry_after}")
            retry_date = email.utils.mktime_tz(retry_date_tuple)
            seconds = retry_date - time.time()
        # A date in the past means no wait, not a negative wait.
        seconds = max(seconds, 0)
        return seconds

    def get_retry_after(self, response: BaseHTTPResponse) -> float | None:
        """Get the value of Retry-After in seconds."""
        retry_after = response.headers.get("Retry-After")
        if retry_after is None:
            return None
        return self.parse_retry_after(retry_after)

    def sleep_for_retry(self, response: BaseHTTPResponse) -> bool:
        """Sleep for the duration the server requested via ``Retry-After``.
        :returns: True if a sleep was performed, False otherwise.
        """
        retry_after = self.get_retry_after(response)
        if retry_after:
            time.sleep(retry_after)
            return True
        return False

    def _sleep_backoff(self) -> None:
        """Sleep according to the exponential backoff schedule, if any."""
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)

    def sleep(self, response: BaseHTTPResponse | None = None) -> None:
        """Sleep between retry attempts.
        This method will respect a server's ``Retry-After`` response header
        and sleep the duration of the time requested. If that is not present, it
        will use an exponential backoff. By default, the backoff factor is 0 and
        this method will return immediately.
        """
        if self.respect_retry_after_header and response:
            slept = self.sleep_for_retry(response)
            if slept:
                return
        self._sleep_backoff()

    def _is_connection_error(self, err: Exception) -> bool:
        """Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        # A ProxyError wraps the underlying error from the proxy connection.
        if isinstance(err, ProxyError):
            err = err.original_error
        return isinstance(err, ConnectTimeoutError)

    def _is_read_error(self, err: Exception) -> bool:
        """Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        return isinstance(err, (ReadTimeoutError, ProtocolError))

    def _is_method_retryable(self, method: str) -> bool:
        """Checks if a given HTTP method should be retried upon, depending if
        it is included in the allowed_methods
        """
        if self.allowed_methods and method.upper() not in self.allowed_methods:
            return False
        return True

    def is_retry(
        self, method: str, status_code: int, has_retry_after: bool = False
    ) -> bool:
        """Is this method/status code retryable? (Based on allowlists and control
        variables such as the number of total retries to allow, whether to
        respect the Retry-After header, whether this header is present, and
        whether the returned status code is on the list of status codes to
        be retried upon on the presence of the aforementioned header)
        """
        if not self._is_method_retryable(method):
            return False
        if self.status_forcelist and status_code in self.status_forcelist:
            return True
        # Otherwise only retry when the server explicitly asked via
        # Retry-After on one of the RETRY_AFTER_STATUS_CODES.
        return bool(
            self.total
            and self.respect_retry_after_header
            and has_retry_after
            and (status_code in self.RETRY_AFTER_STATUS_CODES)
        )

    def is_exhausted(self) -> bool:
        """Are we out of retries?"""
        # Counts that are None (or already 0) impose no constraint; a count
        # goes negative when increment() decrements past its limit.
        retry_counts = [
            x
            for x in (
                self.total,
                self.connect,
                self.read,
                self.redirect,
                self.status,
                self.other,
            )
            if x
        ]
        if not retry_counts:
            return False
        return min(retry_counts) < 0

    def increment(
        self,
        method: str | None = None,
        url: str | None = None,
        response: BaseHTTPResponse | None = None,
        error: Exception | None = None,
        _pool: ConnectionPool | None = None,
        _stacktrace: TracebackType | None = None,
    ) -> Self:
        """Return a new Retry object with incremented retry counters.
        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.BaseHTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.
        :return: A new ``Retry`` object.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise reraise(type(error), error, _stacktrace)
        total = self.total
        if total is not None:
            total -= 1
        connect = self.connect
        read = self.read
        redirect = self.redirect
        status_count = self.status
        other = self.other
        cause = "unknown"
        status = None
        redirect_location = None
        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1
        elif error and self._is_read_error(error):
            # Read retry?
            if read is False or method is None or not self._is_method_retryable(method):
                raise reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1
        elif error:
            # Other retry?
            if other is not None:
                other -= 1
        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = "too many redirects"
            response_redirect_location = response.get_redirect_location()
            if response_redirect_location:
                redirect_location = response_redirect_location
            status = response.status
        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the allowed_methods
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                if status_count is not None:
                    status_count -= 1
                cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
                status = response.status
        history = self.history + (
            RequestHistory(method, url, error, status, redirect_location),
        )
        new_retry = self.new(
            total=total,
            connect=connect,
            read=read,
            redirect=redirect,
            status=status_count,
            other=other,
            history=history,
        )
        if new_retry.is_exhausted():
            reason = error or ResponseError(cause)
            raise MaxRetryError(_pool, url, reason) from reason  # type: ignore[arg-type]
        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
        return new_retry

    def __repr__(self) -> str:
        return (
            f"{type(self).__name__}(total={self.total}, connect={self.connect}, "
            f"read={self.read}, redirect={self.redirect}, status={self.status})"
        )
# For backwards compatibility (equivalent to pre-v1.9):
# the default configuration used when no retries are supplied — 3 total retries.
Retry.DEFAULT = Retry(3)
```
|
============================================================================================================
SOURCE CODE FILE: ssl_.py
LINES: 1
SIZE: 19.32 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\ssl_.py
ENCODING: utf-8
```py
from __future__ import annotations
import hashlib
import hmac
import os
import socket
import sys
import typing
import warnings
from binascii import unhexlify
from ..exceptions import ProxySchemeUnsupported, SSLError
from .url import _BRACELESS_IPV6_ADDRZ_RE, _IPV4_RE
# Placeholders that the `try: import ssl` block further down overwrites when
# the ssl module is importable; None signals "no ssl support" to callers.
SSLContext = None
SSLTransport = None
HAS_NEVER_CHECK_COMMON_NAME = False
IS_PYOPENSSL = False
# ALPN protocols advertised during the TLS handshake.
ALPN_PROTOCOLS = ["http/1.1"]
# (major, minor, micro, releaselevel, serial) as in sys.version_info.
_TYPE_VERSION_INFO = tuple[int, int, int, str, int]
# Maps the length of a digest to a possible hash function producing this digest
# (value is None when this Python build's hashlib lacks the algorithm).
HASHFUNC_MAP = {
    length: getattr(hashlib, algorithm, None)
    for length, algorithm in ((32, "md5"), (40, "sha1"), (64, "sha256"))
}
def _is_bpo_43522_fixed(
implementation_name: str,
version_info: _TYPE_VERSION_INFO,
pypy_version_info: _TYPE_VERSION_INFO | None,
) -> bool:
"""Return True for CPython 3.9.3+ or 3.10+ and PyPy 7.3.8+ where
setting SSLContext.hostname_checks_common_name to False works.
Outside of CPython and PyPy we don't know which implementations work
or not so we conservatively use our hostname matching as we know that works
on all implementations.
https://github.com/urllib3/urllib3/issues/2192#issuecomment-821832963
https://foss.heptapod.net/pypy/pypy/-/issues/3539
"""
if implementation_name == "pypy":
# https://foss.heptapod.net/pypy/pypy/-/issues/3129
return pypy_version_info >= (7, 3, 8) # type: ignore[operator]
elif implementation_name == "cpython":
major_minor = version_info[:2]
micro = version_info[2]
return (major_minor == (3, 9) and micro >= 3) or major_minor >= (3, 10)
else: # Defensive:
return False
def _is_has_never_check_common_name_reliable(
openssl_version: str,
openssl_version_number: int,
implementation_name: str,
version_info: _TYPE_VERSION_INFO,
pypy_version_info: _TYPE_VERSION_INFO | None,
) -> bool:
# As of May 2023, all released versions of LibreSSL fail to reject certificates with
# only common names, see https://github.com/urllib3/urllib3/pull/3024
is_openssl = openssl_version.startswith("OpenSSL ")
# Before fixing OpenSSL issue #14579, the SSL_new() API was not copying hostflags
# like X509_CHECK_FLAG_NEVER_CHECK_SUBJECT, which tripped up CPython.
# https://github.com/openssl/openssl/issues/14579
# This was released in OpenSSL 1.1.1l+ (>=0x101010cf)
is_openssl_issue_14579_fixed = openssl_version_number >= 0x101010CF
return is_openssl and (
is_openssl_issue_14579_fixed
or _is_bpo_43522_fixed(implementation_name, version_info, pypy_version_info)
)
# Imports and definitions below exist only for static type checkers; they are
# never executed at runtime.
if typing.TYPE_CHECKING:
    from ssl import VerifyMode
    from typing import TypedDict
    from .ssltransport import SSLTransport as SSLTransportType

    # Shape of the dict returned by ssl.SSLSocket.getpeercert().
    class _TYPE_PEER_CERT_RET_DICT(TypedDict, total=False):
        subjectAltName: tuple[tuple[str, str], ...]
        subject: tuple[tuple[tuple[str, str], ...], ...]
        serialNumber: str
# Mapping from 'ssl.PROTOCOL_TLSX' to 'TLSVersion.X'
# (populated by the conditional ssl import block below).
_SSL_VERSION_TO_TLS_VERSION: dict[int, int] = {}
try: # Do we have ssl at all?
import ssl
from ssl import ( # type: ignore[assignment]
CERT_REQUIRED,
HAS_NEVER_CHECK_COMMON_NAME,
OP_NO_COMPRESSION,
OP_NO_TICKET,
OPENSSL_VERSION,
OPENSSL_VERSION_NUMBER,
PROTOCOL_TLS,
PROTOCOL_TLS_CLIENT,
VERIFY_X509_STRICT,
OP_NO_SSLv2,
OP_NO_SSLv3,
SSLContext,
TLSVersion,
)
PROTOCOL_SSLv23 = PROTOCOL_TLS
# Needed for Python 3.9 which does not define this
VERIFY_X509_PARTIAL_CHAIN = getattr(ssl, "VERIFY_X509_PARTIAL_CHAIN", 0x80000)
# Setting SSLContext.hostname_checks_common_name = False didn't work before CPython
# 3.9.3, and 3.10 (but OK on PyPy) or OpenSSL 1.1.1l+
if HAS_NEVER_CHECK_COMMON_NAME and not _is_has_never_check_common_name_reliable(
OPENSSL_VERSION,
OPENSSL_VERSION_NUMBER,
sys.implementation.name,
sys.version_info,
sys.pypy_version_info if sys.implementation.name == "pypy" else None, # type: ignore[attr-defined]
): # Defensive: for Python < 3.9.3
HAS_NEVER_CHECK_COMMON_NAME = False
# Need to be careful here in case old TLS versions get
# removed in future 'ssl' module implementations.
for attr in ("TLSv1", "TLSv1_1", "TLSv1_2"):
try:
_SSL_VERSION_TO_TLS_VERSION[getattr(ssl, f"PROTOCOL_{attr}")] = getattr(
TLSVersion, attr
)
except AttributeError: # Defensive:
continue
from .ssltransport import SSLTransport # type: ignore[assignment]
except ImportError:
OP_NO_COMPRESSION = 0x20000 # type: ignore[assignment]
OP_NO_TICKET = 0x4000 # type: ignore[assignment]
OP_NO_SSLv2 = 0x1000000 # type: ignore[assignment]
OP_NO_SSLv3 = 0x2000000 # type: ignore[assignment]
PROTOCOL_SSLv23 = PROTOCOL_TLS = 2 # type: ignore[assignment]
PROTOCOL_TLS_CLIENT = 16 # type: ignore[assignment]
VERIFY_X509_PARTIAL_CHAIN = 0x80000
VERIFY_X509_STRICT = 0x20 # type: ignore[assignment]
_TYPE_PEER_CERT_RET = typing.Union["_TYPE_PEER_CERT_RET_DICT", bytes, None]
def assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:
    """
    Verify that the given certificate matches the expected fingerprint.
    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError: on any mismatch or malformed fingerprint.
    """
    if cert is None:
        raise SSLError("No certificate for the peer.")

    # Normalize "AA:BB:..." to lowercase hex without separators.
    normalized = fingerprint.replace(":", "").lower()
    digest_length = len(normalized)
    # The digest length identifies the hash algorithm.
    if digest_length not in HASHFUNC_MAP:
        raise SSLError(f"Fingerprint of invalid length: {normalized}")
    hashfunc = HASHFUNC_MAP.get(digest_length)
    if hashfunc is None:
        # hashlib on this build lacks the algorithm (e.g. FIPS builds drop md5).
        raise SSLError(
            f"Hash function implementation unavailable for fingerprint length: {digest_length}"
        )

    expected = unhexlify(normalized.encode())
    cert_digest = hashfunc(cert).digest()
    # Constant-time comparison to avoid timing side channels.
    if not hmac.compare_digest(cert_digest, expected):
        raise SSLError(
            f'Fingerprints did not match. Expected "{normalized}", got "{cert_digest.hex()}"'
        )
def resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_REQUIRED`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_REQUIRED
    if isinstance(candidate, str):
        # Try the full constant name first, then the 'CERT_' abbreviation.
        resolved = getattr(ssl, candidate, None)
        if resolved is None:
            resolved = getattr(ssl, "CERT_" + candidate)
        return resolved  # type: ignore[no-any-return]
    return candidate  # type: ignore[return-value]
def resolve_ssl_version(candidate: None | int | str) -> int:
    """
    Resolve a protocol-version argument to an ``ssl`` module constant,
    like :func:`resolve_cert_reqs` does for verification modes.
    Defaults to :data:`ssl.PROTOCOL_TLS`.
    """
    if candidate is None:
        return PROTOCOL_TLS
    if isinstance(candidate, str):
        # Try the full constant name first, then the 'PROTOCOL_' abbreviation.
        resolved = getattr(ssl, candidate, None)
        if resolved is None:
            resolved = getattr(ssl, "PROTOCOL_" + candidate)
        return typing.cast(int, resolved)
    return candidate
def create_urllib3_context(
    ssl_version: int | None = None,
    cert_reqs: int | None = None,
    options: int | None = None,
    ciphers: str | None = None,
    ssl_minimum_version: int | None = None,
    ssl_maximum_version: int | None = None,
    verify_flags: int | None = None,
) -> ssl.SSLContext:
    """Creates and configures an :class:`ssl.SSLContext` instance for use with urllib3.
    :param ssl_version:
        The desired protocol version to use. This will default to
        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
        the server and your installation of OpenSSL support.
        This parameter is deprecated instead use 'ssl_minimum_version'.
    :param ssl_minimum_version:
        The minimum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.
    :param ssl_maximum_version:
        The maximum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.
        Not recommended to set to anything other than 'ssl.TLSVersion.MAXIMUM_SUPPORTED' which is the
        default value.
    :param cert_reqs:
        Whether to require the certificate verification. This defaults to
        ``ssl.CERT_REQUIRED``.
    :param options:
        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.
    :param ciphers:
        Which cipher suites to allow the server to select. Defaults to either system configured
        ciphers if OpenSSL 1.1.1+, otherwise uses a secure default set of ciphers.
    :param verify_flags:
        The flags for certificate verification operations. These default to
        ``ssl.VERIFY_X509_PARTIAL_CHAIN`` and ``ssl.VERIFY_X509_STRICT`` for Python 3.13+.
    :returns:
        Constructed SSLContext object with specified options
    :rtype: SSLContext
    """
    # SSLContext is the module-level placeholder (None) when the ssl module
    # failed to import; bail out early in that case.
    if SSLContext is None:
        raise TypeError("Can't create an SSLContext object without an ssl module")
    # This means 'ssl_version' was specified as an exact value.
    if ssl_version not in (None, PROTOCOL_TLS, PROTOCOL_TLS_CLIENT):
        # Disallow setting 'ssl_version' and 'ssl_minimum|maximum_version'
        # to avoid conflicts.
        if ssl_minimum_version is not None or ssl_maximum_version is not None:
            raise ValueError(
                "Can't specify both 'ssl_version' and either "
                "'ssl_minimum_version' or 'ssl_maximum_version'"
            )
        # 'ssl_version' is deprecated and will be removed in the future.
        else:
            # Use 'ssl_minimum_version' and 'ssl_maximum_version' instead.
            ssl_minimum_version = _SSL_VERSION_TO_TLS_VERSION.get(
                ssl_version, TLSVersion.MINIMUM_SUPPORTED
            )
            ssl_maximum_version = _SSL_VERSION_TO_TLS_VERSION.get(
                ssl_version, TLSVersion.MAXIMUM_SUPPORTED
            )
            # This warning message is pushing users to use 'ssl_minimum_version'
            # instead of both min/max. Best practice is to only set the minimum version and
            # keep the maximum version to be its default value: 'TLSVersion.MAXIMUM_SUPPORTED'
            warnings.warn(
                "'ssl_version' option is deprecated and will be "
                "removed in urllib3 v2.1.0. Instead use 'ssl_minimum_version'",
                category=DeprecationWarning,
                stacklevel=2,
            )
    # PROTOCOL_TLS is deprecated in Python 3.10 so we always use PROTOCOL_TLS_CLIENT
    context = SSLContext(PROTOCOL_TLS_CLIENT)
    if ssl_minimum_version is not None:
        context.minimum_version = ssl_minimum_version
    else:  # Python <3.10 defaults to 'MINIMUM_SUPPORTED' so explicitly set TLSv1.2 here
        context.minimum_version = TLSVersion.TLSv1_2
    if ssl_maximum_version is not None:
        context.maximum_version = ssl_maximum_version
    # Unless we're given ciphers defer to either system ciphers in
    # the case of OpenSSL 1.1.1+ or use our own secure default ciphers.
    if ciphers:
        context.set_ciphers(ciphers)
    # Setting the default here, as we may have no ssl module on import
    cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
    if options is None:
        options = 0
        # SSLv2 is easily broken and is considered harmful and dangerous
        options |= OP_NO_SSLv2
        # SSLv3 has several problems and is now dangerous
        options |= OP_NO_SSLv3
        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
        # (issue #309)
        options |= OP_NO_COMPRESSION
        # TLSv1.2 only. Unless set explicitly, do not request tickets.
        # This may save some bandwidth on wire, and although the ticket is encrypted,
        # there is a risk associated with it being on wire,
        # if the server is not rotating its ticketing keys properly.
        options |= OP_NO_TICKET
    context.options |= options
    if verify_flags is None:
        verify_flags = 0
        # In Python 3.13+ ssl.create_default_context() sets VERIFY_X509_PARTIAL_CHAIN
        # and VERIFY_X509_STRICT so we do the same
        if sys.version_info >= (3, 13):
            verify_flags |= VERIFY_X509_PARTIAL_CHAIN
            verify_flags |= VERIFY_X509_STRICT
    context.verify_flags |= verify_flags
    # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
    # necessary for conditional client cert authentication with TLS 1.3.
    # The attribute is None for OpenSSL <= 1.1.0 or does not exist when using
    # an SSLContext created by pyOpenSSL.
    if getattr(context, "post_handshake_auth", None) is not None:
        context.post_handshake_auth = True
    # The order of the below lines setting verify_mode and check_hostname
    # matter due to safe-guards SSLContext has to prevent an SSLContext with
    # check_hostname=True, verify_mode=NONE/OPTIONAL.
    # We always set 'check_hostname=False' for pyOpenSSL so we rely on our own
    # 'ssl.match_hostname()' implementation.
    if cert_reqs == ssl.CERT_REQUIRED and not IS_PYOPENSSL:
        context.verify_mode = cert_reqs
        context.check_hostname = True
    else:
        context.check_hostname = False
        context.verify_mode = cert_reqs
    try:
        context.hostname_checks_common_name = False
    except AttributeError:  # Defensive: for CPython < 3.9.3; for PyPy < 7.3.8
        pass
    # Honor SSLKEYLOGFILE (like browsers do) so TLS traffic can be decrypted
    # for debugging, e.g. with Wireshark.
    sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
    if sslkeylogfile:
        context.keylog_filename = sslkeylogfile
    return context
# Overload: when ``tls_in_tls`` is statically False the result is always a
# plain ``ssl.SSLSocket``.
@typing.overload
def ssl_wrap_socket(
    sock: socket.socket,
    keyfile: str | None = ...,
    certfile: str | None = ...,
    cert_reqs: int | None = ...,
    ca_certs: str | None = ...,
    server_hostname: str | None = ...,
    ssl_version: int | None = ...,
    ciphers: str | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    ca_cert_dir: str | None = ...,
    key_password: str | None = ...,
    ca_cert_data: None | str | bytes = ...,
    tls_in_tls: typing.Literal[False] = ...,
) -> ssl.SSLSocket: ...
# Overload: with a runtime ``tls_in_tls`` flag the result may also be an
# ``SSLTransport`` (TLS wrapped inside an existing TLS tunnel).
@typing.overload
def ssl_wrap_socket(
    sock: socket.socket,
    keyfile: str | None = ...,
    certfile: str | None = ...,
    cert_reqs: int | None = ...,
    ca_certs: str | None = ...,
    server_hostname: str | None = ...,
    ssl_version: int | None = ...,
    ciphers: str | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    ca_cert_dir: str | None = ...,
    key_password: str | None = ...,
    ca_cert_data: None | str | bytes = ...,
    tls_in_tls: bool = ...,
) -> ssl.SSLSocket | SSLTransportType: ...
def ssl_wrap_socket(
    sock: socket.socket,
    keyfile: str | None = None,
    certfile: str | None = None,
    cert_reqs: int | None = None,
    ca_certs: str | None = None,
    server_hostname: str | None = None,
    ssl_version: int | None = None,
    ciphers: str | None = None,
    ssl_context: ssl.SSLContext | None = None,
    ca_cert_dir: str | None = None,
    key_password: str | None = None,
    ca_cert_data: None | str | bytes = None,
    tls_in_tls: bool = False,
) -> ssl.SSLSocket | SSLTransportType:
    """
    All arguments except for server_hostname, ssl_context, tls_in_tls, ca_cert_data and
    ca_cert_dir have the same meaning as they do when using
    :func:`ssl.create_default_context`, :meth:`ssl.SSLContext.load_cert_chain`,
    :meth:`ssl.SSLContext.set_ciphers` and :meth:`ssl.SSLContext.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support.
    :param ca_cert_dir:
        A directory containing CA certificates in multiple separate files, as
        supported by OpenSSL's -CApath flag or the capath argument to
        SSLContext.load_verify_locations().
    :param key_password:
        Optional password if the keyfile is encrypted.
    :param ca_cert_data:
        Optional string containing CA certificates in PEM format suitable for
        passing as the cadata parameter to SSLContext.load_verify_locations()
    :param tls_in_tls:
        Use SSLTransport to wrap the existing socket.
    :return: The wrapped socket (an ``SSLTransport`` when ``tls_in_tls``).
    """
    context = ssl_context
    if context is None:
        # Note: This branch of code and all the variables in it are only used in tests.
        # We should consider deprecating and removing this code.
        context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
    if ca_certs or ca_cert_dir or ca_cert_data:
        try:
            context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
        except OSError as e:
            # Re-raise filesystem/parse failures as urllib3's SSLError so
            # callers only need to catch one exception type.
            raise SSLError(e) from e
    elif ssl_context is None and hasattr(context, "load_default_certs"):
        # try to load OS default certs; works well on Windows.
        context.load_default_certs()
    # Attempt to detect if we get the goofy behavior of the
    # keyfile being encrypted and OpenSSL asking for the
    # passphrase via the terminal and instead error out.
    if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
        raise SSLError("Client private key is encrypted, password is required")
    if certfile:
        # ``keyfile`` may be None here, in which case OpenSSL takes the
        # private key from ``certfile`` itself.
        if key_password is None:
            context.load_cert_chain(certfile, keyfile)
        else:
            context.load_cert_chain(certfile, keyfile, key_password)
    # Advertise supported application protocols (e.g. http/1.1) via ALPN.
    context.set_alpn_protocols(ALPN_PROTOCOLS)
    ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)
    return ssl_sock
def is_ipaddress(hostname: str | bytes) -> bool:
    """Detect whether *hostname* is an IPv4 or IPv6 address.

    IPv6 addresses carrying a Zone ID are recognized as well.

    :param str hostname: Hostname to examine.
    :return: True if the hostname is an IP address, False otherwise.
    """
    if isinstance(hostname, bytes):
        # IDN A-label bytes are ASCII compatible.
        hostname = hostname.decode("ascii")
    looks_like_v4 = _IPV4_RE.match(hostname) is not None
    looks_like_v6 = _BRACELESS_IPV6_ADDRZ_RE.match(hostname) is not None
    return looks_like_v4 or looks_like_v6
def _is_key_file_encrypted(key_file: str) -> bool:
"""Detects if a key file is encrypted or not."""
with open(key_file) as f:
for line in f:
# Look for Proc-Type: 4,ENCRYPTED
if "ENCRYPTED" in line:
return True
return False
def _ssl_wrap_socket_impl(
    sock: socket.socket,
    ssl_context: ssl.SSLContext,
    tls_in_tls: bool,
    server_hostname: str | None = None,
) -> ssl.SSLSocket | SSLTransportType:
    """Wrap *sock* with *ssl_context*, using SSLTransport for TLS-in-TLS."""
    # Common case first: a direct TLS connection over the raw socket.
    if not tls_in_tls:
        return ssl_context.wrap_socket(sock, server_hostname=server_hostname)
    if not SSLTransport:
        # Import error, ssl is not available.
        raise ProxySchemeUnsupported(
            "TLS in TLS requires support for the 'ssl' module"
        )
    SSLTransport._validate_ssl_context_for_tls_in_tls(ssl_context)
    return SSLTransport(sock, ssl_context, server_hostname)
```
|
==========================================================================================================================
SOURCE CODE FILE: ssl_match_hostname.py
LINES: 1
SIZE: 5.71 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\ssl_match_hostname.py
ENCODING: utf-8
```py
"""The match_hostname() function from Python 3.5, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
# It is modified to remove commonName support.
from __future__ import annotations
import ipaddress
import re
import typing
from ipaddress import IPv4Address, IPv6Address
if typing.TYPE_CHECKING:
from .ssl_ import _TYPE_PEER_CERT_RET_DICT
__version__ = "3.5.0.1"
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(
dn: typing.Any, hostname: str, max_wildcards: int = 1
) -> typing.Match[str] | None | bool:
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r".")
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count("*")
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn)
)
# speed up common case w/o wildcards
if not wildcards:
return bool(dn.lower() == hostname.lower())
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == "*":
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append("[^.]+")
elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
return pat.match(hostname)
def _ipaddress_match(ipname: str, host_ip: IPv4Address | IPv6Address) -> bool:
"""Exact matching of IP addresses.
RFC 9110 section 4.3.5: "A reference identity of IP-ID contains the decoded
bytes of the IP address. An IP version 4 address is 4 octets, and an IP
version 6 address is 16 octets. [...] A reference identity of type IP-ID
matches if the address is identical to an iPAddress value of the
subjectAltName extension of the certificate."
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(ipname.rstrip())
return bool(ip.packed == host_ip.packed)
def match_hostname(
    cert: _TYPE_PEER_CERT_RET_DICT | None,
    hostname: str,
    hostname_checks_common_name: bool = False,
) -> None:
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.

    :param cert: Decoded peer certificate dictionary, or None.
    :param hostname: Hostname (or textual IP address) the caller connected to.
    :param hostname_checks_common_name: When True, fall back to the subject
        commonName if the certificate presents no subjectAltName entries.
    :raises ValueError: If *cert* is empty/None.
    :raises CertificateError: If no identity in the certificate matches.
    """
    if not cert:
        raise ValueError(
            "empty or no certificate, match_hostname needs a "
            "SSL socket or SSL context with either "
            "CERT_OPTIONAL or CERT_REQUIRED"
        )
    try:
        # Divergence from upstream: ipaddress can't handle byte str
        #
        # The ipaddress module shipped with Python < 3.9 does not support
        # scoped IPv6 addresses so we unconditionally strip the Zone IDs for
        # now. Once we drop support for Python 3.9 we can remove this branch.
        if "%" in hostname:
            host_ip = ipaddress.ip_address(hostname[: hostname.rfind("%")])
        else:
            host_ip = ipaddress.ip_address(hostname)
    except ValueError:
        # Not an IP address (common case)
        host_ip = None
    # Collect every identity we compared against so a failure can report them.
    dnsnames = []
    san: tuple[tuple[str, str], ...] = cert.get("subjectAltName", ())
    key: str
    value: str
    for key, value in san:
        if key == "DNS":
            # DNS entries only apply when the requested host is not an IP.
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == "IP Address":
            # IP entries only apply when the requested host parsed as an IP.
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    # We only check 'commonName' if it's enabled and we're not verifying
    # an IP address. IP addresses aren't valid within 'commonName'.
    if hostname_checks_common_name and host_ip is None and not dnsnames:
        for sub in cert.get("subject", ()):
            for key, value in sub:
                if key == "commonName":
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)  # Defensive: for Python < 3.9.3
    # Reaching here means nothing matched; tailor the error to what we saw.
    if len(dnsnames) > 1:
        raise CertificateError(
            "hostname %r "
            "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
        )
    elif len(dnsnames) == 1:
        raise CertificateError(f"hostname {hostname!r} doesn't match {dnsnames[0]!r}")
    else:
        raise CertificateError("no appropriate subjectAltName fields were found")
```
|
====================================================================================================================
SOURCE CODE FILE: ssltransport.py
LINES: 1
SIZE: 8.64 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\ssltransport.py
ENCODING: utf-8
```py
from __future__ import annotations
import io
import socket
import ssl
import typing
from ..exceptions import ProxySchemeUnsupported
if typing.TYPE_CHECKING:
from typing_extensions import Self
from .ssl_ import _TYPE_PEER_CERT_RET, _TYPE_PEER_CERT_RET_DICT
# Writable buffer types accepted by recv_into()/read().
_WriteBuffer = typing.Union[bytearray, memoryview]
# Generic return type threaded through _ssl_io_loop()'s overloads.
_ReturnValue = typing.TypeVar("_ReturnValue")
# Chunk size used when pumping ciphertext between the socket and the BIOs.
SSL_BLOCKSIZE = 16384
class SSLTransport:
    """
    The SSLTransport wraps an existing socket and establishes an SSL connection.

    Contrary to Python's implementation of SSLSocket, it allows you to chain
    multiple TLS connections together. It's particularly useful if you need to
    implement TLS within TLS.

    The class supports most of the socket API operations.
    """
    @staticmethod
    def _validate_ssl_context_for_tls_in_tls(ssl_context: ssl.SSLContext) -> None:
        """
        Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
        for TLS in TLS.

        The only requirement is that the ssl_context provides the 'wrap_bio'
        methods.
        """
        if not hasattr(ssl_context, "wrap_bio"):
            raise ProxySchemeUnsupported(
                "TLS in TLS requires SSLContext.wrap_bio() which isn't "
                "available on non-native SSLContext"
            )
    def __init__(
        self,
        socket: socket.socket,
        ssl_context: ssl.SSLContext,
        server_hostname: str | None = None,
        suppress_ragged_eofs: bool = True,
    ) -> None:
        """
        Create an SSLTransport around socket using the provided ssl_context.
        """
        # Memory BIOs carry ciphertext between the SSL object and the real
        # socket: 'incoming' feeds received bytes to OpenSSL, 'outgoing'
        # collects bytes OpenSSL wants sent to the peer.
        self.incoming = ssl.MemoryBIO()
        self.outgoing = ssl.MemoryBIO()
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self.socket = socket
        self.sslobj = ssl_context.wrap_bio(
            self.incoming, self.outgoing, server_hostname=server_hostname
        )
        # Perform initial handshake.
        self._ssl_io_loop(self.sslobj.do_handshake)
    def __enter__(self) -> Self:
        return self
    def __exit__(self, *_: typing.Any) -> None:
        self.close()
    def fileno(self) -> int:
        return self.socket.fileno()
    def read(self, len: int = 1024, buffer: typing.Any | None = None) -> int | bytes:
        # Returns bytes, or the number of bytes written when *buffer* is given.
        return self._wrap_ssl_read(len, buffer)
    def recv(self, buflen: int = 1024, flags: int = 0) -> int | bytes:
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to recv")
        return self._wrap_ssl_read(buflen)
    def recv_into(
        self,
        buffer: _WriteBuffer,
        nbytes: int | None = None,
        flags: int = 0,
    ) -> None | int | bytes:
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to recv_into")
        if nbytes is None:
            # Default to filling the whole buffer, matching socket.recv_into.
            nbytes = len(buffer)
        return self.read(nbytes, buffer)
    def sendall(self, data: bytes, flags: int = 0) -> None:
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to sendall")
        count = 0
        # Cast to a flat unsigned-byte view so slicing works for any buffer.
        with memoryview(data) as view, view.cast("B") as byte_view:
            amount = len(byte_view)
            # send() may accept only part of the data; loop until done.
            while count < amount:
                v = self.send(byte_view[count:])
                count += v
    def send(self, data: bytes, flags: int = 0) -> int:
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to send")
        return self._ssl_io_loop(self.sslobj.write, data)
    def makefile(
        self,
        mode: str,
        buffering: int | None = None,
        *,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> typing.BinaryIO | typing.TextIO | socket.SocketIO:
        """
        Python's httpclient uses makefile and buffered io when reading HTTP
        messages and we need to support it.

        This is unfortunately a copy and paste of socket.py makefile with small
        changes to point to the socket directly.
        """
        if not set(mode) <= {"r", "w", "b"}:
            raise ValueError(f"invalid mode {mode!r} (only r, w, b allowed)")
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        # SocketIO accepts this object because it implements the socket
        # methods it needs (recv_into/send/fileno/...).
        raw = socket.SocketIO(self, rawmode)  # type: ignore[arg-type]
        self.socket._io_refs += 1  # type: ignore[attr-defined]
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        buffer: typing.BinaryIO
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)  # type: ignore[assignment]
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode  # type: ignore[misc]
        return text
    def unwrap(self) -> None:
        # Perform the TLS closure alert exchange without closing the socket.
        self._ssl_io_loop(self.sslobj.unwrap)
    def close(self) -> None:
        self.socket.close()
    @typing.overload
    def getpeercert(
        self, binary_form: typing.Literal[False] = ...
    ) -> _TYPE_PEER_CERT_RET_DICT | None: ...
    @typing.overload
    def getpeercert(self, binary_form: typing.Literal[True]) -> bytes | None: ...
    def getpeercert(self, binary_form: bool = False) -> _TYPE_PEER_CERT_RET:
        return self.sslobj.getpeercert(binary_form)  # type: ignore[return-value]
    def version(self) -> str | None:
        return self.sslobj.version()
    def cipher(self) -> tuple[str, str, int] | None:
        return self.sslobj.cipher()
    def selected_alpn_protocol(self) -> str | None:
        return self.sslobj.selected_alpn_protocol()
    def shared_ciphers(self) -> list[tuple[str, str, int]] | None:
        return self.sslobj.shared_ciphers()
    def compression(self) -> str | None:
        return self.sslobj.compression()
    def settimeout(self, value: float | None) -> None:
        # Timeouts are delegated to the underlying plaintext socket.
        self.socket.settimeout(value)
    def gettimeout(self) -> float | None:
        return self.socket.gettimeout()
    def _decref_socketios(self) -> None:
        # Called by SocketIO.close(); forward the refcount bookkeeping.
        self.socket._decref_socketios()  # type: ignore[attr-defined]
    def _wrap_ssl_read(self, len: int, buffer: bytearray | None = None) -> int | bytes:
        try:
            return self._ssl_io_loop(self.sslobj.read, len, buffer)
        except ssl.SSLError as e:
            # A "ragged EOF" (connection dropped without a closure alert) is
            # optionally treated as a normal end of stream.
            if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
                return 0  # eof, return 0.
            else:
                raise
    # func is sslobj.do_handshake or sslobj.unwrap
    @typing.overload
    def _ssl_io_loop(self, func: typing.Callable[[], None]) -> None: ...
    # func is sslobj.write, arg1 is data
    @typing.overload
    def _ssl_io_loop(self, func: typing.Callable[[bytes], int], arg1: bytes) -> int: ...
    # func is sslobj.read, arg1 is len, arg2 is buffer
    @typing.overload
    def _ssl_io_loop(
        self,
        func: typing.Callable[[int, bytearray | None], bytes],
        arg1: int,
        arg2: bytearray | None,
    ) -> bytes: ...
    def _ssl_io_loop(
        self,
        func: typing.Callable[..., _ReturnValue],
        arg1: None | bytes | int = None,
        arg2: bytearray | None = None,
    ) -> _ReturnValue:
        """Performs an I/O loop between incoming/outgoing and the socket."""
        should_loop = True
        ret = None
        while should_loop:
            errno = None
            try:
                if arg1 is None and arg2 is None:
                    ret = func()
                elif arg2 is None:
                    ret = func(arg1)
                else:
                    ret = func(arg1, arg2)
            except ssl.SSLError as e:
                if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
                    # WANT_READ, and WANT_WRITE are expected, others are not.
                    raise e
                errno = e.errno
            # Flush any ciphertext OpenSSL produced, even on WANT_READ/WRITE.
            buf = self.outgoing.read()
            self.socket.sendall(buf)
            if errno is None:
                should_loop = False
            elif errno == ssl.SSL_ERROR_WANT_READ:
                # OpenSSL needs more input; pull ciphertext from the socket.
                buf = self.socket.recv(SSL_BLOCKSIZE)
                if buf:
                    self.incoming.write(buf)
                else:
                    # Peer closed the connection: signal EOF to OpenSSL.
                    self.incoming.write_eof()
        return typing.cast(_ReturnValue, ret)
```
|
===============================================================================================================
SOURCE CODE FILE: timeout.py
LINES: 1
SIZE: 10.10 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\timeout.py
ENCODING: utf-8
```py
from __future__ import annotations
import time
import typing
from enum import Enum
from socket import getdefaulttimeout
from ..exceptions import TimeoutStateError
if typing.TYPE_CHECKING:
from typing import Final
class _TYPE_DEFAULT(Enum):
    """Sentinel meaning "argument not supplied".

    Distinct from ``None`` because ``None`` is itself a meaningful timeout
    value (never time out).
    """
    # This value should never be passed to socket.settimeout() so for safety we use a -1.
    # socket.settimeout() raises a ValueError for negative values.
    token = -1
# The single sentinel instance used as the default-timeout marker.
_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token
# Values accepted wherever a timeout is expected: seconds, None, or the sentinel.
_TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]]
class Timeout:
    """Timeout configuration.

    Timeouts can be defined as a default for a pool:

    .. code-block:: python

       import urllib3

       timeout = urllib3.util.Timeout(connect=2.0, read=7.0)

       http = urllib3.PoolManager(timeout=timeout)

       resp = http.request("GET", "https://example.com/")

       print(resp.status)

    Or per-request (which overrides the default for the pool):

    .. code-block:: python

       response = http.request("GET", "https://example.com/", timeout=Timeout(10))

    Timeouts can be disabled by setting all the parameters to ``None``:

    .. code-block:: python

       no_timeout = Timeout(connect=None, read=None)
       response = http.request("GET", "https://example.com/", timeout=no_timeout)

    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.

        Defaults to None.
    :type total: int, float, or None
    :param connect:
        The maximum amount of time (in seconds) to wait for a connection
        attempt to a server to succeed. Omitting the parameter will default the
        connect timeout to the system default, probably `the global default
        timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.
    :type connect: int, float, or None
    :param read:
        The maximum amount of time (in seconds) to wait between consecutive
        read operations for a response from the server. Omitting the parameter
        will default the read timeout to the system default, probably `the
        global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.
    :type read: int, float, or None

    .. note::

        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response.

        For example, Python's DNS resolver does not obey the timeout specified
        on the socket. Other factors that can affect total request time include
        high CPU load, high swap, the program running at a low priority level,
        or other behaviors.

        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not trigger, even though the request will take
        several minutes to complete.
    """
    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT
    def __init__(
        self,
        total: _TYPE_TIMEOUT = None,
        connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
        read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
    ) -> None:
        # Validate eagerly so a bad value fails at construction, not mid-request.
        self._connect = self._validate_timeout(connect, "connect")
        self._read = self._validate_timeout(read, "read")
        self.total = self._validate_timeout(total, "total")
        # Monotonic clock value recorded by start_connect(); None = not started.
        self._start_connect: float | None = None
    def __repr__(self) -> str:
        return f"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})"
    # __str__ provided for backwards compatibility
    __str__ = __repr__
    @staticmethod
    def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:
        # Map the sentinel to the process-wide socket default; pass through
        # explicit values unchanged.
        return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout
    @classmethod
    def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:
        """Check that a timeout attribute is valid.

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If it is a numeric value less than or equal to
            zero, or the type is not an integer, float, or None.
        """
        if value is None or value is _DEFAULT_TIMEOUT:
            return value
        # bool is an int subclass, so reject it explicitly before float().
        if isinstance(value, bool):
            raise ValueError(
                "Timeout cannot be a boolean value. It must "
                "be an int, float or None."
            )
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError(
                "Timeout value %s was %s, but it must be an "
                "int, float or None." % (name, value)
            ) from None
        try:
            if value <= 0:
                raise ValueError(
                    "Attempted to set %s timeout to %s, but the "
                    "timeout cannot be set to a value less "
                    "than or equal to 0." % (name, value)
                )
        except TypeError:
            # e.g. complex numbers pass float() above but fail ordering.
            raise ValueError(
                "Timeout value %s was %s, but it must be an "
                "int, float or None." % (name, value)
            ) from None
        return value
    @classmethod
    def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:
        """Create a new Timeout from a legacy timeout value.

        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.

        :param timeout: The legacy timeout value.
        :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)
    def clone(self) -> Timeout:
        """Create a copy of the timeout object

        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read, total=self.total)
    def start_connect(self) -> float:
        """Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = time.monotonic()
        return self._start_connect
    def get_connect_duration(self) -> float:
        """Gets the time elapsed since the call to :meth:`start_connect`.

        :return: Elapsed time in seconds.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError(
                "Can't get connect duration for timer that has not started."
            )
        return time.monotonic() - self._start_connect
    @property
    def connect_timeout(self) -> _TYPE_TIMEOUT:
        """Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect
        if self._connect is None or self._connect is _DEFAULT_TIMEOUT:
            return self.total
        # Both set: the stricter (shorter) of connect and total wins.
        return min(self._connect, self.total)  # type: ignore[type-var]
    @property
    def read_timeout(self) -> float | None:
        """Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.

        :return: Value to use for the read timeout.
        :rtype: int, float or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (
            self.total is not None
            and self.total is not _DEFAULT_TIMEOUT
            and self._read is not None
            and self._read is not _DEFAULT_TIMEOUT
        ):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Remaining budget from total, capped by the explicit read timeout.
            return max(0, min(self.total - self.get_connect_duration(), self._read))
        elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self.resolve_default_timeout(self._read)
```
|
===========================================================================================================
SOURCE CODE FILE: url.py
LINES: 1
SIZE: 14.85 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\url.py
ENCODING: utf-8
```py
from __future__ import annotations
import re
import typing
from ..exceptions import LocationParseError
from .util import to_str
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
_NORMALIZABLE_SCHEMES = ("http", "https", None)
# Almost all of these patterns were derived from the
# 'rfc3986' module: https://github.com/python-hyper/rfc3986
_PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
_SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
# RFC 3986 appendix B style URI splitter: scheme, authority, path, query, fragment.
_URI_RE = re.compile(
    r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
    r"(?://([^\\/?#]*))?"
    r"([^?#]*)"
    r"(?:\?([^#]*))?"
    r"(?:#(.*))?$",
    re.UNICODE | re.DOTALL,
)
_IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
_HEX_PAT = "[0-9A-Fa-f]{1,4}"
_LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=_HEX_PAT, ipv4=_IPV4_PAT)
_subs = {"hex": _HEX_PAT, "ls32": _LS32_PAT}
# The RFC 3986 IPv6address grammar: every legal placement of the "::" elision.
_variations = [
    # 6( h16 ":" ) ls32
    "(?:%(hex)s:){6}%(ls32)s",
    # "::" 5( h16 ":" ) ls32
    "::(?:%(hex)s:){5}%(ls32)s",
    # [ h16 ] "::" 4( h16 ":" ) ls32
    "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
    # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
    "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
    # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
    "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
    # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
    "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
    # [ *4( h16 ":" ) h16 ] "::" ls32
    "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
    # [ *5( h16 ":" ) h16 ] "::" h16
    "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
    # [ *6( h16 ":" ) h16 ] "::"
    "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
_UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._\-~"
_IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
# Zone ID (RFC 6874): "%25" (encoded "%") or a bare "%", then unreserved/pct chars.
_ZONE_ID_PAT = "(?:%25|%)(?:[" + _UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
_IPV6_ADDRZ_PAT = r"\[" + _IPV6_PAT + r"(?:" + _ZONE_ID_PAT + r")?\]"
_REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
_TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
_IPV4_RE = re.compile("^" + _IPV4_PAT + "$")
_IPV6_RE = re.compile("^" + _IPV6_PAT + "$")
_IPV6_ADDRZ_RE = re.compile("^" + _IPV6_ADDRZ_PAT + "$")
# Same as above but without the surrounding brackets ([2:-2] strips \[ and \]).
_BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + _IPV6_ADDRZ_PAT[2:-2] + "$")
_ZONE_ID_RE = re.compile("(" + _ZONE_ID_PAT + r")\]$")
# host (reg-name | IPv4 | bracketed IPv6+zone) with an optional port of up to 5 digits.
_HOST_PORT_PAT = ("^(%s|%s|%s)(?::0*?(|0|[1-9][0-9]{0,4}))?$") % (
    _REG_NAME_PAT,
    _IPV4_PAT,
    _IPV6_ADDRZ_PAT,
)
_HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL)
# Character classes used when percent-encoding each URL component.
_UNRESERVED_CHARS = set(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
)
_SUB_DELIM_CHARS = set("!$&'()*+,;=")
_USERINFO_CHARS = _UNRESERVED_CHARS | _SUB_DELIM_CHARS | {":"}
_PATH_CHARS = _USERINFO_CHARS | {"@", "/"}
_QUERY_CHARS = _FRAGMENT_CHARS = _PATH_CHARS | {"?"}
class Url(
    typing.NamedTuple(
        "Url",
        [
            ("scheme", typing.Optional[str]),
            ("auth", typing.Optional[str]),
            ("host", typing.Optional[str]),
            ("port", typing.Optional[int]),
            ("path", typing.Optional[str]),
            ("query", typing.Optional[str]),
            ("fragment", typing.Optional[str]),
        ],
    )
):
    """
    Data structure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. Both the scheme and host are normalized as they are
    both case-insensitive according to RFC 3986.
    """
    def __new__(  # type: ignore[no-untyped-def]
        cls,
        scheme: str | None = None,
        auth: str | None = None,
        host: str | None = None,
        port: int | None = None,
        path: str | None = None,
        query: str | None = None,
        fragment: str | None = None,
    ):
        # Normalize on construction: non-empty paths always start with "/",
        # and the scheme is lowercased (case-insensitive per RFC 3986).
        if path and not path.startswith("/"):
            path = "/" + path
        if scheme is not None:
            scheme = scheme.lower()
        return super().__new__(cls, scheme, auth, host, port, path, query, fragment)
    @property
    def hostname(self) -> str | None:
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host
    @property
    def request_uri(self) -> str:
        """Absolute path including the query string."""
        uri = self.path or "/"
        if self.query is not None:
            uri += "?" + self.query
        return uri
    @property
    def authority(self) -> str | None:
        """
        Authority component as defined in RFC 3986 3.2.
        This includes userinfo (auth), host and port.

        i.e.
            userinfo@host:port
        """
        userinfo = self.auth
        netloc = self.netloc
        if netloc is None or userinfo is None:
            return netloc
        else:
            return f"{userinfo}@{netloc}"
    @property
    def netloc(self) -> str | None:
        """
        Network location including host and port.

        If you need the equivalent of urllib.parse's ``netloc``,
        use the ``authority`` property instead.
        """
        if self.host is None:
            return None
        if self.port:
            return f"{self.host}:{self.port}"
        return self.host
    @property
    def url(self) -> str:
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example:

        .. code-block:: python

            import urllib3

            U = urllib3.util.parse_url("https://google.com/mail/")

            print(U.url)
            # "https://google.com/mail/"

            print( urllib3.util.Url("https", "username:password",
                                    "host.com", 80, "/path", "query", "fragment"
                                    ).url
                )
            # "https://username:password@host.com:80/path?query#fragment"
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ""
        # We use "is not None" we want things to happen with empty strings (or 0 port)
        if scheme is not None:
            url += scheme + "://"
        if auth is not None:
            url += auth + "@"
        if host is not None:
            url += host
        if port is not None:
            url += ":" + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += "?" + query
        if fragment is not None:
            url += "#" + fragment
        return url
    def __str__(self) -> str:
        return self.url
@typing.overload
def _encode_invalid_chars(
    component: str, allowed_chars: typing.Container[str]
) -> str: # Abstract
    ...
@typing.overload
def _encode_invalid_chars(
    component: None, allowed_chars: typing.Container[str]
) -> None: # Abstract
    ...
def _encode_invalid_chars(
    component: str | None, allowed_chars: typing.Container[str]
) -> str | None:
    """Percent-encodes a URI component without reapplying
    onto an already percent-encoded component.

    :param component: Raw component text, or None (returned unchanged).
    :param allowed_chars: ASCII characters that may appear unescaped.
    :returns: Percent-encoded text, or None when ``component`` is None.
    """
    if component is None:
        return component
    component = to_str(component)
    # Normalize existing percent-encoded bytes.
    # Try to see if the component we're encoding is already percent-encoded
    # so we can skip all '%' characters but still encode all others.
    component, percent_encodings = _PERCENT_RE.subn(
        lambda match: match.group(0).upper(), component
    )
    # If every '%' byte belongs to a %XX escape, the component counts as
    # already encoded and its '%' bytes are passed through below.
    uri_bytes = component.encode("utf-8", "surrogatepass")
    is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
    encoded_component = bytearray()
    for i in range(0, len(uri_bytes)):
        # Will return a single character bytestring
        byte = uri_bytes[i : i + 1]
        byte_ord = ord(byte)
        if (is_percent_encoded and byte == b"%") or (
            byte_ord < 128 and byte.decode() in allowed_chars
        ):
            encoded_component += byte
            continue
        # Everything else becomes an uppercase %XX escape.
        encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
    return encoded_component.decode()
def _remove_path_dot_segments(path: str) -> str:
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
segments = path.split("/") # Turn the path into a list of segments
output = [] # Initialize the variable to use to store output
for segment in segments:
# '.' is the current directory, so ignore it, it is superfluous
if segment == ".":
continue
# Anything other than '..', should be appended to the output
if segment != "..":
output.append(segment)
# In this case segment == '..', if we can, we should pop the last
# element
elif output:
output.pop()
# If the path starts with '/' and the output is empty or the first string
# is non-empty
if path.startswith("/") and (not output or output[0]):
output.insert(0, "")
# If the path starts with '/.' or '/..' ensure we add one more empty
# string to add a trailing '/'
if path.endswith(("/.", "/..")):
output.append("")
return "/".join(output)
@typing.overload
def _normalize_host(host: None, scheme: str | None) -> None: ...
@typing.overload
def _normalize_host(host: str, scheme: str | None) -> str: ...
def _normalize_host(host: str | None, scheme: str | None) -> str | None:
    """Lower-case and IDNA/zone-id-normalize a host for normalizable schemes.

    :param host: Host component; None or empty is returned unchanged.
    :param scheme: URL scheme; only schemes in ``_NORMALIZABLE_SCHEMES``
        trigger normalization.
    """
    if host:
        if scheme in _NORMALIZABLE_SCHEMES:
            is_ipv6 = _IPV6_ADDRZ_RE.match(host)
            if is_ipv6:
                # IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
                # such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
                # separator as necessary to return a valid RFC 4007 scoped IP.
                match = _ZONE_ID_RE.search(host)
                if match:
                    start, end = match.span(1)
                    zone_id = host[start:end]
                    # Strip the quoted ('%25') or bare ('%') separator, but
                    # keep a literal '%25' zone id intact.
                    if zone_id.startswith("%25") and zone_id != "%25":
                        zone_id = zone_id[3:]
                    else:
                        zone_id = zone_id[1:]
                    zone_id = _encode_invalid_chars(zone_id, _UNRESERVED_CHARS)
                    return f"{host[:start].lower()}%{zone_id}{host[end:]}"
                else:
                    return host.lower()
            elif not _IPV4_RE.match(host):
                # Not an IP literal: IDNA-encode each DNS label separately.
                return to_str(
                    b".".join([_idna_encode(label) for label in host.split(".")]),
                    "ascii",
                )
    return host
def _idna_encode(name: str) -> bytes:
if not name.isascii():
try:
import idna
except ImportError:
raise LocationParseError(
"Unable to parse URL without the 'idna' module"
) from None
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
raise LocationParseError(
f"Name '{name}' is not a valid IDNA label"
) from None
return name.lower().encode("ascii")
def _encode_target(target: str) -> str:
    """Percent-encode a request target so no invalid characters remain.

    Pre-condition: 'target' starts with '/'. Given that, ``_TARGET_RE``
    always matches and the check below is purely defensive.
    """
    match = _TARGET_RE.match(target)
    if not match: # Defensive:
        raise LocationParseError(f"{target!r} is not a valid request URI")
    path, query = match.groups()
    result = _encode_invalid_chars(path, _PATH_CHARS)
    if query is None:
        return result
    return result + "?" + _encode_invalid_chars(query, _QUERY_CHARS)
def parse_url(url: str) -> Url:
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.
    This parser is RFC 3986 and RFC 6874 compliant.
    The parser logic and helper functions are based heavily on
    work done in the ``rfc3986`` module.

    :param str url: URL to parse into a :class:`.Url` namedtuple.
    :raises LocationParseError: if the URL cannot be parsed or the port is
        out of range.

    Partly backwards-compatible with :mod:`urllib.parse`.
    Example:
    .. code-block:: python
        import urllib3
        print( urllib3.util.parse_url('http://google.com/mail/'))
        # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        print( urllib3.util.parse_url('google.com:80'))
        # Url(scheme=None, host='google.com', port=80, path=None, ...)
        print( urllib3.util.parse_url('/foo?bar'))
        # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    if not url:
        # Empty
        return Url()
    source_url = url
    # Prepend '//' to scheme-less inputs like 'google.com:80' so their first
    # component parses as an authority rather than a path.
    if not _SCHEME_RE.search(url):
        url = "//" + url
    scheme: str | None
    authority: str | None
    auth: str | None
    host: str | None
    port: str | None
    port_int: int | None
    path: str | None
    query: str | None
    fragment: str | None
    try:
        scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]
        # Only normalize components for schemes we know how to (see
        # _NORMALIZABLE_SCHEMES); unknown schemes pass through untouched.
        normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES
        if scheme:
            scheme = scheme.lower()
        if authority:
            # rpartition: everything after the *last* '@' is host:port.
            auth, _, host_port = authority.rpartition("@")
            auth = auth or None
            host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]
            if auth and normalize_uri:
                auth = _encode_invalid_chars(auth, _USERINFO_CHARS)
            # 'host:' with a blank port is treated as having no port.
            if port == "":
                port = None
        else:
            auth, host, port = None, None, None
        if port is not None:
            port_int = int(port)
            if not (0 <= port_int <= 65535):
                raise LocationParseError(url)
        else:
            port_int = None
        host = _normalize_host(host, scheme)
        if normalize_uri and path:
            path = _remove_path_dot_segments(path)
            path = _encode_invalid_chars(path, _PATH_CHARS)
        if normalize_uri and query:
            query = _encode_invalid_chars(query, _QUERY_CHARS)
        if normalize_uri and fragment:
            fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)
    except (ValueError, AttributeError) as e:
        # int() failures and regex non-matches (None.groups()) both land here
        # and are reported uniformly against the original input.
        raise LocationParseError(source_url) from e
    # For the sake of backwards compatibility we put empty
    # string values for path if there are any defined values
    # beyond the path in the URL.
    # TODO: Remove this when we break backwards compatibility.
    if not path:
        if query is not None or fragment is not None:
            path = ""
        else:
            path = None
    return Url(
        scheme=scheme,
        auth=auth,
        host=host,
        port=port_int,
        path=path,
        query=query,
        fragment=fragment,
    )
```
|
============================================================================================================
SOURCE CODE FILE: util.py
LINES: 1
SIZE: 1.12 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\util.py
ENCODING: utf-8
```py
from __future__ import annotations
import typing
from types import TracebackType
def to_bytes(
x: str | bytes, encoding: str | None = None, errors: str | None = None
) -> bytes:
if isinstance(x, bytes):
return x
elif not isinstance(x, str):
raise TypeError(f"not expecting type {type(x).__name__}")
if encoding or errors:
return x.encode(encoding or "utf-8", errors=errors or "strict")
return x.encode()
def to_str(
x: str | bytes, encoding: str | None = None, errors: str | None = None
) -> str:
if isinstance(x, str):
return x
elif not isinstance(x, bytes):
raise TypeError(f"not expecting type {type(x).__name__}")
if encoding or errors:
return x.decode(encoding or "utf-8", errors=errors or "strict")
return x.decode()
def reraise(
    tp: type[BaseException] | None,
    value: BaseException,
    tb: TracebackType | None = None,
) -> typing.NoReturn:
    """Re-raise *value*, attaching traceback *tb* when it differs.

    :param tp: Exception type; accepted for API compatibility but unused here.
    :param value: The exception instance to raise.
    :param tb: Traceback to attach if not already on *value*.
    """
    try:
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    finally:
        # Drop local references so the raised exception's traceback does not
        # keep this frame (and its locals) alive in a reference cycle.
        value = None # type: ignore[assignment]
        tb = None
```
|
============================================================================================================
SOURCE CODE FILE: wait.py
LINES: 1
SIZE: 4.32 KB
PATH: scripts\freecad_env\Lib\site-packages\urllib3\util\wait.py
ENCODING: utf-8
```py
from __future__ import annotations
import select
import socket
from functools import partial
__all__ = ["wait_for_read", "wait_for_write"]
# How should we wait on sockets?
#
# There are two types of APIs you can use for waiting on sockets: the fancy
# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
# select/poll. The stateful APIs are more efficient when you have a lots of
# sockets to keep track of, because you can set them up once and then use them
# lots of times. But we only ever want to wait on a single socket at a time
# and don't want to keep track of state, so the stateless APIs are actually
# more efficient. So we want to use select() or poll().
#
# Now, how do we choose between select() and poll()? On traditional Unixes,
# select() has a strange calling convention that makes it slow, or fail
# altogether, for high-numbered file descriptors. The point of poll() is to fix
# that, so on Unixes, we prefer poll().
#
# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
# for it), but that's OK, because on Windows, select() doesn't have this
# strange calling convention; plain select() works fine.
#
# So: on Windows we use select(), and everywhere else we use poll(). We also
# fall back to select() in case poll() is somehow broken or missing.
def select_wait_for_socket(
sock: socket.socket,
read: bool = False,
write: bool = False,
timeout: float | None = None,
) -> bool:
if not read and not write:
raise RuntimeError("must specify at least one of read=True, write=True")
rcheck = []
wcheck = []
if read:
rcheck.append(sock)
if write:
wcheck.append(sock)
# When doing a non-blocking connect, most systems signal success by
# marking the socket writable. Windows, though, signals success by marked
# it as "exceptional". We paper over the difference by checking the write
# sockets for both conditions. (The stdlib selectors module does the same
# thing.)
fn = partial(select.select, rcheck, wcheck, wcheck)
rready, wready, xready = fn(timeout)
return bool(rready or wready or xready)
def poll_wait_for_socket(
sock: socket.socket,
read: bool = False,
write: bool = False,
timeout: float | None = None,
) -> bool:
if not read and not write:
raise RuntimeError("must specify at least one of read=True, write=True")
mask = 0
if read:
mask |= select.POLLIN
if write:
mask |= select.POLLOUT
poll_obj = select.poll()
poll_obj.register(sock, mask)
# For some reason, poll() takes timeout in milliseconds
def do_poll(t: float | None) -> list[tuple[int, int]]:
if t is not None:
t *= 1000
return poll_obj.poll(t)
return bool(do_poll(timeout))
def _have_working_poll() -> bool:
# Apparently some systems have a select.poll that fails as soon as you try
# to use it, either due to strange configuration or broken monkeypatching
# from libraries like eventlet/greenlet.
try:
poll_obj = select.poll()
poll_obj.poll(0)
except (AttributeError, OSError):
return False
else:
return True
def wait_for_socket(
    sock: socket.socket,
    read: bool = False,
    write: bool = False,
    timeout: float | None = None,
) -> bool:
    """Wait for *sock* to become readable and/or writable.

    On first call, picks the best implementation (poll, else select) and
    rebinds the module-level ``wait_for_socket`` name to it, so the probe
    runs only once per process.
    """
    # We delay choosing which implementation to use until the first time we're
    # called. We could do it at import time, but then we might make the wrong
    # decision if someone goes wild with monkeypatching select.poll after
    # we're imported.
    global wait_for_socket
    if _have_working_poll():
        wait_for_socket = poll_wait_for_socket
    elif hasattr(select, "select"):
        wait_for_socket = select_wait_for_socket
    # Delegate to whichever implementation was just installed.
    return wait_for_socket(sock, read, write, timeout)
def wait_for_read(sock: socket.socket, timeout: float | None = None) -> bool:
    """Wait until *sock* is readable.

    :returns: True if readable within *timeout*, False if the timeout expired.
    """
    return wait_for_socket(sock, read=True, timeout=timeout)
def wait_for_write(sock: socket.socket, timeout: float | None = None) -> bool:
    """Waits for writing to be available on a given socket.
    Returns True if the socket is writable, or False if the timeout expired.
    """
    return wait_for_socket(sock, write=True, timeout=timeout)
```
|
====================================================================================================================================
python-webencodings
===================
This is a Python implementation of the `WHATWG Encoding standard
<http://encoding.spec.whatwg.org/>`_.
* Latest documentation: http://packages.python.org/webencodings/
* Source code and issue tracker:
https://github.com/gsnedders/python-webencodings
* PyPI releases: http://pypi.python.org/pypi/webencodings
* License: BSD
* Python 2.6+ and 3.3+
In order to be compatible with legacy web content
when interpreting something like ``Content-Type: text/html; charset=latin1``,
tools need to use a particular set of aliases for encoding labels
as well as some overriding rules.
For example, ``US-ASCII`` and ``iso-8859-1`` on the web are actually
aliases for ``windows-1252``, and a UTF-8 or UTF-16 BOM takes precedence
over any other encoding declaration.
The Encoding standard defines all such details so that implementations do
not have to reverse-engineer each other.
This module has encoding labels and BOM detection,
but the actual implementation for encoders and decoders is Python’s.
|
================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 2
SIZE: 10.33 KB
PATH: scripts\freecad_env\Lib\site-packages\webencodings\__init__.py
ENCODING: utf-8
```py
# coding: utf-8
"""
webencodings
~~~~~~~~~~~~
This is a Python implementation of the `WHATWG Encoding standard
<http://encoding.spec.whatwg.org/>`. See README for details.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import codecs
from .labels import LABELS
# Package version string.
VERSION = '0.5.1'
# Some names in Encoding are not valid Python aliases. Remap these.
PYTHON_NAMES = {
    'iso-8859-8-i': 'iso-8859-8',
    'x-mac-cyrillic': 'mac-cyrillic',
    'macintosh': 'mac-roman',
    'windows-874': 'cp874'}
# Memoizes Encoding objects by canonical name; filled lazily by lookup().
CACHE = {}
def ascii_lower(string):
    r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.

    :param string: An Unicode string.
    :returns: A new Unicode string.

    This implements `ASCII case-insensitive
    <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_
    matching of encoding labels (the same matching is also used for
    `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_).
    Unlike :meth:`~py:str.lower`, non-ASCII characters are left untouched:

    >>> keyword = u'Bac\N{KELVIN SIGN}ground'
    >>> assert keyword.lower() == u'background'
    >>> assert ascii_lower(keyword) != keyword.lower()
    >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'
    """
    # bytes.lower() only maps A-Z, so a round-trip through UTF-8 yields an
    # ASCII-only lowercasing (and is faster than unicode.translate()).
    utf8_bytes = string.encode('utf8')
    return utf8_bytes.lower().decode('utf8')
def lookup(label):
    """
    Look for an encoding by its label.
    This is the spec's `get an encoding
    <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm.
    Supported labels are listed there.

    :param label: A string.
    :returns:
        An :class:`Encoding` object, or :obj:`None` for an unknown label.
    """
    # The spec strips only ASCII whitespace (U+0009, U+000A, U+000C, U+000D,
    # U+0020) and matches labels ASCII-case-insensitively.
    normalized = ascii_lower(label.strip('\t\n\f\r '))
    name = LABELS.get(normalized)
    if name is None:
        return None
    cached = CACHE.get(name)
    if cached is not None:
        return cached
    if name == 'x-user-defined':
        # x-user-defined has no stdlib codec; use the bundled implementation.
        from .x_user_defined import codec_info
    else:
        # Any python_name value that gets to here should be valid.
        codec_info = codecs.lookup(PYTHON_NAMES.get(name, name))
    encoding = Encoding(name, codec_info)
    CACHE[name] = encoding
    return encoding
def _get_encoding(encoding_or_label):
    """
    Accept either an encoding object or label.

    :param encoding_or_label: An :class:`Encoding` object or a label string.
    :returns: An :class:`Encoding` object.
    :raises: :exc:`~exceptions.LookupError` for an unknown label.
    """
    # Anything that already carries codec_info is treated as an Encoding.
    if hasattr(encoding_or_label, 'codec_info'):
        return encoding_or_label
    found = lookup(encoding_or_label)
    if found is not None:
        return found
    raise LookupError('Unknown encoding label: %r' % encoding_or_label)
class Encoding(object):
    """Represents a character encoding such as UTF-8,
    that can be used for decoding or encoding.

    .. attribute:: name

        Canonical name of the encoding

    .. attribute:: codec_info

        The actual implementation of the encoding,
        a stdlib :class:`~codecs.CodecInfo` object.
        See :func:`codecs.register`.
    """

    def __init__(self, name, codec_info):
        self.name = name
        self.codec_info = codec_info

    def __repr__(self):
        return '<Encoding %s>' % self.name
#: The UTF-8 encoding. Should be used for new content and formats.
UTF8 = lookup('utf-8')
# Internal UTF-16 variants, used by _detect_bom() for BOM-based detection.
_UTF16LE = lookup('utf-16le')
_UTF16BE = lookup('utf-16be')
def decode(input, fallback_encoding, errors='replace'):
    """
    Decode a single string.

    :param input: A byte string
    :param fallback_encoding:
        An :class:`Encoding` object or a label string.
        The encoding to use if :obj:`input` does not have a BOM.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :return:
        A ``(output, encoding)`` tuple of an Unicode string
        and an :obj:`Encoding`.
    """
    # Resolve the label first so an invalid one fails even when a BOM wins.
    fallback_encoding = _get_encoding(fallback_encoding)
    bom_encoding, input = _detect_bom(input)
    # Any BOM takes precedence over the caller-supplied fallback.
    encoding = bom_encoding or fallback_encoding
    output, _ = encoding.codec_info.decode(input, errors)
    return output, encoding
def _detect_bom(input):
"""Return (bom_encoding, input), with any BOM removed from the input."""
if input.startswith(b'\xFF\xFE'):
return _UTF16LE, input[2:]
if input.startswith(b'\xFE\xFF'):
return _UTF16BE, input[2:]
if input.startswith(b'\xEF\xBB\xBF'):
return UTF8, input[3:]
return None, input
def encode(input, encoding=UTF8, errors='strict'):
    """
    Encode a single string.

    :param input: An Unicode string.
    :param encoding: An :class:`Encoding` object or a label string.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :return: A byte string.
    """
    resolved = _get_encoding(encoding)
    output, _ = resolved.codec_info.encode(input, errors)
    return output
def iter_decode(input, fallback_encoding, errors='replace'):
    """
    "Pull"-based decoder.

    :param input:
        An iterable of byte strings.
        The input is first consumed just enough to determine the encoding
        based on the presence of a BOM,
        then consumed on demand when the return value is.
    :param fallback_encoding:
        An :class:`Encoding` object or a label string.
        The encoding to use if :obj:`input` does not have a BOM.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :returns:
        An ``(output, encoding)`` tuple.
        :obj:`output` is an iterable of Unicode strings,
        :obj:`encoding` is the :obj:`Encoding` that is being used.
    """
    decoder = IncrementalDecoder(fallback_encoding, errors)
    stream = _iter_decode_generator(input, decoder)
    # The generator yields the resolved Encoding first, then text chunks;
    # pulling one item here forces enough input to decide the encoding.
    encoding = next(stream)
    return stream, encoding
def _iter_decode_generator(input, decoder):
    """Return a generator that first yields the :obj:`Encoding`,
    then yields output chunks as Unicode strings.
    """
    decode = decoder.decode
    input = iter(input)
    # Feed chunks until the decoder has seen enough bytes to choose an
    # encoding, i.e. until it produces its first non-empty output.
    for chunck in input:
        output = decode(chunck)
        if output:
            assert decoder.encoding is not None
            yield decoder.encoding
            yield output
            break
    else:
        # Input exhausted without determining the encoding
        output = decode(b'', final=True)
        assert decoder.encoding is not None
        yield decoder.encoding
        if output:
            yield output
        return
    # Encoding is known now; stream the remaining input straight through.
    for chunck in input:
        output = decode(chunck)
        if output:
            yield output
    # Flush whatever the incremental decoder still has buffered.
    output = decode(b'', final=True)
    if output:
        yield output
def iter_encode(input, encoding=UTF8, errors='strict'):
    """
    "Pull"-based encoder.

    :param input: An iterable of Unicode strings.
    :param encoding: An :class:`Encoding` object or a label string.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :returns: An iterable of byte strings.
    """
    # Building the encoder here makes an invalid label fail early, before
    # the returned iterable is ever consumed.
    encoder = IncrementalEncoder(encoding, errors)
    return _iter_encode_generator(input, encoder.encode)
def _iter_encode_generator(input, encode):
for chunck in input:
output = encode(chunck)
if output:
yield output
output = encode('', final=True)
if output:
yield output
class IncrementalDecoder(object):
    """
    “Push”-based decoder.

    :param fallback_encoding:
        An :class:`Encoding` object or a label string.
        The encoding to use if :obj:`input` does not have a BOM.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    """
    def __init__(self, fallback_encoding, errors='replace'):
        # Fail early if `encoding` is an invalid label.
        self._fallback_encoding = _get_encoding(fallback_encoding)
        self._errors = errors
        # Bytes held back until there is enough data to decide about a BOM.
        self._buffer = b''
        # Underlying stdlib incremental decode function, once chosen.
        self._decoder = None
        #: The actual :class:`Encoding` that is being used,
        #: or :obj:`None` if that is not determined yet.
        #: (Ie. if there is not enough input yet to determine
        #: if there is a BOM.)
        self.encoding = None # Not known yet.
    def decode(self, input, final=False):
        """Decode one chunk of the input.

        :param input: A byte string.
        :param final:
            Indicate that no more input is available.
            Must be :obj:`True` if this is the last call.
        :returns: An Unicode string.
        """
        decoder = self._decoder
        if decoder is not None:
            # Encoding already decided: delegate straight to the codec.
            return decoder(input, final)
        input = self._buffer + input
        encoding, input = _detect_bom(input)
        if encoding is None:
            # A BOM is at most 3 bytes; wait for more data unless final.
            if len(input) < 3 and not final: # Not enough data yet.
                self._buffer = input
                return ''
            else: # No BOM
                encoding = self._fallback_encoding
        decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
        self._decoder = decoder
        self.encoding = encoding
        return decoder(input, final)
class IncrementalEncoder(object):
    """
    “Push”-based encoder.

    :param encoding: An :class:`Encoding` object or a label string.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.

    .. method:: encode(input, final=False)

        :param input: An Unicode string.
        :param final:
            Indicate that no more input is available.
            Must be :obj:`True` if this is the last call.
        :returns: A byte string.
    """
    def __init__(self, encoding=UTF8, errors='strict'):
        # Resolve the label (or pass an Encoding through) eagerly so that an
        # invalid label fails here, not on the first encode() call; then
        # expose the stdlib incremental encoder's bound method directly.
        encoding = _get_encoding(encoding)
        self.encode = encoding.codec_info.incrementalencoder(errors).encode
```
|
==============================================================================================================
SOURCE CODE FILE: labels.py
LINES: 1
SIZE: 8.77 KB
PATH: scripts\freecad_env\Lib\site-packages\webencodings\labels.py
ENCODING: utf-8
```py
"""
webencodings.labels
~~~~~~~~~~~~~~~~~~~
Map encoding labels to their name.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
# XXX Do not edit!
# This file is automatically generated by mklabels.py
LABELS = {
'unicode-1-1-utf-8': 'utf-8',
'utf-8': 'utf-8',
'utf8': 'utf-8',
'866': 'ibm866',
'cp866': 'ibm866',
'csibm866': 'ibm866',
'ibm866': 'ibm866',
'csisolatin2': 'iso-8859-2',
'iso-8859-2': 'iso-8859-2',
'iso-ir-101': 'iso-8859-2',
'iso8859-2': 'iso-8859-2',
'iso88592': 'iso-8859-2',
'iso_8859-2': 'iso-8859-2',
'iso_8859-2:1987': 'iso-8859-2',
'l2': 'iso-8859-2',
'latin2': 'iso-8859-2',
'csisolatin3': 'iso-8859-3',
'iso-8859-3': 'iso-8859-3',
'iso-ir-109': 'iso-8859-3',
'iso8859-3': 'iso-8859-3',
'iso88593': 'iso-8859-3',
'iso_8859-3': 'iso-8859-3',
'iso_8859-3:1988': 'iso-8859-3',
'l3': 'iso-8859-3',
'latin3': 'iso-8859-3',
'csisolatin4': 'iso-8859-4',
'iso-8859-4': 'iso-8859-4',
'iso-ir-110': 'iso-8859-4',
'iso8859-4': 'iso-8859-4',
'iso88594': 'iso-8859-4',
'iso_8859-4': 'iso-8859-4',
'iso_8859-4:1988': 'iso-8859-4',
'l4': 'iso-8859-4',
'latin4': 'iso-8859-4',
'csisolatincyrillic': 'iso-8859-5',
'cyrillic': 'iso-8859-5',
'iso-8859-5': 'iso-8859-5',
'iso-ir-144': 'iso-8859-5',
'iso8859-5': 'iso-8859-5',
'iso88595': 'iso-8859-5',
'iso_8859-5': 'iso-8859-5',
'iso_8859-5:1988': 'iso-8859-5',
'arabic': 'iso-8859-6',
'asmo-708': 'iso-8859-6',
'csiso88596e': 'iso-8859-6',
'csiso88596i': 'iso-8859-6',
'csisolatinarabic': 'iso-8859-6',
'ecma-114': 'iso-8859-6',
'iso-8859-6': 'iso-8859-6',
'iso-8859-6-e': 'iso-8859-6',
'iso-8859-6-i': 'iso-8859-6',
'iso-ir-127': 'iso-8859-6',
'iso8859-6': 'iso-8859-6',
'iso88596': 'iso-8859-6',
'iso_8859-6': 'iso-8859-6',
'iso_8859-6:1987': 'iso-8859-6',
'csisolatingreek': 'iso-8859-7',
'ecma-118': 'iso-8859-7',
'elot_928': 'iso-8859-7',
'greek': 'iso-8859-7',
'greek8': 'iso-8859-7',
'iso-8859-7': 'iso-8859-7',
'iso-ir-126': 'iso-8859-7',
'iso8859-7': 'iso-8859-7',
'iso88597': 'iso-8859-7',
'iso_8859-7': 'iso-8859-7',
'iso_8859-7:1987': 'iso-8859-7',
'sun_eu_greek': 'iso-8859-7',
'csiso88598e': 'iso-8859-8',
'csisolatinhebrew': 'iso-8859-8',
'hebrew': 'iso-8859-8',
'iso-8859-8': 'iso-8859-8',
'iso-8859-8-e': 'iso-8859-8',
'iso-ir-138': 'iso-8859-8',
'iso8859-8': 'iso-8859-8',
'iso88598': 'iso-8859-8',
'iso_8859-8': 'iso-8859-8',
'iso_8859-8:1988': 'iso-8859-8',
'visual': 'iso-8859-8',
'csiso88598i': 'iso-8859-8-i',
'iso-8859-8-i': 'iso-8859-8-i',
'logical': 'iso-8859-8-i',
'csisolatin6': 'iso-8859-10',
'iso-8859-10': 'iso-8859-10',
'iso-ir-157': 'iso-8859-10',
'iso8859-10': 'iso-8859-10',
'iso885910': 'iso-8859-10',
'l6': 'iso-8859-10',
'latin6': 'iso-8859-10',
'iso-8859-13': 'iso-8859-13',
'iso8859-13': 'iso-8859-13',
'iso885913': 'iso-8859-13',
'iso-8859-14': 'iso-8859-14',
'iso8859-14': 'iso-8859-14',
'iso885914': 'iso-8859-14',
'csisolatin9': 'iso-8859-15',
'iso-8859-15': 'iso-8859-15',
'iso8859-15': 'iso-8859-15',
'iso885915': 'iso-8859-15',
'iso_8859-15': 'iso-8859-15',
'l9': 'iso-8859-15',
'iso-8859-16': 'iso-8859-16',
'cskoi8r': 'koi8-r',
'koi': 'koi8-r',
'koi8': 'koi8-r',
'koi8-r': 'koi8-r',
'koi8_r': 'koi8-r',
'koi8-u': 'koi8-u',
'csmacintosh': 'macintosh',
'mac': 'macintosh',
'macintosh': 'macintosh',
'x-mac-roman': 'macintosh',
'dos-874': 'windows-874',
'iso-8859-11': 'windows-874',
'iso8859-11': 'windows-874',
'iso885911': 'windows-874',
'tis-620': 'windows-874',
'windows-874': 'windows-874',
'cp1250': 'windows-1250',
'windows-1250': 'windows-1250',
'x-cp1250': 'windows-1250',
'cp1251': 'windows-1251',
'windows-1251': 'windows-1251',
'x-cp1251': 'windows-1251',
'ansi_x3.4-1968': 'windows-1252',
'ascii': 'windows-1252',
'cp1252': 'windows-1252',
'cp819': 'windows-1252',
'csisolatin1': 'windows-1252',
'ibm819': 'windows-1252',
'iso-8859-1': 'windows-1252',
'iso-ir-100': 'windows-1252',
'iso8859-1': 'windows-1252',
'iso88591': 'windows-1252',
'iso_8859-1': 'windows-1252',
'iso_8859-1:1987': 'windows-1252',
'l1': 'windows-1252',
'latin1': 'windows-1252',
'us-ascii': 'windows-1252',
'windows-1252': 'windows-1252',
'x-cp1252': 'windows-1252',
'cp1253': 'windows-1253',
'windows-1253': 'windows-1253',
'x-cp1253': 'windows-1253',
'cp1254': 'windows-1254',
'csisolatin5': 'windows-1254',
'iso-8859-9': 'windows-1254',
'iso-ir-148': 'windows-1254',
'iso8859-9': 'windows-1254',
'iso88599': 'windows-1254',
'iso_8859-9': 'windows-1254',
'iso_8859-9:1989': 'windows-1254',
'l5': 'windows-1254',
'latin5': 'windows-1254',
'windows-1254': 'windows-1254',
'x-cp1254': 'windows-1254',
'cp1255': 'windows-1255',
'windows-1255': 'windows-1255',
'x-cp1255': 'windows-1255',
'cp1256': 'windows-1256',
'windows-1256': 'windows-1256',
'x-cp1256': 'windows-1256',
'cp1257': 'windows-1257',
'windows-1257': 'windows-1257',
'x-cp1257': 'windows-1257',
'cp1258': 'windows-1258',
'windows-1258': 'windows-1258',
'x-cp1258': 'windows-1258',
'x-mac-cyrillic': 'x-mac-cyrillic',
'x-mac-ukrainian': 'x-mac-cyrillic',
'chinese': 'gbk',
'csgb2312': 'gbk',
'csiso58gb231280': 'gbk',
'gb2312': 'gbk',
'gb_2312': 'gbk',
'gb_2312-80': 'gbk',
'gbk': 'gbk',
'iso-ir-58': 'gbk',
'x-gbk': 'gbk',
'gb18030': 'gb18030',
'hz-gb-2312': 'hz-gb-2312',
'big5': 'big5',
'big5-hkscs': 'big5',
'cn-big5': 'big5',
'csbig5': 'big5',
'x-x-big5': 'big5',
'cseucpkdfmtjapanese': 'euc-jp',
'euc-jp': 'euc-jp',
'x-euc-jp': 'euc-jp',
'csiso2022jp': 'iso-2022-jp',
'iso-2022-jp': 'iso-2022-jp',
'csshiftjis': 'shift_jis',
'ms_kanji': 'shift_jis',
'shift-jis': 'shift_jis',
'shift_jis': 'shift_jis',
'sjis': 'shift_jis',
'windows-31j': 'shift_jis',
'x-sjis': 'shift_jis',
'cseuckr': 'euc-kr',
'csksc56011987': 'euc-kr',
'euc-kr': 'euc-kr',
'iso-ir-149': 'euc-kr',
'korean': 'euc-kr',
'ks_c_5601-1987': 'euc-kr',
'ks_c_5601-1989': 'euc-kr',
'ksc5601': 'euc-kr',
'ksc_5601': 'euc-kr',
'windows-949': 'euc-kr',
'csiso2022kr': 'iso-2022-kr',
'iso-2022-kr': 'iso-2022-kr',
'utf-16be': 'utf-16be',
'utf-16': 'utf-16le',
'utf-16le': 'utf-16le',
'x-user-defined': 'x-user-defined',
}
```
|
================================================================================================================
SOURCE CODE FILE: mklabels.py
LINES: 2
SIZE: 1.27 KB
PATH: scripts\freecad_env\Lib\site-packages\webencodings\mklabels.py
ENCODING: utf-8
```py
"""
webencodings.mklabels
~~~~~~~~~~~~~~~~~~~~~
Regenerate the webencodings.labels module.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
import json
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
def assert_lower(string):
    """Sanity-check that *string* is already lower case, then return it."""
    assert string == string.lower()
    return string
def generate(url):
    """Fetch the WHATWG encodings.json from *url* and return the full
    source text of the labels.py module as a string.
    """
    parts = ['''\
"""
webencodings.labels
~~~~~~~~~~~~~~~~~~~
Map encoding labels to their name.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
# XXX Do not edit!
# This file is automatically generated by mklabels.py
LABELS = {
''']
    # Flatten the JSON structure into (label, name) repr pairs; repr() output
    # is used directly as source text, with any u'' prefix stripped.
    labels = [
    (repr(assert_lower(label)).lstrip('u'),
    repr(encoding['name']).lstrip('u'))
    for category in json.loads(urlopen(url).read().decode('ascii'))
    for encoding in category['encodings']
    for label in encoding['labels']]
    # Pad each key so the generated dict values line up in one column.
    max_len = max(len(label) for label, name in labels)
    parts.extend(
    ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
    for label, name in labels)
    parts.append('}')
    return ''.join(parts)
# Running this module prints the regenerated labels.py source to stdout.
if __name__ == '__main__':
    print(generate('http://encoding.spec.whatwg.org/encodings.json'))
```
|
=============================================================================================================
SOURCE CODE FILE: tests.py
LINES: 2
SIZE: 6.41 KB
PATH: scripts\freecad_env\Lib\site-packages\webencodings\tests.py
ENCODING: utf-8
```py
# coding: utf-8
"""
webencodings.tests
~~~~~~~~~~~~~~~~~~
A basic test suite for Encoding.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
IncrementalDecoder, IncrementalEncoder, UTF8)
def assert_raises(exception, function, *args, **kwargs):
    """Assert that function(*args, **kwargs) raises *exception*.

    Other exception types propagate unchanged; a call that raises nothing
    fails with :exc:`AssertionError`.
    """
    try:
        function(*args, **kwargs)
    except exception:
        return
    raise AssertionError('Did not raise %s.' % exception)  # pragma: no cover
def test_labels():
assert lookup('utf-8').name == 'utf-8'
assert lookup('Utf-8').name == 'utf-8'
assert lookup('UTF-8').name == 'utf-8'
assert lookup('utf8').name == 'utf-8'
assert lookup('utf8').name == 'utf-8'
assert lookup('utf8 ').name == 'utf-8'
assert lookup(' \r\nutf8\t').name == 'utf-8'
assert lookup('u8') is None # Python label.
assert lookup('utf-8 ') is None # Non-ASCII white space.
assert lookup('US-ASCII').name == 'windows-1252'
assert lookup('iso-8859-1').name == 'windows-1252'
assert lookup('latin1').name == 'windows-1252'
assert lookup('LATIN1').name == 'windows-1252'
assert lookup('latin-1') is None
assert lookup('LATİN1') is None # ASCII-only case insensitivity.
def test_all_labels():
    """Every registered label must handle empty input through all five
    public APIs (decode/encode, iter_decode/iter_encode, incremental)."""
    for label in LABELS:
        # decode() returns (text, encoding-actually-used).
        assert decode(b'', label) == ('', lookup(label))
        assert encode('', label) == b''
        for repeat in [0, 1, 12]:
            # Any number of empty chunks yields no output chunks.
            output, _ = iter_decode([b''] * repeat, label)
            assert list(output) == []
            assert list(iter_encode([''] * repeat, label)) == []
        decoder = IncrementalDecoder(label)
        assert decoder.decode(b'') == ''
        assert decoder.decode(b'', final=True) == ''
        encoder = IncrementalEncoder(label)
        assert encoder.encode('') == b''
        assert encoder.encode('', final=True) == b''
    # All encoding names are valid labels too:
    for name in set(LABELS.values()):
        assert lookup(name).name == name
def test_invalid_label():
    """Every public entry point rejects an unknown label with LookupError."""
    cases = [
        (decode, (b'\xEF\xBB\xBF\xc3\xa9', 'invalid')),
        (encode, ('é', 'invalid')),
        (iter_decode, ([], 'invalid')),
        (iter_encode, ([], 'invalid')),
        (IncrementalDecoder, ('invalid',)),
        (IncrementalEncoder, ('invalid',)),
    ]
    for function, args in cases:
        assert_raises(LookupError, function, *args)
def test_decode():
    """decode() picks the encoding from a BOM when present, otherwise from
    the fallback label, and returns (text, encoding-actually-used)."""
    # 'latin1' is a label for windows-1252, so 0x80 decodes to '€'.
    assert decode(b'\x80', 'latin1') == ('€', lookup('latin1'))
    assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1'))
    assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8'))
    assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8'))
    assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii'))
    # A BOM overrides the fallback label and is stripped from the output:
    assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8'))  # UTF-8 with BOM
    assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be'))  # UTF-16-BE with BOM
    assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le'))  # UTF-16-LE with BOM
    assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be'))
    assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le'))
    # Explicit UTF-16 labels decode in the stated byte order; the plain
    # 'UTF-16' label decodes as little-endian when there is no BOM:
    assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be'))
    assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le'))
    assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le'))
    assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be'))
    assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le'))
    assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le'))
def test_encode():
    """encode() uses the requested encoding and never prepends a BOM."""
    cases = [
        ('latin1', b'\xe9'),
        ('utf8', b'\xc3\xa9'),
        ('utf8', b'\xc3\xa9'),
        ('utf-16', b'\xe9\x00'),
        ('utf-16le', b'\xe9\x00'),
        ('utf-16be', b'\x00\xe9'),
    ]
    for label, expected in cases:
        assert encode('é', label) == expected
def test_iter_decode():
    """Chunked decoding must behave exactly as if the chunks had been
    concatenated first — including BOM detection split across chunks."""
    def iter_decode_to_string(input, fallback_encoding):
        # Collapse the streamed output for easy comparison.
        output, _encoding = iter_decode(input, fallback_encoding)
        return ''.join(output)
    assert iter_decode_to_string([], 'latin1') == ''
    assert iter_decode_to_string([b''], 'latin1') == ''
    assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
    assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
    assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
    assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
    assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
    # A UTF-8 BOM switches the decoder to UTF-8, even when split:
    assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
    assert iter_decode_to_string([
        b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
    # A truncated trailing sequence decodes to U+FFFD at end of input:
    assert iter_decode_to_string([
        b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
    assert iter_decode_to_string([
        b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
    # The BOM itself never appears in the output:
    assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
    # An incomplete BOM is not a BOM; it decodes with the fallback:
    assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
    assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
    assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
    assert iter_decode_to_string([
        b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
    assert iter_decode_to_string([
        b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
def test_iter_encode():
    """Chunked encoding must produce the same bytes as a single encode()."""
    cases = [
        ([], 'latin1', b''),
        ([''], 'latin1', b''),
        (['é'], 'latin1', b'\xe9'),
        (['', 'é', '', ''], 'latin1', b'\xe9'),
        (['', 'é', '', ''], 'utf-16', b'\xe9\x00'),
        (['', 'é', '', ''], 'utf-16le', b'\xe9\x00'),
        (['', 'é', '', ''], 'utf-16be', b'\x00\xe9'),
        (['', 'h\uF7E9', '', 'llo'], 'x-user-defined', b'h\xe9llo'),
    ]
    for chunks, label, expected in cases:
        assert b''.join(iter_encode(chunks, label)) == expected
def test_x_user_defined():
    """x-user-defined maps bytes 0x80-0xFF to U+F780-U+F7FF and back.

    Bug fix: the original test assigned a non-trivial encoded/decoded pair
    and then immediately shadowed it with ``b'aa'``/``'aa'``, so the
    interesting high-byte case was never exercised.  Test both pairs.
    """
    cases = [
        (b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca',
         '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'),
        (b'aa', 'aa'),
    ]
    for encoded, decoded in cases:
        assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
        assert encode(decoded, 'x-user-defined') == encoded
```
|
======================================================================================================================
SOURCE CODE FILE: x_user_defined.py
LINES: 2
SIZE: 4.21 KB
PATH: scripts\freecad_env\Lib\site-packages\webencodings\x_user_defined.py
ENCODING: utf-8
```py
# coding: utf-8
"""
webencodings.x_user_defined
~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the x-user-defined encoding.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless x-user-defined codec backed by the module-level charmap
    tables (bytes 0x00-0x7F map to themselves, 0x80-0xFF to U+F780-U+F7FF)."""

    def encode(self, input, errors='strict'):
        # charmap_encode returns (encoded_bytes, length_consumed).
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        # charmap_decode returns (decoded_text, length_consumed).
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding is stateless, so each chunk is
    encoded independently and ``final`` needs no special handling."""

    def encode(self, input, final=False):
        # [0] drops the length-consumed part of charmap_encode's result.
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; every byte maps to exactly one character, so no
    state is carried between chunks."""

    def decode(self, input, final=False):
        # [0] drops the length-consumed part of charmap_decode's result.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream API: Codec supplies encode(); codecs.StreamWriter the buffering.
    pass

class StreamReader(Codec, codecs.StreamReader):
    # Stream API: Codec supplies decode(); codecs.StreamReader the buffering.
    pass
### encodings module API
# Bundle of all codec entry points in the layout Python's ``codecs``
# registry expects (see codecs.CodecInfo).
codec_info = codecs.CodecInfo(
    name='x-user-defined',
    encode=Codec().encode,
    decode=Codec().decode,
    incrementalencoder=IncrementalEncoder,
    incrementaldecoder=IncrementalDecoder,
    streamreader=StreamReader,
    streamwriter=StreamWriter,
)
### Decoding Table

# Equivalent to the 256-character literal it replaces (the original file
# carried the generator as a comment: ``chr(c if c < 128 else c + 0xF700)``):
# bytes 0x00-0x7F decode to themselves, bytes 0x80-0xFF decode to the
# private-use range U+F780-U+F7FF at the same offset.
decoding_table = ''.join(
    chr(byte) if byte < 128 else chr(byte + 0xF700)
    for byte in range(256)
)
### Encoding table
# Inverse mapping (character -> byte), built once from the decoding table.
encoding_table = codecs.charmap_build(decoding_table)
```
|
==========================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.12 KB
PATH: scripts\freecad_env\Lib\site-packages\xxhash\__init__.py
ENCODING: utf-8
```py
from ._xxhash import (
xxh32,
xxh32_digest,
xxh32_intdigest,
xxh32_hexdigest,
xxh64,
xxh64_digest,
xxh64_intdigest,
xxh64_hexdigest,
xxh3_64,
xxh3_64_digest,
xxh3_64_intdigest,
xxh3_64_hexdigest,
xxh3_128,
xxh3_128_digest,
xxh3_128_intdigest,
xxh3_128_hexdigest,
XXHASH_VERSION,
)
from .version import VERSION, VERSION_TUPLE
# Convenience aliases: ``xxh128`` is the XXH3 128-bit variant under a
# shorter name.
xxh128 = xxh3_128
xxh128_hexdigest = xxh3_128_hexdigest
xxh128_intdigest = xxh3_128_intdigest
xxh128_digest = xxh3_128_digest

# Algorithm names this package accepts (xxh128 and xxh3_128 are the same
# algorithm).  Set literal replaces the non-idiomatic ``set([...])``.
algorithms_available = {
    "xxh32",
    "xxh64",
    "xxh3_64",
    "xxh128",
    "xxh3_128",
}

# Public API: hashers re-exported from the C extension, the xxh128
# aliases, and version metadata.
__all__ = [
    "xxh32",
    "xxh32_digest",
    "xxh32_intdigest",
    "xxh32_hexdigest",
    "xxh64",
    "xxh64_digest",
    "xxh64_intdigest",
    "xxh64_hexdigest",
    "xxh3_64",
    "xxh3_64_digest",
    "xxh3_64_intdigest",
    "xxh3_64_hexdigest",
    "xxh3_128",
    "xxh3_128_digest",
    "xxh3_128_intdigest",
    "xxh3_128_hexdigest",
    "xxh128",
    "xxh128_digest",
    "xxh128_intdigest",
    "xxh128_hexdigest",
    "VERSION",
    "VERSION_TUPLE",
    "XXHASH_VERSION",
    "algorithms_available",
]
```
|
=========================================================================================================
SOURCE CODE FILE: version.py
LINES: 1
SIZE: 0.04 KB
PATH: scripts\freecad_env\Lib\site-packages\xxhash\version.py
ENCODING: utf-8
```py
# NOTE(review): this file looks auto-generated by the build backend; if
# editing by hand, keep VERSION and VERSION_TUPLE describing the same release.
VERSION = "3.5.0"
VERSION_TUPLE = (3, 5, 0)
```
|
========================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 12.02 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\__init__.py
ENCODING: utf-8
```py
from .error import *
from .tokens import *
from .events import *
from .nodes import *
from .loader import *
from .dumper import *
__version__ = '6.0.2'
try:
from .cyaml import *
__with_libyaml__ = True
except ImportError:
__with_libyaml__ = False
import io
#------------------------------------------------------------------------------
# XXX "Warnings control" is now deprecated. Leaving in the API function to not
# break code that uses it.
#------------------------------------------------------------------------------
def warnings(settings=None):
    """Deprecated no-op retained so existing callers keep working.

    Historically configured "warnings control"; it now only mirrors the old
    return convention: an empty dict when queried, None when given settings.
    """
    return {} if settings is None else None
#------------------------------------------------------------------------------
def scan(stream, Loader=Loader):
    """
    Scan a YAML stream and produce scanning tokens.
    """
    loader = Loader(stream)
    try:
        while loader.check_token():
            yield loader.get_token()
    finally:
        # Dispose even if the consumer abandons the generator early.
        loader.dispose()

def parse(stream, Loader=Loader):
    """
    Parse a YAML stream and produce parsing events.
    """
    loader = Loader(stream)
    try:
        while loader.check_event():
            yield loader.get_event()
    finally:
        loader.dispose()

def compose(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_node()
    finally:
        loader.dispose()

def compose_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    loader = Loader(stream)
    try:
        while loader.check_node():
            yield loader.get_node()
    finally:
        loader.dispose()

def load(stream, Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    ``Loader`` has no default here: callers must choose a loader class
    (e.g. SafeLoader) explicitly; see the safe_load/full_load wrappers.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_data()
    finally:
        loader.dispose()

def load_all(stream, Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    ``Loader`` is required, as in load().
    """
    loader = Loader(stream)
    try:
        while loader.check_data():
            yield loader.get_data()
    finally:
        loader.dispose()

def full_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve all tags except those known to be
    unsafe on untrusted input.
    """
    return load(stream, FullLoader)

def full_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    Resolve all tags except those known to be
    unsafe on untrusted input.
    """
    return load_all(stream, FullLoader)

def safe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve only basic YAML tags. This is known
    to be safe for untrusted input.
    """
    return load(stream, SafeLoader)

def safe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    Resolve only basic YAML tags. This is known
    to be safe for untrusted input.
    """
    return load_all(stream, SafeLoader)

def unsafe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve all tags, even those known to be
    unsafe on untrusted input.
    """
    return load(stream, UnsafeLoader)

def unsafe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    Resolve all tags, even those known to be
    unsafe on untrusted input.
    """
    return load_all(stream, UnsafeLoader)
def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    # When we create the stream ourselves, remember how to read it back.
    getvalue = None
    if stream is None:
        stream = io.StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            dumper.emit(event)
    finally:
        dumper.dispose()
    # Returns None when the caller supplied its own stream.
    if getvalue:
        return getvalue()

def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # No encoding -> str output; an encoding -> bytes output.
        if encoding is None:
            stream = io.StringIO()
        else:
            stream = io.BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for node in nodes:
            dumper.serialize(node)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)

def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=False,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None, sort_keys=True):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # No encoding -> str output; an encoding -> bytes output.
        if encoding is None:
            stream = io.StringIO()
        else:
            stream = io.BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
    try:
        dumper.open()
        for data in documents:
            dumper.represent(data)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=Dumper, **kwds)

def safe_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)

def safe_dump(data, stream=None, **kwds):
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
def add_implicit_resolver(tag, regexp, first=None,
        Loader=None, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None:
        # Default registration targets Loader, FullLoader and UnsafeLoader
        # (not SafeLoader).  ``loader`` is the yaml.loader submodule,
        # reachable here as a package attribute once it has been imported.
        loader.Loader.add_implicit_resolver(tag, regexp, first)
        loader.FullLoader.add_implicit_resolver(tag, regexp, first)
        loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
    else:
        Loader.add_implicit_resolver(tag, regexp, first)
    Dumper.add_implicit_resolver(tag, regexp, first)

def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None:
        loader.Loader.add_path_resolver(tag, path, kind)
        loader.FullLoader.add_path_resolver(tag, path, kind)
        loader.UnsafeLoader.add_path_resolver(tag, path, kind)
    else:
        Loader.add_path_resolver(tag, path, kind)
    Dumper.add_path_resolver(tag, path, kind)

def add_constructor(tag, constructor, Loader=None):
    """
    Add a constructor for the given tag.
    Constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        # SafeLoader is not part of the default registration set.
        loader.Loader.add_constructor(tag, constructor)
        loader.FullLoader.add_constructor(tag, constructor)
        loader.UnsafeLoader.add_constructor(tag, constructor)
    else:
        Loader.add_constructor(tag, constructor)

def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
        loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
        loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        Loader.add_multi_constructor(tag_prefix, multi_constructor)

def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    Dumper.add_representer(data_type, representer)

def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Multi-representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    Dumper.add_multi_representer(data_type, multi_representer)
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    On subclass creation, if the class body defines a non-None ``yaml_tag``,
    registers the class's from_yaml/to_yaml hooks with its configured
    loader(s) and dumper so instances round-trip under that tag.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Only classes that declare their own yaml_tag register themselves;
        # intermediate base classes inheriting the attribute do not.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            # yaml_loader may be a single loader class or a list of them.
            if isinstance(cls.yaml_loader, list):
                for loader in cls.yaml_loader:
                    loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            else:
                cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
class YAMLObject(metaclass=YAMLObjectMetaclass):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.

    Subclasses set ``yaml_tag`` (and optionally override ``yaml_loader`` /
    ``yaml_dumper``); the metaclass then registers the from_yaml/to_yaml
    hooks for that tag automatically.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Default loaders that receive the constructor (SafeLoader excluded).
    yaml_loader = [Loader, FullLoader, UnsafeLoader]
    yaml_dumper = Dumper

    # Set by subclasses; None here means "do not register this base class".
    yaml_tag = None
    yaml_flow_style = None

    @classmethod
    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
```
|
========================================================================================================
SOURCE CODE FILE: composer.py
LINES: 1
SIZE: 4.77 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\composer.py
ENCODING: utf-8
```py
__all__ = ['Composer', 'ComposerError']
from .error import MarkedYAMLError
from .events import *
from .nodes import *
class ComposerError(MarkedYAMLError):
    # Raised for structural problems while composing the node tree:
    # undefined aliases, duplicate anchors, or extra documents where a
    # single one was expected.
    pass
class Composer:
    """Builds representation-node trees from parser events.

    Mixed into a Loader; the event methods (check_event/get_event/
    peek_event) and resolver methods (descend_resolver/ascend_resolver/
    resolve) are provided by the other classes composed into the Loader.
    """

    def __init__(self):
        # Maps anchor name -> already-composed node, for resolving aliases.
        self.anchors = {}

    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()
        # If there are more documents available?
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        """Compose exactly one document; raise if the stream holds more."""
        # Drop the STREAM-START event.
        self.get_event()
        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()
        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)
        # Drop the STREAM-END event.
        self.get_event()
        return document

    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.get_event()
        # Anchors are document-scoped: reset between documents.
        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        """Compose one node (dispatching on the next event's type)."""
        if self.check_event(AliasEvent):
            # An alias simply reuses the node composed for its anchor.
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor, event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurrence"
                        % anchor, self.anchors[anchor].start_mark,
                        "second occurrence", event.start_mark)
        # Tell the resolver where we are so implicit tags resolve correctly.
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        # '!' (and a missing tag) means "resolve implicitly".
        if tag is None or tag == '!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Register the anchor before composing children, so the sequence
        # can contain aliases back to itself.
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Registered before children for the same self-reference reason.
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            # Stored as (key, value) pairs; duplicate keys are not rejected
            # here (see the commented-out check above).
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
```
|
===========================================================================================================
SOURCE CODE FILE: constructor.py
LINES: 1
SIZE: 27.97 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\constructor.py
ENCODING: utf-8
```py
__all__ = [
'BaseConstructor',
'SafeConstructor',
'FullConstructor',
'UnsafeConstructor',
'Constructor',
'ConstructorError'
]
from .error import *
from .nodes import *
import collections.abc, datetime, base64, binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
    # Raised when a node cannot be turned into a Python object (bad tag,
    # unhashable mapping key, unconstructable recursion, ...).
    pass
class BaseConstructor:
    """Turns representation nodes into Python objects.

    Mixed into a Loader; check_node/get_node/get_single_node come from the
    composer.  Subclasses register per-tag construction functions through
    add_constructor/add_multi_constructor.
    """

    # Per-class registries, copy-on-first-write so a subclass never mutates
    # its parent's table (see add_constructor/add_multi_constructor below).
    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        self.constructed_objects = {}   # node -> finished object (memo)
        self.recursive_objects = {}     # nodes currently being constructed
        self.state_generators = []      # paused two-step constructors
        self.deep_construct = False

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def check_state_key(self, key):
        """Block special attributes/methods from being set in a newly created
        object, to prevent user-controlled methods from being called during
        deserialization"""
        # get_state_keys_blacklist_regexp is supplied by a subclass
        # (presumably FullConstructor -- not visible here).
        if self.get_state_keys_blacklist_regexp().match(key):
            raise ConstructorError(None, None,
                "blacklisted key '%s' in instance state found" % (key,), None)

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        """Construct a whole document, then drain any paused generators
        (two-step constructors that fill objects in after creation)."""
        data = self.construct_object(node)
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        # Per-document state: memoization does not span documents.
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct one node, dispatching on its tag.

        Dispatch order: exact tag match, then multi-constructor prefix
        match, then the None multi-constructor/constructor fallbacks, then
        a default based on the node's kind.
        """
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if tag_prefix is not None and node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # Two-step constructor: first yield is the (possibly empty)
            # object; the rest of the generator fills it in later.
            generator = data
            data = next(generator)
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        """Default scalar construction: return the node's string value."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        """Default sequence construction: a list of constructed children."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        """Default mapping construction: a dict of constructed pairs."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            if not isinstance(key, collections.abc.Hashable):
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unhashable key", key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_pairs(self, node, deep=False):
        """Like construct_mapping, but as a list of (key, value) tuples —
        preserves order/duplicates and allows unhashable keys."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs

    @classmethod
    def add_constructor(cls, tag, constructor):
        # Copy-on-write so registration on a subclass does not leak into
        # its parents.
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor

    @classmethod
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
class SafeConstructor(BaseConstructor):
def construct_scalar(self, node):
if isinstance(node, MappingNode):
for key_node, value_node in node.value:
if key_node.tag == 'tag:yaml.org,2002:value':
return self.construct_scalar(value_node)
return super().construct_scalar(node)
def flatten_mapping(self, node):
merge = []
index = 0
while index < len(node.value):
key_node, value_node = node.value[index]
if key_node.tag == 'tag:yaml.org,2002:merge':
del node.value[index]
if isinstance(value_node, MappingNode):
self.flatten_mapping(value_node)
merge.extend(value_node.value)
elif isinstance(value_node, SequenceNode):
submerge = []
for subnode in value_node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing a mapping",
node.start_mark,
"expected a mapping for merging, but found %s"
% subnode.id, subnode.start_mark)
self.flatten_mapping(subnode)
submerge.append(subnode.value)
submerge.reverse()
for value in submerge:
merge.extend(value)
else:
raise ConstructorError("while constructing a mapping", node.start_mark,
"expected a mapping or list of mappings for merging, but found %s"
% value_node.id, value_node.start_mark)
elif key_node.tag == 'tag:yaml.org,2002:value':
key_node.tag = 'tag:yaml.org,2002:str'
index += 1
else:
index += 1
if merge:
node.value = merge + node.value
def construct_mapping(self, node, deep=False):
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return super().construct_mapping(node, deep=deep)
def construct_yaml_null(self, node):
self.construct_scalar(node)
return None
bool_values = {
'yes': True,
'no': False,
'true': True,
'false': False,
'on': True,
'off': False,
}
def construct_yaml_bool(self, node):
value = self.construct_scalar(node)
return self.bool_values[value.lower()]
def construct_yaml_int(self, node):
value = self.construct_scalar(node)
value = value.replace('_', '')
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
if value == '0':
return 0
elif value.startswith('0b'):
return sign*int(value[2:], 2)
elif value.startswith('0x'):
return sign*int(value[2:], 16)
elif value[0] == '0':
return sign*int(value, 8)
elif ':' in value:
digits = [int(part) for part in value.split(':')]
digits.reverse()
base = 1
value = 0
for digit in digits:
value += digit*base
base *= 60
return sign*value
else:
return sign*int(value)
inf_value = 1e300
while inf_value != inf_value*inf_value:
inf_value *= inf_value
nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
def construct_yaml_float(self, node):
    """Construct a float from a YAML 1.1 float scalar.

    Handles an optional sign, '_' separators, '.inf'/'.nan' specials,
    sexagesimal ('1:30.5') and plain decimal forms.
    """
    text = self.construct_scalar(node).replace('_', '').lower()
    sign = -1 if text[0] == '-' else +1
    if text[0] in '+-':
        text = text[1:]
    if text == '.inf':
        return sign * self.inf_value
    if text == '.nan':
        # NaN carries no sign here; always return the shared quiet NaN.
        return self.nan_value
    if ':' in text:
        # Sexagesimal: accumulate from the least-significant segment so the
        # floating-point rounding matches the reference implementation.
        segments = [float(part) for part in text.split(':')]
        scale = 1
        total = 0.0
        for segment in reversed(segments):
            total += segment * scale
            scale *= 60
        return sign * total
    return sign * float(text)
def construct_yaml_binary(self, node):
    """Decode a base64-encoded !!binary scalar into bytes.

    Raises ConstructorError when the scalar is not ASCII or is not
    valid base64.
    """
    try:
        encoded = self.construct_scalar(node).encode('ascii')
    except UnicodeEncodeError as exc:
        raise ConstructorError(None, None,
                "failed to convert base64 data into ascii: %s" % exc,
                node.start_mark)
    # decodestring was the pre-3.1 spelling of decodebytes; prefer the
    # modern name when available.
    decoder = getattr(base64, 'decodebytes', None) or base64.decodestring
    try:
        return decoder(encoded)
    except binascii.Error as exc:
        raise ConstructorError(None, None,
                "failed to decode base64 data: %s" % exc, node.start_mark)
timestamp_regexp = re.compile(
r'''^(?P<year>[0-9][0-9][0-9][0-9])
-(?P<month>[0-9][0-9]?)
-(?P<day>[0-9][0-9]?)
(?:(?:[Tt]|[ \t]+)
(?P<hour>[0-9][0-9]?)
:(?P<minute>[0-9][0-9])
:(?P<second>[0-9][0-9])
(?:\.(?P<fraction>[0-9]*))?
(?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
(?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
def construct_yaml_timestamp(self, node):
    """Construct a date or datetime from a !!timestamp scalar.

    Returns a ``datetime.date`` when only the date part is present, else a
    ``datetime.datetime`` (timezone-aware when an offset or 'Z' is given,
    naive otherwise).
    """
    # Construct (and discard) the scalar so a non-scalar node fails with
    # the usual error; the regexp works on the raw node value.  The
    # original bound this to an unused local.
    self.construct_scalar(node)
    # NOTE(review): the resolver is presumed to guarantee a regexp match
    # here; an unresolvable value would raise AttributeError on `match`.
    match = self.timestamp_regexp.match(node.value)
    values = match.groupdict()
    year = int(values['year'])
    month = int(values['month'])
    day = int(values['day'])
    if not values['hour']:
        # Date-only form.
        return datetime.date(year, month, day)
    hour = int(values['hour'])
    minute = int(values['minute'])
    second = int(values['second'])
    fraction = 0
    tzinfo = None
    if values['fraction']:
        # Truncate to microseconds and right-pad with zeros ('.1' -> 100000).
        fraction = values['fraction'][:6]
        while len(fraction) < 6:
            fraction += '0'
        fraction = int(fraction)
    if values['tz_sign']:
        # Explicit +/-HH[:MM] offset.
        tz_hour = int(values['tz_hour'])
        tz_minute = int(values['tz_minute'] or 0)
        delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
        if values['tz_sign'] == '-':
            delta = -delta
        tzinfo = datetime.timezone(delta)
    elif values['tz']:
        # Bare 'Z' means UTC.
        tzinfo = datetime.timezone.utc
    return datetime.datetime(year, month, day, hour, minute, second, fraction,
                             tzinfo=tzinfo)
def construct_yaml_omap(self, node):
    """Construct a !!omap as a list of (key, value) pairs.

    Generator-based two-phase construction: the empty list is yielded
    first so recursive/aliased references resolve, then filled in.
    """
    # Note: we do not check for duplicate keys, because it's too
    # CPU-expensive.
    omap = []
    yield omap
    if not isinstance(node, SequenceNode):
        raise ConstructorError("while constructing an ordered map", node.start_mark,
                "expected a sequence, but found %s" % node.id, node.start_mark)
    for subnode in node.value:
        # Each item must be a single-pair mapping: `- key: value`.
        if not isinstance(subnode, MappingNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a mapping of length 1, but found %s" % subnode.id,
                    subnode.start_mark)
        if len(subnode.value) != 1:
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a single mapping item, but found %d items" % len(subnode.value),
                    subnode.start_mark)
        key_node, value_node = subnode.value[0]
        key = self.construct_object(key_node)
        value = self.construct_object(value_node)
        omap.append((key, value))
def construct_yaml_pairs(self, node):
    """Construct a !!pairs as a list of (key, value) tuples.

    Unlike !!omap, duplicate keys are explicitly allowed by the type;
    the implementation is otherwise identical to construct_yaml_omap.
    """
    # Note: the same code as `construct_yaml_omap`.
    pairs = []
    yield pairs
    if not isinstance(node, SequenceNode):
        raise ConstructorError("while constructing pairs", node.start_mark,
                "expected a sequence, but found %s" % node.id, node.start_mark)
    for subnode in node.value:
        if not isinstance(subnode, MappingNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a mapping of length 1, but found %s" % subnode.id,
                    subnode.start_mark)
        if len(subnode.value) != 1:
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a single mapping item, but found %d items" % len(subnode.value),
                    subnode.start_mark)
        key_node, value_node = subnode.value[0]
        key = self.construct_object(key_node)
        value = self.construct_object(value_node)
        pairs.append((key, value))
def construct_yaml_set(self, node):
    """Construct a !!set from a mapping node (values are ignored).

    Two-phase: yield the empty set first so aliases can reference it.
    """
    data = set()
    yield data
    value = self.construct_mapping(node)
    data.update(value)
def construct_yaml_str(self, node):
    """Construct a !!str scalar as a Python str."""
    return self.construct_scalar(node)
def construct_yaml_seq(self, node):
    """Construct a !!seq as a list (two-phase, alias-safe)."""
    data = []
    yield data
    data.extend(self.construct_sequence(node))
def construct_yaml_map(self, node):
    """Construct a !!map as a dict (two-phase, alias-safe)."""
    data = {}
    yield data
    value = self.construct_mapping(node)
    data.update(value)
def construct_yaml_object(self, node, cls):
    """Construct an instance of `cls` from a mapping node.

    The instance is created with __new__ (bypassing __init__) and yielded
    before its state is set, so self-referencing documents work.  State is
    applied via __setstate__ when defined, else by updating __dict__.
    """
    data = cls.__new__(cls)
    yield data
    if hasattr(data, '__setstate__'):
        # Deep construction: __setstate__ needs fully built values.
        state = self.construct_mapping(node, deep=True)
        data.__setstate__(state)
    else:
        state = self.construct_mapping(node)
        data.__dict__.update(state)
def construct_undefined(self, node):
    """Fallback for tags with no registered constructor: always raise."""
    raise ConstructorError(None, None,
            "could not determine a constructor for the tag %r" % node.tag,
            node.start_mark)
# Register the standard YAML 1.1 tag constructors on SafeConstructor;
# the final `None` entry is the catch-all for unknown tags.
SafeConstructor.add_constructor(
        'tag:yaml.org,2002:null',
        SafeConstructor.construct_yaml_null)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:bool',
        SafeConstructor.construct_yaml_bool)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:int',
        SafeConstructor.construct_yaml_int)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:float',
        SafeConstructor.construct_yaml_float)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:binary',
        SafeConstructor.construct_yaml_binary)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:timestamp',
        SafeConstructor.construct_yaml_timestamp)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:omap',
        SafeConstructor.construct_yaml_omap)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:pairs',
        SafeConstructor.construct_yaml_pairs)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:set',
        SafeConstructor.construct_yaml_set)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:str',
        SafeConstructor.construct_yaml_str)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:seq',
        SafeConstructor.construct_yaml_seq)

SafeConstructor.add_constructor(
        'tag:yaml.org,2002:map',
        SafeConstructor.construct_yaml_map)

# Unknown tags are rejected rather than silently ignored.
SafeConstructor.add_constructor(None,
        SafeConstructor.construct_undefined)
class FullConstructor(SafeConstructor):
    """Constructor for `yaml.full_load`: supports python/* tags but refuses
    to call arbitrary callables or set dunder state keys.

    SECURITY: still able to instantiate arbitrary *classes* reachable from
    already-imported modules; do not use on untrusted input.
    """

    # 'extend' is blacklisted because it is used by
    # construct_python_object_apply to add `listitems` to a newly generated
    # python instance
    def get_state_keys_blacklist(self):
        # Regex fragments for state keys that must never be set from YAML.
        return ['^extend$', '^__.*__$']

    def get_state_keys_blacklist_regexp(self):
        # Compiled lazily and cached on the instance.
        if not hasattr(self, 'state_keys_blacklist_regexp'):
            self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
        return self.state_keys_blacklist_regexp

    def construct_python_str(self, node):
        """!!python/str -> str."""
        return self.construct_scalar(node)

    def construct_python_unicode(self, node):
        """!!python/unicode (Python 2 legacy) -> str."""
        return self.construct_scalar(node)

    def construct_python_bytes(self, node):
        """!!python/bytes: base64-decode the scalar into bytes."""
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            # decodestring is the pre-3.1 name kept for old interpreters.
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    def construct_python_long(self, node):
        """!!python/long (Python 2 legacy) -> int."""
        return self.construct_yaml_int(node)

    def construct_python_complex(self, node):
        """!!python/complex: parse via the complex() literal syntax."""
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node):
        """!!python/tuple -> tuple."""
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name, mark, unsafe=False):
        """Resolve a module name from a tag suffix.

        Only already-imported modules are returned unless `unsafe` is set,
        in which case the module is imported on demand.
        """
        if not name:
            raise ConstructorError("while constructing a Python module", mark,
                    "expected non-empty name appended to the tag", mark)
        if unsafe:
            try:
                __import__(name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python module", mark,
                        "cannot find module %r (%s)" % (name, exc), mark)
        if name not in sys.modules:
            raise ConstructorError("while constructing a Python module", mark,
                    "module %r is not imported" % name, mark)
        return sys.modules[name]

    def find_python_name(self, name, mark, unsafe=False):
        """Resolve a dotted `module.attr` name; bare names come from builtins.

        Importing the module on demand happens only when `unsafe` is set.
        """
        if not name:
            raise ConstructorError("while constructing a Python object", mark,
                    "expected non-empty name appended to the tag", mark)
        if '.' in name:
            module_name, object_name = name.rsplit('.', 1)
        else:
            module_name = 'builtins'
            object_name = name
        if unsafe:
            try:
                __import__(module_name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python object", mark,
                        "cannot find module %r (%s)" % (module_name, exc), mark)
        if module_name not in sys.modules:
            raise ConstructorError("while constructing a Python object", mark,
                    "module %r is not imported" % module_name, mark)
        module = sys.modules[module_name]
        if not hasattr(module, object_name):
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find %r in the module %r"
                    % (object_name, module.__name__), mark)
        return getattr(module, object_name)

    def construct_python_name(self, suffix, node):
        """!!python/name:mod.attr — the node body must be empty."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python name", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix, node):
        """!!python/module:name — the node body must be empty."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python module", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(self, suffix, node,
            args=None, kwds=None, newobj=False, unsafe=False):
        """Create an instance of the class named by the tag suffix.

        `newobj` selects cls.__new__ over calling cls(); unless `unsafe`,
        only real classes (not arbitrary callables) are accepted.
        """
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if not (unsafe or isinstance(cls, type)):
            raise ConstructorError("while constructing a Python instance", node.start_mark,
                    "expected a class, but found %r" % type(cls),
                    node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)

    def set_python_instance_state(self, instance, state, unsafe=False):
        """Apply pickled-style state to `instance`.

        Prefers __setstate__; otherwise handles the (dict, slots-dict)
        tuple convention.  Unless `unsafe`, each key is vetted via
        check_state_key (defined elsewhere in this file) against the
        blacklist to block e.g. '__class__' injection.
        """
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                if not unsafe and state:
                    for key in state.keys():
                        self.check_state_key(key)
                instance.__dict__.update(state)
            elif state:
                slotstate.update(state)
            for key, value in slotstate.items():
                if not unsafe:
                    self.check_state_key(key)
                setattr(instance, key, value)

    def construct_python_object(self, suffix, node):
        """!!python/object:module.name { ... state ... }

        Two-phase: the bare instance is yielded before state is applied,
        so self-referencing documents resolve.
        """
        # Format:
        #   !!python/object:module.name { ... state ... }
        instance = self.make_python_instance(suffix, node, newobj=True)
        yield instance
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(self, suffix, node, newobj=False):
        """!!python/object/apply (and /new): call or __new__ a class with
        args/kwds, then apply state, listitems and dictitems."""
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            args = self.construct_sequence(node, deep=True)
            kwds = {}
            state = {}
            listitems = []
            dictitems = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if state:
            self.set_python_instance_state(instance, state)
        if listitems:
            instance.extend(listitems)
        if dictitems:
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix, node):
        """!!python/object/new: like /apply but via cls.__new__."""
        return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the python/* tags handled by FullConstructor.  Note the
# dangerous object/module tags are registered on UnsafeConstructor only.
FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/none',
        FullConstructor.construct_yaml_null)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/bool',
        FullConstructor.construct_yaml_bool)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/str',
        FullConstructor.construct_python_str)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/unicode',
        FullConstructor.construct_python_unicode)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/bytes',
        FullConstructor.construct_python_bytes)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/int',
        FullConstructor.construct_yaml_int)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/long',
        FullConstructor.construct_python_long)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/float',
        FullConstructor.construct_yaml_float)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/complex',
        FullConstructor.construct_python_complex)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/list',
        FullConstructor.construct_yaml_seq)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/tuple',
        FullConstructor.construct_python_tuple)

FullConstructor.add_constructor(
        'tag:yaml.org,2002:python/dict',
        FullConstructor.construct_yaml_map)

# Multi-constructor: the tag suffix after 'name:' is passed through.
FullConstructor.add_multi_constructor(
        'tag:yaml.org,2002:python/name:',
        FullConstructor.construct_python_name)
class UnsafeConstructor(FullConstructor):
    """FullConstructor with all safety checks disabled.

    Every resolution/instantiation hook simply delegates to the parent
    with unsafe=True, allowing on-demand imports, arbitrary callables,
    and unvetted state keys.  Never use on untrusted input.
    """

    def find_python_module(self, name, mark):
        return super().find_python_module(name, mark, unsafe=True)

    def find_python_name(self, name, mark):
        return super().find_python_name(name, mark, unsafe=True)

    def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
        return super().make_python_instance(
            suffix, node, args, kwds, newobj, unsafe=True)

    def set_python_instance_state(self, instance, state):
        return super().set_python_instance_state(
            instance, state, unsafe=True)
# The arbitrary-code-capable tags are only wired up on UnsafeConstructor.
UnsafeConstructor.add_multi_constructor(
        'tag:yaml.org,2002:python/module:',
        UnsafeConstructor.construct_python_module)

UnsafeConstructor.add_multi_constructor(
        'tag:yaml.org,2002:python/object:',
        UnsafeConstructor.construct_python_object)

UnsafeConstructor.add_multi_constructor(
        'tag:yaml.org,2002:python/object/new:',
        UnsafeConstructor.construct_python_object_new)

UnsafeConstructor.add_multi_constructor(
        'tag:yaml.org,2002:python/object/apply:',
        UnsafeConstructor.construct_python_object_apply)
# Constructor is same as UnsafeConstructor. Need to leave this in place in case
# people have extended it directly.
class Constructor(UnsafeConstructor):
    """Backwards-compatibility alias for UnsafeConstructor."""
    pass
```
|
=====================================================================================================
SOURCE CODE FILE: cyaml.py
LINES: 1
SIZE: 3.76 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\cyaml.py
ENCODING: utf-8
```py
__all__ = [
'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper'
]
from yaml._yaml import CParser, CEmitter
from .constructor import *
from .serializer import *
from .representer import *
from .resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """Loader backed by the libyaml C parser with bare-bones construction."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """C-parser loader restricted to safe standard YAML types."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CFullLoader(CParser, FullConstructor, Resolver):
    """C-parser loader supporting python/* tags with safety checks."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        FullConstructor.__init__(self)
        Resolver.__init__(self)
class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
    """C-parser loader with no safety checks — never use on untrusted input."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        UnsafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """Legacy full-power C-parser loader (Constructor == UnsafeConstructor)."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper backed by the libyaml C emitter with base representation.

    Emission options are forwarded to CEmitter; representation options
    (default_style, default_flow_style, sort_keys) to the representer.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # NOTE(review): initializes Representer rather than the inherited
        # BaseRepresenter; Representer defines no __init__ of its own here,
        # so this resolves to the same base initializer — confirm before
        # changing.
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """C-emitter dumper restricted to safe standard YAML representations."""

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Full-featured C-emitter dumper (can represent python/* objects)."""

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
```
|
======================================================================================================
SOURCE CODE FILE: dumper.py
LINES: 1
SIZE: 2.77 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\dumper.py
ENCODING: utf-8
```py
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from .emitter import *
from .serializer import *
from .representer import *
from .resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
    """Pure-Python dumper with base representation.

    Splits options across the Emitter (layout), Serializer (document
    framing) and representer (object -> node conversion) initializers.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # NOTE(review): initializes Representer rather than the inherited
        # BaseRepresenter; resolves to the same base initializer today —
        # confirm before changing.
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
    """Pure-Python dumper restricted to safe standard YAML representations."""

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
    """Full-featured pure-Python dumper (can represent python/* objects)."""

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
```
|
=======================================================================================================
SOURCE CODE FILE: emitter.py
LINES: 32
SIZE: 42.00 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\emitter.py
ENCODING: utf-8
```py
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from .error import YAMLError
from .events import *
class EmitterError(YAMLError):
    """Raised when the emitter receives an event it cannot handle in its
    current state."""
    pass
class ScalarAnalysis:
    """Record of which output styles are legal for a given scalar value.

    Produced by Emitter.analyze_scalar and consulted when choosing
    between plain, quoted and block scalar styles.
    """

    def __init__(self, scalar, empty, multiline,
            allow_flow_plain, allow_block_plain,
            allow_single_quoted, allow_double_quoted,
            allow_block):
        self.scalar = scalar                            # the raw text
        self.empty = empty                              # zero-length value
        self.multiline = multiline                      # contains line breaks
        self.allow_flow_plain = allow_flow_plain        # plain style in flow context
        self.allow_block_plain = allow_block_plain      # plain style in block context
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block                  # literal/folded block styles
class Emitter:
DEFAULT_TAG_PREFIXES = {
'!' : '!',
'tag:yaml.org,2002:' : '!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """Set up the emitter state machine and formatting preferences.

    :param stream: writable object; must have `write` and possibly `flush`.
    :param canonical: emit in canonical form (explicit tags, quoting).
    :param indent: indentation step, accepted only in range 2..9.
    :param width: preferred line width; must exceed twice the indent.
    :param allow_unicode: pass non-ASCII characters through unescaped.
    :param line_break: one of '\\r', '\\n', '\\r\\n'; defaults to '\\n'.
    """
    # The stream should have the methods `write` and possibly `flush`.
    self.stream = stream

    # Encoding can be overridden by STREAM-START.
    self.encoding = None

    # Emitter is a state machine with a stack of states to handle nested
    # structures.
    self.states = []
    self.state = self.expect_stream_start

    # Current event and the event queue.
    self.events = []
    self.event = None

    # The current indentation level and the stack of previous indents.
    self.indents = []
    self.indent = None

    # Flow level.
    self.flow_level = 0

    # Contexts.
    self.root_context = False
    self.sequence_context = False
    self.mapping_context = False
    self.simple_key_context = False

    # Characteristics of the last emitted character:
    #  - current position.
    #  - is it a whitespace?
    #  - is it an indention character
    #    (indentation space, '-', '?', or ':')?
    self.line = 0
    self.column = 0
    self.whitespace = True
    self.indention = True

    # Whether the document requires an explicit document indicator
    self.open_ended = False

    # Formatting details.
    self.canonical = canonical
    self.allow_unicode = allow_unicode
    self.best_indent = 2
    if indent and 1 < indent < 10:
        self.best_indent = indent
    self.best_width = 80
    if width and width > self.best_indent*2:
        self.best_width = width
    self.best_line_break = '\n'
    if line_break in ['\r', '\n', '\r\n']:
        self.best_line_break = line_break

    # Tag prefixes.
    self.tag_prefixes = None

    # Prepared anchor and tag.
    self.prepared_anchor = None
    self.prepared_tag = None

    # Scalar analysis and style.
    self.analysis = None
    self.style = None
def dispose(self):
    """Break the state/bound-method reference cycles so the emitter can
    be garbage-collected promptly."""
    # Reset the state attributes (to clear self-references)
    self.states = []
    self.state = None
def emit(self, event):
    """Queue `event` and run the state machine while enough lookahead
    events are buffered (some states peek ahead before writing)."""
    self.events.append(event)
    while not self.need_more_events():
        self.event = self.events.pop(0)
        self.state()
        self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
    """Return True while the queue lacks the lookahead the head event needs.

    Document/sequence/mapping starts require 1, 2 or 3 following events
    respectively (enough to detect empty collections / empty documents);
    everything else can be emitted immediately.
    """
    if not self.events:
        return True
    head = self.events[0]
    if isinstance(head, DocumentStartEvent):
        return self.need_events(1)
    if isinstance(head, SequenceStartEvent):
        return self.need_events(2)
    if isinstance(head, MappingStartEvent):
        return self.need_events(3)
    return False
def need_events(self, count):
    """Return True if fewer than `count` lookahead events are buffered
    beyond the head, stopping early once the head's collection is closed
    or the stream ends."""
    depth = 0
    for pending in self.events[1:]:
        if isinstance(pending, (DocumentStartEvent, CollectionStartEvent)):
            depth += 1
        elif isinstance(pending, (DocumentEndEvent, CollectionEndEvent)):
            depth -= 1
        elif isinstance(pending, StreamEndEvent):
            depth = -1
        if depth < 0:
            # The head event's construct is already complete in the queue.
            return False
    return len(self.events) < count + 1
def increase_indent(self, flow=False, indentless=False):
    """Push the current indent and move to the next indentation level.

    The first level starts at best_indent in flow context and at column 0
    in block context; `indentless` keeps the level unchanged (used for
    block sequences nested in mappings).
    """
    self.indents.append(self.indent)
    if self.indent is None:
        self.indent = self.best_indent if flow else 0
    elif not indentless:
        self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
    """Initial state: consume STREAM-START and move to document handling."""
    if isinstance(self.event, StreamStartEvent):
        # Only encode ourselves when the stream does not declare its own
        # encoding (i.e. it is a byte stream).
        if self.event.encoding and not hasattr(self.stream, 'encoding'):
            self.encoding = self.event.encoding
        self.write_stream_start()
        self.state = self.expect_first_document_start
    else:
        raise EmitterError("expected StreamStartEvent, but got %s"
                % self.event)
def expect_nothing(self):
    """Terminal state after STREAM-END: any further event is an error."""
    raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
    """First-document variant: allows omitting the '---' marker."""
    return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
    """Emit a document prologue (directives, '---') or finish the stream.

    :param first: the first document may omit '---' when it carries no
        version/tags directives and is not canonical.
    """
    if isinstance(self.event, DocumentStartEvent):
        if (self.event.version or self.event.tags) and self.open_ended:
            # Directives after an open-ended document need a '...' first.
            self.write_indicator('...', True)
            self.write_indent()
        if self.event.version:
            version_text = self.prepare_version(self.event.version)
            self.write_version_directive(version_text)
        self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
        if self.event.tags:
            handles = sorted(self.event.tags.keys())
            for handle in handles:
                prefix = self.event.tags[handle]
                # Remember prefix->handle so tags can be abbreviated later.
                self.tag_prefixes[prefix] = handle
                handle_text = self.prepare_tag_handle(handle)
                prefix_text = self.prepare_tag_prefix(prefix)
                self.write_tag_directive(handle_text, prefix_text)
        implicit = (first and not self.event.explicit and not self.canonical
                and not self.event.version and not self.event.tags
                and not self.check_empty_document())
        if not implicit:
            self.write_indent()
            self.write_indicator('---', True)
            if self.canonical:
                self.write_indent()
        self.state = self.expect_document_root
    elif isinstance(self.event, StreamEndEvent):
        if self.open_ended:
            # Close the last document explicitly before ending the stream.
            self.write_indicator('...', True)
            self.write_indent()
        self.write_stream_end()
        self.state = self.expect_nothing
    else:
        raise EmitterError("expected DocumentStartEvent, but got %s"
                % self.event)
def expect_document_end(self):
    """Emit the document epilogue ('...' when explicit) and flush."""
    if isinstance(self.event, DocumentEndEvent):
        self.write_indent()
        if self.event.explicit:
            self.write_indicator('...', True)
            self.write_indent()
        self.flush_stream()
        self.state = self.expect_document_start
    else:
        raise EmitterError("expected DocumentEndEvent, but got %s"
                % self.event)
def expect_document_root(self):
    """Emit the document's single root node, then expect the document end."""
    self.states.append(self.expect_document_end)
    self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
        simple_key=False):
    """Dispatch the current event to the alias/scalar/sequence/mapping
    handler, recording which syntactic context we are emitting into."""
    self.root_context = root
    self.sequence_context = sequence
    self.mapping_context = mapping
    self.simple_key_context = simple_key
    if isinstance(self.event, AliasEvent):
        self.expect_alias()
    elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
        self.process_anchor('&')
        self.process_tag()
        if isinstance(self.event, ScalarEvent):
            self.expect_scalar()
        elif isinstance(self.event, SequenceStartEvent):
            # Flow style is forced inside flow context, in canonical mode,
            # when requested by the event, or for empty collections.
            if self.flow_level or self.canonical or self.event.flow_style \
                    or self.check_empty_sequence():
                self.expect_flow_sequence()
            else:
                self.expect_block_sequence()
        elif isinstance(self.event, MappingStartEvent):
            if self.flow_level or self.canonical or self.event.flow_style \
                    or self.check_empty_mapping():
                self.expect_flow_mapping()
            else:
                self.expect_block_mapping()
    else:
        raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
    """Emit an alias ('*anchor') and return to the parent state."""
    if self.event.anchor is None:
        raise EmitterError("anchor is not specified for alias")
    self.process_anchor('*')
    self.state = self.states.pop()
def expect_scalar(self):
    """Emit a scalar value; the temporary indent level only affects
    multi-line scalar wrapping."""
    self.increase_indent(flow=True)
    self.process_scalar()
    self.indent = self.indents.pop()
    self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
    """Open a flow sequence: write '[', bump flow level and indent."""
    self.write_indicator('[', True, whitespace=True)
    self.flow_level += 1
    self.increase_indent(flow=True)
    self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
    """First flow-sequence item: no leading ',' (or close '[]' if empty)."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        self.write_indicator(']', False)
        self.state = self.states.pop()
    else:
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
    """Subsequent flow-sequence item: ','-separated, or close with ']'."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        if self.canonical:
            # Canonical form keeps a trailing comma before the bracket.
            self.write_indicator(',', False)
            self.write_indent()
        self.write_indicator(']', False)
        self.state = self.states.pop()
    else:
        self.write_indicator(',', False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
    """Open a flow mapping: write '{', bump flow level and indent."""
    self.write_indicator('{', True, whitespace=True)
    self.flow_level += 1
    self.increase_indent(flow=True)
    self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
    """First flow-mapping key: no leading ',' (or close '{}' if empty)."""
    if isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        self.write_indicator('}', False)
        self.state = self.states.pop()
    else:
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        if not self.canonical and self.check_simple_key():
            # Short key: emit `key: value` without the '?' indicator.
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator('?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
    """Subsequent flow-mapping key: ','-separated, or close with '}'."""
    if isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        if self.canonical:
            # Canonical form keeps a trailing comma before the brace.
            self.write_indicator(',', False)
            self.write_indent()
        self.write_indicator('}', False)
        self.state = self.states.pop()
    else:
        self.write_indicator(',', False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        if not self.canonical and self.check_simple_key():
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator('?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
    """Value after a simple key: ':' hugs the key."""
    self.write_indicator(':', False)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
    """Value after an explicit ('?') key: ':' needs surrounding space."""
    if self.canonical or self.column > self.best_width:
        self.write_indent()
    self.write_indicator(':', True)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
    """Open a block sequence; nested directly under a mapping key it is
    emitted 'indentless' (the '-' aligns with the key)."""
    indentless = (self.mapping_context and not self.indention)
    self.increase_indent(flow=False, indentless=indentless)
    self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
    """First item: an immediate SequenceEnd is invalid in block style."""
    return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
    """Emit one '- item' line, or pop the state on SequenceEnd."""
    if not first and isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    else:
        self.write_indent()
        self.write_indicator('-', True, indention=True)
        self.states.append(self.expect_block_sequence_item)
        self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
    """Open a block mapping at the next indentation level."""
    self.increase_indent(flow=False)
    self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
    """First key: an immediate MappingEnd is invalid in block style."""
    return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
    """Emit a block-mapping key (simple `key:` or explicit `? key`),
    or pop the state on MappingEnd."""
    if not first and isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    else:
        self.write_indent()
        if self.check_simple_key():
            self.states.append(self.expect_block_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator('?', True, indention=True)
            self.states.append(self.expect_block_mapping_value)
            self.expect_node(mapping=True)
    def expect_block_mapping_simple_value(self):
        """Emit ':' after a simple key in a block mapping, then the value node."""
        self.write_indicator(':', False)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
    def expect_block_mapping_value(self):
        """Emit ': ' on a fresh line for an explicit-key block mapping value."""
        self.write_indent()
        self.write_indicator(':', True, indention=True)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
# Checkers.
    def check_empty_sequence(self):
        """Return True if the current event starts a sequence that ends immediately."""
        return (isinstance(self.event, SequenceStartEvent) and self.events
                and isinstance(self.events[0], SequenceEndEvent))
    def check_empty_mapping(self):
        """Return True if the current event starts a mapping that ends immediately."""
        return (isinstance(self.event, MappingStartEvent) and self.events
                and isinstance(self.events[0], MappingEndEvent))
    def check_empty_document(self):
        """Return True if the next document contains only an empty implicit scalar."""
        if not isinstance(self.event, DocumentStartEvent) or not self.events:
            return False
        event = self.events[0]
        return (isinstance(event, ScalarEvent) and event.anchor is None
                and event.tag is None and event.implicit and event.value == '')
    def check_simple_key(self):
        """Return True if the current node is compact enough to be emitted
        as a simple (implicit) mapping key.

        Side effect: caches the prepared anchor/tag and scalar analysis.
        """
        length = 0
        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
            if self.prepared_anchor is None:
                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
            length += len(self.prepared_anchor)
        if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
                and self.event.tag is not None:
            if self.prepared_tag is None:
                self.prepared_tag = self.prepare_tag(self.event.tag)
            length += len(self.prepared_tag)
        if isinstance(self.event, ScalarEvent):
            if self.analysis is None:
                self.analysis = self.analyze_scalar(self.event.value)
            length += len(self.analysis.scalar)
        # A simple key must stay short (< 128 chars) and fit on one line.
        return (length < 128 and (isinstance(self.event, AliasEvent)
            or (isinstance(self.event, ScalarEvent)
                    and not self.analysis.empty and not self.analysis.multiline)
            or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
    def process_anchor(self, indicator):
        """Write the event's anchor (if any) prefixed with *indicator*,
        then reset the prepared-anchor cache."""
        if self.event.anchor is None:
            self.prepared_anchor = None
            return
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        if self.prepared_anchor:
            self.write_indicator(indicator+self.prepared_anchor, True)
        self.prepared_anchor = None
    def process_tag(self):
        """Write the node's tag when it must appear in the output; tags that
        are implicit for the chosen style are omitted.

        Raises EmitterError when a tag is required but not specified.
        """
        tag = self.event.tag
        if isinstance(self.event, ScalarEvent):
            if self.style is None:
                self.style = self.choose_scalar_style()
            # Omit the tag when the style makes it implicit: implicit[0]
            # applies to the plain style, implicit[1] to the other styles.
            if ((not self.canonical or tag is None) and
                ((self.style == '' and self.event.implicit[0])
                        or (self.style != '' and self.event.implicit[1]))):
                self.prepared_tag = None
                return
            if self.event.implicit[0] and tag is None:
                # Non-plain style with a plain-implicit tag: emit a bare '!'.
                tag = '!'
                self.prepared_tag = None
        else:
            if (not self.canonical or tag is None) and self.event.implicit:
                self.prepared_tag = None
                return
        if tag is None:
            raise EmitterError("tag is not specified")
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(tag)
        if self.prepared_tag:
            self.write_indicator(self.prepared_tag, True)
        self.prepared_tag = None
    def choose_scalar_style(self):
        """Pick the output style for the current scalar: '' (plain), "'",
        '"', '|' or '>', honoring the event's requested style when legal."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        # Canonical output and explicit '"' requests always double-quote.
        if self.event.style == '"' or self.canonical:
            return '"'
        if not self.event.style and self.event.implicit[0]:
            if (not (self.simple_key_context and
                    (self.analysis.empty or self.analysis.multiline))
                and (self.flow_level and self.analysis.allow_flow_plain
                    or (not self.flow_level and self.analysis.allow_block_plain))):
                return ''
        if self.event.style and self.event.style in '|>':
            # Block styles are only legal outside flow context and simple keys.
            if (not self.flow_level and not self.simple_key_context
                    and self.analysis.allow_block):
                return self.event.style
        if not self.event.style or self.event.style == '\'':
            if (self.analysis.allow_single_quoted and
                    not (self.simple_key_context and self.analysis.multiline)):
                return '\''
        # Fallback: double quotes can represent any content.
        return '"'
    def process_scalar(self):
        """Write the current scalar with the chosen style, then reset the
        per-node analysis/style caches."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        # Line folding is disabled inside simple keys.
        split = (not self.simple_key_context)
        #if self.analysis.multiline and split    \
        #        and (not self.style or self.style in '\'\"'):
        #    self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == '\'':
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
        elif self.style == '|':
            self.write_literal(self.analysis.scalar)
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return '%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != '!' or handle[-1] != '!':
raise EmitterError("tag handle must start and end with '!': %r" % handle)
for ch in handle[1:-1]:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch, handle))
return handle
def prepare_tag_prefix(self, prefix):
if not prefix:
raise EmitterError("tag prefix must not be empty")
chunks = []
start = end = 0
if prefix[0] == '!':
end = 1
while end < len(prefix):
ch = prefix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append('%%%02X' % ord(ch))
if start < end:
chunks.append(prefix[start:end])
return ''.join(chunks)
    def prepare_tag(self, tag):
        """Encode *tag* for output: shorten it with a registered tag-prefix
        handle when one matches, otherwise wrap it in verbatim '!<...>' form.
        Disallowed characters are percent-encoded as UTF-8 bytes."""
        if not tag:
            raise EmitterError("tag must not be empty")
        if tag == '!':
            return tag
        handle = None
        suffix = tag
        prefixes = sorted(self.tag_prefixes.keys())
        for prefix in prefixes:
            # The lexicographically last matching prefix wins; '!' only
            # counts when it is a strict prefix (or the whole tag is '!').
            if tag.startswith(prefix) \
                    and (prefix == '!' or len(prefix) < len(tag)):
                handle = self.tag_prefixes[prefix]
                suffix = tag[len(prefix):]
        chunks = []
        start = end = 0
        while end < len(suffix):
            ch = suffix[end]
            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
                    or ch in '-;/?:@&=+$,_.~*\'()[]' \
                    or (ch == '!' and handle != '!'):
                end += 1
            else:
                # Percent-encode the UTF-8 bytes of disallowed characters.
                if start < end:
                    chunks.append(suffix[start:end])
                start = end = end+1
                data = ch.encode('utf-8')
                for ch in data:
                    chunks.append('%%%02X' % ch)
        if start < end:
            chunks.append(suffix[start:end])
        suffix_text = ''.join(chunks)
        if handle:
            return '%s%s' % (handle, suffix_text)
        else:
            return '!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch, anchor))
return anchor
    def analyze_scalar(self, scalar):
        """Scan *scalar* once and return a ScalarAnalysis describing which
        output styles (plain/single/double/block) can represent it.

        NOTE(review): the local ``unicode_characters`` is set but never read.
        """
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
                    allow_flow_plain=False, allow_block_plain=True,
                    allow_single_quoted=True, allow_double_quoted=True,
                    allow_block=False)
        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False
        # Important whitespace combinations.
        leading_space = False
        leading_break = False
        trailing_space = False
        trailing_break = False
        break_space = False
        space_break = False
        # Check document indicators.
        if scalar.startswith('---') or scalar.startswith('...'):
            block_indicators = True
            flow_indicators = True
        # First character or preceded by a whitespace.
        preceded_by_whitespace = True
        # Last character or followed by a whitespace.
        followed_by_whitespace = (len(scalar) == 1 or
                scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
        # The previous character is a space.
        previous_space = False
        # The previous character is a break.
        previous_break = False
        index = 0
        while index < len(scalar):
            ch = scalar[index]
            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in '#,[]{}&*!|>\'\"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in '?:':
                    flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == '-' and followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in ',?[]{}':
                    flow_indicators = True
                if ch == ':':
                    flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == '#' and preceded_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            # Check for line breaks, special, and unicode characters.
            if ch in '\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
                if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
                        or '\uE000' <= ch <= '\uFFFD'
                        or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
                    unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True
            # Detect important whitespace combinations.
            if ch == ' ':
                if index == 0:
                    leading_space = True
                if index == len(scalar)-1:
                    trailing_space = True
                if previous_break:
                    break_space = True
                previous_space = True
                previous_break = False
            elif ch in '\n\x85\u2028\u2029':
                if index == 0:
                    leading_break = True
                if index == len(scalar)-1:
                    trailing_break = True
                if previous_space:
                    space_break = True
                previous_space = False
                previous_break = True
            else:
                previous_space = False
                previous_break = False
            # Prepare for the next character.
            index += 1
            preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
            followed_by_whitespace = (index+1 >= len(scalar) or
                    scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True
        # Leading and trailing whitespaces are bad for plain scalars.
        if (leading_space or leading_break
                or trailing_space or trailing_break):
            allow_flow_plain = allow_block_plain = False
        # We do not permit trailing spaces for block scalars.
        if trailing_space:
            allow_block = False
        # Spaces at the beginning of a new line are only acceptable for block
        # scalars.
        if break_space:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Spaces followed by breaks, as well as special character are only
        # allowed for double quoted scalars.
        if space_break or special_characters:
            allow_flow_plain = allow_block_plain = \
                    allow_single_quoted = allow_block = False
        # Although the plain scalar writer supports breaks, we never emit
        # multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False
        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False
        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False
        return ScalarAnalysis(scalar=scalar,
                empty=False, multiline=line_breaks,
                allow_flow_plain=allow_flow_plain,
                allow_block_plain=allow_block_plain,
                allow_single_quoted=allow_single_quoted,
                allow_double_quoted=allow_double_quoted,
                allow_block=allow_block)
# Writers.
def flush_stream(self):
if hasattr(self.stream, 'flush'):
self.stream.flush()
    def write_stream_start(self):
        """Start the output stream, writing a BOM for UTF-16 encodings."""
        # Write BOM if needed.
        if self.encoding and self.encoding.startswith('utf-16'):
            self.stream.write('\uFEFF'.encode(self.encoding))
    def write_stream_end(self):
        """Finish the output stream by flushing buffered data."""
        self.flush_stream()
    def write_indicator(self, indicator, need_whitespace,
            whitespace=False, indention=False):
        """Write an indicator string, inserting a separating space when the
        indicator needs one and none precedes it.

        whitespace/indention record the line state after this write.
        """
        if self.whitespace or not need_whitespace:
            data = indicator
        else:
            data = ' '+indicator
        self.whitespace = whitespace
        # Indentation state survives only while every write allows it.
        self.indention = self.indention and indention
        self.column += len(data)
        self.open_ended = False
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    def write_indent(self):
        """Break the line if needed and pad with spaces up to the current indent."""
        indent = self.indent or 0
        if not self.indention or self.column > indent \
                or (self.column == indent and not self.whitespace):
            self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = ' '*(indent-self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
    def write_line_break(self, data=None):
        """Write a line break (*data*, or the configured default) and reset
        the column/whitespace tracking state."""
        if data is None:
            data = self.best_line_break
        self.whitespace = True
        self.indention = True
        self.line += 1
        self.column = 0
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    def write_version_directive(self, version_text):
        """Write a '%YAML <version>' directive line."""
        data = '%%YAML %s' % version_text
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
    def write_tag_directive(self, handle_text, prefix_text):
        """Write a '%TAG <handle> <prefix>' directive line."""
        data = '%%TAG %s %s' % (handle_text, prefix_text)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
# Scalar streams.
    def write_single_quoted(self, text, split=True):
        """Emit *text* as a single-quoted scalar, doubling embedded quotes
        and folding long lines at spaces when *split* is true."""
        self.write_indicator('\'', True)
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # End of a run of spaces: fold here if the line is too long.
                if ch is None or ch != ' ':
                    if start+1 == end and self.column > self.best_width and split \
                            and start != 0 and end != len(text):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # End of a run of line breaks: reproduce them literally.
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                # Plain chunk: flush it before a space, break, or quote.
                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == '\'':
                # An embedded single quote is escaped by doubling it.
                data = '\'\''
                self.column += 2
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = (ch == ' ')
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
        self.write_indicator('\'', False)
ESCAPE_REPLACEMENTS = {
'\0': '0',
'\x07': 'a',
'\x08': 'b',
'\x09': 't',
'\x0A': 'n',
'\x0B': 'v',
'\x0C': 'f',
'\x0D': 'r',
'\x1B': 'e',
'\"': '\"',
'\\': '\\',
'\x85': 'N',
'\xA0': '_',
'\u2028': 'L',
'\u2029': 'P',
}
    def write_double_quoted(self, text, split=True):
        """Emit *text* as a double-quoted scalar, escaping characters that
        cannot appear literally and folding long lines (with a trailing
        backslash) when *split* is true."""
        self.write_indicator('"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
                    or not ('\x20' <= ch <= '\x7E'
                        or (self.allow_unicode
                            and ('\xA0' <= ch <= '\uD7FF'
                                or '\uE000' <= ch <= '\uFFFD'))):
                # Flush the literal chunk accumulated so far.
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    # Escape: named escape if available, else \xXX / \uXXXX / \UXXXXXXXX.
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= '\xFF':
                        data = '\\x%02X' % ord(ch)
                    elif ch <= '\uFFFF':
                        data = '\\u%04X' % ord(ch)
                    else:
                        data = '\\U%08X' % ord(ch)
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end+1
            if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
                    and self.column+(end-start) > self.best_width and split:
                # Fold the line, ending it with a backslash continuation.
                data = text[start:end]+'\\'
                if start < end:
                    start = end
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == ' ':
                    # A space at the fold point must itself be escaped.
                    data = '\\'
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator('"', False)
def determine_block_hints(self, text):
hints = ''
if text:
if text[0] in ' \n\x85\u2028\u2029':
hints += str(self.best_indent)
if text[-1] not in '\n\x85\u2028\u2029':
hints += '-'
elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
hints += '+'
return hints
    def write_folded(self, text):
        """Emit *text* as a folded ('>') block scalar."""
        hints = self.determine_block_hints(text)
        self.write_indicator('>'+hints, True)
        if hints[-1:] == '+':
            # Keep-chomping leaves the document open-ended.
            self.open_ended = True
        self.write_line_break()
        leading_space = True
        spaces = False
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # End of a run of breaks: write them, doubling a fold point.
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    if not leading_space and ch is not None and ch != ' ' \
                            and text[start] == '\n':
                        self.write_line_break()
                    leading_space = (ch == ' ')
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            elif spaces:
                # End of a run of spaces: fold here if the line is too long.
                if ch != ' ':
                    if start+1 == end and self.column > self.best_width:
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            else:
                # Plain chunk: flush it before a space or break.
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in '\n\x85\u2028\u2029')
                spaces = (ch == ' ')
            end += 1
    def write_literal(self, text):
        """Emit *text* as a literal ('|') block scalar, preserving all breaks."""
        hints = self.determine_block_hints(text)
        self.write_indicator('|'+hints, True)
        if hints[-1:] == '+':
            # Keep-chomping leaves the document open-ended.
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # End of a run of breaks: write each break literally.
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            else:
                # Plain chunk: flush it before the next break (or at the end).
                if ch is None or ch in '\n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
    def write_plain(self, text, split=True):
        """Emit *text* as a plain (unquoted) scalar, folding at spaces when
        *split* is true."""
        if self.root_context:
            self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # Separate from the preceding token with a single space.
            data = ' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # End of a run of spaces: fold here if the line is too long.
                if ch != ' ':
                    if start+1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # End of a run of breaks: write them and re-indent.
                if ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                # Plain chunk: flush it before a space or break.
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
            if ch is not None:
                spaces = (ch == ' ')
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
```
|
=====================================================================================================
SOURCE CODE FILE: error.py
LINES: 6
SIZE: 2.47 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\error.py
ENCODING: utf-8
```py
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark:
    """A position in a YAML source stream, used for error reporting."""
    def __init__(self, name, index, line, column, buffer, pointer):
        # name: display name of the source (shown in __str__).
        self.name = name
        self.index = index
        self.line = line
        self.column = column
        # buffer/pointer let get_snippet() show the surrounding text;
        # buffer may be None when the source text is not retained.
        self.buffer = buffer
        self.pointer = pointer
    def get_snippet(self, indent=4, max_length=75):
        """Return an excerpt of the marked line with a '^' caret under the
        position, or None when no buffer is available."""
        if self.buffer is None:
            return None
        head = ''
        start = self.pointer
        # Walk back to the line start, clipping to about max_length/2.
        while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
            start -= 1
            if self.pointer-start > max_length/2-1:
                head = ' ... '
                start += 5
                break
        tail = ''
        end = self.pointer
        # Walk forward to the line end, clipping likewise.
        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
            end += 1
            if end-self.pointer > max_length/2-1:
                tail = ' ... '
                end -= 5
                break
        snippet = self.buffer[start:end]
        return ' '*indent + head + snippet + tail + '\n' \
            + ' '*(indent+self.pointer-start+len(head)) + '^'
    def __str__(self):
        """Human-readable location, with a caret snippet when available."""
        snippet = self.get_snippet()
        where = " in \"%s\", line %d, column %d" \
                % (self.name, self.line+1, self.column+1)
        if snippet is not None:
            where += ":\n"+snippet
        return where
class YAMLError(Exception):
    """Base class for all errors raised by this package."""
class MarkedYAMLError(YAMLError):
    """A YAMLError carrying context/problem descriptions with source marks."""
    def __init__(self, context=None, context_mark=None,
            problem=None, problem_mark=None, note=None):
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
    def __str__(self):
        """Compose the multi-line message, omitting the context mark when
        it would duplicate the problem mark's position."""
        lines = []
        if self.context is not None:
            lines.append(self.context)
        # Show the context mark only when it adds information beyond the
        # problem mark (different source name, line, or column).
        if self.context_mark is not None \
            and (self.problem is None or self.problem_mark is None
                    or self.context_mark.name != self.problem_mark.name
                    or self.context_mark.line != self.problem_mark.line
                    or self.context_mark.column != self.problem_mark.column):
            lines.append(str(self.context_mark))
        if self.problem is not None:
            lines.append(self.problem)
        if self.problem_mark is not None:
            lines.append(str(self.problem_mark))
        if self.note is not None:
            lines.append(self.note)
        return '\n'.join(lines)
```
|
======================================================================================================
SOURCE CODE FILE: events.py
LINES: 1
SIZE: 2.39 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\events.py
ENCODING: utf-8
```py
# Abstract classes.
class Event(object):
    """Base class for all parsing/emitting events; records its source span."""

    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Show only the descriptive attributes a subclass actually defines,
        # in a fixed, readable order.
        parts = []
        for key in ('anchor', 'tag', 'implicit', 'value'):
            if hasattr(self, key):
                parts.append('%s=%r' % (key, getattr(self, key)))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
class NodeEvent(Event):
    """An event describing a node; may carry an anchor name."""

    def __init__(self, anchor, start_mark=None, end_mark=None):
        super().__init__(start_mark, end_mark)
        self.anchor = anchor
class CollectionStartEvent(NodeEvent):
    """Start of a collection node; carries tag, implicitness and flow_style."""

    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
            flow_style=None):
        super().__init__(anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.flow_style = flow_style
class CollectionEndEvent(Event):
    """End of a collection node."""
# Implementations.
class StreamStartEvent(Event):
    """Start of the character stream; may record the stream encoding."""

    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        super().__init__(start_mark, end_mark)
        self.encoding = encoding
class StreamEndEvent(Event):
    """End of the character stream."""
class DocumentStartEvent(Event):
    """Start of a document; records explicitness, version and tag handles."""

    def __init__(self, start_mark=None, end_mark=None,
            explicit=None, version=None, tags=None):
        super().__init__(start_mark, end_mark)
        self.explicit = explicit
        self.version = version
        self.tags = tags
class DocumentEndEvent(Event):
    """End of a document; records whether the '...' marker was explicit."""

    def __init__(self, start_mark=None, end_mark=None,
            explicit=None):
        super().__init__(start_mark, end_mark)
        self.explicit = explicit
class AliasEvent(NodeEvent):
    """A reference (alias) to a previously anchored node."""
class ScalarEvent(NodeEvent):
    """A scalar value with its tag, implicitness pair, and quoting style."""

    def __init__(self, anchor, tag, implicit, value,
            start_mark=None, end_mark=None, style=None):
        super().__init__(anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.style = style
class SequenceStartEvent(CollectionStartEvent):
    """Start of a sequence node."""

class SequenceEndEvent(CollectionEndEvent):
    """End of a sequence node."""

class MappingStartEvent(CollectionStartEvent):
    """Start of a mapping node."""

class MappingEndEvent(CollectionEndEvent):
    """End of a mapping node."""
```
|
======================================================================================================
SOURCE CODE FILE: loader.py
LINES: 1
SIZE: 2.01 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\loader.py
ENCODING: utf-8
```py
__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
from .reader import *
from .scanner import *
from .parser import *
from .composer import *
from .constructor import *
from .resolver import *
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
    """Loader assembled from the *base* constructor/resolver components."""
    def __init__(self, stream):
        # Each mix-in is initialized explicitly with its own arguments.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
    """Loader using FullConstructor and the standard Resolver."""
    def __init__(self, stream):
        # Each mix-in is initialized explicitly with its own arguments.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        FullConstructor.__init__(self)
        Resolver.__init__(self)
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
    """Loader using SafeConstructor and the standard Resolver."""
    def __init__(self, stream):
        # Each mix-in is initialized explicitly with its own arguments.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Loader using the full Constructor and the standard Resolver."""
    def __init__(self, stream):
        # Each mix-in is initialized explicitly with its own arguments.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        Resolver.__init__(self)
# UnsafeLoader is the same as Loader (which is and was always unsafe on
# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
# FullLoad should be able to load almost all YAML safely. Loader is left intact
# to ensure backwards compatibility.
class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Same composition as Loader (see the note above this class)."""
    def __init__(self, stream):
        # Each mix-in is initialized explicitly with its own arguments.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        Resolver.__init__(self)
```
|
=====================================================================================================
SOURCE CODE FILE: nodes.py
LINES: 1
SIZE: 1.41 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\nodes.py
ENCODING: utf-8
```py
class Node(object):
    """Base class for composed nodes: a tag, a value, and the source span.

    Subclasses add an ``id`` class attribute and style information.
    """

    def __init__(self, tag, value, start_mark, end_mark):
        self.tag = tag
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # The value is shown in full; removed a long-dead commented-out
        # block that used to abbreviate it.
        value = repr(self.value)
        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
class ScalarNode(Node):
    """A node holding one scalar value; ``style`` records its quoting style."""

    id = 'scalar'

    def __init__(self, tag, value,
            start_mark=None, end_mark=None, style=None):
        super().__init__(tag, value, start_mark, end_mark)
        self.style = style
class CollectionNode(Node):
    """Base for collection nodes; ``flow_style`` records the presentation style."""

    def __init__(self, tag, value,
            start_mark=None, end_mark=None, flow_style=None):
        super().__init__(tag, value, start_mark, end_mark)
        self.flow_style = flow_style
class SequenceNode(CollectionNode):
    """Collection node representing a sequence."""
    id = 'sequence'

class MappingNode(CollectionNode):
    """Collection node representing a mapping."""
    id = 'mapping'
```
|
======================================================================================================
SOURCE CODE FILE: parser.py
LINES: 1
SIZE: 24.90 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\parser.py
ENCODING: utf-8
```py
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
__all__ = ['Parser', 'ParserError']
from .error import MarkedYAMLError
from .tokens import *
from .events import *
from .scanner import *
class ParserError(MarkedYAMLError):
    """Raised when the token stream does not match the YAML grammar."""
class Parser:
# Since writing a recursive-descendant parser is a straightforward task, we
# do not give many comments here.
DEFAULT_TAGS = {
'!': '!',
'!!': 'tag:yaml.org,2002:',
}
    def __init__(self):
        """Initialize parser state; the state machine starts at parse_stream_start."""
        self.current_event = None
        self.yaml_version = None
        self.tag_handles = {}
        # Stacks of pending state functions and collection start marks.
        self.states = []
        self.marks = []
        self.state = self.parse_stream_start
    def dispose(self):
        """Release the parser's state machine."""
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
    def parse_stream_start(self):
        """Consume the stream-start token and emit a StreamStartEvent."""
        # Parse the stream start.
        token = self.get_token()
        event = StreamStartEvent(token.start_mark, token.end_mark,
                encoding=token.encoding)
        # Prepare the next state.
        self.state = self.parse_implicit_document_start
        return event
    def parse_implicit_document_start(self):
        """Emit a DocumentStartEvent for a document without '---'; defer to
        parse_document_start when directives or markers are present."""
        # Parse an implicit document.
        if not self.check_token(DirectiveToken, DocumentStartToken,
                StreamEndToken):
            # No directives were seen: fall back to the default tag handles.
            self.tag_handles = self.DEFAULT_TAGS
            token = self.peek_token()
            start_mark = end_mark = token.start_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=False)
            # Prepare the next state.
            self.states.append(self.parse_document_end)
            self.state = self.parse_block_node
            return event
        else:
            return self.parse_document_start()
    def parse_document_start(self):
        """Parse an explicit document start ('---' with optional
        directives) or, at stream end, emit StreamEndEvent."""
        # Parse any extra document end indicators.
        while self.check_token(DocumentEndToken):
            self.get_token()
        # Parse an explicit document.
        if not self.check_token(StreamEndToken):
            token = self.peek_token()
            start_mark = token.start_mark
            version, tags = self.process_directives()
            if not self.check_token(DocumentStartToken):
                raise ParserError(None, None,
                        "expected '<document start>', but found %r"
                        % self.peek_token().id,
                        self.peek_token().start_mark)
            token = self.get_token()
            end_mark = token.end_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=True, version=version, tags=tags)
            self.states.append(self.parse_document_end)
            self.state = self.parse_document_content
        else:
            # Parse the end of the stream.
            token = self.get_token()
            event = StreamEndEvent(token.start_mark, token.end_mark)
            # Both stacks must be exhausted when the stream ends.
            assert not self.states
            assert not self.marks
            self.state = None
        return event
    def parse_document_end(self):
        """Emit DocumentEndEvent; it is explicit only when a '...' token
        is actually present."""
        token = self.peek_token()
        start_mark = end_mark = token.start_mark
        explicit = False
        if self.check_token(DocumentEndToken):
            token = self.get_token()
            end_mark = token.end_mark
            explicit = True
        event = DocumentEndEvent(start_mark, end_mark,
                explicit=explicit)
        # Prepare the next state.
        self.state = self.parse_document_start
        return event
    def parse_document_content(self):
        """Parse the document body; an immediately-following directive,
        document boundary or stream end means the document is empty."""
        if self.check_token(DirectiveToken,
                DocumentStartToken, DocumentEndToken, StreamEndToken):
            event = self.process_empty_scalar(self.peek_token().start_mark)
            self.state = self.states.pop()
            return event
        else:
            return self.parse_block_node()
    def process_directives(self):
        """Consume %YAML and %TAG directive tokens.

        Returns a ``(version, tags)`` pair where ``tags`` holds only the
        explicitly declared handles (None if there were none).  The
        default handles are merged into ``self.tag_handles`` afterwards,
        without leaking into the returned value.
        """
        self.yaml_version = None
        self.tag_handles = {}
        while self.check_token(DirectiveToken):
            token = self.get_token()
            if token.name == 'YAML':
                if self.yaml_version is not None:
                    raise ParserError(None, None,
                            "found duplicate YAML directive", token.start_mark)
                major, minor = token.value
                if major != 1:
                    raise ParserError(None, None,
                            "found incompatible YAML document (version 1.* is required)",
                            token.start_mark)
                self.yaml_version = token.value
            elif token.name == 'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(None, None,
                            "duplicate tag handle %r" % handle,
                            token.start_mark)
                self.tag_handles[handle] = prefix
        if self.tag_handles:
            value = self.yaml_version, self.tag_handles.copy()
        else:
            value = self.yaml_version, None
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
    def parse_block_node(self):
        """Parse a node in block context."""
        return self.parse_node(block=True)
    def parse_flow_node(self):
        """Parse a node in flow context."""
        return self.parse_node()
    def parse_block_node_or_indentless_sequence(self):
        """Parse a block node, also allowing an indentless '-' sequence
        (used for block mapping values)."""
        return self.parse_node(block=True, indentless_sequence=True)
    def parse_node(self, block=False, indentless_sequence=False):
        """Parse a single node (alias, or properties + content) and emit
        its first event.

        ``block`` permits block collections; ``indentless_sequence``
        permits a '-' sequence at the current indentation level.
        """
        if self.check_token(AliasToken):
            token = self.get_token()
            event = AliasEvent(token.value, token.start_mark, token.end_mark)
            self.state = self.states.pop()
        else:
            anchor = None
            tag = None
            start_mark = end_mark = tag_mark = None
            # Node properties may come in either order: ANCHOR TAG or TAG ANCHOR.
            if self.check_token(AnchorToken):
                token = self.get_token()
                start_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
                if self.check_token(TagToken):
                    token = self.get_token()
                    tag_mark = token.start_mark
                    end_mark = token.end_mark
                    tag = token.value
            elif self.check_token(TagToken):
                token = self.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
                if self.check_token(AnchorToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    anchor = token.value
            if tag is not None:
                # Resolve the (handle, suffix) pair against the %TAG table.
                handle, suffix = tag
                if handle is not None:
                    if handle not in self.tag_handles:
                        raise ParserError("while parsing a node", start_mark,
                                "found undefined tag handle %r" % handle,
                                tag_mark)
                    tag = self.tag_handles[handle]+suffix
                else:
                    tag = suffix
            #if tag == '!':
            #    raise ParserError("while parsing a node", start_mark,
            #            "found non-specific tag '!'", tag_mark,
            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
            if start_mark is None:
                start_mark = end_mark = self.peek_token().start_mark
            event = None
            # No tag, or the non-specific '!', leaves resolution to the resolver.
            implicit = (tag is None or tag == '!')
            if indentless_sequence and self.check_token(BlockEntryToken):
                end_mark = self.peek_token().end_mark
                event = SequenceStartEvent(anchor, tag, implicit,
                        start_mark, end_mark)
                self.state = self.parse_indentless_sequence_entry
            else:
                if self.check_token(ScalarToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    # For scalars, implicit is a pair:
                    # (resolvable-as-plain, resolvable-as-quoted).
                    if (token.plain and tag is None) or tag == '!':
                        implicit = (True, False)
                    elif tag is None:
                        implicit = (False, True)
                    else:
                        implicit = (False, False)
                    event = ScalarEvent(anchor, tag, implicit, token.value,
                            start_mark, end_mark, style=token.style)
                    self.state = self.states.pop()
                elif self.check_token(FlowSequenceStartToken):
                    end_mark = self.peek_token().end_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_sequence_first_entry
                elif self.check_token(FlowMappingStartToken):
                    end_mark = self.peek_token().end_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_mapping_first_key
                elif block and self.check_token(BlockSequenceStartToken):
                    end_mark = self.peek_token().start_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_sequence_first_entry
                elif block and self.check_token(BlockMappingStartToken):
                    end_mark = self.peek_token().start_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_mapping_first_key
                elif anchor is not None or tag is not None:
                    # Empty scalars are allowed even if a tag or an anchor is
                    # specified.
                    event = ScalarEvent(anchor, tag, (implicit, False), '',
                            start_mark, end_mark)
                    self.state = self.states.pop()
                else:
                    if block:
                        node = 'block'
                    else:
                        node = 'flow'
                    token = self.peek_token()
                    raise ParserError("while parsing a %s node" % node, start_mark,
                            "expected the node content, but found %r" % token.id,
                            token.start_mark)
        return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
    def parse_block_sequence_first_entry(self):
        """Consume BLOCK-SEQUENCE-START and remember its mark for errors."""
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_sequence_entry()
    def parse_block_sequence_entry(self):
        """Parse one '- item' entry, or close the sequence at BLOCK-END."""
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                # A '-' with no following node is an empty scalar entry.
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block collection", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
    def parse_indentless_sequence_entry(self):
        """Parse entries of a '-' sequence at the parent's indentation.

        Indentless sequences have no BLOCK-END token, so the end event
        reuses the next token's start mark for both positions and no
        collection mark was pushed.
        """
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken,
                    KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_indentless_sequence_entry)
                return self.parse_block_node()
            else:
                self.state = self.parse_indentless_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        token = self.peek_token()
        event = SequenceEndEvent(token.start_mark, token.start_mark)
        self.state = self.states.pop()
        return event
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
    def parse_block_mapping_first_key(self):
        """Consume BLOCK-MAPPING-START and remember its mark for errors."""
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_mapping_key()
    def parse_block_mapping_key(self):
        """Parse one mapping key, or close the mapping at BLOCK-END."""
        if self.check_token(KeyToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_value)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # '?' with no key node: the key is an empty scalar.
                self.state = self.parse_block_mapping_value
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block mapping", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_block_mapping_value(self):
        """Parse the value of a block mapping entry; a missing ':' or a
        missing value node yields an empty scalar."""
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_key)
                return self.parse_block_node_or_indentless_sequence()
            else:
                self.state = self.parse_block_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_block_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# Note that while production rules for both flow_sequence_entry and
# flow_mapping_entry are equal, their interpretations are different.
# For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
# generate an inline mapping (set syntax).
    def parse_flow_sequence_first_entry(self):
        """Consume '[' and remember its mark for error reporting."""
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_sequence_entry(first=True)
    def parse_flow_sequence_entry(self, first=False):
        """Parse one flow sequence entry; ``first`` skips the ',' check."""
        if not self.check_token(FlowSequenceEndToken):
            if not first:
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow sequence", self.marks[-1],
                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                # A '?' inside a flow sequence starts an inline
                # single-pair mapping.
                token = self.peek_token()
                event = MappingStartEvent(None, None, True,
                        token.start_mark, token.end_mark,
                        flow_style=True)
                self.state = self.parse_flow_sequence_entry_mapping_key
                return event
            elif not self.check_token(FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_flow_node()
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_flow_sequence_entry_mapping_key(self):
        """Parse the key of an inline mapping inside a flow sequence."""
        token = self.get_token()
        if not self.check_token(ValueToken,
                FlowEntryToken, FlowSequenceEndToken):
            self.states.append(self.parse_flow_sequence_entry_mapping_value)
            return self.parse_flow_node()
        else:
            self.state = self.parse_flow_sequence_entry_mapping_value
            return self.process_empty_scalar(token.end_mark)
    def parse_flow_sequence_entry_mapping_value(self):
        """Parse the value of an inline mapping inside a flow sequence;
        missing ':' or value node yields an empty scalar."""
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry_mapping_end)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_sequence_entry_mapping_end
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_sequence_entry_mapping_end
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    def parse_flow_sequence_entry_mapping_end(self):
        """Close the inline single-pair mapping; nothing is consumed, so
        both marks come from the peeked token."""
        self.state = self.parse_flow_sequence_entry
        token = self.peek_token()
        return MappingEndEvent(token.start_mark, token.start_mark)
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
    def parse_flow_mapping_first_key(self):
        """Consume '{' and remember its mark for error reporting."""
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_mapping_key(first=True)
    def parse_flow_mapping_key(self, first=False):
        """Parse one flow mapping key; ``first`` skips the ',' check."""
        if not self.check_token(FlowMappingEndToken):
            if not first:
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow mapping", self.marks[-1],
                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                token = self.get_token()
                if not self.check_token(ValueToken,
                        FlowEntryToken, FlowMappingEndToken):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif not self.check_token(FlowMappingEndToken):
                # An entry without '?': the node is a key whose value is empty.
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_flow_mapping_value(self):
        """Parse the value of a flow mapping entry; missing ':' or value
        node yields an empty scalar."""
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    def parse_flow_mapping_empty_value(self):
        """Emit the empty value for a key-only flow mapping entry."""
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(self.peek_token().start_mark)
    def process_empty_scalar(self, mark):
        """Return a zero-length plain scalar event anchored at *mark*."""
        return ScalarEvent(None, None, (True, False), '', mark, mark)
```
|
======================================================================================================
SOURCE CODE FILE: reader.py
LINES: 5
SIZE: 6.63 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\reader.py
ENCODING: utf-8
```py
# This module contains abstractions for the input stream. You don't have to
# look further; the code here is not pretty.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position forward by `length` characters.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
    """Raised for bytes that cannot be decoded or for characters that are
    not allowed in a YAML stream."""

    def __init__(self, name, position, character, encoding, reason):
        self.name = name
        # Either the offending raw bytes (decode failure) or an int code
        # point (non-printable character) — see __str__.
        self.character = character
        self.position = position
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        # bytes => the codec failed on a raw byte; int => the decoded
        # character is outside the allowed YAML character set.
        if isinstance(self.character, bytes):
            return "'%s' codec can't decode byte #x%02x: %s\n" \
                    "  in \"%s\", position %d" \
                    % (self.encoding, ord(self.character), self.reason,
                            self.name, self.position)
        else:
            return "unacceptable character #x%04x: %s\n" \
                    "  in \"%s\", position %d" \
                    % (self.character, self.reason,
                            self.name, self.position)
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.
    # Reader accepts
    # - a `bytes` object,
    # - a `str` object,
    # - a file-like object with its `read` method returning `str`,
    # - a file-like object with its `read` method returning `unicode`.
    # Yeah, it's ugly and slow.

    def __init__(self, stream):
        """Accept a str, bytes, or file-like stream and set up decoding."""
        self.name = None
        self.stream = None
        self.stream_pointer = 0
        self.eof = True
        self.buffer = ''
        self.pointer = 0
        self.raw_buffer = None
        self.raw_decode = None
        self.encoding = None
        self.index = 0
        self.line = 0
        self.column = 0
        if isinstance(stream, str):
            self.name = "<unicode string>"
            self.check_printable(stream)
            # '\0' is the end-of-input sentinel for the scanner.
            self.buffer = stream+'\0'
        elif isinstance(stream, bytes):
            self.name = "<byte string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index=0):
        """Return the character `index` positions ahead without advancing."""
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            self.update(index+1)
            return self.buffer[self.pointer+index]

    def prefix(self, length=1):
        """Return the next `length` characters without advancing."""
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]

    def forward(self, length=1):
        """Advance by `length` characters, maintaining index/line/column."""
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # Any YAML line break starts a new line; a lone '\r' counts
            # only when it is not the first half of a '\r\n' pair.
            if ch in '\n\x85\u2028\u2029' \
                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                self.column += 1
            length -= 1

    def get_mark(self):
        """Return a Mark for the current position; the snippet is only
        available for in-memory input."""
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)

    def determine_encoding(self):
        """Detect UTF-16 via BOM (needs at least two bytes), defaulting to
        UTF-8, and select the matching incremental decoder."""
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                self.raw_decode = codecs.utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)

    # Characters forbidden in a YAML stream: C0 controls other than
    # tab/LF/CR, DEL, C1 range below NBSP, surrogates, and non-characters.
    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
    def check_printable(self, data):
        """Raise ReaderError if `data` contains a forbidden character."""
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                    'unicode', "special characters are not allowed")

    def update(self, length):
        """Decode raw bytes until at least `length` characters are buffered
        past the current pointer (or EOF is reached)."""
        if self.raw_buffer is None:
            return
        # Drop the already-consumed prefix of the decoded buffer.
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    # Non-final decode: trailing incomplete sequences are
                    # left in raw_buffer until more bytes arrive.
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError as exc:
                    character = self.raw_buffer[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # Append the sentinel; no more data will arrive.
                self.buffer += '\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=4096):
        """Read one chunk from the underlying stream into raw_buffer."""
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            self.eof = True
```
|
===========================================================================================================
SOURCE CODE FILE: representer.py
LINES: 1
SIZE: 13.86 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\representer.py
ENCODING: utf-8
```py
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from .error import *
from .nodes import *
import datetime, copyreg, types, base64, collections
class RepresenterError(YAMLError):
    """Raised when an object cannot be converted to a YAML node."""
    pass
class BaseRepresenter:
    """Convert native Python objects into a graph of YAML nodes.

    Representer functions are looked up by exact type first
    (``yaml_representers``) and then along the MRO
    (``yaml_multi_representers``); the ``None`` key acts as a catch-all.
    """

    # Class-level registries; the add_* classmethods copy them on first
    # write so a subclass never mutates its parent's table.
    yaml_representers = {}
    yaml_multi_representers = {}

    def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
        # default_style: forced scalar style; None lets each node decide.
        self.default_style = default_style
        self.sort_keys = sort_keys
        self.default_flow_style = default_flow_style
        # id(obj) -> node, used to reuse nodes for aliased objects.
        self.represented_objects = {}
        # Keeps objects alive during represent() so ids are not recycled.
        self.object_keeper = []
        self.alias_key = None

    def represent(self, data):
        """Represent `data` as a node tree and hand it to the serializer."""
        node = self.represent_data(data)
        self.serialize(node)
        # Reset per-document bookkeeping.
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def represent_data(self, data):
        """Return the node for `data`, reusing the node of an already
        represented (aliased) object when possible."""
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        if data_types[0] in self.yaml_representers:
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    # No representer at all: fall back to an untagged scalar.
                    node = ScalarNode(None, str(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node

    @classmethod
    def add_representer(cls, data_type, representer):
        # Copy-on-write so the parent class registry stays untouched.
        if not 'yaml_representers' in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer

    @classmethod
    def add_multi_representer(cls, data_type, representer):
        # Copy-on-write so the parent class registry stays untouched.
        if not 'yaml_multi_representers' in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer

    def represent_scalar(self, tag, value, style=None):
        """Build a ScalarNode, registering it for alias reuse."""
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node

    def represent_sequence(self, tag, sequence, flow_style=None):
        """Build a SequenceNode from an iterable of items."""
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            # Register before recursing so self-references find the node.
            self.represented_objects[self.alias_key] = node
        # best_style stays True only while every child is a plain scalar.
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_mapping(self, tag, mapping, flow_style=None):
        """Build a MappingNode from a dict-like object or key/value pairs."""
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            # Register before recursing so self-references find the node.
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = list(mapping.items())
            if self.sort_keys:
                try:
                    mapping = sorted(mapping)
                except TypeError:
                    # Unorderable keys: keep the original order.
                    pass
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def ignore_aliases(self, data):
        """Hook: return True to suppress anchors/aliases for `data`."""
        return False
class SafeRepresenter(BaseRepresenter):
    """Representer restricted to standard YAML tags for built-in types;
    anything unregistered raises RepresenterError."""

    def ignore_aliases(self, data):
        """Immutable scalar-like values never need anchors/aliases."""
        if data is None:
            return True
        if isinstance(data, tuple) and data == ():
            return True
        if isinstance(data, (str, bytes, bool, int, float)):
            return True
        # Explicit False rather than an implicit None fall-through.
        return False

    def represent_none(self, data):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    def represent_str(self, data):
        return self.represent_scalar('tag:yaml.org,2002:str', data)

    def represent_binary(self, data):
        # base64.encodestring() was removed in Python 3.9; encodebytes()
        # is always available on Python 3 (this module already requires
        # Python 3 via copyreg), so the old hasattr fallback is dead code.
        data = base64.encodebytes(data).decode('ascii')
        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')

    def represent_bool(self, data):
        value = 'true' if data else 'false'
        return self.represent_scalar('tag:yaml.org,2002:bool', value)

    def represent_int(self, data):
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))

    # Portable float infinity: square until repr() stops changing.
    # (Predates math.inf; kept for identical output semantics.)
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        # `data != data` catches NaN; the second clause is a historical
        # guard for platforms where NaN comparisons are broken and
        # compare equal to everything.
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        else:
            value = repr(data).lower()
            # In some cases repr() omits the decimal part, e.g.
            # repr(1e17) == '1e17', which is not a valid !!float
            # representation.  Insert '.0' before the exponent.
            if '.' not in value and 'e' in value:
                value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        return self.represent_sequence('tag:yaml.org,2002:seq', data)

    def represent_dict(self, data):
        return self.represent_mapping('tag:yaml.org,2002:map', data)

    def represent_set(self, data):
        # A YAML !!set is a mapping whose values are all null.
        value = {key: None for key in data}
        return self.represent_mapping('tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        value = data.isoformat()
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        value = data.isoformat(' ')
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        """Represent an instance as a mapping of its state under `tag`."""
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        raise RepresenterError("cannot represent an object", data)
# Register the safe representers for the built-in and datetime types;
# the None key is the catch-all that rejects everything else.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)
SafeRepresenter.add_representer(bytes,
        SafeRepresenter.represent_binary)
SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)
SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
    """Representer for the full (unsafe) dumper: adds Python-specific tags
    for complex numbers, tuples, names, modules and arbitrary objects via
    the pickle/__reduce__ protocol."""

    def represent_complex(self, data):
        # Emit the shortest exact form: pure real, pure imaginary, or both.
        if data.imag == 0.0:
            data = '%r' % data.real
        elif data.real == 0.0:
            data = '%rj' % data.imag
        elif data.imag > 0:
            data = '%r+%rj' % (data.real, data.imag)
        else:
            data = '%r%rj' % (data.real, data.imag)
        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)

    def represent_name(self, data):
        """Represent a class/function by its importable dotted name."""
        name = '%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')

    def represent_module(self, data):
        return self.represent_scalar(
                'tag:yaml.org,2002:python/module:'+data.__name__, '')

    def represent_object(self, data):
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)
        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.
        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).
        # Another special case is when __reduce__ returns a string - we don't
        # support it.
        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.
        cls = type(data)
        if cls in copyreg.dispatch_table:
            reduce = copyreg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent an object", data)
        # Pad the reduce tuple out to exactly five fields.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = 'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = 'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = '%s.%s' % (function.__module__, function.__name__)
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            # Plain attribute state only: use the compact !!python/object form.
            return self.represent_mapping(
                    'tag:yaml.org,2002:python/object:'+function_name, state)
        if not listitems and not dictitems  \
                and isinstance(state, dict) and not state:
            return self.represent_sequence(tag+function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)

    def represent_ordered_dict(self, data):
        # Provide uniform representation across different Python versions.
        data_type = type(data)
        tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
                % (data_type.__module__, data_type.__name__)
        items = [[key, value] for key, value in data.items()]
        return self.represent_sequence(tag, [items])
# Register the Python-specific representers; the multi-representer for
# `object` is the fallback that pickles anything not matched above.
Representer.add_representer(complex,
        Representer.represent_complex)
Representer.add_representer(tuple,
        Representer.represent_tuple)
Representer.add_multi_representer(type,
        Representer.represent_name)
Representer.add_representer(collections.OrderedDict,
        Representer.represent_ordered_dict)
Representer.add_representer(types.FunctionType,
        Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)
Representer.add_representer(types.ModuleType,
        Representer.represent_module)
Representer.add_multi_representer(object,
        Representer.represent_object)
```
|
========================================================================================================
SOURCE CODE FILE: resolver.py
LINES: 1
SIZE: 8.79 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\resolver.py
ENCODING: utf-8
```py
__all__ = ['BaseResolver', 'Resolver']
from .error import *
from .nodes import *
import re
class ResolverError(YAMLError):
    """Raised when a resolver is registered with invalid arguments."""
    pass
class BaseResolver:
    # Fallback tags used when implicit resolution finds no match.
    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
    # Class-level registries; copied-on-write by the add_* classmethods.
    yaml_implicit_resolvers = {}
    yaml_path_resolvers = {}
    def __init__(self):
        # Per-depth stacks used while descending the node tree.
        self.resolver_exact_paths = []
        self.resolver_prefix_paths = []
    @classmethod
    def add_implicit_resolver(cls, tag, regexp, first):
        """Register `regexp` -> `tag` for plain scalars whose first
        character is one of `first`; None registers a catch-all bucket."""
        # Copy-on-write (lists copied too) so the parent class registry
        # is never mutated through a subclass.
        if not 'yaml_implicit_resolvers' in cls.__dict__:
            implicit_resolvers = {}
            for key in cls.yaml_implicit_resolvers:
                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
            cls.yaml_implicit_resolvers = implicit_resolvers
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
@classmethod
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, str) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (str, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, str):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if (index_check is False or index_check is None) \
and current_index is None:
return
if isinstance(index_check, str):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == '':
resolvers = self.yaml_implicit_resolvers.get('', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers + wildcard_resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
    """Default resolver: BaseResolver plus the standard YAML 1.1 implicit
    scalar resolvers registered below (bool, float, int, merge, null,
    timestamp, value, yaml)."""
    pass
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:bool',
re.compile(r'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list('yYnNtTfFoO'))
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:float',
re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
|\.[0-9][0-9_]*(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list('-+0123456789.'))
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:int',
re.compile(r'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list('-+0123456789'))
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:merge',
re.compile(r'^(?:<<)$'),
['<'])
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:null',
re.compile(r'''^(?: ~
|null|Null|NULL
| )$''', re.X),
['~', 'n', 'N', ''])
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:timestamp',
re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list('0123456789'))
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:value',
re.compile(r'^(?:=)$'),
['='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
'tag:yaml.org,2002:yaml',
re.compile(r'^(?:!|&|\*)$'),
list('!&*'))
```
|
=======================================================================================================
SOURCE CODE FILE: scanner.py
LINES: 54
SIZE: 50.08 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\scanner.py
ENCODING: utf-8
```py
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
__all__ = ['Scanner', 'ScannerError']
from .error import MarkedYAMLError
from .tokens import *
class ScannerError(MarkedYAMLError):
    """Raised when the character stream cannot be split into tokens."""
    pass
class SimpleKey:
    # Bookkeeping record for a *possible* simple key: the token number it
    # would get, whether a key is mandatory at that spot, and the exact
    # stream position (index/line/column plus a saved mark).
    def __init__(self, token_number, required, index, line, column, mark):
        (self.token_number, self.required, self.index,
            self.line, self.column, self.mark) = (
                token_number, required, index, line, column, mark)
class Scanner:
def __init__(self):
"""Initialize the scanner."""
# It is assumed that Scanner and Reader will have a common descendant.
# Reader do the dirty work of checking for BOM and converting the
# input data to Unicode. It also adds NUL to the end.
#
# Reader supports the following methods
# self.peek(i=0) # peek the next i-th character
# self.prefix(l=1) # peek the next l characters
# self.forward(l=1) # read the next l characters and move the pointer.
# Had we reached the end of the stream?
self.done = False
# The number of unclosed '{' and '['. `flow_level == 0` means block
# context.
self.flow_level = 0
# List of processed tokens that are not yet emitted.
self.tokens = []
# Add the STREAM-START token.
self.fetch_stream_start()
# Number of tokens that were emitted through the `get_token` method.
self.tokens_taken = 0
# The current indentation level.
self.indent = -1
# Past indentation levels.
self.indents = []
# Variables related to simple keys treatment.
# A simple key is a key that is not denoted by the '?' indicator.
# Example of simple keys:
# ---
# block simple key: value
# ? not a simple key:
# : { flow simple key: value }
# We emit the KEY token before all keys, so when we find a potential
# simple key, we try to locate the corresponding ':' indicator.
# Simple keys should be limited to a single line and 1024 characters.
# Can a simple key start at the current position? A simple key may
# start:
# - at the beginning of the line, not counting indentation spaces
# (in block context),
# - after '{', '[', ',' (in the flow context),
# - after '?', ':', '-' (in the block context).
# In the block context, this flag also signifies if a block collection
# may start at the current position.
self.allow_simple_key = True
# Keep track of possible simple keys. This is a dictionary. The key
# is `flow_level`; there can be no more that one possible simple key
# for each level. The value is a SimpleKey record:
# (token_number, required, index, line, column, mark)
# A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
# '[', or '{' tokens.
self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
# Return the next token, but do not delete if from the queue.
# Return None if no more tokens.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
else:
return None
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
def need_more_tokens(self):
if self.done:
return False
if not self.tokens:
return True
# The current token may be a potential simple key, so we
# need to look further.
self.stale_possible_simple_keys()
if self.next_possible_simple_key() == self.tokens_taken:
return True
    def fetch_more_tokens(self):
        """Scan the stream and append the next token(s) to self.tokens.

        Dispatches on the first character of the upcoming token; only the
        order of the stream-end/directive/document checks is significant.
        """
        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()
        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()
        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)
        # Peek the next character.
        ch = self.peek()
        # Is it the end of stream?
        if ch == '\0':
            return self.fetch_stream_end()
        # Is it a directive?
        if ch == '%' and self.check_directive():
            return self.fetch_directive()
        # Is it the document start?
        if ch == '-' and self.check_document_start():
            return self.fetch_document_start()
        # Is it the document end?
        if ch == '.' and self.check_document_end():
            return self.fetch_document_end()
        # TODO: support for BOM within a stream.
        #if ch == '\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken
        # Note: the order of the following checks is NOT significant.
        # Is it the flow sequence start indicator?
        if ch == '[':
            return self.fetch_flow_sequence_start()
        # Is it the flow mapping start indicator?
        if ch == '{':
            return self.fetch_flow_mapping_start()
        # Is it the flow sequence end indicator?
        if ch == ']':
            return self.fetch_flow_sequence_end()
        # Is it the flow mapping end indicator?
        if ch == '}':
            return self.fetch_flow_mapping_end()
        # Is it the flow entry indicator?
        if ch == ',':
            return self.fetch_flow_entry()
        # Is it the block entry indicator?
        if ch == '-' and self.check_block_entry():
            return self.fetch_block_entry()
        # Is it the key indicator?
        if ch == '?' and self.check_key():
            return self.fetch_key()
        # Is it the value indicator?
        if ch == ':' and self.check_value():
            return self.fetch_value()
        # Is it an alias?
        if ch == '*':
            return self.fetch_alias()
        # Is it an anchor?
        if ch == '&':
            return self.fetch_anchor()
        # Is it a tag?
        if ch == '!':
            return self.fetch_tag()
        # Is it a literal scalar?  ('|' is only special in block context.)
        if ch == '|' and not self.flow_level:
            return self.fetch_literal()
        # Is it a folded scalar?  ('>' is only special in block context.)
        if ch == '>' and not self.flow_level:
            return self.fetch_folded()
        # Is it a single quoted scalar?
        if ch == '\'':
            return self.fetch_single()
        # Is it a double quoted scalar?
        if ch == '\"':
            return self.fetch_double()
        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()
        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token" % ch,
                self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
def stale_possible_simple_keys(self):
# Remove entries that are no longer possible simple keys. According to
# the YAML specification, simple keys
# - should be limited to a single line,
# - should be no longer than 1024 characters.
# Disabling this procedure will allow simple keys of any length and
# height (may cause problems if indentation is broken though).
for level in list(self.possible_simple_keys):
key = self.possible_simple_keys[level]
if key.line != self.line \
or self.index-key.index > 1024:
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not find expected ':'", self.get_mark())
del self.possible_simple_keys[level]
def save_possible_simple_key(self):
# The next token may start a simple key. We check if it's possible
# and save its position. This function is called for
# ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
# Check if a simple key is required at the current position.
required = not self.flow_level and self.indent == self.column
# The next token might be a simple key. Let's save it's number and
# position.
if self.allow_simple_key:
self.remove_possible_simple_key()
token_number = self.tokens_taken+len(self.tokens)
key = SimpleKey(token_number, required,
self.index, self.line, self.column, self.get_mark())
self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
# Remove the saved possible key position at the current flow level.
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not find expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
# Indentation functions.
def unwind_indent(self, column):
## In flow context, tokens should respect indentation.
## Actually the condition should be `self.indent >= column` according to
## the spec. But this condition will prohibit intuitively correct
## constructions such as
## key : {
## }
#if self.flow_level and self.indent > column:
# raise ScannerError(None, None,
# "invalid indentation or unclosed '[' or '{'",
# self.get_mark())
# In the flow context, indentation is ignored. We make the scanner less
# restrictive then specification requires.
if self.flow_level:
return
# In block context, we may need to issue the BLOCK-END tokens.
while self.indent > column:
mark = self.get_mark()
self.indent = self.indents.pop()
self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
def fetch_stream_start(self):
# We always add STREAM-START as the first token and STREAM-END as the
# last token.
# Read the token.
mark = self.get_mark()
# Add STREAM-START.
self.tokens.append(StreamStartToken(mark, mark,
encoding=self.encoding))
def fetch_stream_end(self):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
self.possible_simple_keys = {}
# Read the token.
mark = self.get_mark()
# Add STREAM-END.
self.tokens.append(StreamEndToken(mark, mark))
# The steam is finished.
self.done = True
def fetch_directive(self):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Scan and add DIRECTIVE.
self.tokens.append(self.scan_directive())
def fetch_document_start(self):
self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
self.fetch_document_indicator(DocumentEndToken)
def fetch_document_indicator(self, TokenClass):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys. Note that there could not be a block collection
# after '---'.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Add DOCUMENT-START or DOCUMENT-END.
start_mark = self.get_mark()
self.forward(3)
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_start(self):
self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
self.fetch_flow_collection_start(FlowMappingStartToken)
def fetch_flow_collection_start(self, TokenClass):
# '[' and '{' may start a simple key.
self.save_possible_simple_key()
# Increase the flow level.
self.flow_level += 1
# Simple keys are allowed after '[' and '{'.
self.allow_simple_key = True
# Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_end(self):
self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
self.fetch_flow_collection_end(FlowMappingEndToken)
def fetch_flow_collection_end(self, TokenClass):
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Decrease the flow level.
self.flow_level -= 1
# No simple keys after ']' or '}'.
self.allow_simple_key = False
# Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_entry(self):
# Simple keys are allowed after ','.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add FLOW-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(FlowEntryToken(start_mark, end_mark))
def fetch_block_entry(self):
# Block context needs additional checks.
if not self.flow_level:
# Are we allowed to start a new entry?
if not self.allow_simple_key:
raise ScannerError(None, None,
"sequence entries are not allowed here",
self.get_mark())
# We may need to add BLOCK-SEQUENCE-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockSequenceStartToken(mark, mark))
# It's an error for the block entry to occur in the flow context,
# but we let the parser detect this.
else:
pass
# Simple keys are allowed after '-'.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add BLOCK-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(BlockEntryToken(start_mark, end_mark))
def fetch_key(self):
# Block context needs additional checks.
if not self.flow_level:
# Are we allowed to start a key (not necessary a simple)?
if not self.allow_simple_key:
raise ScannerError(None, None,
"mapping keys are not allowed here",
self.get_mark())
# We may need to add BLOCK-MAPPING-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
# Simple keys are allowed after '?' in the block context.
self.allow_simple_key = not self.flow_level
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add KEY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(KeyToken(start_mark, end_mark))
    def fetch_value(self):
        """Handle ':' -- emit VALUE and, for a pending simple key, insert
        the KEY (and possibly BLOCK-MAPPING-START) retroactively at the
        position the key was recorded."""
        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:
            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))
            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))
            # There cannot be two simple keys one after another.
            self.allow_simple_key = False
        # It must be a part of a complex key.
        else:
            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:
                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())
            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))
        # Simple keys are allowed after ':' in the block context.
        self.allow_simple_key = not self.flow_level
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
# ALIAS could be a simple key.
self.save_possible_simple_key()
# No simple keys after ALIAS.
self.allow_simple_key = False
# Scan and add ALIAS.
self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
# ANCHOR could start a simple key.
self.save_possible_simple_key()
# No simple keys after ANCHOR.
self.allow_simple_key = False
# Scan and add ANCHOR.
self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
# TAG could start a simple key.
self.save_possible_simple_key()
# No simple keys after TAG.
self.allow_simple_key = False
# Scan and add TAG.
self.tokens.append(self.scan_tag())
def fetch_literal(self):
self.fetch_block_scalar(style='|')
def fetch_folded(self):
self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
# A simple key may follow a block scalar.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Scan and add SCALAR.
self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
self.fetch_flow_scalar(style='\'')
def fetch_double(self):
self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
# A flow scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after flow scalars.
self.allow_simple_key = False
# Scan and add SCALAR.
self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
# A plain scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after plain scalars. But note that `scan_plain` will
# change this flag if the scan is finished at the beginning of the
# line.
self.allow_simple_key = False
# Scan and add SCALAR. May change `allow_simple_key`.
self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
# DIRECTIVE: ^ '%' ...
# The '%' indicator is already checked.
if self.column == 0:
return True
def check_document_start(self):
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '---' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_document_end(self):
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '...' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_block_entry(self):
# BLOCK-ENTRY: '-' (' '|'\n')
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_key(self):
# KEY(flow context): '?'
if self.flow_level:
return True
# KEY(block context): '?' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
# VALUE(flow context): ':'
if self.flow_level:
return True
# VALUE(block context): ':' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_plain(self):
# A plain scalar may start with any non-space character except:
# '-', '?', ':', ',', '[', ']', '{', '}',
# '#', '&', '*', '!', '|', '>', '\'', '\"',
# '%', '@', '`'.
#
# It may also start with
# '-', '?', ':'
# if it is followed by a non-space character.
#
# Note that we limit the last rule to the block context (except the
# '-' character) because we want the flow context to be space
# independent.
ch = self.peek()
return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
and (ch == '-' or (not self.flow_level and ch in '?:')))
# Scanners.
def scan_to_next_token(self):
# We ignore spaces, line breaks and comments.
# If we find a line break in the block context, we set the flag
# `allow_simple_key` on.
# The byte order mark is stripped if it's the first character in the
# stream. We do not yet support BOM inside the stream as the
# specification requires. Any such mark will be considered as a part
# of the document.
#
# TODO: We need to make tab handling rules more sane. A good rule is
# Tabs cannot precede tokens
# BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
# KEY(block), VALUE(block), BLOCK-ENTRY
# So the checking code is
# if <TAB>:
# self.allow_simple_keys = False
# We also need to add the check for `allow_simple_keys == True` to
# `unwind_indent` before issuing BLOCK-END.
# Scanners for block, flow, and plain scalars need to be modified.
if self.index == 0 and self.peek() == '\uFEFF':
self.forward()
found = False
while not found:
while self.peek() == ' ':
self.forward()
if self.peek() == '#':
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
if self.scan_line_break():
if not self.flow_level:
self.allow_simple_key = True
else:
found = True
    def scan_directive(self):
        """Scan a '%NAME ...' line into a DirectiveToken.

        Only YAML and TAG directives carry a parsed value; the body of an
        unknown directive is skipped up to the line break.
        """
        # See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == 'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == 'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            end_mark = self.get_mark()
            # Unknown directive: ignore everything up to the line break.
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)
    def scan_directive_name(self, start_mark):
        """Scan the directive name: a non-empty run of ASCII
        alphanumerics, '-' or '_', terminated by space or end of line."""
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        return value
    def scan_yaml_directive_value(self, start_mark):
        """Scan the '<major>.<minor>' version pair of a %YAML directive."""
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r" % self.peek(),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r" % self.peek(),
                    self.get_mark())
        return (major, minor)
    def scan_yaml_directive_number(self, start_mark):
        """Scan one non-negative decimal integer of the version pair."""
        # See the specification for details.
        ch = self.peek()
        if not ('0' <= ch <= '9'):
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit, but found %r" % ch, self.get_mark())
        length = 0
        while '0' <= self.peek(length) <= '9':
            length += 1
        value = int(self.prefix(length))
        self.forward(length)
        return value
    def scan_tag_directive_value(self, start_mark):
        """Scan the '!handle! prefix' pair of a %TAG directive."""
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        handle = self.scan_tag_directive_handle(start_mark)
        while self.peek() == ' ':
            self.forward()
        prefix = self.scan_tag_directive_prefix(start_mark)
        return (handle, prefix)
    def scan_tag_directive_handle(self, start_mark):
        """Scan the tag handle of a %TAG directive; a space must follow."""
        # See the specification for details.
        value = self.scan_tag_handle('directive', start_mark)
        ch = self.peek()
        if ch != ' ':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        return value
    def scan_tag_directive_prefix(self, start_mark):
        """Scan the prefix URI of a %TAG directive; space or end of line
        must follow."""
        # See the specification for details.
        value = self.scan_tag_uri('directive', start_mark)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        return value
    def scan_directive_ignored_line(self, start_mark):
        """Skip trailing spaces and an optional comment after a directive,
        then consume the line break."""
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        if self.peek() == '#':
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in '\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a comment or a line break, but found %r"
                    % ch, self.get_mark())
        self.scan_line_break()
    def scan_anchor(self, TokenClass):
        """Scan '&name' or '*name' into an AnchorToken or AliasToken."""
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpreted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        start_mark = self.get_mark()
        indicator = self.peek()
        # '*' introduces an alias, '&' an anchor; only the wording of the
        # error messages differs.
        if indicator == '*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)
def scan_tag(self):
# See the specification for details.
start_mark = self.get_mark()
ch = self.peek(1)
if ch == '<':
handle = None
self.forward(2)
suffix = self.scan_tag_uri('tag', start_mark)
if self.peek() != '>':
raise ScannerError("while parsing a tag", start_mark,
"expected '>', but found %r" % self.peek(),
self.get_mark())
self.forward()
elif ch in '\0 \t\r\n\x85\u2028\u2029':
handle = None
suffix = '!'
self.forward()
else:
length = 1
use_handle = False
while ch not in '\0 \r\n\x85\u2028\u2029':
if ch == '!':
use_handle = True
break
length += 1
ch = self.peek(length)
handle = '!'
if use_handle:
handle = self.scan_tag_handle('tag', start_mark)
else:
handle = '!'
self.forward()
suffix = self.scan_tag_uri('tag', start_mark)
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a tag", start_mark,
"expected ' ', but found %r" % ch, self.get_mark())
value = (handle, suffix)
end_mark = self.get_mark()
return TagToken(value, start_mark, end_mark)
    def scan_block_scalar(self, style):
        """Scan a literal ('|') or folded ('>') block scalar into a ScalarToken."""
        # See the specification for details.
        if style == '>':
            folded = True
        else:
            folded = False
        chunks = []
        start_mark = self.get_mark()
        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)
        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            # No explicit indentation indicator: deduce the indent from the
            # first non-empty line.
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = ''
        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != '\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in ' \t'
            # Take the whole content line in one slice.
            length = 0
            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != '\0':
                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:
                if folded and line_break == '\n' \
                        and leading_non_space and self.peek() not in ' \t':
                    # Fold a single break between non-blank lines to a space.
                    if not breaks:
                        chunks.append(' ')
                else:
                    chunks.append(line_break)
                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == '\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break
        # Chomp the tail.
        if chomping is not False:
            # clip (None) and keep (True) both retain the final line break.
            chunks.append(line_break)
        if chomping is True:
            # keep ('+') also retains trailing empty lines.
            chunks.extend(breaks)
        # We are done.
        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                style)
    def scan_block_scalar_indicators(self, start_mark):
        """Scan optional chomping ('+'/'-') and indentation (1-9) indicators
        of a block scalar header; they may appear in either order.

        Returns (chomping, increment); either element may be None.
        """
        # See the specification for details.
        chomping = None
        increment = None
        ch = self.peek()
        if ch in '+-':
            # Chomping indicator first, then an optional indentation digit.
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
            ch = self.peek()
            if ch in '0123456789':
                increment = int(ch)
                if increment == 0:
                    raise ScannerError("while scanning a block scalar", start_mark,
                            "expected indentation indicator in the range 1-9, but found 0",
                            self.get_mark())
                self.forward()
        elif ch in '0123456789':
            # Indentation digit first, then an optional chomping indicator.
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
            ch = self.peek()
            if ch in '+-':
                if ch == '+':
                    chomping = True
                else:
                    chomping = False
                self.forward()
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected chomping or indentation indicators, but found %r"
                    % ch, self.get_mark())
        return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
if self.peek() == '#':
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in '\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected a comment or a line break, but found %r" % ch,
self.get_mark())
self.scan_line_break()
def scan_block_scalar_indentation(self):
# See the specification for details.
chunks = []
max_indent = 0
end_mark = self.get_mark()
while self.peek() in ' \r\n\x85\u2028\u2029':
if self.peek() != ' ':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
else:
self.forward()
if self.column > max_indent:
max_indent = self.column
return chunks, max_indent, end_mark
def scan_block_scalar_breaks(self, indent):
# See the specification for details.
chunks = []
end_mark = self.get_mark()
while self.column < indent and self.peek() == ' ':
self.forward()
while self.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
while self.column < indent and self.peek() == ' ':
self.forward()
return chunks, end_mark
def scan_flow_scalar(self, style):
# See the specification for details.
# Note that we loose indentation rules for quoted scalars. Quoted
# scalars don't need to adhere indentation because " and ' clearly
# mark the beginning and the end of them. Therefore we are less
# restrictive then the specification requires. We only need to check
# that document separators are not included in scalars.
if style == '"':
double = True
else:
double = False
chunks = []
start_mark = self.get_mark()
quote = self.peek()
self.forward()
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
while self.peek() != quote:
chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
self.forward()
end_mark = self.get_mark()
return ScalarToken(''.join(chunks), False, start_mark, end_mark,
style)
ESCAPE_REPLACEMENTS = {
'0': '\0',
'a': '\x07',
'b': '\x08',
't': '\x09',
'\t': '\x09',
'n': '\x0A',
'v': '\x0B',
'f': '\x0C',
'r': '\x0D',
'e': '\x1B',
' ': '\x20',
'\"': '\"',
'\\': '\\',
'/': '/',
'N': '\x85',
'_': '\xA0',
'L': '\u2028',
'P': '\u2029',
}
ESCAPE_CODES = {
'x': 2,
'u': 4,
'U': 8,
}
    def scan_flow_scalar_non_spaces(self, double, start_mark):
        """Scan a run of non-blank characters inside a quoted scalar,
        resolving quote doubling and (for double quotes) escape sequences.
        Returns when a blank, a line break, or the closing quote is next.
        """
        # See the specification for details.
        chunks = []
        while True:
            # Take the longest run of ordinary characters in one slice.
            length = 0
            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == '\'' and self.peek(1) == '\'':
                # '' inside a single-quoted scalar is an escaped quote.
                chunks.append('\'')
                self.forward(2)
            elif (double and ch == '\'') or (not double and ch in '\"\\'):
                # Literal in the current quoting style.
                chunks.append(ch)
                self.forward()
            elif double and ch == '\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    # \xXX, \uXXXX or \UXXXXXXXX numeric escapes.
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in '0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
                                        (length, self.peek(k)), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(chr(code))
                    self.forward(length)
                elif ch in '\r\n\x85\u2028\u2029':
                    # An escaped line break is removed (line continuation).
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch, self.get_mark())
            else:
                # A blank, a break, or the closing quote: caller handles it.
                return chunks
    def scan_flow_scalar_spaces(self, double, start_mark):
        """Scan blanks and line breaks inside a quoted scalar, applying
        flow folding rules."""
        # See the specification for details.
        chunks = []
        length = 0
        while self.peek(length) in ' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == '\0':
            # The stream ended before the closing quote.
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            # Folding: a lone '\n' becomes a space; other breaks and any
            # extra empty lines are kept as-is.
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks
def scan_flow_scalar_breaks(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
# Instead of checking indentation, we check for document
# separators.
prefix = self.prefix(3)
if (prefix == '---' or prefix == '...') \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected document separator", self.get_mark())
while self.peek() in ' \t':
self.forward()
if self.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
else:
return chunks
    def scan_plain(self):
        """Scan a plain (unquoted) scalar token."""
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',' or '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosed for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            if self.peek() == '#':
                # A comment ends the scalar.
                break
            while True:
                ch = self.peek(length)
                # Stop at blanks/breaks, at ':' followed by a blank (a
                # mapping value indicator), and, in flow context, at flow
                # punctuation.
                if ch in '\0 \t\r\n\x85\u2028\u2029' \
                        or (ch == ':' and
                            self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
                                + (u',[]{}' if self.flow_level else u''))\
                        or (self.flow_level and ch in ',?[]{}'):
                    break
                length += 1
            if length == 0:
                break
            # Content was found, so no simple key may start right after it.
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            spaces = self.scan_plain_spaces(indent, start_mark)
            # The scalar ends at a document separator, a comment, or (in
            # block context) insufficient indentation.
            if not spaces or self.peek() == '#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
    def scan_plain_spaces(self, indent, start_mark):
        """Scan spaces and line breaks after a plain-scalar chunk.

        Returns the folded whitespace to insert before the next chunk, or
        None if a document separator ends the scalar.
        """
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        chunks = []
        length = 0
        while self.peek(length) in ' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            # After a line break a simple key may start again.
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == '---' or prefix == '...') \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in ' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == '---' or prefix == '...') \
                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                        return
            # Folding: a lone break becomes a space; extra breaks are kept.
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks
    def scan_tag_handle(self, name, start_mark):
        """Scan a tag handle: '!', '!!' or '!word!'; returns the handle text."""
        # See the specification for details.
        # For some strange reasons, the specification does not allow '_' in
        # tag handles. I have allowed it anyway.
        ch = self.peek()
        if ch != '!':
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch, self.get_mark())
        length = 1
        ch = self.peek(length)
        if ch != ' ':
            # Consume the word characters of a named handle; it must be
            # terminated by a second '!'.
            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
                    or ch in '-_':
                length += 1
                ch = self.peek(length)
            if ch != '!':
                self.forward(length)
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected '!', but found %r" % ch, self.get_mark())
            length += 1
        value = self.prefix(length)
        self.forward(length)
        return value
    def scan_tag_uri(self, name, start_mark):
        """Scan the URI part of a tag, decoding %XX escape runs."""
        # See the specification for details.
        # Note: we do not check if URI is well-formed.
        chunks = []
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
            if ch == '%':
                # Flush what we have, then decode a run of %XX escapes.
                chunks.append(self.prefix(length))
                self.forward(length)
                length = 0
                chunks.append(self.scan_uri_escapes(name, start_mark))
            else:
                length += 1
            ch = self.peek(length)
        if length:
            chunks.append(self.prefix(length))
            self.forward(length)
            length = 0
        if not chunks:
            # NOTE(review): message says "parsing" while sibling scanners say
            # "scanning" - kept as-is to preserve the exact error text.
            raise ScannerError("while parsing a %s" % name, start_mark,
                    "expected URI, but found %r" % ch, self.get_mark())
        return ''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
# See the specification for details.
codes = []
mark = self.get_mark()
while self.peek() == '%':
self.forward()
for k in range(2):
if self.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected URI escape sequence of 2 hexadecimal numbers, but found %r"
% self.peek(k), self.get_mark())
codes.append(int(self.prefix(2), 16))
self.forward(2)
try:
value = bytes(codes).decode('utf-8')
except UnicodeDecodeError as exc:
raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
return value
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
ch = self.peek()
if ch in '\r\n\x85':
if self.prefix(2) == '\r\n':
self.forward(2)
else:
self.forward()
return '\n'
elif ch in '\u2028\u2029':
self.forward()
return ch
return ''
```
|
==========================================================================================================
SOURCE CODE FILE: serializer.py
LINES: 1
SIZE: 4.07 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\serializer.py
ENCODING: utf-8
```py
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
    """Raised when Serializer methods are used out of order
    (e.g. serialize() before open() or after close())."""
    pass
class Serializer:
    """Mixin that turns a composed node graph into emitter events.

    Each node tree is walked twice: anchor_node() finds nodes that occur
    more than once and assigns them anchors, then serialize_node() emits
    events, replacing repeated nodes with aliases.
    """
    # Template for generated anchor names: id001, id002, ...
    ANCHOR_TEMPLATE = 'id%03d'
    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        # Stream/document options forwarded into the emitted events.
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # Per-document state, reset at the end of serialize().
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        # None: not opened yet; False: open; True: closed.
        self.closed = None
    def open(self):
        """Emit the stream start; must be called exactly once, first."""
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")
    def close(self):
        """Emit the stream end; valid only after open()."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True
    #def __del__(self):
    #    self.close()
    def serialize(self, node):
        """Serialize one document rooted at `node` into events."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # Reset per-document state so documents do not share anchors.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
    def anchor_node(self, node):
        """First pass: give every node seen more than once an anchor name."""
        if node in self.anchors:
            # Second occurrence: assign a real anchor if not done already.
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)
    def generate_anchor(self, node):
        """Return the next sequential anchor name."""
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id
    def serialize_node(self, node, parent, index):
        """Second pass: emit events for `node`, using aliases for repeats."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # Compare the node's tag against what the resolver would
                # choose, to decide whether the tag may stay implicit.
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
```
|
======================================================================================================
SOURCE CODE FILE: tokens.py
LINES: 1
SIZE: 2.51 KB
PATH: scripts\freecad_env\Lib\site-packages\yaml\tokens.py
ENCODING: utf-8
```py
class Token(object):
    """Base class for scanner tokens: a lexeme plus its source span."""
    def __init__(self, start_mark, end_mark):
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        # Show the payload attributes in sorted order, hiding *_mark fields.
        shown = sorted(
            key for key in self.__dict__ if not key.endswith('_mark'))
        described = ', '.join(
            '%s=%r' % (key, getattr(self, key)) for key in shown)
        return '%s(%s)' % (self.__class__.__name__, described)
#class BOMToken(Token):
#    id = '<byte order mark>'
class DirectiveToken(Token):
    """A '%YAML ...' or '%TAG ...' directive; value holds its arguments."""
    id = '<directive>'
    def __init__(self, name, value, start_mark, end_mark):
        self.name = name
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class DocumentStartToken(Token):
    """The '---' document start marker."""
    id = '<document start>'
class DocumentEndToken(Token):
    """The '...' document end marker."""
    id = '<document end>'
class StreamStartToken(Token):
    """Start of the token stream; records the input encoding, if known."""
    id = '<stream start>'
    def __init__(self, start_mark=None, end_mark=None,
            encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.encoding = encoding
class StreamEndToken(Token):
    """End of the token stream."""
    id = '<stream end>'
class BlockSequenceStartToken(Token):
    """Start of a block-style sequence."""
    id = '<block sequence start>'
class BlockMappingStartToken(Token):
    """Start of a block-style mapping."""
    id = '<block mapping start>'
class BlockEndToken(Token):
    """End of a block sequence or mapping."""
    id = '<block end>'
class FlowSequenceStartToken(Token):
    """'[' opening a flow sequence."""
    id = '['
class FlowMappingStartToken(Token):
    """'{' opening a flow mapping."""
    id = '{'
class FlowSequenceEndToken(Token):
    """']' closing a flow sequence."""
    id = ']'
class FlowMappingEndToken(Token):
    """'}' closing a flow mapping."""
    id = '}'
class KeyToken(Token):
    """'?' (or implicit) mapping-key indicator."""
    id = '?'
class ValueToken(Token):
    """':' value indicator."""
    id = ':'
class BlockEntryToken(Token):
    """'-' block sequence entry indicator."""
    id = '-'
class FlowEntryToken(Token):
    """',' entry separator inside flow collections."""
    id = ','
class AliasToken(Token):
    """'*name' alias referring to a previously anchored node."""
    id = '<alias>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class AnchorToken(Token):
    """'&name' anchor naming the next node."""
    id = '<anchor>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class TagToken(Token):
    """A tag; value is a (handle, suffix) pair."""
    id = '<tag>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class ScalarToken(Token):
    """A scalar value; plain is True for unquoted scalars, and style
    records the quoting/block style character (None for plain)."""
    id = '<scalar>'
    def __init__(self, value, plain, start_mark, end_mark, style=None):
        self.value = value
        self.plain = plain
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style
```
|
========================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.27 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\__init__.py
ENCODING: utf-8
```py
from ._query import Query, QueryVariable, SimpleQuery
from ._url import URL, cache_clear, cache_configure, cache_info

# Distribution version string.
__version__ = "1.20.0"

# Names re-exported as the public API of the package.
__all__ = (
    "URL",
    "SimpleQuery",
    "QueryVariable",
    "Query",
    "cache_clear",
    "cache_configure",
    "cache_info",
)
```
|
======================================================================================================
SOURCE CODE FILE: _parse.py
LINES: 3
SIZE: 6.99 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\_parse.py
ENCODING: utf-8
```py
"""URL parsing utilities."""
import re
import unicodedata
from functools import lru_cache
from typing import Union
from urllib.parse import scheme_chars, uses_netloc
from ._quoters import QUOTER, UNQUOTER_PLUS
# Leading and trailing C0 control and space to be stripped per WHATWG spec.
# == "".join([chr(i) for i in range(0, 0x20 + 1)])
WHATWG_C0_CONTROL_OR_SPACE = (
    "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10"
    "\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f "
)
# Unsafe bytes to be removed per WHATWG spec
UNSAFE_URL_BYTES_TO_REMOVE = ["\t", "\r", "\n"]
# Schemes that use a network location (authority) component.
USES_AUTHORITY = frozenset(uses_netloc)
# (scheme, netloc, path, query, fragment) as returned by split_url().
SplitURLType = tuple[str, str, str, str, str]
def split_url(url: str) -> SplitURLType:
    """Split URL into parts.

    Returns (scheme, netloc, path, query, fragment).
    """
    # Adapted from urllib.parse.urlsplit
    # Only lstrip url as some applications rely on preserving trailing space.
    # (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both)
    url = url.lstrip(WHATWG_C0_CONTROL_OR_SPACE)
    for b in UNSAFE_URL_BYTES_TO_REMOVE:
        if b in url:
            url = url.replace(b, "")
    scheme = netloc = query = fragment = ""
    i = url.find(":")
    if i > 0 and url[0] in scheme_chars:
        # A scheme is recognized only if every char before ':' is valid.
        for c in url[1:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i + 1 :]
    has_hash = "#" in url
    has_question_mark = "?" in url
    if url[:2] == "//":
        delim = len(url)  # position of end of domain part of url, default is end
        if has_hash and has_question_mark:
            delim_chars = "/?#"
        elif has_question_mark:
            delim_chars = "/?"
        elif has_hash:
            delim_chars = "/#"
        else:
            delim_chars = "/"
        for c in delim_chars:  # look for delimiters; the order is NOT important
            wdelim = url.find(c, 2)  # find first of this delim
            if wdelim >= 0 and wdelim < delim:  # if found
                delim = wdelim  # use earliest delim position
        netloc = url[2:delim]  # authority component
        url = url[delim:]  # rest of the url (path onwards)
        has_left_bracket = "[" in netloc
        has_right_bracket = "]" in netloc
        if (has_left_bracket and not has_right_bracket) or (
            has_right_bracket and not has_left_bracket
        ):
            raise ValueError("Invalid IPv6 URL")
        if has_left_bracket:
            bracketed_host = netloc.partition("[")[2].partition("]")[0]
            # Valid bracketed hosts are defined in
            # https://www.rfc-editor.org/rfc/rfc3986#page-49
            # https://url.spec.whatwg.org/
            if bracketed_host[0] == "v":
                # IPvFuture literal: "v<hex>.<chars>"
                if not re.match(r"\Av[a-fA-F0-9]+\..+\Z", bracketed_host):
                    raise ValueError("IPvFuture address is invalid")
            elif ":" not in bracketed_host:
                raise ValueError("An IPv4 address cannot be in brackets")
    if has_hash:
        url, _, fragment = url.partition("#")
    if has_question_mark:
        url, _, query = url.partition("?")
    if netloc and not netloc.isascii():
        # Non-ASCII authority: guard against NFKC normalization smuggling
        # delimiters into the netloc.
        _check_netloc(netloc)
    return scheme, netloc, url, query, fragment
def _check_netloc(netloc: str) -> None:
# Adapted from urllib.parse._checknetloc
# looking for characters like \u2100 that expand to 'a/c'
# IDNA uses NFKC equivalence, so normalize for this check
# ignore characters already included
# but not the surrounding text
n = netloc.replace("@", "").replace(":", "").replace("#", "").replace("?", "")
normalized_netloc = unicodedata.normalize("NFKC", n)
if n == normalized_netloc:
return
# Note that there are no unicode decompositions for the character '@' so
# its currently impossible to have test coverage for this branch, however if the
# one should be added in the future we want to make sure its still checked.
for c in "/?#@:": # pragma: no branch
if c in normalized_netloc:
raise ValueError(
f"netloc '{netloc}' contains invalid "
"characters under NFKC normalization"
)
@lru_cache  # match the same size as urlsplit
def split_netloc(
    netloc: str,
) -> tuple[Union[str, None], Union[str, None], Union[str, None], Union[int, None]]:
    """Split netloc into username, password, host and port."""
    username: Union[str, None]
    password: Union[str, None]
    if "@" in netloc:
        userinfo, _, hostinfo = netloc.rpartition("@")
        username, sep, password = userinfo.partition(":")
        if not sep:
            password = None
    else:
        username = None
        password = None
        hostinfo = netloc
    # A bracketed host is an IPv6/IPvFuture literal; the port, if any,
    # follows the closing bracket.
    if "[" in hostinfo:
        bracketed = hostinfo.partition("[")[2]
        hostname, _, after = bracketed.partition("]")
        port_str = after.partition(":")[2]
    else:
        hostname, _, port_str = hostinfo.partition(":")
    if not port_str:
        return username or None, password, hostname or None, None
    try:
        port = int(port_str)
    except ValueError:
        raise ValueError("Invalid URL: port can't be converted to integer")
    if port < 0 or port > 65535:
        raise ValueError("Port out of range 0-65535")
    return username or None, password, hostname or None, port
def unsplit_result(
    scheme: str, netloc: str, url: str, query: str, fragment: str
) -> str:
    """Unsplit a URL without any normalization."""
    has_authority = netloc or (scheme and scheme in USES_AUTHORITY) or url[:2] == "//"
    if has_authority:
        # Ensure exactly one '/' between the authority and a relative path.
        if url and url[:1] != "/":
            url = f"{scheme}://{netloc}/{url}" if scheme else f"{scheme}:{url}"
        else:
            url = f"{scheme}://{netloc}{url}" if scheme else f"//{netloc}{url}"
    elif scheme:
        url = f"{scheme}:{url}"
    if query:
        url = f"{url}?{query}"
    if fragment:
        url = f"{url}#{fragment}"
    return url
@lru_cache  # match the same size as urlsplit
def make_netloc(
    user: Union[str, None],
    password: Union[str, None],
    host: Union[str, None],
    port: Union[int, None],
    encode: bool = False,
) -> str:
    """Make netloc from parts.

    The user and password are encoded if encode is True.

    The host must already be encoded with _encode_host.
    """
    if host is None:
        return ""
    hostport = f"{host}:{port}" if port is not None else host
    if user is None and password is None:
        return hostport
    userinfo = user
    if password is not None:
        # A password forces a 'user:password' userinfo, even with no user.
        if not userinfo:
            userinfo = ""
        elif encode:
            userinfo = QUOTER(userinfo)
        secret = QUOTER(password) if encode else password
        userinfo = f"{userinfo}:{secret}"
    elif userinfo and encode:
        userinfo = QUOTER(userinfo)
    return f"{userinfo}@{hostport}" if userinfo else hostport
def query_to_pairs(query_string: str) -> list[tuple[str, str]]:
    """Parse a query given as a string argument.

    Works like urllib.parse.parse_qsl with keep empty values.
    """
    if not query_string:
        return []
    # Each '&'-separated chunk is split on the first '='; a missing '='
    # yields an empty value, matching parse_qsl(keep_blank_values=True).
    return [
        (UNQUOTER_PLUS(key), UNQUOTER_PLUS(value))
        for key, _, value in (chunk.partition("=") for chunk in query_string.split("&"))
    ]
```
|
=====================================================================================================
SOURCE CODE FILE: _path.py
LINES: 1
SIZE: 1.26 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\_path.py
ENCODING: utf-8
```py
"""Utilities for working with paths."""
from collections.abc import Sequence
from contextlib import suppress
def normalize_path_segments(segments: Sequence[str]) -> list[str]:
    """Drop '.' and '..' from a sequence of str segments"""
    output: list[str] = []
    for segment in segments:
        if segment == ".":
            continue
        if segment == "..":
            # A '..' with nothing left to pop (rfc3986 resolution on an
            # already-empty path) is simply ignored.
            if output:
                output.pop()
        else:
            output.append(segment)
    if segments and segments[-1] in (".", ".."):
        # A trailing relative segment means the path named a directory;
        # keep the trailing '/' by appending an empty segment.
        output.append("")
    return output
def normalize_path(path: str) -> str:
    """Drop '.' and '..' segments from a str path."""
    if path.startswith("/"):
        # Keep the root '/' of absolute paths out of the segment list so it
        # survives normalization (rfc3986 sections 5.2.4 and 6.2.2.3).
        return "/" + "/".join(normalize_path_segments(path[1:].split("/")))
    return "/".join(normalize_path_segments(path.split("/")))
```
|
======================================================================================================
SOURCE CODE FILE: _query.py
LINES: 1
SIZE: 3.79 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\_query.py
ENCODING: utf-8
```py
"""Query string handling."""
import math
from collections.abc import Iterable, Mapping, Sequence
from typing import Any, SupportsInt, Union
from multidict import istr
from ._quoters import QUERY_PART_QUOTER, QUERY_QUOTER
SimpleQuery = Union[str, SupportsInt, float]
QueryVariable = Union[SimpleQuery, Sequence[SimpleQuery]]
Query = Union[
None, str, Mapping[str, QueryVariable], Sequence[tuple[str, QueryVariable]]
]
def query_var(v: SimpleQuery) -> str:
    """Convert a query variable to a string."""
    kind = type(v)
    if kind is int:  # Fast path for non-subclassed int
        return str(v)
    if isinstance(v, str):
        return v
    if isinstance(v, float):
        # Infinities and NaNs have no sensible query-string form.
        if math.isinf(v):
            raise ValueError("float('inf') is not supported")
        if math.isnan(v):
            raise ValueError("float('nan') is not supported")
        return str(float(v))
    # bool is excluded even though it supports int conversion.
    if kind is not bool and isinstance(v, SupportsInt):
        return str(int(v))
    raise TypeError(
        "Invalid variable type: value "
        "should be str, int or float, got {!r} "
        "of type {}".format(v, kind)
    )
def get_str_query_from_sequence_iterable(
    items: Iterable[tuple[Union[str, istr], QueryVariable]],
) -> str:
    """Return a query string from a sequence of (key, value) pairs.

    value is a single value or a sequence of values for the key

    The sequence of values must be a list or tuple.
    """
    quoter = QUERY_PART_QUOTER
    parts: list[str] = []
    for key, value in items:
        # A list/tuple value fans out into one pair per element; anything
        # else (including a plain str) is treated as a single scalar.
        if type(value) is not str and isinstance(value, (list, tuple)):
            fanout = value
        else:
            fanout = (value,)
        for single in fanout:
            text = single if type(single) is str else query_var(single)
            parts.append(f"{quoter(key)}={quoter(text)}")
    return "&".join(parts)
def get_str_query_from_iterable(
    items: Iterable[tuple[Union[str, istr], SimpleQuery]],
) -> str:
    """Return a query string from an iterable.

    The iterable must contain (key, value) pairs.

    The values are not allowed to be sequences, only single values are
    allowed. For sequences, use `_get_str_query_from_sequence_iterable`.
    """
    quoter = QUERY_PART_QUOTER
    # A listcomp is used since listcomps are inlined on CPython 3.12+ and
    # they are a bit faster than a generator expression.
    encoded = [
        f"{quoter(key)}={quoter(value if type(value) is str else query_var(value))}"
        for key, value in items
    ]
    return "&".join(encoded)
def get_str_query(*args: Any, **kwargs: Any) -> Union[str, None]:
    """Return a query string from supported args.

    Accepts either a single positional query (str, mapping, sequence of
    (key, value) pairs, or None) or keyword arguments - but not both.

    Raises:
        ValueError: if both (or neither) positional and keyword forms are used.
        TypeError: for unsupported query types.
    """
    query: Union[str, Mapping[str, QueryVariable], None]
    if kwargs:
        if args:
            msg = "Either kwargs or single query parameter must be present"
            raise ValueError(msg)
        query = kwargs
    elif len(args) == 1:
        query = args[0]
    else:
        raise ValueError("Either kwargs or single query parameter must be present")
    if query is None:
        return None
    if not query:
        return ""
    if type(query) is dict:
        # Fast path: a plain dict skips the slower ABC isinstance checks.
        return get_str_query_from_sequence_iterable(query.items())
    # Fix: was `type(query) is str or isinstance(query, str)`; the first
    # test is subsumed by the isinstance check.
    if isinstance(query, str):
        return QUERY_QUOTER(query)
    if isinstance(query, Mapping):
        return get_str_query_from_sequence_iterable(query.items())
    if isinstance(query, (bytes, bytearray, memoryview)):  # type: ignore[unreachable]
        msg = "Invalid query type: bytes, bytearray and memoryview are forbidden"
        raise TypeError(msg)
    if isinstance(query, Sequence):
        # We don't expect sequence values if we're given a list of pairs
        # already; only mappings like builtin `dict` which can't have the
        # same key pointing to multiple values are allowed to use
        # `_query_seq_pairs`.
        return get_str_query_from_iterable(query)
    raise TypeError(
        "Invalid query type: only str, mapping or "
        "sequence of (key, value) pairs is allowed"
    )
```
|
========================================================================================================
SOURCE CODE FILE: _quoters.py
LINES: 1
SIZE: 1.13 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\_quoters.py
ENCODING: utf-8
```py
"""Quoting and unquoting utilities for URL parts."""
from typing import Union
from urllib.parse import quote
from ._quoting import _Quoter, _Unquoter
QUOTER = _Quoter(requote=False)
REQUOTER = _Quoter()
PATH_QUOTER = _Quoter(safe="@:", protected="/+", requote=False)
PATH_REQUOTER = _Quoter(safe="@:", protected="/+")
QUERY_QUOTER = _Quoter(safe="?/:@", protected="=+&;", qs=True, requote=False)
QUERY_REQUOTER = _Quoter(safe="?/:@", protected="=+&;", qs=True)
QUERY_PART_QUOTER = _Quoter(safe="?/:@", qs=True, requote=False)
FRAGMENT_QUOTER = _Quoter(safe="?/:@", requote=False)
FRAGMENT_REQUOTER = _Quoter(safe="?/:@")
UNQUOTER = _Unquoter()
PATH_UNQUOTER = _Unquoter(unsafe="+")
PATH_SAFE_UNQUOTER = _Unquoter(ignore="/%", unsafe="+")
QS_UNQUOTER = _Unquoter(qs=True)
UNQUOTER_PLUS = _Unquoter(plus=True) # to match urllib.parse.unquote_plus
def human_quote(s: Union[str, None], unsafe: str) -> Union[str, None]:
    """Percent-encode only what a human-readable form requires.

    '%' and the caller-supplied unsafe characters are always encoded; any
    remaining non-printable characters are quoted as a fallback. Empty or
    None input is returned unchanged.
    """
    if not s:
        return s
    for char in "%" + unsafe:
        if char in s:
            s = s.replace(char, f"%{ord(char):02X}")
    if s.isprintable():
        return s
    # Quote the (rare) non-printable leftovers character by character.
    return "".join(ch if ch.isprintable() else quote(ch) for ch in s)
```
|
========================================================================================================
SOURCE CODE FILE: _quoting.py
LINES: 1
SIZE: 0.49 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\_quoting.py
ENCODING: utf-8
```py
import os
import sys
from typing import TYPE_CHECKING
__all__ = ("_Quoter", "_Unquoter")
# Allow opting out of the C extension via the environment.
NO_EXTENSIONS = bool(os.environ.get("YARL_NO_EXTENSIONS"))  # type: bool
if sys.implementation.name != "cpython":
    # The compiled extension targets CPython only.
    NO_EXTENSIONS = True
if TYPE_CHECKING or NO_EXTENSIONS:
    from ._quoting_py import _Quoter, _Unquoter
else:
    # Prefer the compiled implementation; fall back to pure Python.
    try:
        from ._quoting_c import _Quoter, _Unquoter
    except ImportError:  # pragma: no cover
        from ._quoting_py import _Quoter, _Unquoter  # type: ignore[assignment]
```
|
===========================================================================================================
SOURCE CODE FILE: _quoting_py.py
LINES: 1
SIZE: 6.67 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\_quoting_py.py
ENCODING: utf-8
```py
import codecs
import re
from string import ascii_letters, ascii_lowercase, digits
from typing import Union, cast, overload
BASCII_LOWERCASE = ascii_lowercase.encode("ascii")
# Every possible "%XX" byte sequence, pre-rendered in uppercase.
BPCT_ALLOWED = {f"%{i:02X}".encode("ascii") for i in range(256)}
# Character classes from RFC 3986 section 2.2.
GEN_DELIMS = ":/?#[]@"
SUB_DELIMS_WITHOUT_QS = "!$'()*,"
SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + "+&=;"
RESERVED = GEN_DELIMS + SUB_DELIMS
UNRESERVED = ascii_letters + digits + "-._~"
ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS
# NOTE: deliberately broad ([A-Z0-9], not [A-F0-9]) — input is uppercased
# before matching, and non-hex pairs are rejected later by int(..., 16).
_IS_HEX = re.compile(b"[A-Z0-9][A-Z0-9]")
_IS_HEX_STR = re.compile("[A-Fa-f0-9][A-Fa-f0-9]")
# Incremental decoder factory: lets multi-byte UTF-8 sequences arrive one
# percent-escaped byte at a time.
utf8_decoder = codecs.getincrementaldecoder("utf-8")
class _Quoter:
    """Percent-encoder for URL components (pure-Python implementation).

    ``safe``: extra characters left unescaped in addition to ALLOWED.
    ``protected``: characters whose pre-existing ``%XX`` spelling must
        survive requoting (they are also treated as safe when raw).
    ``qs``: query-string mode — space becomes "+" and "+&=;" are escaped.
    ``requote``: normalize existing ``%XX`` sequences instead of escaping
        their "%" as "%25".
    """
    def __init__(
        self,
        *,
        safe: str = "",
        protected: str = "",
        qs: bool = False,
        requote: bool = True,
    ) -> None:
        self._safe = safe
        self._protected = protected
        self._qs = qs
        self._requote = requote
    @overload
    def __call__(self, val: str) -> str: ...
    @overload
    def __call__(self, val: None) -> None: ...
    def __call__(self, val: Union[str, None]) -> Union[str, None]:
        """Return the quoted form of *val*; ``None`` passes through.

        Raises TypeError for non-str, non-None arguments.
        """
        if val is None:
            return None
        if not isinstance(val, str):
            raise TypeError("Argument should be str")
        if not val:
            return ""
        # Work on UTF-8 bytes so each byte of a multi-byte character is
        # escaped individually (%XX%XX...).
        bval = val.encode("utf8", errors="ignore")
        ret = bytearray()
        # Buffer holding a partially consumed "%XX" candidate while requoting.
        pct = bytearray()
        safe = self._safe
        safe += ALLOWED
        if not self._qs:
            # Outside query strings these sub-delims need no escaping.
            safe += "+&=;"
        safe += self._protected
        bsafe = safe.encode("ascii")
        idx = 0
        while idx < len(bval):
            ch = bval[idx]
            idx += 1
            if pct:
                if ch in BASCII_LOWERCASE:
                    ch = ch - 32  # convert to uppercase
                pct.append(ch)
                if len(pct) == 3:  # pragma: no branch   # peephole optimizer
                    buf = pct[1:]
                    if not _IS_HEX.match(buf):
                        # Invalid escape: emit "%25" for the lone "%" and
                        # rewind to re-scan the two buffered bytes as input.
                        ret.extend(b"%25")
                        pct.clear()
                        idx -= 2
                        continue
                    try:
                        unquoted = chr(int(pct[1:].decode("ascii"), base=16))
                    except ValueError:
                        # _IS_HEX is permissive; non-hex digits land here.
                        ret.extend(b"%25")
                        pct.clear()
                        idx -= 2
                        continue
                    if unquoted in self._protected:
                        # Keep the original %XX spelling.
                        ret.extend(pct)
                    elif unquoted in safe:
                        # Safe char: emit it decoded.
                        ret.append(ord(unquoted))
                    else:
                        ret.extend(pct)
                    pct.clear()
                # special case, if we have only one char after "%"
                elif len(pct) == 2 and idx == len(bval):
                    ret.extend(b"%25")
                    pct.clear()
                    idx -= 1
                continue
            elif ch == ord("%") and self._requote:
                pct.clear()
                pct.append(ch)
                # special case if "%" is last char
                if idx == len(bval):
                    ret.extend(b"%25")
                continue
            if self._qs and ch == ord(" "):
                # Query-string rule: encode space as "+".
                ret.append(ord("+"))
                continue
            if ch in bsafe:
                ret.append(ch)
                continue
            ret.extend((f"%{ch:02X}").encode("ascii"))
        ret2 = ret.decode("ascii")
        if ret2 == val:
            # Return the original object to avoid an equal-but-new string.
            return val
        return ret2
class _Unquoter:
    """Percent-decoder for URL components (pure-Python implementation).

    ``ignore``: characters that must stay percent-encoded in the output.
    ``unsafe``: characters that are re-encoded if produced by decoding.
    ``qs``: query-string mode — "+" decodes to space and decoded "+=&;"
        are re-quoted so the query structure is preserved.
    ``plus``: decode "+" to space even when ``qs`` is False
        (urllib.parse.unquote_plus compatibility).
    """
    def __init__(
        self,
        *,
        ignore: str = "",
        unsafe: str = "",
        qs: bool = False,
        plus: bool = False,
    ) -> None:
        self._ignore = ignore
        self._unsafe = unsafe
        self._qs = qs
        self._plus = plus  # to match urllib.parse.unquote_plus
        # Helper quoters used to re-encode unsafe/structural characters.
        self._quoter = _Quoter()
        self._qs_quoter = _Quoter(qs=True)
    @overload
    def __call__(self, val: str) -> str: ...
    @overload
    def __call__(self, val: None) -> None: ...
    def __call__(self, val: Union[str, None]) -> Union[str, None]:
        """Return the unquoted form of *val*; ``None`` passes through.

        Raises TypeError for non-str, non-None arguments.
        """
        if val is None:
            return None
        if not isinstance(val, str):
            raise TypeError("Argument should be str")
        if not val:
            return ""
        # Incremental decoder buffers partial multi-byte UTF-8 sequences
        # spread across consecutive %XX escapes.
        decoder = cast(codecs.BufferedIncrementalDecoder, utf8_decoder())
        ret = []
        idx = 0
        while idx < len(val):
            ch = val[idx]
            idx += 1
            if ch == "%" and idx <= len(val) - 2:
                pct = val[idx : idx + 2]
                if _IS_HEX_STR.fullmatch(pct):
                    b = bytes([int(pct, base=16)])
                    idx += 2
                    try:
                        unquoted = decoder.decode(b)
                    except UnicodeDecodeError:
                        # The buffered bytes did not form valid UTF-8: emit
                        # the original %XX text for the buffered span ...
                        start_pct = idx - 3 - len(decoder.buffer) * 3
                        ret.append(val[start_pct : idx - 3])
                        decoder.reset()
                        try:
                            # ... and retry the current byte on its own.
                            unquoted = decoder.decode(b)
                        except UnicodeDecodeError:
                            ret.append(val[idx - 3 : idx])
                            continue
                    if not unquoted:
                        # Mid-sequence byte: decoder buffered it, no output yet.
                        continue
                    if self._qs and unquoted in "+=&;":
                        # Keep query structure intact: re-quote separators.
                        to_add = self._qs_quoter(unquoted)
                        if to_add is None:  # pragma: no cover
                            raise RuntimeError("Cannot quote None")
                        ret.append(to_add)
                    elif unquoted in self._unsafe or unquoted in self._ignore:
                        to_add = self._quoter(unquoted)
                        if to_add is None:  # pragma: no cover
                            raise RuntimeError("Cannot quote None")
                        ret.append(to_add)
                    else:
                        ret.append(unquoted)
                    continue
            if decoder.buffer:
                # A non-escape char interrupted a multi-byte sequence:
                # flush the buffered escapes verbatim.
                start_pct = idx - 1 - len(decoder.buffer) * 3
                ret.append(val[start_pct : idx - 1])
                decoder.reset()
            if ch == "+":
                if (not self._qs and not self._plus) or ch in self._unsafe:
                    ret.append("+")
                else:
                    ret.append(" ")
                continue
            if ch in self._unsafe:
                # Re-encode an unsafe literal character as %XX.
                ret.append("%")
                h = hex(ord(ch)).upper()[2:]
                for ch in h:
                    ret.append(ch)
                continue
            ret.append(ch)
        if decoder.buffer:
            # Input ended mid-sequence: emit the trailing escapes verbatim.
            ret.append(val[-len(decoder.buffer) * 3 :])
        ret2 = "".join(ret)
        if ret2 == val:
            # Return the original object to avoid an equal-but-new string.
            return val
        return ret2
```
|
====================================================================================================
SOURCE CODE FILE: _url.py
LINES: 1
SIZE: 54.03 KB
PATH: scripts\freecad_env\Lib\site-packages\yarl\_url.py
ENCODING: utf-8
```py
import re
import sys
import warnings
from collections.abc import Mapping, Sequence
from enum import Enum
from functools import _CacheInfo, lru_cache
from ipaddress import ip_address
from typing import TYPE_CHECKING, Any, NoReturn, TypedDict, TypeVar, Union, overload
from urllib.parse import SplitResult, uses_relative
import idna
from multidict import MultiDict, MultiDictProxy
from propcache.api import under_cached_property as cached_property
from ._parse import (
USES_AUTHORITY,
SplitURLType,
make_netloc,
query_to_pairs,
split_netloc,
split_url,
unsplit_result,
)
from ._path import normalize_path, normalize_path_segments
from ._query import (
Query,
QueryVariable,
SimpleQuery,
get_str_query,
get_str_query_from_iterable,
get_str_query_from_sequence_iterable,
)
from ._quoters import (
FRAGMENT_QUOTER,
FRAGMENT_REQUOTER,
PATH_QUOTER,
PATH_REQUOTER,
PATH_SAFE_UNQUOTER,
PATH_UNQUOTER,
QS_UNQUOTER,
QUERY_QUOTER,
QUERY_REQUOTER,
QUOTER,
REQUOTER,
UNQUOTER,
human_quote,
)
DEFAULT_PORTS = {"http": 80, "https": 443, "ws": 80, "wss": 443, "ftp": 21}
USES_RELATIVE = frozenset(uses_relative)
# Special schemes https://url.spec.whatwg.org/#special-scheme
# are not allowed to have an empty host https://url.spec.whatwg.org/#url-representation
SCHEME_REQUIRES_HOST = frozenset(("http", "https", "ws", "wss", "ftp"))
# reg-name: unreserved / pct-encoded / sub-delims
# this pattern matches anything that is *not* in those classes. and is only used
# on lower-cased ASCII values.
NOT_REG_NAME = re.compile(
r"""
# any character not in the unreserved or sub-delims sets, plus %
# (validated with the additional check for pct-encoded sequences below)
[^a-z0-9\-._~!$&'()*+,;=%]
|
# % only allowed if it is part of a pct-encoded
# sequence of 2 hex digits.
%(?![0-9a-f]{2})
""",
re.VERBOSE,
)
_T = TypeVar("_T")
if sys.version_info >= (3, 11):
    from typing import Self
else:
    # `Self` is only used in annotations, so Any is a sufficient fallback
    # on interpreters that predate typing.Self (added in 3.11).
    Self = Any
class UndefinedType(Enum):
    """Singleton type for use with not set sentinel values."""
    # Enum members are singletons, which makes `is` comparisons reliable.
    _singleton = 0
UNDEFINED = UndefinedType._singleton
class CacheInfo(TypedDict):
    """Host encoding cache.

    Maps each host-related lru_cache to its ``functools._CacheInfo``
    statistics (hits, misses, maxsize, currsize).
    """
    idna_encode: _CacheInfo
    idna_decode: _CacheInfo
    ip_address: _CacheInfo
    host_validate: _CacheInfo
    encode_host: _CacheInfo
class _InternalURLCache(TypedDict, total=False):
    """Per-instance memoization store backing URL's cached properties.

    ``total=False`` because entries are filled lazily, on first access
    of the corresponding property.
    """
    _val: SplitURLType
    _origin: "URL"
    absolute: bool
    hash: int
    scheme: str
    raw_authority: str
    authority: str
    raw_user: Union[str, None]
    user: Union[str, None]
    raw_password: Union[str, None]
    password: Union[str, None]
    raw_host: Union[str, None]
    host: Union[str, None]
    host_subcomponent: Union[str, None]
    host_port_subcomponent: Union[str, None]
    port: Union[int, None]
    explicit_port: Union[int, None]
    raw_path: str
    path: str
    _parsed_query: list[tuple[str, str]]
    query: "MultiDictProxy[str]"
    raw_query_string: str
    query_string: str
    path_qs: str
    raw_path_qs: str
    raw_fragment: str
    fragment: str
    raw_parts: tuple[str, ...]
    parts: tuple[str, ...]
    parent: "URL"
    raw_name: str
    name: str
    raw_suffix: str
    suffix: str
    raw_suffixes: tuple[str, ...]
    suffixes: tuple[str, ...]
def rewrite_module(obj: _T) -> _T:
    """Present *obj* as living in the public ``yarl`` namespace.

    Used as a decorator so repr/introspection point at ``yarl`` rather
    than the private implementation module.
    """
    setattr(obj, "__module__", "yarl")
    return obj
@lru_cache
def encode_url(url_str: str) -> "URL":
    """Parse unencoded URL.

    Splits *url_str*, percent-encodes each component, and pre-populates
    the new URL's property cache with values computed during parsing.
    Results are memoized by the raw input string.
    """
    cache: _InternalURLCache = {}
    host: Union[str, None]
    scheme, netloc, path, query, fragment = split_url(url_str)
    if not netloc:  # netloc
        host = ""
    else:
        if ":" in netloc or "@" in netloc or "[" in netloc:
            # Complex netloc
            username, password, host, port = split_netloc(netloc)
        else:
            # Fast path: netloc is a bare host name.
            username = password = port = None
            host = netloc
        if host is None:
            if scheme in SCHEME_REQUIRES_HOST:
                msg = (
                    "Invalid URL: host is required for "
                    f"absolute urls with the {scheme} scheme"
                )
                raise ValueError(msg)
            else:
                host = ""
        host = _encode_host(host, validate_host=False)
        # Remove brackets as host encoder adds back brackets for IPv6 addresses
        cache["raw_host"] = host[1:-1] if "[" in host else host
        cache["explicit_port"] = port
        if password is None and username is None:
            # Fast path for URLs without user, password
            netloc = host if port is None else f"{host}:{port}"
            cache["raw_user"] = None
            cache["raw_password"] = None
        else:
            raw_user = REQUOTER(username) if username else username
            raw_password = REQUOTER(password) if password else password
            netloc = make_netloc(raw_user, raw_password, host, port)
            cache["raw_user"] = raw_user
            cache["raw_password"] = raw_password
    if path:
        path = PATH_REQUOTER(path)
        if netloc and "." in path:
            # "." may indicate dot segments that need normalizing.
            path = normalize_path(path)
    if query:
        query = QUERY_REQUOTER(query)
    if fragment:
        fragment = FRAGMENT_REQUOTER(fragment)
    cache["scheme"] = scheme
    cache["raw_path"] = "/" if not path and netloc else path
    cache["raw_query_string"] = query
    cache["raw_fragment"] = fragment
    # Build the instance directly, bypassing __new__'s dispatch.
    self = object.__new__(URL)
    self._scheme = scheme
    self._netloc = netloc
    self._path = path
    self._query = query
    self._fragment = fragment
    self._cache = cache
    return self
@lru_cache
def pre_encoded_url(url_str: str) -> "URL":
    """Parse pre-encoded URL.

    No re-quoting is applied; the split components are stored verbatim.
    Results are memoized by the raw input string.
    """
    url = object.__new__(URL)
    (
        url._scheme,
        url._netloc,
        url._path,
        url._query,
        url._fragment,
    ) = split_url(url_str)
    url._cache = {}
    return url
@lru_cache
def build_pre_encoded_url(
    scheme: str,
    authority: str,
    user: Union[str, None],
    password: Union[str, None],
    host: str,
    port: Union[int, None],
    path: str,
    query_string: str,
    fragment: str,
) -> "URL":
    """Build a pre-encoded URL from parts.

    *authority*, when given, is used verbatim; otherwise the netloc is
    assembled from host/port/user/password, dropping the scheme's
    default port.  No quoting is applied to any component.
    """
    url = object.__new__(URL)
    url._scheme = scheme
    if authority:
        netloc = authority
    elif not host:
        netloc = ""
    else:
        effective_port = port
        if effective_port is not None and effective_port == DEFAULT_PORTS.get(scheme):
            # Default port for the scheme is elided from the netloc.
            effective_port = None
        if user is None and password is None:
            netloc = host if effective_port is None else f"{host}:{effective_port}"
        else:
            netloc = make_netloc(user, password, host, effective_port)
    url._netloc = netloc
    url._path = path
    url._query = query_string
    url._fragment = fragment
    url._cache = {}
    return url
def from_parts_uncached(
    scheme: str, netloc: str, path: str, query: str, fragment: str
) -> "URL":
    """Create a new URL from parts.

    Components are stored verbatim; no quoting or validation is applied.
    """
    self = object.__new__(URL)
    self._scheme = scheme
    self._netloc = netloc
    self._path = path
    self._query = query
    self._fragment = fragment
    self._cache = {}
    return self
# Memoized front-end: URLs are immutable, so identical part tuples can
# safely share a single instance.
from_parts = lru_cache(from_parts_uncached)
@rewrite_module
class URL:
# Don't derive from str
# follow pathlib.Path design
# probably URL will not suffer from pathlib problems:
# it's intended for libraries like aiohttp,
# not to be passed into standard library functions like os.open etc.
# URL grammar (RFC 3986)
# pct-encoded = "%" HEXDIG HEXDIG
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
# hier-part = "//" authority path-abempty
# / path-absolute
# / path-rootless
# / path-empty
# scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
# authority = [ userinfo "@" ] host [ ":" port ]
# userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
# host = IP-literal / IPv4address / reg-name
# IP-literal = "[" ( IPv6address / IPvFuture ) "]"
# IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
# IPv6address = 6( h16 ":" ) ls32
# / "::" 5( h16 ":" ) ls32
# / [ h16 ] "::" 4( h16 ":" ) ls32
# / [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
# / [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
# / [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
# / [ *4( h16 ":" ) h16 ] "::" ls32
# / [ *5( h16 ":" ) h16 ] "::" h16
# / [ *6( h16 ":" ) h16 ] "::"
# ls32 = ( h16 ":" h16 ) / IPv4address
# ; least-significant 32 bits of address
# h16 = 1*4HEXDIG
# ; 16 bits of address represented in hexadecimal
# IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
# dec-octet = DIGIT ; 0-9
# / %x31-39 DIGIT ; 10-99
# / "1" 2DIGIT ; 100-199
# / "2" %x30-34 DIGIT ; 200-249
# / "25" %x30-35 ; 250-255
# reg-name = *( unreserved / pct-encoded / sub-delims )
# port = *DIGIT
# path = path-abempty ; begins with "/" or is empty
# / path-absolute ; begins with "/" but not "//"
# / path-noscheme ; begins with a non-colon segment
# / path-rootless ; begins with a segment
# / path-empty ; zero characters
# path-abempty = *( "/" segment )
# path-absolute = "/" [ segment-nz *( "/" segment ) ]
# path-noscheme = segment-nz-nc *( "/" segment )
# path-rootless = segment-nz *( "/" segment )
# path-empty = 0<pchar>
# segment = *pchar
# segment-nz = 1*pchar
# segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" )
# ; non-zero-length segment without any colon ":"
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
# query = *( pchar / "/" / "?" )
# fragment = *( pchar / "/" / "?" )
# URI-reference = URI / relative-ref
# relative-ref = relative-part [ "?" query ] [ "#" fragment ]
# relative-part = "//" authority path-abempty
# / path-absolute
# / path-noscheme
# / path-empty
# absolute-URI = scheme ":" hier-part [ "?" query ]
__slots__ = ("_cache", "_scheme", "_netloc", "_path", "_query", "_fragment")
_cache: _InternalURLCache
_scheme: str
_netloc: str
_path: str
_query: str
_fragment: str
    def __new__(
        cls,
        val: Union[str, SplitResult, "URL", UndefinedType] = UNDEFINED,
        *,
        encoded: bool = False,
        strict: Union[bool, None] = None,
    ) -> "URL":
        """Construct a URL from a str, SplitResult, or existing URL.

        With ``encoded=True`` the input is trusted to be percent-encoded
        already; otherwise it is quoted on the fly.  ``strict`` is
        accepted for backward compatibility and ignored.
        """
        if strict is not None:  # pragma: no cover
            warnings.warn("strict parameter is ignored")
        if type(val) is str:
            # Exact-type fast paths dispatch to the lru_cached parsers.
            return pre_encoded_url(val) if encoded else encode_url(val)
        if type(val) is cls:
            # URLs are immutable, so an existing URL can be returned as-is.
            return val
        if type(val) is SplitResult:
            if not encoded:
                raise ValueError("Cannot apply decoding to SplitResult")
            return from_parts(*val)
        if isinstance(val, str):
            # str subclasses: normalize to plain str before caching.
            return pre_encoded_url(str(val)) if encoded else encode_url(str(val))
        if val is UNDEFINED:
            # Special case for UNDEFINED since it might be unpickling and we do
            # not want to cache as the `__set_state__` call would mutate the URL
            # object in the `pre_encoded_url` or `encoded_url` caches.
            self = object.__new__(URL)
            self._scheme = self._netloc = self._path = self._query = self._fragment = ""
            self._cache = {}
            return self
        raise TypeError("Constructor parameter should be str")
    @classmethod
    def build(
        cls,
        *,
        scheme: str = "",
        authority: str = "",
        user: Union[str, None] = None,
        password: Union[str, None] = None,
        host: str = "",
        port: Union[int, None] = None,
        path: str = "",
        query: Union[Query, None] = None,
        query_string: str = "",
        fragment: str = "",
        encoded: bool = False,
    ) -> "URL":
        """Creates and returns a new URL from individual components.

        ``authority`` is mutually exclusive with user/password/host/port,
        and ``query`` with ``query_string``.  With ``encoded=True`` all
        parts are trusted verbatim; otherwise each part is quoted.

        Raises ValueError/TypeError on inconsistent or mistyped arguments.
        """
        if authority and (user or password or host or port):
            raise ValueError(
                'Can\'t mix "authority" with "user", "password", "host" or "port".'
            )
        if port is not None and not isinstance(port, int):
            raise TypeError(f"The port is required to be int, got {type(port)!r}.")
        if port and not host:
            raise ValueError('Can\'t build URL with "port" but without "host".')
        if query and query_string:
            raise ValueError('Only one of "query" or "query_string" should be passed')
        if (
            scheme is None  # type: ignore[redundant-expr]
            or authority is None  # type: ignore[redundant-expr]
            or host is None  # type: ignore[redundant-expr]
            or path is None  # type: ignore[redundant-expr]
            or query_string is None  # type: ignore[redundant-expr]
            or fragment is None
        ):
            raise TypeError(
                'NoneType is illegal for "scheme", "authority", "host", "path", '
                '"query_string", and "fragment" args, use empty string instead.'
            )
        if query:
            query_string = get_str_query(query) or ""
        if encoded:
            # Trusted input: delegate to the cached verbatim builder.
            return build_pre_encoded_url(
                scheme,
                authority,
                user,
                password,
                host,
                port,
                path,
                query_string,
                fragment,
            )
        self = object.__new__(URL)
        self._scheme = scheme
        _host: Union[str, None] = None
        if authority:
            user, password, _host, port = split_netloc(authority)
            _host = _encode_host(_host, validate_host=False) if _host else ""
        elif host:
            _host = _encode_host(host, validate_host=True)
        else:
            self._netloc = ""
        if _host is not None:
            if port is not None:
                # Drop the scheme's default port from the netloc.
                port = None if port == DEFAULT_PORTS.get(scheme) else port
            if user is None and password is None:
                self._netloc = _host if port is None else f"{_host}:{port}"
            else:
                self._netloc = make_netloc(user, password, _host, port, True)
        path = PATH_QUOTER(path) if path else path
        if path and self._netloc:
            if "." in path:
                # "." may indicate dot segments that need normalizing.
                path = normalize_path(path)
            if path[0] != "/":
                msg = (
                    "Path in a URL with authority should "
                    "start with a slash ('/') if set"
                )
                raise ValueError(msg)
        self._path = path
        if not query and query_string:
            query_string = QUERY_QUOTER(query_string)
        self._query = query_string
        self._fragment = FRAGMENT_QUOTER(fragment) if fragment else fragment
        self._cache = {}
        return self
    def __init_subclass__(cls) -> NoReturn:
        """Block subclassing: URL relies on exact-type checks and caching."""
        raise TypeError(f"Inheriting a class {cls!r} from URL is forbidden")
    def __str__(self) -> str:
        """Render the URL, normalizing an empty path and default ports."""
        if not self._path and self._netloc and (self._query or self._fragment):
            # An absolute URL with query/fragment renders with a "/" path.
            path = "/"
        else:
            path = self._path
        if (port := self.explicit_port) is not None and port == DEFAULT_PORTS.get(
            self._scheme
        ):
            # port normalization - using None for default ports to remove from rendering
            # https://datatracker.ietf.org/doc/html/rfc3986.html#section-6.2.3
            host = self.host_subcomponent
            netloc = make_netloc(self.raw_user, self.raw_password, host, None)
        else:
            netloc = self._netloc
        return unsplit_result(self._scheme, netloc, path, self._query, self._fragment)
    def __repr__(self) -> str:
        """Return ``URL('...')`` using the canonical string rendering."""
        return f"{self.__class__.__name__}('{str(self)}')"
    def __bytes__(self) -> bytes:
        """Return the URL as ASCII bytes (components are already encoded)."""
        return str(self).encode("ascii")
    def __eq__(self, other: object) -> bool:
        """Compare component-wise; an empty absolute path equals "/"."""
        if type(other) is not URL:
            return NotImplemented
        path1 = "/" if not self._path and self._netloc else self._path
        path2 = "/" if not other._path and other._netloc else other._path
        return (
            self._scheme == other._scheme
            and self._netloc == other._netloc
            and path1 == path2
            and self._query == other._query
            and self._fragment == other._fragment
        )
    def __hash__(self) -> int:
        """Hash consistently with __eq__ (same path normalization); cached."""
        if (ret := self._cache.get("hash")) is None:
            path = "/" if not self._path and self._netloc else self._path
            ret = self._cache["hash"] = hash(
                (self._scheme, self._netloc, path, self._query, self._fragment)
            )
        return ret
    # Ordering compares the raw component tuples lexicographically.
    # NOTE: unlike __eq__, no "/"-for-empty-path normalization is applied here.
    def __le__(self, other: object) -> bool:
        if type(other) is not URL:
            return NotImplemented
        return self._val <= other._val
    def __lt__(self, other: object) -> bool:
        if type(other) is not URL:
            return NotImplemented
        return self._val < other._val
    def __ge__(self, other: object) -> bool:
        if type(other) is not URL:
            return NotImplemented
        return self._val >= other._val
    def __gt__(self, other: object) -> bool:
        if type(other) is not URL:
            return NotImplemented
        return self._val > other._val
    def __truediv__(self, name: str) -> "URL":
        """``url / "segment"`` appends a path segment (quoted as needed)."""
        if not isinstance(name, str):
            return NotImplemented  # type: ignore[unreachable]
        return self._make_child((str(name),))
    def __mod__(self, query: Query) -> "URL":
        """``url % query`` merges *query* into the query string."""
        return self.update_query(query)
    def __bool__(self) -> bool:
        """False only for the completely empty URL (no netloc/path/query/fragment)."""
        return bool(self._netloc or self._path or self._query or self._fragment)
    def __getstate__(self) -> tuple[SplitResult]:
        """Pickle as a single SplitResult built from the raw components."""
        # tuple.__new__ bypasses SplitResult's constructor validation.
        return (tuple.__new__(SplitResult, self._val),)
    def __setstate__(
        self, state: Union[tuple[SplitURLType], tuple[None, _InternalURLCache]]
    ) -> None:
        """Restore from either the legacy dict-style or tuple-style pickle."""
        if state[0] is None and isinstance(state[1], dict):
            # default style pickle
            val = state[1]["_val"]
        else:
            unused: list[object]
            val, *unused = state
        self._scheme, self._netloc, self._path, self._query, self._fragment = val
        self._cache = {}
    def _cache_netloc(self) -> None:
        """Cache the netloc parts of the URL.

        Splits the netloc once and stores user/password/host/port together.
        """
        c = self._cache
        split_loc = split_netloc(self._netloc)
        c["raw_user"], c["raw_password"], c["raw_host"], c["explicit_port"] = split_loc
    def is_absolute(self) -> bool:
        """A check for absolute URLs.

        Return True for absolute ones (having scheme or starting
        with //), False otherwise.

        It is preferred to call the .absolute property instead
        as it is cached.
        """
        return self.absolute
    def is_default_port(self) -> bool:
        """A check for default port.

        Return True if port is default for specified scheme,
        e.g. 'http://python.org' or 'http://python.org:80', False
        otherwise.

        Return False for relative URLs.
        """
        if (explicit := self.explicit_port) is None:
            # If the explicit port is None, then the URL must be
            # using the default port unless its a relative URL
            # which does not have an implicit port / default port
            return self._netloc != ""
        return explicit == DEFAULT_PORTS.get(self._scheme)
    def origin(self) -> "URL":
        """Return an URL with scheme, host and port parts only.

        user, password, path, query and fragment are removed.
        """
        # TODO: add a keyword-only option for keeping user/pass maybe?
        return self._origin
    @cached_property
    def _val(self) -> SplitURLType:
        # Raw 5-tuple of components, cached for the ordering operators.
        return (self._scheme, self._netloc, self._path, self._query, self._fragment)
    @cached_property
    def _origin(self) -> "URL":
        """Return an URL with scheme, host and port parts only.

        user, password, path, query and fragment are removed.

        Raises ValueError if the URL is relative or has no scheme.
        """
        if not (netloc := self._netloc):
            raise ValueError("URL should be absolute")
        if not (scheme := self._scheme):
            raise ValueError("URL should have scheme")
        if "@" in netloc:
            # Strip userinfo by rebuilding the netloc from host and port.
            encoded_host = self.host_subcomponent
            netloc = make_netloc(None, None, encoded_host, self.explicit_port)
        elif not self._path and not self._query and not self._fragment:
            # Already a bare origin: reuse this immutable instance.
            return self
        return from_parts(scheme, netloc, "", "", "")
    def relative(self) -> "URL":
        """Return a relative part of the URL.

        scheme, user, password, host and port are removed.
        """
        if not self._netloc:
            raise ValueError("URL should be absolute")
        return from_parts("", "", self._path, self._query, self._fragment)
    @cached_property
    def absolute(self) -> bool:
        """A check for absolute URLs.

        Return True for absolute ones (having scheme or starting
        with //), False otherwise.
        """
        # `netloc`` is an empty string for relative URLs
        # Checking `netloc` is faster than checking `hostname`
        # because `hostname` is a property that does some extra work
        # to parse the host from the `netloc`
        return self._netloc != ""
    @cached_property
    def scheme(self) -> str:
        """Scheme for absolute URLs.

        Empty string for relative URLs or URLs starting with //
        """
        return self._scheme
    @cached_property
    def raw_authority(self) -> str:
        """Encoded authority part of URL.

        Empty string for relative URLs.
        """
        return self._netloc
    @cached_property
    def authority(self) -> str:
        """Decoded authority part of URL.

        Empty string for relative URLs.
        """
        # Rebuilt from the decoded components rather than unquoting netloc.
        return make_netloc(self.user, self.password, self.host, self.port)
    @cached_property
    def raw_user(self) -> Union[str, None]:
        """Encoded user part of URL.

        None if user is missing.
        """
        # not .username
        self._cache_netloc()
        return self._cache["raw_user"]
    @cached_property
    def user(self) -> Union[str, None]:
        """Decoded user part of URL.

        None if user is missing.
        """
        if (raw_user := self.raw_user) is None:
            return None
        return UNQUOTER(raw_user)
    @cached_property
    def raw_password(self) -> Union[str, None]:
        """Encoded password part of URL.

        None if password is missing.
        """
        self._cache_netloc()
        return self._cache["raw_password"]
    @cached_property
    def password(self) -> Union[str, None]:
        """Decoded password part of URL.

        None if password is missing.
        """
        if (raw_password := self.raw_password) is None:
            return None
        return UNQUOTER(raw_password)
    @cached_property
    def raw_host(self) -> Union[str, None]:
        """Encoded host part of URL.

        None for relative URLs.

        When working with IPv6 addresses, use the `host_subcomponent` property instead
        as it will return the host subcomponent with brackets.
        """
        # Use host instead of hostname for sake of shortness
        # May add .hostname prop later
        self._cache_netloc()
        return self._cache["raw_host"]
    @cached_property
    def host(self) -> Union[str, None]:
        """Decoded host part of URL.

        None for relative URLs.
        """
        if (raw := self.raw_host) is None:
            return None
        # Heuristic: hosts ending in a digit or containing ":" are treated
        # as IP literals (precedence: (raw and digit) or ":" in raw).
        if raw and raw[-1].isdigit() or ":" in raw:
            # IP addresses are never IDNA encoded
            return raw
        return _idna_decode(raw)
    @cached_property
    def host_subcomponent(self) -> Union[str, None]:
        """Return the host subcomponent part of URL.

        None for relative URLs.

        https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.2
        `IP-literal = "[" ( IPv6address / IPvFuture ) "]"`

        Examples:
        - `http://example.com:8080` -> `example.com`
        - `http://example.com:80` -> `example.com`
        - `https://127.0.0.1:8443` -> `127.0.0.1`
        - `https://[::1]:8443` -> `[::1]`
        - `http://[::1]` -> `[::1]`
        """
        if (raw := self.raw_host) is None:
            return None
        # ":" can only appear in IPv6 hosts, which need brackets restored.
        return f"[{raw}]" if ":" in raw else raw
    @cached_property
    def host_port_subcomponent(self) -> Union[str, None]:
        """Return the host and port subcomponent part of URL.

        Trailing dots are removed from the host part.

        This value is suitable for use in the Host header of an HTTP request.

        None for relative URLs.

        https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.2
        `IP-literal = "[" ( IPv6address / IPvFuture ) "]"`
        https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.3
        port = *DIGIT

        Examples:
        - `http://example.com:8080` -> `example.com:8080`
        - `http://example.com:80` -> `example.com`
        - `http://example.com.:80` -> `example.com`
        - `https://127.0.0.1:8443` -> `127.0.0.1:8443`
        - `https://[::1]:8443` -> `[::1]:8443`
        - `http://[::1]` -> `[::1]`
        """
        if (raw := self.raw_host) is None:
            return None
        if raw[-1] == ".":
            # Remove all trailing dots from the netloc as while
            # they are valid FQDNs in DNS, TLS validation fails.
            # See https://github.com/aio-libs/aiohttp/issues/3636.
            # To avoid string manipulation we only call rstrip if
            # the last character is a dot.
            raw = raw.rstrip(".")
        port = self.explicit_port
        if port is None or port == DEFAULT_PORTS.get(self._scheme):
            # Default/absent port is omitted, matching Host header rules.
            return f"[{raw}]" if ":" in raw else raw
        return f"[{raw}]:{port}" if ":" in raw else f"{raw}:{port}"
    @cached_property
    def port(self) -> Union[int, None]:
        """Port part of URL, with scheme-based fallback.

        None for relative URLs or URLs without explicit port and
        scheme without default port substitution.
        """
        if (explicit_port := self.explicit_port) is not None:
            return explicit_port
        return DEFAULT_PORTS.get(self._scheme)
    @cached_property
    def explicit_port(self) -> Union[int, None]:
        """Port part of URL, without scheme-based fallback.

        None for relative URLs or URLs without explicit port.
        """
        self._cache_netloc()
        return self._cache["explicit_port"]
    @cached_property
    def raw_path(self) -> str:
        """Encoded path of URL.

        / for absolute URLs without path part.
        """
        return self._path if self._path or not self._netloc else "/"
    @cached_property
    def path(self) -> str:
        """Decoded path of URL.

        / for absolute URLs without path part.
        """
        # Empty path: "/" for absolute URLs, "" for relative ones.
        return PATH_UNQUOTER(self._path) if self._path else "/" if self._netloc else ""
    @cached_property
    def path_safe(self) -> str:
        """Decoded path of URL.

        / for absolute URLs without path part.

        / (%2F) and % (%25) are not decoded
        """
        if self._path:
            return PATH_SAFE_UNQUOTER(self._path)
        return "/" if self._netloc else ""
    @cached_property
    def _parsed_query(self) -> list[tuple[str, str]]:
        """Parse query part of URL into decoded (key, value) pairs."""
        return query_to_pairs(self._query)
    @cached_property
    def query(self) -> "MultiDictProxy[str]":
        """A MultiDictProxy representing parsed query parameters in decoded
        representation.

        Empty value if URL has no query part.
        """
        # Proxy keeps the underlying MultiDict effectively read-only.
        return MultiDictProxy(MultiDict(self._parsed_query))
    @cached_property
    def raw_query_string(self) -> str:
        """Encoded query part of URL.

        Empty string if query is missing.
        """
        return self._query
    @cached_property
    def query_string(self) -> str:
        """Decoded query part of URL.

        Empty string if query is missing.
        """
        return QS_UNQUOTER(self._query) if self._query else ""
    @cached_property
    def path_qs(self) -> str:
        """Decoded path of URL with query."""
        return self.path if not (q := self.query_string) else f"{self.path}?{q}"
    @cached_property
    def raw_path_qs(self) -> str:
        """Encoded path of URL with query."""
        # Mirrors raw_path's "/" default for absolute URLs without a path.
        if q := self._query:
            return f"{self._path}?{q}" if self._path or not self._netloc else f"/?{q}"
        return self._path if self._path or not self._netloc else "/"
    @cached_property
    def raw_fragment(self) -> str:
        """Encoded fragment part of URL.

        Empty string if fragment is missing.
        """
        return self._fragment
    @cached_property
    def fragment(self) -> str:
        """Decoded fragment part of URL.

        Empty string if fragment is missing.
        """
        return UNQUOTER(self._fragment) if self._fragment else ""
    @cached_property
    def raw_parts(self) -> tuple[str, ...]:
        """A tuple containing encoded *path* parts.

        ('/',) for absolute URLs if *path* is missing.
        """
        path = self._path
        if self._netloc:
            # Absolute URL: the leading "/" is its own first part.
            return ("/", *path[1:].split("/")) if path else ("/",)
        if path and path[0] == "/":
            return ("/", *path[1:].split("/"))
        return tuple(path.split("/"))
    @cached_property
    def parts(self) -> tuple[str, ...]:
        """A tuple containing decoded *path* parts.

        ('/',) for absolute URLs if *path* is missing.
        """
        return tuple(UNQUOTER(part) for part in self.raw_parts)
    @cached_property
    def parent(self) -> "URL":
        """A new URL with last part of path removed and cleaned up query and
        fragment.
        """
        path = self._path
        if not path or path == "/":
            if self._fragment or self._query:
                # Nothing to strip from the path; just drop query/fragment.
                return from_parts(self._scheme, self._netloc, path, "", "")
            return self
        parts = path.split("/")
        return from_parts(self._scheme, self._netloc, "/".join(parts[:-1]), "", "")
    @cached_property
    def raw_name(self) -> str:
        """The last part of raw_parts."""
        parts = self.raw_parts
        if not self._netloc:
            return parts[-1]
        # Skip the leading "/" part of absolute URLs.
        parts = parts[1:]
        return parts[-1] if parts else ""
    @cached_property
    def name(self) -> str:
        """The last part of parts."""
        return UNQUOTER(self.raw_name)
    @cached_property
    def raw_suffix(self) -> str:
        """The file extension of raw_name, including the dot ("" if none)."""
        name = self.raw_name
        i = name.rfind(".")
        # A dot at position 0 (hidden file) or at the end yields no suffix.
        return name[i:] if 0 < i < len(name) - 1 else ""
    @cached_property
    def suffix(self) -> str:
        """The file extension of name, including the dot ("" if none)."""
        return UNQUOTER(self.raw_suffix)
    @cached_property
    def raw_suffixes(self) -> tuple[str, ...]:
        """All file extensions of raw_name, pathlib-style."""
        name = self.raw_name
        if name.endswith("."):
            return ()
        name = name.lstrip(".")
        return tuple("." + suffix for suffix in name.split(".")[1:])
    @cached_property
    def suffixes(self) -> tuple[str, ...]:
        """All file extensions of name, pathlib-style."""
        return tuple(UNQUOTER(suffix) for suffix in self.raw_suffixes)
    def _make_child(self, paths: "Sequence[str]", encoded: bool = False) -> "URL":
        """
        add paths to self._path, accounting for absolute vs relative paths,
        keep existing, but do not create new, empty segments

        Segments are accumulated in reverse order, then flipped once at
        the end.  Raises ValueError if any appended path starts with "/".
        """
        parsed: list[str] = []
        needs_normalize: bool = False
        for idx, path in enumerate(reversed(paths)):
            # empty segment of last is not removed
            last = idx == 0
            if path and path[0] == "/":
                raise ValueError(
                    f"Appending path {path!r} starting from slash is forbidden"
                )
            # We need to quote the path if it is not already encoded
            # This cannot be done at the end because the existing
            # path is already quoted and we do not want to double quote
            # the existing path.
            path = path if encoded else PATH_QUOTER(path)
            needs_normalize |= "." in path
            segments = path.split("/")
            segments.reverse()
            # remove trailing empty segment for all but the last path
            parsed += segments[1:] if not last and segments[0] == "" else segments
        if (path := self._path) and (old_segments := path.split("/")):
            # If the old path ends with a slash, the last segment is an empty string
            # and should be removed before adding the new path segments.
            old = old_segments[:-1] if old_segments[-1] == "" else old_segments
            old.reverse()
            parsed += old
        # If the netloc is present, inject a leading slash when adding a
        # path to an absolute URL where there was none before.
        if (netloc := self._netloc) and parsed and parsed[-1] != "":
            parsed.append("")
        parsed.reverse()
        if not netloc or not needs_normalize:
            return from_parts(self._scheme, netloc, "/".join(parsed), "", "")
        path = "/".join(normalize_path_segments(parsed))
        # If normalizing the path segments removed the leading slash, add it back.
        if path and path[0] != "/":
            path = f"/{path}"
        return from_parts(self._scheme, netloc, path, "", "")
def with_scheme(self, scheme: str) -> "URL":
"""Return a new URL with scheme replaced."""
# N.B. doesn't cleanup query/fragment
if not isinstance(scheme, str):
raise TypeError("Invalid scheme type")
lower_scheme = scheme.lower()
netloc = self._netloc
if not netloc and lower_scheme in SCHEME_REQUIRES_HOST:
msg = (
"scheme replacement is not allowed for "
f"relative URLs for the {lower_scheme} scheme"
)
raise ValueError(msg)
return from_parts(lower_scheme, netloc, self._path, self._query, self._fragment)
def with_user(self, user: Union[str, None]) -> "URL":
"""Return a new URL with user replaced.
Autoencode user if needed.
Clear user/password if user is None.
"""
# N.B. doesn't cleanup query/fragment
if user is None:
password = None
elif isinstance(user, str):
user = QUOTER(user)
password = self.raw_password
else:
raise TypeError("Invalid user type")
if not (netloc := self._netloc):
raise ValueError("user replacement is not allowed for relative URLs")
encoded_host = self.host_subcomponent or ""
netloc = make_netloc(user, password, encoded_host, self.explicit_port)
return from_parts(self._scheme, netloc, self._path, self._query, self._fragment)
def with_password(self, password: Union[str, None]) -> "URL":
"""Return a new URL with password replaced.
Autoencode password if needed.
Clear password if argument is None.
"""
# N.B. doesn't cleanup query/fragment
if password is None:
pass
elif isinstance(password, str):
password = QUOTER(password)
else:
raise TypeError("Invalid password type")
if not (netloc := self._netloc):
raise ValueError("password replacement is not allowed for relative URLs")
encoded_host = self.host_subcomponent or ""
port = self.explicit_port
netloc = make_netloc(self.raw_user, password, encoded_host, port)
return from_parts(self._scheme, netloc, self._path, self._query, self._fragment)
def with_host(self, host: str) -> "URL":
"""Return a new URL with host replaced.
Autoencode host if needed.
Changing host for relative URLs is not allowed, use .join()
instead.
"""
# N.B. doesn't cleanup query/fragment
if not isinstance(host, str):
raise TypeError("Invalid host type")
if not (netloc := self._netloc):
raise ValueError("host replacement is not allowed for relative URLs")
if not host:
raise ValueError("host removing is not allowed")
encoded_host = _encode_host(host, validate_host=True) if host else ""
port = self.explicit_port
netloc = make_netloc(self.raw_user, self.raw_password, encoded_host, port)
return from_parts(self._scheme, netloc, self._path, self._query, self._fragment)
def with_port(self, port: Union[int, None]) -> "URL":
"""Return a new URL with port replaced.
Clear port to default if None is passed.
"""
# N.B. doesn't cleanup query/fragment
if port is not None:
if isinstance(port, bool) or not isinstance(port, int):
raise TypeError(f"port should be int or None, got {type(port)}")
if not (0 <= port <= 65535):
raise ValueError(f"port must be between 0 and 65535, got {port}")
if not (netloc := self._netloc):
raise ValueError("port replacement is not allowed for relative URLs")
encoded_host = self.host_subcomponent or ""
netloc = make_netloc(self.raw_user, self.raw_password, encoded_host, port)
return from_parts(self._scheme, netloc, self._path, self._query, self._fragment)
def with_path(
self,
path: str,
*,
encoded: bool = False,
keep_query: bool = False,
keep_fragment: bool = False,
) -> "URL":
"""Return a new URL with path replaced."""
netloc = self._netloc
if not encoded:
path = PATH_QUOTER(path)
if netloc:
path = normalize_path(path) if "." in path else path
if path and path[0] != "/":
path = f"/{path}"
query = self._query if keep_query else ""
fragment = self._fragment if keep_fragment else ""
return from_parts(self._scheme, netloc, path, query, fragment)
@overload
def with_query(self, query: Query) -> "URL": ...
@overload
def with_query(self, **kwargs: QueryVariable) -> "URL": ...
def with_query(self, *args: Any, **kwargs: Any) -> "URL":
"""Return a new URL with query part replaced.
Accepts any Mapping (e.g. dict, multidict.MultiDict instances)
or str, autoencode the argument if needed.
A sequence of (key, value) pairs is supported as well.
It also can take an arbitrary number of keyword arguments.
Clear query if None is passed.
"""
# N.B. doesn't cleanup query/fragment
query = get_str_query(*args, **kwargs) or ""
return from_parts_uncached(
self._scheme, self._netloc, self._path, query, self._fragment
)
@overload
def extend_query(self, query: Query) -> "URL": ...
@overload
def extend_query(self, **kwargs: QueryVariable) -> "URL": ...
def extend_query(self, *args: Any, **kwargs: Any) -> "URL":
"""Return a new URL with query part combined with the existing.
This method will not remove existing query parameters.
Example:
>>> url = URL('http://example.com/?a=1&b=2')
>>> url.extend_query(a=3, c=4)
URL('http://example.com/?a=1&b=2&a=3&c=4')
"""
if not (new_query := get_str_query(*args, **kwargs)):
return self
if query := self._query:
# both strings are already encoded so we can use a simple
# string join
query += new_query if query[-1] == "&" else f"&{new_query}"
else:
query = new_query
return from_parts_uncached(
self._scheme, self._netloc, self._path, query, self._fragment
)
    @overload
    def update_query(self, query: Query) -> "URL": ...
    @overload
    def update_query(self, **kwargs: QueryVariable) -> "URL": ...
    def update_query(self, *args: Any, **kwargs: Any) -> "URL":
        """Return a new URL with query part updated.

        This method will overwrite existing query parameters.

        Example:
        >>> url = URL('http://example.com/?a=1&b=2')
        >>> url.update_query(a=3, c=4)
        URL('http://example.com/?a=3&b=2&c=4')
        """
        in_query: Union[str, Mapping[str, QueryVariable], None]
        # Exactly one source of parameters is accepted: keyword arguments
        # OR a single positional argument, never both.
        if kwargs:
            if args:
                msg = "Either kwargs or single query parameter must be present"
                raise ValueError(msg)
            in_query = kwargs
        elif len(args) == 1:
            in_query = args[0]
        else:
            raise ValueError("Either kwargs or single query parameter must be present")
        if in_query is None:
            # None clears the whole query string.
            query = ""
        elif not in_query:
            # An empty mapping/sequence/string leaves the query untouched.
            query = self._query
        elif isinstance(in_query, Mapping):
            # Merge: existing pairs first, new pairs overwrite on key clash.
            qm: MultiDict[QueryVariable] = MultiDict(self._parsed_query)
            qm.update(in_query)
            query = get_str_query_from_sequence_iterable(qm.items())
        elif isinstance(in_query, str):
            qstr: MultiDict[str] = MultiDict(self._parsed_query)
            qstr.update(query_to_pairs(in_query))
            query = get_str_query_from_iterable(qstr.items())
        elif isinstance(in_query, (bytes, bytearray, memoryview)):  # type: ignore[unreachable]
            msg = "Invalid query type: bytes, bytearray and memoryview are forbidden"
            raise TypeError(msg)
        elif isinstance(in_query, Sequence):
            # We don't expect sequence values if we're given a list of pairs
            # already; only mappings like builtin `dict` which can't have the
            # same key pointing to multiple values are allowed to use
            # `_query_seq_pairs`.
            qs: MultiDict[SimpleQuery] = MultiDict(self._parsed_query)
            qs.update(in_query)
            query = get_str_query_from_iterable(qs.items())
        else:
            raise TypeError(
                "Invalid query type: only str, mapping or "
                "sequence of (key, value) pairs is allowed"
            )
        return from_parts_uncached(
            self._scheme, self._netloc, self._path, query, self._fragment
        )
def without_query_params(self, *query_params: str) -> "URL":
"""Remove some keys from query part and return new URL."""
params_to_remove = set(query_params) & self.query.keys()
if not params_to_remove:
return self
return self.with_query(
tuple(
(name, value)
for name, value in self.query.items()
if name not in params_to_remove
)
)
def with_fragment(self, fragment: Union[str, None]) -> "URL":
"""Return a new URL with fragment replaced.
Autoencode fragment if needed.
Clear fragment to default if None is passed.
"""
# N.B. doesn't cleanup query/fragment
if fragment is None:
raw_fragment = ""
elif not isinstance(fragment, str):
raise TypeError("Invalid fragment type")
else:
raw_fragment = FRAGMENT_QUOTER(fragment)
if self._fragment == raw_fragment:
return self
return from_parts(
self._scheme, self._netloc, self._path, self._query, raw_fragment
)
    def with_name(
        self,
        name: str,
        *,
        keep_query: bool = False,
        keep_fragment: bool = False,
    ) -> "URL":
        """Return a new URL with name (last part of path) replaced.

        Query and fragment parts are cleaned up unless *keep_query* /
        *keep_fragment* are set.

        Name is encoded if needed.

        :raises TypeError: if *name* is not a ``str``.
        :raises ValueError: if *name* contains ``/`` or equals ``.``/``..``.
        """
        # N.B. DOES cleanup query/fragment
        if not isinstance(name, str):
            raise TypeError("Invalid name type")
        if "/" in name:
            raise ValueError("Slash in name is not allowed")
        name = PATH_QUOTER(name)
        if name in (".", ".."):
            raise ValueError(". and .. values are forbidden")
        parts = list(self.raw_parts)
        if netloc := self._netloc:
            # Absolute URL: replace (or append) the last segment, then blank
            # the leading "/" marker so that "/".join() re-creates it.
            if len(parts) == 1:
                parts.append(name)
            else:
                parts[-1] = name
            parts[0] = ""  # replace leading '/'
        else:
            parts[-1] = name
            if parts[0] == "/":
                parts[0] = ""  # replace leading '/'
        query = self._query if keep_query else ""
        fragment = self._fragment if keep_fragment else ""
        return from_parts(self._scheme, netloc, "/".join(parts), query, fragment)
    def with_suffix(
        self,
        suffix: str,
        *,
        keep_query: bool = False,
        keep_fragment: bool = False,
    ) -> "URL":
        """Return a new URL with suffix (file extension of name) replaced.

        Query and fragment parts are cleaned up unless *keep_query* /
        *keep_fragment* are set.

        suffix is encoded if needed.
        """
        if not isinstance(suffix, str):
            raise TypeError("Invalid suffix type")
        # Precedence: (suffix and not suffix[0] == ".") or (suffix == ".") or
        # ("/" in suffix) -- a non-empty suffix must start with a dot, must
        # not be just ".", and may not contain a slash.
        if suffix and not suffix[0] == "." or suffix == "." or "/" in suffix:
            raise ValueError(f"Invalid suffix {suffix!r}")
        name = self.raw_name
        if not name:
            raise ValueError(f"{self!r} has an empty name")
        old_suffix = self.raw_suffix
        suffix = PATH_QUOTER(suffix)
        # Strip the old suffix (if any) before appending the new one.
        name = name + suffix if not old_suffix else name[: -len(old_suffix)] + suffix
        if name in (".", ".."):
            raise ValueError(". and .. values are forbidden")
        parts = list(self.raw_parts)
        if netloc := self._netloc:
            # Absolute URL: replace (or append) the last segment, then blank
            # the leading "/" marker so that "/".join() re-creates it.
            if len(parts) == 1:
                parts.append(name)
            else:
                parts[-1] = name
            parts[0] = ""  # replace leading '/'
        else:
            parts[-1] = name
            if parts[0] == "/":
                parts[0] = ""  # replace leading '/'
        query = self._query if keep_query else ""
        fragment = self._fragment if keep_fragment else ""
        return from_parts(self._scheme, netloc, "/".join(parts), query, fragment)
    def join(self, url: "URL") -> "URL":
        """Join URLs

        Construct a full (“absolute”) URL by combining a “base URL”
        (self) with another URL (url).

        Informally, this uses components of the base URL, in
        particular the addressing scheme, the network location and
        (part of) the path, to provide missing components in the
        relative URL.
        """
        if type(url) is not URL:
            raise TypeError("url should be URL")
        scheme = url._scheme or self._scheme
        # A different scheme (or one that does not support relative
        # resolution) means *url* stands entirely on its own.
        if scheme != self._scheme or scheme not in USES_RELATIVE:
            return url
        # scheme is in uses_authority as uses_authority is a superset of uses_relative
        if (join_netloc := url._netloc) and scheme in USES_AUTHORITY:
            return from_parts(scheme, join_netloc, url._path, url._query, url._fragment)
        orig_path = self._path
        if join_path := url._path:
            if join_path[0] == "/":
                # Absolute path in *url* replaces the base path entirely.
                path = join_path
            elif not orig_path:
                path = f"/{join_path}"
            elif orig_path[-1] == "/":
                path = f"{orig_path}{join_path}"
            else:
                # Merge: keep the base path up to (excluding) its last
                # segment, then append the relative path.
                # parts[0] is / for absolute urls,
                # this join will add a double slash there
                path = "/".join([*self.parts[:-1], ""]) + join_path
                # which has to be removed
                if orig_path[0] == "/":
                    path = path[1:]
            # Resolve "." / ".." segments introduced by the join.
            path = normalize_path(path) if "." in path else path
        else:
            path = orig_path
        return from_parts(
            scheme,
            self._netloc,
            path,
            url._query if join_path or url._query else self._query,
            url._fragment if join_path or url._fragment else self._fragment,
        )
def joinpath(self, *other: str, encoded: bool = False) -> "URL":
"""Return a new URL with the elements in other appended to the path."""
return self._make_child(other, encoded=encoded)
def human_repr(self) -> str:
"""Return decoded human readable string for URL representation."""
user = human_quote(self.user, "#/:?@[]")
password = human_quote(self.password, "#/:?@[]")
if (host := self.host) and ":" in host:
host = f"[{host}]"
path = human_quote(self.path, "#?")
if TYPE_CHECKING:
assert path is not None
query_string = "&".join(
"{}={}".format(human_quote(k, "#&+;="), human_quote(v, "#&+;="))
for k, v in self.query.items()
)
fragment = human_quote(self.fragment, "")
if TYPE_CHECKING:
assert fragment is not None
netloc = make_netloc(user, password, host, self.explicit_port)
return unsplit_result(self._scheme, netloc, path, query_string, fragment)
# Default LRU sizes for the module-level caches defined below.
_DEFAULT_IDNA_SIZE = 256  # entries in the IDNA encode/decode caches
_DEFAULT_ENCODE_SIZE = 512  # entries in the _encode_host cache
@lru_cache(_DEFAULT_IDNA_SIZE)
def _idna_decode(raw: str) -> str:
    """Decode an IDNA/punycode host back to its Unicode form (memoized)."""
    try:
        return idna.decode(raw.encode("ascii"))
    except UnicodeError:  # e.g. '::1'
        # Fall back to the stdlib "idna" codec for values the idna
        # package rejects.
        return raw.encode("ascii").decode("idna")
@lru_cache(_DEFAULT_IDNA_SIZE)
def _idna_encode(host: str) -> str:
    """Encode a host to its ASCII (IDNA) form, with UTS#46 mapping (memoized)."""
    try:
        return idna.encode(host, uts46=True).decode("ascii")
    except UnicodeError:
        # Fall back to the stdlib "idna" codec when the idna package
        # refuses the input.
        return host.encode("idna").decode("ascii")
@lru_cache(_DEFAULT_ENCODE_SIZE)
def _encode_host(host: str, validate_host: bool) -> str:
    """Encode host part of URL.

    IP literals are compressed (IPv6 gets brackets and keeps any %zone),
    ASCII registered names are lowercased (and optionally validated),
    everything else is IDNA-encoded.  Results are memoized via lru_cache.

    :param host: host component, brackets already stripped.
    :param validate_host: reject characters outside the reg-name grammar
        for ASCII hosts.
    """
    # If the host ends with a digit or contains a colon, its likely
    # an IP address.
    if host and (host[-1].isdigit() or ":" in host):
        raw_ip, sep, zone = host.partition("%")
        # If it looks like an IP, we check with _ip_compressed_version
        # and fall-through if its not an IP address. This is a performance
        # optimization to avoid parsing IP addresses as much as possible
        # because it is orders of magnitude slower than almost any other
        # operation this library does.
        # Might be an IP address, check it
        #
        # IP Addresses can look like:
        # https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.2
        # - 127.0.0.1 (last character is a digit)
        # - 2001:db8::ff00:42:8329 (contains a colon)
        # - 2001:db8::ff00:42:8329%eth0 (contains a colon)
        # - [2001:db8::ff00:42:8329] (contains a colon -- brackets should
        #   have been removed before it gets here)
        # Rare IP Address formats are not supported per:
        # https://datatracker.ietf.org/doc/html/rfc3986#section-7.4
        #
        # IP parsing is slow, so its wrapped in an LRU
        try:
            ip = ip_address(raw_ip)
        except ValueError:
            pass
        else:
            # These checks should not happen in the
            # LRU to keep the cache size small
            host = ip.compressed
            if ip.version == 6:
                return f"[{host}%{zone}]" if sep else f"[{host}]"
            return f"{host}%{zone}" if sep else host
    # IDNA encoding is slow, skip it for ASCII-only strings
    if host.isascii():
        # Check for invalid characters explicitly; _idna_encode() does this
        # for non-ascii host names.
        host = host.lower()
        if validate_host and (invalid := NOT_REG_NAME.search(host)):
            value, pos, extra = invalid.group(), invalid.start(), ""
            if value == "@" or (value == ":" and "@" in host[pos:]):
                # this looks like an authority string
                extra = (
                    ", if the value includes a username or password, "
                    "use 'authority' instead of 'host'"
                )
            raise ValueError(
                f"Host {host!r} cannot contain {value!r} (at position {pos}){extra}"
            ) from None
        return host
    return _idna_encode(host)
@rewrite_module
def cache_clear() -> None:
    """Clear all LRU caches."""
    for cached in (_idna_encode, _idna_decode, _encode_host):
        cached.cache_clear()
@rewrite_module
def cache_info() -> CacheInfo:
    """Report cache statistics."""
    encode_host_info = _encode_host.cache_info()
    return {
        "idna_encode": _idna_encode.cache_info(),
        "idna_decode": _idna_decode.cache_info(),
        # Legacy keys: the separate ip_address/host_validate caches were
        # folded into _encode_host, so all three report the same stats.
        "ip_address": encode_host_info,
        "host_validate": encode_host_info,
        "encode_host": encode_host_info,
    }
@rewrite_module
def cache_configure(
    *,
    idna_encode_size: Union[int, None] = _DEFAULT_IDNA_SIZE,
    idna_decode_size: Union[int, None] = _DEFAULT_IDNA_SIZE,
    ip_address_size: Union[int, None, UndefinedType] = UNDEFINED,
    host_validate_size: Union[int, None, UndefinedType] = UNDEFINED,
    encode_host_size: Union[int, None, UndefinedType] = UNDEFINED,
) -> None:
    """Configure LRU cache sizes.

    A size of ``None`` makes the corresponding cache unbounded.
    ``ip_address_size`` / ``host_validate_size`` are deprecated aliases
    that feed into ``encode_host_size``.
    """
    global _idna_decode, _idna_encode, _encode_host
    # ip_address_size, host_validate_size are no longer
    # used, but are kept for backwards compatibility.
    if ip_address_size is not UNDEFINED or host_validate_size is not UNDEFINED:
        warnings.warn(
            "cache_configure() no longer accepts the "
            "ip_address_size or host_validate_size arguments, "
            "they are used to set the encode_host_size instead "
            "and will be removed in the future",
            DeprecationWarning,
            stacklevel=2,
        )
        # Merge the legacy sizes into encode_host_size: None (unbounded)
        # wins outright; otherwise the largest explicit size wins.
        if encode_host_size is not None:
            for size in (ip_address_size, host_validate_size):
                if size is None:
                    encode_host_size = None
                elif encode_host_size is UNDEFINED:
                    if size is not UNDEFINED:
                        encode_host_size = size
                elif size is not UNDEFINED:
                    if TYPE_CHECKING:
                        assert isinstance(size, int)
                        assert isinstance(encode_host_size, int)
                    encode_host_size = max(size, encode_host_size)
        if encode_host_size is UNDEFINED:
            encode_host_size = _DEFAULT_ENCODE_SIZE
    # Rebuild each cached function around its original (__wrapped__) body
    # with the newly requested cache size.
    _encode_host = lru_cache(encode_host_size)(_encode_host.__wrapped__)
    _idna_decode = lru_cache(idna_decode_size)(_idna_decode.__wrapped__)
    _idna_encode = lru_cache(idna_encode_size)(_idna_encode.__wrapped__)
```
|
==========================================================================================
SOURCE CODE FILE: activate.bat
LINES: 1
SIZE: 1.00 KB
PATH: scripts\freecad_env\Scripts\activate.bat
ENCODING: utf-8
```bat
@echo off
rem This file is UTF-8 encoded, so we need to update the current code page while executing it
for /f "tokens=2 delims=:." %%a in ('"%SystemRoot%\System32\chcp.com"') do (
    set _OLD_CODEPAGE=%%a
)
if defined _OLD_CODEPAGE (
    "%SystemRoot%\System32\chcp.com" 65001 > nul
)
rem Absolute location of the virtual environment this script activates.
set "VIRTUAL_ENV=C:\FreeCAD_repo\scripts\freecad_env"
rem Restore any previously-saved prompt/PYTHONHOME before saving them again.
if not defined PROMPT set PROMPT=$P$G
if defined _OLD_VIRTUAL_PROMPT set PROMPT=%_OLD_VIRTUAL_PROMPT%
if defined _OLD_VIRTUAL_PYTHONHOME set PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%
rem Save the prompt, then prefix it with the venv name.
set "_OLD_VIRTUAL_PROMPT=%PROMPT%"
set "PROMPT=(freecad_env) %PROMPT%"
rem Clear PYTHONHOME (saving it first) so it cannot redirect the interpreter.
if defined PYTHONHOME set _OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME%
set PYTHONHOME=
rem Save the original PATH once, then put the venv's Scripts directory first.
if defined _OLD_VIRTUAL_PATH set PATH=%_OLD_VIRTUAL_PATH%
if not defined _OLD_VIRTUAL_PATH set _OLD_VIRTUAL_PATH=%PATH%
set "PATH=%VIRTUAL_ENV%\Scripts;%PATH%"
set "VIRTUAL_ENV_PROMPT=freecad_env"
:END
rem Restore the console code page changed at the top of this script.
if defined _OLD_CODEPAGE (
    "%SystemRoot%\System32\chcp.com" %_OLD_CODEPAGE% > nul
)
```
|
============================================================================================
SOURCE CODE FILE: deactivate.bat
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Scripts\deactivate.bat
ENCODING: utf-8
```bat
@echo off
rem Restore the prompt saved by activate.bat.
if defined _OLD_VIRTUAL_PROMPT (
    set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
)
set _OLD_VIRTUAL_PROMPT=
rem Restore PYTHONHOME if activate.bat saved one.
if defined _OLD_VIRTUAL_PYTHONHOME (
    set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%"
    set _OLD_VIRTUAL_PYTHONHOME=
)
rem Restore the original PATH and clear the venv marker variables.
if defined _OLD_VIRTUAL_PATH (
    set "PATH=%_OLD_VIRTUAL_PATH%"
)
set _OLD_VIRTUAL_PATH=
set VIRTUAL_ENV=
set VIRTUAL_ENV_PROMPT=
:END
```
|
================================================================================
home = C:\ProgramData\miniconda3
include-system-site-packages = false
version = 3.13.2
executable = C:\ProgramData\miniconda3\python.exe
command = C:\ProgramData\miniconda3\python.exe -m venv C:\FreeCAD_repo\scripts\freecad_env
|
=========================================================================
SOURCE CODE FILE: freecad_repo.py
LINES: 11
SIZE: 12.78 KB
PATH: scripts\freecad_repo.py
ENCODING: utf-8
```py
import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import subprocess
import time
from pathlib import Path
import json
import re
from concurrent.futures import ThreadPoolExecutor
# Configuration
BASE_DIR = "freecad_corpus"  # root folder for all downloaded material
os.makedirs(BASE_DIR, exist_ok=True)
MAX_WORKERS = 5  # For concurrent downloads
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
HEADERS = {'User-Agent': USER_AGENT}  # sent with every HTTP request below
def sanitize_filename(name):
    """Make safe filenames from URLs"""
    # Replace anything outside word chars, "-", "_", ".", and space with
    # "_", then cap the length and drop trailing whitespace.
    safe = re.sub(r'[^\w\-_. ]', '_', name)
    return safe[:200].rstrip()
def download_text(url, output_path):
    """Download *url*, strip boilerplate markup and save the visible text.

    Writes "URL: <url>" followed by the cleaned text to *output_path*.

    :returns: True on success, False on any failure.
    """
    try:
        print(f"Downloading: {url}")
        response = requests.get(url, headers=HEADERS, timeout=20)
        # Fail fast on HTTP errors (404/500/...) instead of silently
        # saving the body of an error page into the corpus.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        # Remove unwanted elements
        for element in soup(['script', 'style', 'nav', 'footer', 'iframe', 'head', 'table', 'img', 'svg']):
            element.decompose()
        # Get clean text
        text = soup.get_text(separator='\n', strip=True)
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(f"URL: {url}\n\n")
            f.write(text)
        return True
    except Exception as e:
        print(f"Error downloading {url}: {str(e)}")
        return False
def clone_or_pull_repo(repo_url, output_dir):
    """Clone or pull a git repository"""
    # Derive the checkout directory name from the URL, dropping ".git".
    repo_name = repo_url.split('/')[-1]
    repo_name = repo_name[:-4] if repo_name.endswith('.git') else repo_name
    repo_path = os.path.join(output_dir, repo_name)
    if os.path.exists(repo_path):
        print(f"Updating repository: {repo_name}")
        try:
            subprocess.run(['git', '-C', repo_path, 'pull'], check=True)
        except subprocess.CalledProcessError as exc:
            print(f"Error updating {repo_name}: {str(exc)}")
    else:
        print(f"Cloning repository: {repo_name}")
        try:
            subprocess.run(['git', 'clone', '--depth', '1', repo_url, repo_path], check=True)
        except subprocess.CalledProcessError as exc:
            print(f"Error cloning {repo_name}: {str(exc)}")
    return repo_path
def find_and_download_links(start_url, output_dir, base_domain=None, depth=2, current_depth=0):
    """Recursively save a page as text and crawl its same-domain links.

    Fix vs. the previous version: each page is fetched ONCE and the same
    response is used both for saving the text and for extracting links
    (previously every page was fetched twice).  The output directory is
    also created on demand.

    :param start_url: page to fetch.
    :param output_dir: directory receiving the ``.txt`` dumps.
    :param base_domain: domain to stay on; derived from *start_url* if None.
    :param depth: maximum recursion depth.
    :param current_depth: internal recursion counter.
    """
    if current_depth >= depth:
        return
    if not base_domain:
        base_domain = urlparse(start_url).netloc
    try:
        page_name = sanitize_filename(urlparse(start_url).path[1:].replace('/', '_') or "index")
        output_path = os.path.join(output_dir, f"{page_name}.txt")
        print(f"Downloading: {start_url}")
        response = requests.get(start_url, headers=HEADERS, timeout=20)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        # Collect same-domain, crawlable links BEFORE stripping markup,
        # so navigation links are still present.
        links = set()
        for link in soup.find_all('a', href=True):
            href = link['href']
            if not href or href.startswith('#'):
                continue
            # Handle relative URLs
            full_url = urljoin(start_url, href)
            parsed = urlparse(full_url)
            if parsed.netloc and parsed.netloc != base_domain:
                continue  # stay on the target domain
            if any(full_url.endswith(x) for x in ('.pdf', '.zip', '.tar.gz')):
                continue  # Skip binary files
            if 'action=' in full_url or 'edit' in full_url:
                continue  # Skip edit links
            links.add(full_url)
        if not os.path.exists(output_path):
            # Strip boilerplate and save the cleaned text rendering.
            for element in soup(['script', 'style', 'nav', 'footer', 'iframe', 'head', 'table', 'img', 'svg']):
                element.decompose()
            text = soup.get_text(separator='\n', strip=True)
            os.makedirs(output_dir, exist_ok=True)
            with open(output_path, "w", encoding="utf-8") as f:
                f.write(f"URL: {start_url}\n\n")
                f.write(text)
        # Recurse into the discovered links in parallel.
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            futures = [
                executor.submit(
                    find_and_download_links,
                    link,
                    output_dir,
                    base_domain,
                    depth,
                    current_depth + 1,
                )
                for link in links
                if link != start_url
            ]
            for future in futures:
                future.result()
    except Exception as e:
        print(f"Error processing {start_url}: {str(e)}")
def download_github_repos(org_name, output_dir):
    """Download all repositories from a GitHub organization.

    Clones (or pulls) every non-fork repository, plus its wiki when the
    repo reports one.

    :param org_name: GitHub organization name, e.g. ``"FreeCAD"``.
    :param output_dir: directory that receives the clones.
    """
    os.makedirs(output_dir, exist_ok=True)
    api_url = f"https://api.github.com/orgs/{org_name}/repos?per_page=100"
    try:
        response = requests.get(api_url, headers=HEADERS, timeout=15)
        repos = response.json()
        # Follow GitHub's Link-header pagination until exhausted.
        while 'next' in response.links:
            response = requests.get(response.links['next']['url'], headers=HEADERS, timeout=15)
            repos.extend(response.json())
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            for repo in repos:
                if not repo['fork']:  # Skip forks
                    clone_url = repo['clone_url']
                    executor.submit(clone_or_pull_repo, clone_url, output_dir)
                    # Also download wiki if available
                    # NOTE(review): 'has_wiki' can be True even when the wiki
                    # has no pages, so some wiki clones may fail -- confirm.
                    if repo['has_wiki']:
                        wiki_url = clone_url.replace('.git', '.wiki.git')
                        wiki_dir = os.path.join(output_dir, f"{repo['name']}_wiki")
                        executor.submit(clone_or_pull_repo, wiki_url, wiki_dir)
    except Exception as e:
        print(f"Error fetching GitHub repos: {str(e)}")
def discover_forum_content(base_url, output_dir):
    """Discover and download forum content.

    :param base_url: forum root URL (phpBB-style ``viewforum.php`` links
        are constructed from it).
    :param output_dir: directory for the downloaded pages.
    """
    os.makedirs(output_dir, exist_ok=True)
    # Download main forum pages
    forum_sections = [
        "Python-scripting",
        "Developers",
        "Help-on-using-FreeCAD",
        "Open-Discussion",
        "Tutorials-and-articles"
    ]
    for section in forum_sections:
        # NOTE(review): phpBB's viewforum.php?f= parameter normally takes a
        # numeric forum id, not a name -- these URLs may not resolve; verify
        # the ids against the actual forum.
        section_url = f"{base_url}/viewforum.php?f={section}"
        find_and_download_links(section_url, output_dir, urlparse(base_url).netloc, depth=1)
def download_external_resources():
    """Download resources from external sites"""
    target_dir = os.path.join(BASE_DIR, "external_resources")
    os.makedirs(target_dir, exist_ok=True)
    # Known FreeCAD-related community and resource sites.
    resource_sites = (
        "https://wiki.opensourceecology.org/wiki/FreeCAD",
        "https://www.freecadweb.org/wiki/",
        "https://forum.freecadweb.org/",
        "https://www.reddit.com/r/FreeCAD/",
        "https://hackaday.io/projects/tag/freecad",
        "https://grabcad.com/library?query=freecad",
        "https://www.thingiverse.com/search?q=freecad",
        "https://all3dp.com/search/freecad/",
        "https://www.instructables.com/search/?q=freecad",
        "https://dev.opendesk.cc/search?q=freecad",
        "https://www.youmagine.com/search?query=freecad",
        "https://www.cadforum.net/search?q=freecad",
        "https://www.3dcontentcentral.com/search.aspx?arg=freecad",
        "https://www.myminifactory.com/search/freecad",
    )
    # Shallow crawl (depth=1) of each site in parallel.
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for site in resource_sites:
            executor.submit(find_and_download_links, site, target_dir, depth=1)
def download_developer_documentation():
    """Download comprehensive developer documentation"""
    target_dir = os.path.join(BASE_DIR, "developer_docs")
    os.makedirs(target_dir, exist_ok=True)
    dev_resources = (
        "https://wiki.freecad.org/Developer_hub",
        "https://wiki.freecad.org/Source_documentation",
        "https://wiki.freecad.org/Compiling",
        "https://wiki.freecad.org/Licence",
        "https://wiki.freecad.org/Source_code_management",
        "https://wiki.freecad.org/Third_Party_Tools",
        "https://wiki.freecad.org/Third_Party_Libraries",
        "https://wiki.freecad.org/Testing",
        "https://wiki.freecad.org/Bug_Triage",
        "https://wiki.freecad.org/Translating_an_external_workbench",
        "https://wiki.freecad.org/Embedding_FreeCAD",
        "https://wiki.freecad.org/FreeCAD_Build_Tool",
        "https://wiki.freecad.org/Doxygen",
        "https://wiki.freecad.org/Code_snippets",
        "https://wiki.freecad.org/Line_drawing_function",
        "https://wiki.freecad.org/Std_PythonHelp",
        "https://wiki.freecad.org/Property_editor",
        "https://wiki.freecad.org/Interface_Customization",
        "https://wiki.freecad.org/Macro_recipes",
        "https://api.freecad.org/",
    )
    # Each page is saved as <last-url-segment>.txt, fetched in parallel.
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for url in dev_resources:
            filename = f"{sanitize_filename(url.split('/')[-1])}.txt"
            executor.submit(download_text, url, os.path.join(target_dir, filename))
def download_workbenches():
    """Download information about all FreeCAD workbenches"""
    target_dir = os.path.join(BASE_DIR, "workbenches")
    os.makedirs(target_dir, exist_ok=True)
    # Overview page first.
    workbench_url = "https://wiki.freecad.org/Workbenches"
    download_text(workbench_url, os.path.join(target_dir, "workbenches_overview.txt"))
    # Then every /Workbench_* page linked from it.
    response = requests.get(workbench_url, headers=HEADERS, timeout=15)
    soup = BeautifulSoup(response.text, 'html.parser')
    workbench_links = {
        urljoin(workbench_url, anchor['href'])
        for anchor in soup.select('a[href^="/Workbench_"]')
    }
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for page_url in workbench_links:
            filename = f"{sanitize_filename(page_url.split('/')[-1])}.txt"
            executor.submit(download_text, page_url, os.path.join(target_dir, filename))
def download_api_documentation():
    """Download API and code reference documentation"""
    target_dir = os.path.join(BASE_DIR, "api_documentation")
    os.makedirs(target_dir, exist_ok=True)
    api_sources = (
        "https://freecad.github.io/SourceDoc/",
        "https://wiki.freecad.org/Doxygen",
        "https://wiki.freecad.org/Python_scripting_api",
        "https://wiki.freecad.org/Code_snippets",
        "https://wiki.freecad.org/Std_PythonHelp",
        "https://wiki.freecad.org/Scripted_objects",
        "https://wiki.freecad.org/Creating_a_FeaturePython_object",
        "https://wiki.freecad.org/Scripted_objects_with_attachment",
        "https://wiki.freecad.org/Embedding_FreeCADGui",
        "https://wiki.freecad.org/Embedding_FreeCAD",
        "https://wiki.freecad.org/FreeCAD_API",
        "https://wiki.freecad.org/FreeCADGui_API",
        "https://wiki.freecad.org/Base_API",
        "https://wiki.freecad.org/Placement_API",
        "https://wiki.freecad.org/Mesh_API",
        "https://wiki.freecad.org/Part_API",
        "https://wiki.freecad.org/PartDesign_API",
    )
    # Each page is saved as <last-url-segment>.txt, fetched in parallel.
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for api_url in api_sources:
            filename = f"{sanitize_filename(api_url.split('/')[-1])}.txt"
            executor.submit(download_text, api_url, os.path.join(target_dir, filename))
def main():
    """Build the full FreeCAD text corpus under BASE_DIR, step by step."""
    print("Starting comprehensive FreeCAD corpus creation...")
    # 1. Download all official documentation
    print("\n[1/6] Downloading official documentation...")
    official_docs_dir = os.path.join(BASE_DIR, "official_documentation")
    find_and_download_links("https://wiki.freecad.org/Main_Page", official_docs_dir, "wiki.freecad.org", depth=3)
    # 2. Download all repositories
    print("\n[2/6] Downloading repositories...")
    repos_dir = os.path.join(BASE_DIR, "repositories")
    download_github_repos("FreeCAD", repos_dir)
    # 3. Download examples and tutorials
    print("\n[3/6] Downloading examples and tutorials...")
    examples_dir = os.path.join(BASE_DIR, "examples_tutorials")
    os.makedirs(examples_dir, exist_ok=True)
    clone_or_pull_repo("https://github.com/FreeCAD/Examples", examples_dir)
    clone_or_pull_repo("https://github.com/FreeCAD/FreeCAD-macros", examples_dir)
    find_and_download_links("https://wiki.freecad.org/Tutorials", examples_dir, "wiki.freecad.org", depth=2)
    # 4. Download developer resources
    print("\n[4/6] Downloading developer resources...")
    download_developer_documentation()
    download_api_documentation()
    # 5. Download workbench documentation
    print("\n[5/6] Downloading workbench documentation...")
    download_workbenches()
    # 6. Download external resources
    print("\n[6/6] Downloading external resources...")
    download_external_resources()
    print("\nFreeCAD corpus creation complete!")
    print(f"All data has been saved to: {os.path.abspath(BASE_DIR)}")
# Script entry point.
if __name__ == "__main__":
    main()
```
|
===========================================================================
SOURCE CODE FILE: process_corpus.py
LINES: 9
SIZE: 4.95 KB
PATH: scripts\process_corpus.py
ENCODING: utf-8
```py
import os
import re
import shutil
import PyPDF2
from bs4 import BeautifulSoup
from pathlib import Path
import concurrent.futures
import json
import html2text
# Configuration
ROOT_DIR = r"C:\FreeCAD_repo"  # repository root (Windows layout)
PDF_DIR = os.path.join(ROOT_DIR, "pdf")  # raw PDF input
SCRIPTS_DIR = os.path.join(ROOT_DIR, "scripts")
CORPUS_DIR = os.path.join(SCRIPTS_DIR, "freecad_corpus")  # scraped corpus input
OUTPUT_DIR = os.path.join(ROOT_DIR, "processed_corpus")  # processed text output
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Initialize HTML to text converter
html_converter = html2text.HTML2Text()
html_converter.ignore_links = False
html_converter.bypass_tables = False
# NOTE(review): html_converter is not referenced by the functions visible
# here -- presumably used elsewhere or leftover; confirm before removing.
def clean_text(text):
    """Clean and normalize text content"""
    # Collapse every whitespace run (including newlines) to a single space.
    collapsed = re.sub(r'\s+', ' ', text)
    # Drop any non-ASCII characters.
    ascii_only = collapsed.encode('ascii', 'ignore').decode('ascii')
    # Strip fenced code blocks; they are handled separately.
    without_code = re.sub(r'```.*?```', '', ascii_only, flags=re.DOTALL)
    return without_code.strip()
def extract_pdf_text(pdf_path):
    """Extract text from PDF files"""
    try:
        with open(pdf_path, 'rb') as handle:
            reader = PyPDF2.PdfReader(handle)
            # Concatenate every page's text, newline-separated.
            raw = "".join(page.extract_text() + "\n" for page in reader.pages)
        return clean_text(raw)
    except Exception as exc:
        print(f"Error processing PDF {pdf_path}: {str(exc)}")
        return None
def process_html_file(html_path):
    """Convert HTML to clean text"""
    try:
        with open(html_path, 'r', encoding='utf-8') as handle:
            soup = BeautifulSoup(handle.read(), 'html.parser')
        # Strip boilerplate markup before extracting the text.
        for tag in soup(['script', 'style', 'nav', 'footer', 'iframe', 'head']):
            tag.decompose()
        return clean_text(soup.get_text(separator='\n', strip=True))
    except Exception as exc:
        print(f"Error processing HTML {html_path}: {str(exc)}")
        return None
def process_source_code_file(code_path):
    """Process source code files with appropriate formatting"""
    try:
        with open(code_path, 'r', encoding='utf-8') as handle:
            source = handle.read()
        # Wrap the raw source in a fenced block with a filename header.
        header = f"SOURCE CODE FILE: {os.path.basename(code_path)}\n\n"
        return header + "```python\n" + source + "\n```\n"
    except Exception as exc:
        print(f"Error processing code file {code_path}: {str(exc)}")
        return None
def process_file(file_path):
    """Process individual files based on their type.

    Dispatches on extension to the matching extractor and, when text is
    produced, writes it under OUTPUT_DIR using a path-derived safe filename.
    Unhandled extensions are silently ignored.
    """
    ext = os.path.splitext(file_path)[1].lower()
    output_text = None
    if ext == '.pdf':
        output_text = extract_pdf_text(file_path)
    elif ext in ['.html', '.htm']:
        output_text = process_html_file(file_path)
    elif ext in ['.py', '.cpp', '.h', '.cxx', '.hxx']:
        output_text = process_source_code_file(file_path)
    elif ext in ['.txt', '.md', '.rst']:
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                output_text = clean_text(file.read())
        except Exception as e:
            print(f"Error processing text file {file_path}: {str(e)}")
    if output_text:
        # Save processed file
        rel_path = os.path.relpath(file_path, ROOT_DIR)
        # Flatten the relative path into a single filesystem-safe name.
        output_path = os.path.join(OUTPUT_DIR, re.sub(r'[^\w\-_.]', '_', rel_path) + '.txt')
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, 'w', encoding='utf-8') as out_file:
            out_file.write(output_text)
def process_directory(directory):
    """Walk *directory* recursively and run process_file on every file found."""
    for dirpath, _, filenames in os.walk(directory):
        for name in filenames:
            process_file(os.path.join(dirpath, name))
def create_corpus_index():
    """Create an index of all processed files.

    Writes corpus_index.json into OUTPUT_DIR listing each output file's
    OUTPUT_DIR-relative path, byte size, and modification timestamp.
    """
    index = []
    for root, _, files in os.walk(OUTPUT_DIR):
        for file in files:
            # NOTE(review): a rerun will also index a pre-existing
            # corpus_index.json — confirm that is intended.
            file_path = os.path.join(root, file)
            rel_path = os.path.relpath(file_path, OUTPUT_DIR)
            index.append({
                "path": rel_path,
                "size": os.path.getsize(file_path),
                "modified": os.path.getmtime(file_path)
            })
    index_path = os.path.join(OUTPUT_DIR, "corpus_index.json")
    with open(index_path, 'w', encoding='utf-8') as f:
        json.dump(index, f, indent=2)
def main():
    """Drive the pipeline: process source directories, then build the index."""
    print("Starting FreeCAD corpus processing...")
    # Process all directories in parallel (one worker per directory).
    with concurrent.futures.ThreadPoolExecutor() as executor:
        executor.submit(process_directory, PDF_DIR)
        executor.submit(process_directory, CORPUS_DIR)
        executor.submit(process_directory, SCRIPTS_DIR)
    # The executor's __exit__ waits for all submitted work, so indexing
    # below only runs after every directory has been processed.
    # Create corpus index
    create_corpus_index()
    print(f"\nCorpus processing complete! Processed files saved to: {OUTPUT_DIR}")
    print(f"Corpus index created at: {os.path.join(OUTPUT_DIR, 'corpus_index.json')}")
if __name__ == "__main__":
    main()
```
|
=================================================================================
SOURCE CODE FILE: process_merge_corpus.py
LINES: 36
SIZE: 24.16 KB
PATH: scripts\process_merge_corpus.py
ENCODING: utf-8
```py
import os
import re
import shutil
import PyPDF2
from bs4 import BeautifulSoup
from pathlib import Path
import concurrent.futures
import json
import html2text
import logging
from tqdm import tqdm
import hashlib
import warnings
from bs4 import MarkupResemblesLocatorWarning
# Suppress BeautifulSoup warnings
warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning)
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('corpus_processing.log'),
logging.StreamHandler()
]
)
# --- Configuration ---
ROOT_DIR = r"C:\FreeCAD_repo"
PDF_DIR = os.path.join(ROOT_DIR, "pdf")
SCRIPTS_DIR = os.path.join(ROOT_DIR, "scripts")
CORPUS_DIR = os.path.join(SCRIPTS_DIR, "freecad_corpus")
OUTPUT_DIR = os.path.join(ROOT_DIR, "processed_corpus")
os.makedirs(OUTPUT_DIR, exist_ok=True)
MIN_OUTPUT_TEXT_LENGTH = 30 # Minimum characters for a processed file to be saved
# Initialize HTML to text converter
html_converter = html2text.HTML2Text()
html_converter.ignore_links = True
html_converter.bypass_tables = True # Keep table content, but not structure
html_converter.body_width = 0 # No line wrapping
html_converter.ignore_images = True
html_converter.ignore_emphasis = True
html_converter.skip_internal_links = True
html_converter.ignore_tables = False # Try to get text from tables
# --- Helper Functions ---
def setup_directories():
    """Create any missing corpus directory; existing ones are left untouched."""
    for target in (PDF_DIR, SCRIPTS_DIR, CORPUS_DIR, OUTPUT_DIR):
        os.makedirs(target, exist_ok=True)
    logging.info(f"All required directories verified/created.")
def clean_text(text):
    """Normalize extracted text: drop boilerplate, unify newlines and spacing.

    Empty/None input yields "". Blank-line runs collapse to at most one
    blank line; horizontal whitespace runs collapse to one space.
    """
    if not text:
        return ""
    # Strip well-known boilerplate phrases (case-insensitive, word-bounded).
    phrases = (
        "Terms of Use", "Privacy Policy", "Cookie Policy",
        "All rights reserved", "Copyright ©", "Generated by Doxygen", "Generated by"
    )
    text = re.sub(r'(?i)\b(' + '|'.join(re.escape(p) for p in phrases) + r')\b', '', text)
    # Unify CRLF/CR line endings to LF.
    text = text.replace('\r\n', '\n').replace('\r', '\n')
    # Whitespace-only gaps between lines become a single blank line.
    text = re.sub(r'\n\s*\n', '\n\n', text)
    # Runs of spaces/tabs/NBSPs collapse to one space.
    text = re.sub(r'[ \t\u00A0]{2,}', ' ', text)
    # Trim each line, then cap consecutive newlines at two.
    text = "\n".join(line.strip() for line in text.splitlines())
    text = re.sub(r'\n{3,}', '\n\n', text)
    return text.strip()
def extract_pdf_text(pdf_path):
    """Extract text from PDF files with improved error handling.

    Attempts empty-password decryption for encrypted PDFs and skips
    individual unreadable pages. Returns cleaned text, or None when nothing
    could be extracted or the document failed to open.
    """
    try:
        with open(pdf_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            if reader.is_encrypted:
                try:
                    # Many "encrypted" PDFs only set an owner password.
                    reader.decrypt('')
                except Exception as e:
                    logging.warning(f"Could not decrypt PDF {pdf_path}: {e}. Skipping.")
                    return None
            text_parts = []
            for i, page in enumerate(reader.pages):
                try:
                    page_text = page.extract_text()
                    if page_text:
                        text_parts.append(page_text)
                except Exception as e:
                    # One broken page must not abort the whole document.
                    logging.warning(f"Error extracting page {i+1} from {pdf_path}: {str(e)}")
                    continue
            if not text_parts:
                logging.info(f"No text extracted from PDF {pdf_path}")
                return None
            return clean_text('\n'.join(text_parts))
    except Exception as e:
        logging.error(f"Error processing PDF {pdf_path}: {str(e)}")
        return None
def parse_with_bs4(content):
    """Parse HTML using BeautifulSoup with multiple parser fallbacks.

    Tries lxml, then html.parser, then html5lib; the first parser that
    succeeds wins. Raises Exception when every parser fails so the caller
    can fall through to the next strategy.
    """
    parsers = ["lxml", "html.parser", "html5lib"]
    for parser in parsers:
        try:
            soup = BeautifulSoup(content, parser)
            # Remove common non-content tags
            for element_type in ['script', 'style', 'nav', 'footer', 'iframe', 'head', 'noscript', 'header', 'aside', 'form']:
                for element in soup.find_all(element_type):
                    element.decompose()
            # Optionally, remove elements by common boilerplate IDs/classes
            # for selector in ['.sidebar', '#TableOfContents', '.toc', '.breadcrumbs']:
            #     for element in soup.select(selector):
            #         element.decompose()
            return soup.get_text(separator='\n', strip=True)
        except Exception as e:
            logging.debug(f"BS4 parser {parser} failed: {e}")
            continue
    logging.warning("All BS4 parsers failed for a document.")
    raise Exception("All BS4 parsers failed")
def parse_with_html2text(content):
    """Fallback HTML-to-text conversion via the module-level html2text instance."""
    return html_converter.handle(content)
def parse_with_regex(content):
    """Final fallback: regex-strip tags, inserting breaks for block elements."""
    # Block-level openers become newlines so adjacent words don't fuse.
    with_breaks = re.sub(r'<(br|p|div|h[1-6]|li|tr|hr)[^>]*>', '\n', content, flags=re.IGNORECASE)
    # Every remaining tag becomes a single space.
    no_tags = re.sub(r'<[^>]+>', ' ', with_breaks)
    # Collapse all whitespace runs to single spaces.
    return re.sub(r'\s+', ' ', no_tags).strip()
def process_html_file(html_path):
    """Convert HTML to clean text with multiple fallback strategies.

    Reads with UTF-8 (latin-1 fallback), then tries BeautifulSoup,
    html2text, and a regex stripper in order. Returns cleaned text, or None
    when reading or every strategy fails.
    """
    try:
        # Try to read with utf-8, then latin-1 as a fallback for HTML files
        content = None
        try:
            with open(html_path, 'r', encoding='utf-8') as file:
                content = file.read()
        except UnicodeDecodeError:
            logging.debug(f"UTF-8 failed for {html_path}, trying latin-1.")
            try:
                with open(html_path, 'r', encoding='latin-1') as file:
                    content = file.read()
            except Exception as e:
                logging.error(f"Error reading HTML file {html_path} with latin-1: {str(e)}")
                return None
        if content is None:  # Should not happen if previous block is correct
            logging.error(f"Could not read HTML file {html_path}")
            return None
        # Pre-process problematic HTML patterns (optional, can be aggressive)
        # content = re.sub(r'<(!DOCTYPE|!--|script).*?>.*?</\1>', '', content, flags=re.DOTALL|re.IGNORECASE)
        strategies = [
            ("BeautifulSoup", parse_with_bs4),
            ("html2text", parse_with_html2text),
            ("Regex", parse_with_regex)
        ]
        parsed_text = None
        for strategy_name, strategy_func in strategies:
            try:
                result = strategy_func(content)
                # A strategy counts as successful only when it returns
                # non-empty, non-whitespace text.
                if result and result.strip():
                    logging.debug(f"Strategy '{strategy_name}' succeeded for {html_path}")
                    parsed_text = result
                    break
            except Exception as e:
                logging.debug(f"Strategy '{strategy_name}' failed for {html_path}: {e}")
                continue
        if parsed_text:
            return clean_text(parsed_text)
        else:
            logging.warning(f"All parsing strategies failed to extract meaningful content from {html_path}")
            return None
    except Exception as e:
        logging.error(f"Error processing HTML file {html_path}: {str(e)}")
        return None
def process_source_code_file(code_path):
    """Process source code files with improved formatting and encoding handling.

    Returns a cleaned metadata header (filename, line count, size, relative
    path, encoding) followed by the file's content in a fenced code block,
    or None when the file cannot be read at all.
    """
    encodings_to_try = ['utf-8', 'latin-1', 'cp1252']
    content = None
    detected_encoding = None
    for enc in encodings_to_try:
        try:
            with open(code_path, 'r', encoding=enc) as file:
                content = file.read()
            detected_encoding = enc
            logging.debug(f"Successfully read {code_path} with encoding {enc}")
            break
        except UnicodeDecodeError:
            logging.debug(f"Failed to read {code_path} with encoding {enc}")
            continue
        except Exception as e:
            logging.error(f"Error reading code file {code_path} with encoding {enc}: {str(e)}")
            return None
    if content is None:
        # Last resort: decode as UTF-8, replacing undecodable bytes.
        try:
            logging.warning(f"All tried encodings failed for {code_path}. Attempting utf-8 with error replacement.")
            with open(code_path, 'r', encoding='utf-8', errors='replace') as file:
                content = file.read()
            detected_encoding = "utf-8 (with replacements)"
        except Exception as e:
            logging.error(f"Truly failed to read {code_path} after all attempts: {str(e)}")
            return None
    # Generate file metadata.
    # BUG FIX: the original used content.count(r'\n'), which counts the
    # two-character sequence backslash-n (i.e. "\n" escape sequences in the
    # source text), not actual newlines — so LINES reported the number of
    # string escapes, not the file's line count.
    line_count = content.count('\n') + 1
    metadata_parts = [
        f"SOURCE CODE FILE: {os.path.basename(code_path)}",
        f"LINES: {line_count}",
        f"SIZE: {os.path.getsize(code_path) / 1024:.2f} KB",
        f"PATH: {os.path.relpath(code_path, ROOT_DIR)}",
        f"ENCODING: {detected_encoding}"
    ]
    cleaned_metadata = clean_text("\n".join(metadata_parts))
    # Normalize line endings for the code content itself without otherwise
    # altering it (the code must survive verbatim inside the fence).
    code_content_normalized = "\n".join(content.splitlines())
    lang_ext = os.path.splitext(code_path)[1]
    if lang_ext.startswith('.'):
        lang_ext = lang_ext[1:]
    lang_ext = lang_ext.lower() if lang_ext else "text"
    # Combine cleaned metadata with largely preserved code content.
    formatted_output = (
        f"{cleaned_metadata}\n\n"
        f"```{lang_ext}\n"
        f"{code_content_normalized}\n"
        f"```\n"
    )
    return formatted_output
def process_text_file(text_path):
    """Process plain text files with encoding fallback.

    Tries several encodings in order; as a last resort decodes UTF-8 with
    replacement characters. Returns cleaned text or None.
    """
    def _read(enc, errors='strict'):
        # Small helper so both the fallback loop and the last-resort read
        # share one open/read path.
        with open(text_path, 'r', encoding=enc, errors=errors) as fh:
            return fh.read()

    content = None
    for encoding in ('utf-8', 'latin-1', 'iso-8859-1', 'cp1252'):
        try:
            content = _read(encoding)
            break
        except UnicodeDecodeError:
            continue
        except Exception as e:
            logging.error(f"Error reading text file {text_path} with encoding {encoding}: {e}")
            return None
    if content is None:
        logging.warning(f"Could not decode text file {text_path} with any supported encoding. Trying utf-8 with replace.")
        try:
            content = _read('utf-8', errors='replace')
        except Exception as e:
            logging.error(f"Truly failed to read text file {text_path}: {str(e)}")
            return None
    return clean_text(content) if content else None
def generate_file_hash(file_path):
    """Return the MD5 hex digest of a file's bytes, or None when unreadable."""
    digest = hashlib.md5()
    try:
        with open(file_path, 'rb') as stream:
            while True:
                block = stream.read(4096)  # bounded memory for large files
                if not block:
                    break
                digest.update(block)
    except OSError as e:
        logging.error(f"Could not open/read file for hashing {file_path}: {e}")
        return None
    return digest.hexdigest()
def process_file(file_path):
    """Process one file; return True only when output was written to disk.

    Returns False when the extension is unhandled, processing produced
    nothing, the result was shorter than MIN_OUTPUT_TEXT_LENGTH, or an
    error occurred.
    """
    try:
        ext = os.path.splitext(file_path)[1].lower()
        output_text = None
        if ext == '.pdf':
            output_text = extract_pdf_text(file_path)
        elif ext in ['.html', '.htm']:
            output_text = process_html_file(file_path)
        elif ext in ['.py', '.cpp', '.h', '.cxx', '.hxx', '.cs', '.java', '.js', '.qml', '.sh', '.bat', '.cmake', '.xml']:  # Expanded list
            output_text = process_source_code_file(file_path)
        elif ext in ['.txt', '.md', '.rst', '.asc', '.log', '.ini', '.cfg', '.json', '.yaml', '.yml']:  # Expanded list
            output_text = process_text_file(file_path)
        else:
            logging.debug(f"Skipping file with unhandled extension {ext}: {file_path}")
            return False  # Not processed due to extension
        if output_text:
            cleaned_output_text = output_text.strip()  # For length check, strip outer whitespace
            if len(cleaned_output_text) < MIN_OUTPUT_TEXT_LENGTH:
                logging.info(f"Skipping {file_path} due to short content after processing (length: {len(cleaned_output_text)}).")
                return False  # Processed, but not saved (too short)
            # Use original file's relative path for safer output naming
            try:
                rel_path = os.path.relpath(file_path, ROOT_DIR)
            except ValueError:  # e.g. file_path is on a different drive than ROOT_DIR
                rel_path = os.path.basename(file_path)  # Fallback to basename
            # Flatten the relative path into one filesystem-safe name.
            safe_filename = re.sub(r'[^\w\-_.]', '_', rel_path) + '.txt'
            output_path = os.path.join(OUTPUT_DIR, safe_filename)
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            with open(output_path, 'w', encoding='utf-8') as out_file:
                out_file.write(output_text)  # Write the non-stripped version if it passed length check
            return True  # Successfully processed and saved
        logging.debug(f"No output text generated for {file_path}")
        return False  # Processed, but no output or not saved
    except Exception as e:
        logging.error(f"Fatal error processing {file_path}: {str(e)}", exc_info=False)  # exc_info=True for full traceback
        return False
def process_directory(directory_path):
    """Process every file under *directory_path*.

    Returns a (processed_count, saved_count) tuple. Processing is
    single-threaded with one tqdm progress bar per directory.
    """
    if not os.path.exists(directory_path):
        logging.warning(f"Directory not found: {directory_path}")
        return 0, 0
    files_to_process = []
    for root, _, files in os.walk(directory_path):
        for file_name in files:
            files_to_process.append(os.path.join(root, file_name))
    processed_count = 0
    saved_count = 0
    if not files_to_process:
        logging.info(f"No files found in {directory_path}")
        return 0, 0
    with tqdm(total=len(files_to_process), desc=f"Processing {os.path.basename(directory_path)}", unit="file") as pbar:
        # Single-threaded on purpose (easier debugging; tqdm stays coherent).
        for file_path_item in files_to_process:
            # process_file returns True only when output was saved to disk.
            if process_file(file_path_item):
                saved_count += 1
            processed_count += 1
            pbar.update(1)
    return processed_count, saved_count
def create_corpus_index():
    """Create a detailed index of all processed files.

    Walks OUTPUT_DIR, hashes each processed .txt file, deletes files whose
    content duplicates an earlier one, and writes corpus_index.json with
    per-file metadata plus summary statistics.
    """
    import time  # local import: only needed for the index timestamp

    index_data = []
    seen_hashes = set()
    duplicate_files_removed = 0
    valid_files_indexed = 0
    all_output_files = []
    for root, _, files in os.walk(OUTPUT_DIR):
        for file_name in files:
            # Skip the index itself and previously merged chunk files.
            if file_name.endswith(".txt") and not file_name.startswith("full_corpus.part") and file_name != "corpus_index.json":
                all_output_files.append(os.path.join(root, file_name))
    if not all_output_files:
        # BUG FIX: the original string lacked the f-prefix and logged the
        # literal text "{OUTPUT_DIR}".
        logging.warning(f"No output files found to index in {OUTPUT_DIR}")
        return
    for file_path in tqdm(all_output_files, desc="Indexing processed files"):
        file_hash = generate_file_hash(file_path)
        if not file_hash:  # Hash generation failed
            continue
        if file_hash in seen_hashes:
            logging.info(f"Duplicate content hash {file_hash} for file {file_path}. Removing duplicate.")
            try:
                os.remove(file_path)  # Remove the duplicate file
                duplicate_files_removed += 1
            except OSError as e:
                logging.error(f"Could not remove duplicate file {file_path}: {e}")
            continue
        seen_hashes.add(file_hash)
        valid_files_indexed += 1
        index_data.append({
            "path": os.path.relpath(file_path, OUTPUT_DIR),
            "size_bytes": os.path.getsize(file_path),
            "modified_timestamp": os.path.getmtime(file_path),
            "md5_hash": file_hash
        })
    total_size_bytes = sum(f['size_bytes'] for f in index_data)
    index_summary = {
        "corpus_stats": {
            "total_indexed_files": valid_files_indexed,
            "duplicate_content_files_removed": duplicate_files_removed,
            "total_corpus_size_mb": round(total_size_bytes / (1024 * 1024), 2),
            # BUG FIX: the original called logging.time.asctime(), relying on
            # the logging module's private internal import of time.
            "index_generated_at": time.asctime()
        },
        "indexed_files": index_data
    }
    index_path = os.path.join(OUTPUT_DIR, "corpus_index.json")
    with open(index_path, 'w', encoding='utf-8') as f:
        json.dump(index_summary, f, indent=2)
    logging.info(f"Created corpus index: {index_path}")
    logging.info(f"Indexed {valid_files_indexed} unique files. Removed {duplicate_files_removed} duplicate content files.")
def merge_corpus_to_chunks(max_chunk_size_mb=500):
    """Merge unique, indexed corpus files into manageable chunks.

    Reads corpus_index.json and concatenates every indexed file (each with a
    per-file metadata header) into full_corpus.partN.txt files, rolling to a
    new chunk whenever the current one would exceed *max_chunk_size_mb*.
    """
    index_path = os.path.join(OUTPUT_DIR, "corpus_index.json")
    if not os.path.exists(index_path):
        logging.error(f"Corpus index not found at {index_path}. Run indexing first.")
        return
    with open(index_path, 'r', encoding='utf-8') as f:
        corpus_data = json.load(f)
    files_to_merge = corpus_data.get("indexed_files", [])
    if not files_to_merge:
        logging.info("No files listed in the index to merge.")
        return
    # Sort files by path for consistent ordering, or by size for better packing (optional)
    files_to_merge.sort(key=lambda x: x["path"])
    chunk_num = 1
    current_chunk_size_bytes = 0
    max_bytes_per_chunk = max_chunk_size_mb * 1024 * 1024
    output_chunk_path = os.path.join(OUTPUT_DIR, f"full_corpus.part{chunk_num}.txt")
    current_chunk_file = open(output_chunk_path, 'w', encoding='utf-8')
    logging.info(f"Starting merge. Writing to chunk: {output_chunk_path}")
    total_files_merged = 0
    for file_info in tqdm(files_to_merge, desc="Merging indexed files into chunks"):
        original_file_path_in_output = os.path.join(OUTPUT_DIR, file_info["path"])
        try:
            with open(original_file_path_in_output, 'r', encoding='utf-8') as f_in:
                content = f_in.read()
            if not content.strip():  # Skip if file became empty somehow
                continue
            # Metadata header for each file within the chunk
            header = (
                f"\n\n{'='*20} FILE: {file_info['path']} {'='*20}\n"
                f"ORIGINAL_SIZE_BYTES: {file_info['size_bytes']}\n"
                f"MD5_HASH: {file_info['md5_hash']}\n"
                f"{'='* (46 + len(file_info['path']))}\n\n"
            )
            # Budget in encoded bytes so the limit tracks on-disk size,
            # not character counts.
            entry_size_bytes = len(header.encode('utf-8')) + len(content.encode('utf-8'))
            # Roll to a new chunk only if the current one is non-empty, so a
            # single oversized entry still gets written somewhere.
            if current_chunk_size_bytes + entry_size_bytes > max_bytes_per_chunk and current_chunk_size_bytes > 0:
                current_chunk_file.close()
                chunk_num += 1
                output_chunk_path = os.path.join(OUTPUT_DIR, f"full_corpus.part{chunk_num}.txt")
                current_chunk_file = open(output_chunk_path, 'w', encoding='utf-8')
                current_chunk_size_bytes = 0
                logging.info(f"Max chunk size reached. Writing to new chunk: {output_chunk_path}")
            current_chunk_file.write(header + content)
            current_chunk_size_bytes += entry_size_bytes
            total_files_merged += 1
        except FileNotFoundError:
            logging.warning(f"File {original_file_path_in_output} not found during merge (listed in index). Skipping.")
        except Exception as e:
            logging.error(f"Error merging file {original_file_path_in_output}: {str(e)}")
    if current_chunk_file and not current_chunk_file.closed:
        current_chunk_file.close()
    logging.info(f"Corpus merging complete. {total_files_merged} files merged into {chunk_num} chunk(s).")
def main():
    """Run the full pipeline: process directories, then index and merge.

    Directories are processed sequentially (one tqdm bar each); indexing
    deduplicates saved files by content hash; merging packs the survivors
    into ~500 MB chunks.
    """
    logging.info("Starting FreeCAD corpus processing...")
    setup_directories()
    total_processed_files = 0
    total_saved_files = 0
    try:
        # NOTE(review): CORPUS_DIR is defined inside SCRIPTS_DIR, so files
        # under it are walked twice (once per entry below) — dedup in
        # create_corpus_index masks this, but confirm it is intended.
        directories_to_scan = [CORPUS_DIR, PDF_DIR, SCRIPTS_DIR]
        # Single-threaded processing for easier debugging and to respect tqdm per directory:
        for dir_path in directories_to_scan:
            if os.path.exists(dir_path):
                logging.info(f"Processing directory: {dir_path}")
                p_count, s_count = process_directory(dir_path)
                total_processed_files += p_count
                total_saved_files += s_count
            else:
                logging.warning(f"Directory specified for processing not found: {dir_path}")
        logging.info(f"Initial file processing complete. Total files attempted: {total_processed_files}, Total files saved: {total_saved_files}")
        # Post-processing: Indexing (deduplication) and Merging
        if total_saved_files > 0:
            logging.info("Starting post-processing: Indexing and Merging...")
            create_corpus_index()  # Creates index, removes duplicates based on content hash
            merge_corpus_to_chunks(max_chunk_size_mb=500)  # Merges unique files from index
        else:
            logging.info("No files were saved, skipping indexing and merging.")
        logging.info(f"\nCorpus processing finished!")
        logging.info(f"Cleaned and processed files are in: {OUTPUT_DIR}")
        logging.info(f"Corpus index (after deduplication): {os.path.join(OUTPUT_DIR, 'corpus_index.json')}")
        logging.info(f"Merged corpus chunks: {os.path.join(OUTPUT_DIR, 'full_corpus.part*.txt')}")
    except KeyboardInterrupt:
        logging.warning("Processing interrupted by user.")
    except Exception as e:
        logging.error(f"Fatal error in main processing pipeline: {str(e)}", exc_info=True)
        # Consider cleanup or state saving if needed here
        raise
if __name__ == "__main__":
    main()
```
|
========================================================================================
SOURCE CODE FILE: process_merge_corpus - Copy.py
LINES: 21
SIZE: 13.29 KB
PATH: scripts\process_merge_corpus - Copy.py
ENCODING: utf-8
```py
import os
import re
import shutil
import PyPDF2
from bs4 import BeautifulSoup
from pathlib import Path
import concurrent.futures
import json
import html2text
import logging
from tqdm import tqdm
import hashlib
import warnings
from bs4 import MarkupResemblesLocatorWarning
# Suppress BeautifulSoup warnings
warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning)
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('corpus_processing.log'),
logging.StreamHandler()
]
)
# Configuration
ROOT_DIR = r"C:\FreeCAD_repo"
PDF_DIR = os.path.join(ROOT_DIR, "pdf")
SCRIPTS_DIR = os.path.join(ROOT_DIR, "scripts")
CORPUS_DIR = os.path.join(SCRIPTS_DIR, "freecad_corpus")
OUTPUT_DIR = os.path.join(ROOT_DIR, "processed_corpus")
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Initialize HTML to text converter
html_converter = html2text.HTML2Text()
html_converter.ignore_links = True
html_converter.bypass_tables = True
html_converter.body_width = 0
html_converter.ignore_images = True
html_converter.ignore_emphasis = True
def setup_directories():
    """Ensure all required corpus directories exist, logging each one."""
    for dir_path in (PDF_DIR, SCRIPTS_DIR, CORPUS_DIR, OUTPUT_DIR):
        os.makedirs(dir_path, exist_ok=True)
        logging.info(f"Directory verified: {dir_path}")
def clean_text(text):
    """Clean and normalize text content (ASCII-only, single-spaced)."""
    if not text:
        return ""
    # Keep ASCII characters only; everything else is dropped.
    text = text.encode('ascii', 'ignore').decode('ascii')
    # Collapse all whitespace runs (newlines included) to single spaces.
    text = re.sub(r'\s+', ' ', text).strip()
    # Remove fenced code blocks entirely.
    text = re.sub(r'```.*?```', '', text, flags=re.DOTALL)
    # After the collapse above no newlines remain, so this cannot match;
    # kept for behavioral parity with the sibling script's pipeline.
    text = re.sub(r'(\n\s*){3,}', '\n\n', text)
    # Strip common boilerplate phrases literally (post-ASCII, so phrases
    # containing non-ASCII characters can no longer occur).
    for phrase in ("Terms of Use", "Privacy Policy", "Cookie Policy",
                   "All rights reserved", "Copyright ©", "Generated by"):
        text = text.replace(phrase, '')
    return text
def extract_pdf_text(pdf_path):
    """Extract text from PDF files with improved error handling.

    Per-page extraction failures are logged and skipped; failure to open or
    parse the whole document returns None.
    """
    try:
        with open(pdf_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            text = []
            for page in reader.pages:
                try:
                    page_text = page.extract_text()
                    if page_text:
                        text.append(page_text)
                except Exception as e:
                    # One broken page must not abort the whole document.
                    logging.warning(f"Error extracting page from {pdf_path}: {str(e)}")
                    continue
            return clean_text('\n'.join(text))
    except Exception as e:
        logging.error(f"Error processing PDF {pdf_path}: {str(e)}")
        return None
def parse_with_bs4(content):
    """Parse HTML with BeautifulSoup, trying lxml, html.parser, html5lib.

    Raises Exception when every parser fails so the caller can fall back.
    """
    parsers = ["lxml", "html.parser", "html5lib"]
    for parser in parsers:
        try:
            soup = BeautifulSoup(content, parser)
            # Drop non-content chrome before extracting visible text.
            for element in soup(['script', 'style', 'nav', 'footer', 'iframe', 'head', 'noscript']):
                element.decompose()
            return soup.get_text(separator='\n', strip=True)
        except Exception:
            continue
    raise Exception("All BS4 parsers failed")
def parse_with_html2text(content):
    """Fallback HTML-to-text conversion via the module-level html2text instance."""
    return html_converter.handle(content)
def parse_with_regex(content):
    """Last-ditch HTML handling: strip every tag, then normalize spacing."""
    untagged = re.sub(r'<[^>]+>', ' ', content)
    return re.sub(r'\s+', ' ', untagged).strip()
def process_html_file(html_path):
    """Convert HTML to clean text with multiple fallback strategies.

    A strategy's result is accepted only when longer than 100 characters;
    otherwise the next strategy is tried. Returns None when all fail.
    """
    try:
        with open(html_path, 'r', encoding='utf-8', errors='replace') as file:
            content = file.read()
        # Pre-process problematic HTML patterns.
        # NOTE(review): the backreference </\1> can only match for <script>;
        # "<!DOCTYPE ...>" and "<!--" have no corresponding close tag, so
        # those alternatives never fire — confirm this is the intended effect.
        content = re.sub(r'<(!DOCTYPE|!--|script).*?>.*?</\1>', '', content, flags=re.DOTALL)
        # Try multiple parsing strategies
        strategies = [
            parse_with_bs4,
            parse_with_html2text,
            parse_with_regex
        ]
        for strategy in strategies:
            try:
                result = strategy(content)
                if result and len(result) > 100:  # Minimum content threshold
                    return clean_text(result)
            except Exception:
                continue
        logging.warning(f"All parsing strategies failed for {html_path}")
        return None
    except Exception as e:
        logging.error(f"Error reading HTML file {html_path}: {str(e)}")
        return None
def process_source_code_file(code_path):
    """Process source code files with improved formatting.

    Returns a metadata header (filename, line count, size, relative path)
    followed by the file's content in a fenced code block, or None when the
    file cannot be read.
    """
    try:
        with open(code_path, 'r', encoding='utf-8') as file:
            content = file.read()
        # Generate file metadata
        lines = content.count('\n') + 1
        size_kb = os.path.getsize(code_path) / 1024
        formatted = f"SOURCE CODE FILE: {os.path.basename(code_path)}\n"
        formatted += f"LINES: {lines}\nSIZE: {size_kb:.2f} KB\n"
        formatted += f"PATH: {os.path.relpath(code_path, ROOT_DIR)}\n\n"
        formatted += "```" + os.path.splitext(code_path)[1][1:] + "\n"
        formatted += content + "\n```\n"
        # BUG FIX: the original returned clean_text(formatted), but clean_text
        # removes ```-fenced blocks wholesale, which deleted the entire code
        # body this function had just wrapped (and collapsed all newlines in
        # the header). Return the formatted text as-is so the code survives.
        return formatted
    except Exception as e:
        logging.error(f"Error processing code file {code_path}: {str(e)}")
        return None
def process_text_file(text_path):
    """Read a plain-text file under several candidate encodings and clean it."""
    for candidate in ('utf-8', 'latin-1', 'iso-8859-1', 'cp1252'):
        try:
            with open(text_path, 'r', encoding=candidate) as fh:
                raw = fh.read()
        except UnicodeDecodeError:
            # Wrong guess — try the next encoding.
            continue
        return clean_text(raw)
    logging.warning(f"Could not decode text file {text_path} with any supported encoding")
    return None
def generate_file_hash(file_path):
    """MD5 hex digest of the file's raw bytes, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(file_path, 'rb') as f:
        while True:
            block = f.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def process_file(file_path):
    """Process one file by extension; return True when output was saved."""
    try:
        # Skip known problematic files
        if "repositories/API" in file_path.replace("\\", "/") and file_path.endswith(".html"):
            logging.debug(f"Skipping known problematic API file: {file_path}")
            return False
        ext = os.path.splitext(file_path)[1].lower()
        output_text = None
        if ext == '.pdf':
            output_text = extract_pdf_text(file_path)
        elif ext in ['.html', '.htm']:
            output_text = process_html_file(file_path)
        elif ext in ['.py', '.cpp', '.h', '.cxx', '.hxx', '.cs', '.java']:
            output_text = process_source_code_file(file_path)
        elif ext in ['.txt', '.md', '.rst']:
            output_text = process_text_file(file_path)
        if output_text:
            rel_path = os.path.relpath(file_path, ROOT_DIR)
            # Flatten the relative path into one filesystem-safe name.
            safe_path = re.sub(r'[^\w\-_.]', '_', rel_path)
            output_path = os.path.join(OUTPUT_DIR, safe_path + '.txt')
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            with open(output_path, 'w', encoding='utf-8') as out_file:
                out_file.write(output_text)
            return True
        return False
    except Exception as e:
        logging.error(f"Fatal error processing {file_path}: {str(e)}")
        return False
def process_directory(directory):
    """Process all files under *directory* with a per-directory progress bar."""
    if not os.path.exists(directory):
        logging.warning(f"Directory not found: {directory}")
        return
    files_to_process = []
    for root, _, files in os.walk(directory):
        for file in files:
            files_to_process.append(os.path.join(root, file))
    with tqdm(total=len(files_to_process), desc=f"Processing {os.path.basename(directory)}") as pbar:
        for file_path in files_to_process:
            process_file(file_path)
            pbar.update(1)
def create_corpus_index():
    """Create a detailed index of all processed files.

    Deduplicates by MD5 of file content: later files with a previously seen
    hash are skipped from the index. NOTE(review): duplicates are only
    skipped, not deleted from disk — confirm that is intended.
    """
    index = []
    seen_hashes = set()
    duplicate_count = 0
    for root, _, files in os.walk(OUTPUT_DIR):
        for file in files:
            if file == "corpus_index.json":
                continue
            file_path = os.path.join(root, file)
            rel_path = os.path.relpath(file_path, OUTPUT_DIR)
            file_hash = generate_file_hash(file_path)
            if file_hash in seen_hashes:
                duplicate_count += 1
                continue
            seen_hashes.add(file_hash)
            index.append({
                "path": rel_path,
                "size": os.path.getsize(file_path),
                "modified": os.path.getmtime(file_path),
                "hash": file_hash
            })
    index_path = os.path.join(OUTPUT_DIR, "corpus_index.json")
    with open(index_path, 'w', encoding='utf-8') as f:
        json.dump({
            "files": index,
            "stats": {
                "total_files": len(index),
                "duplicates_removed": duplicate_count,
                "total_size_mb": sum(f['size'] for f in index) / (1024 * 1024)
            }
        }, f, indent=2)
    logging.info(f"Created corpus index with {len(index)} files ({duplicate_count} duplicates removed)")
def merge_corpus_to_chunks(max_size_mb=500):
    """Merge corpus files into full_corpus.partN.txt chunks of ~max_size_mb.

    Files are sorted smallest-first; a new chunk is opened whenever adding
    the next file would exceed the byte budget.
    """
    chunk_num = 1
    current_chunk_size = 0
    merged_path = os.path.join(OUTPUT_DIR, f"full_corpus.part{chunk_num}.txt")
    merged_file = open(merged_path, 'w', encoding='utf-8')
    file_list = []
    # Collect all files first for proper sorting
    for root, _, files in os.walk(OUTPUT_DIR):
        for file in files:
            if file.endswith(".txt") and not file.startswith("full_corpus.part"):
                file_path = os.path.join(root, file)
                file_list.append((file_path, os.path.getsize(file_path)))
    # Sort files by size (smallest first) for better chunk distribution
    file_list.sort(key=lambda x: x[1])
    total_processed = 0
    for file_path, file_size in tqdm(file_list, desc="Merging corpus"):
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read().strip()
            if not content:
                continue
            # Budget in encoded bytes so the limit tracks on-disk size.
            content_size = len(content.encode('utf-8'))
            if current_chunk_size + content_size > max_size_mb * 1024 * 1024:
                merged_file.close()
                chunk_num += 1
                merged_path = os.path.join(OUTPUT_DIR, f"full_corpus.part{chunk_num}.txt")
                merged_file = open(merged_path, 'w', encoding='utf-8')
                current_chunk_size = 0
            # Add metadata header
            rel_path = os.path.relpath(file_path, OUTPUT_DIR)
            header = f"\n\n=== FILE: {rel_path} === SIZE: {file_size} bytes ===\n\n"
            merged_file.write(header + content)
            current_chunk_size += content_size + len(header.encode('utf-8'))
            total_processed += 1
        except Exception as e:
            logging.error(f"Skipping {file_path} (error: {str(e)})")
    merged_file.close()
    logging.info(f"\nCorpus merged into {chunk_num} chunks (max {max_size_mb}MB each)")
    logging.info(f"Total files processed: {total_processed}")
def main():
    """Main processing function with enhanced error handling.

    Processes the three source directories concurrently (one thread each,
    capped at 4 workers), then builds the index and merges chunks
    sequentially once all directory futures complete.
    """
    logging.info("Starting FreeCAD corpus processing...")
    setup_directories()
    try:
        # Process directories with progress tracking
        directories = [PDF_DIR, CORPUS_DIR, SCRIPTS_DIR]
        with concurrent.futures.ThreadPoolExecutor(max_workers=min(4, os.cpu_count() or 1)) as executor:
            futures = []
            for directory in directories:
                if os.path.exists(directory):
                    futures.append(executor.submit(process_directory, directory))
                else:
                    logging.warning(f"Directory not found: {directory}")
            # Surface per-directory failures without aborting the others.
            for future in concurrent.futures.as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    logging.error(f"Directory processing failed: {str(e)}")
        # Post-processing
        create_corpus_index()
        merge_corpus_to_chunks(max_size_mb=500)
        logging.info(f"\nProcessing complete! Files saved to: {OUTPUT_DIR}")
        logging.info(f"Corpus index: {os.path.join(OUTPUT_DIR, 'corpus_index.json')}")
        logging.info("Merged chunks: full_corpus.part*.txt")
    except KeyboardInterrupt:
        logging.warning("Processing interrupted by user")
    except Exception as e:
        logging.error(f"Fatal error in main processing: {str(e)}", exc_info=True)
        raise
if __name__ == "__main__":
    main()
```
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.