diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__init__.py b/env-llmeval/lib/python3.10/site-packages/joblib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..962a43482801e803549c949ab274974742d969b7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/__init__.py @@ -0,0 +1,148 @@ +"""Joblib is a set of tools to provide **lightweight pipelining in +Python**. In particular: + +1. transparent disk-caching of functions and lazy re-evaluation + (memoize pattern) + +2. easy simple parallel computing + +Joblib is optimized to be **fast** and **robust** on large +data in particular and has specific optimizations for `numpy` arrays. It is +**BSD-licensed**. + + + ==================== =============================================== + **Documentation:** https://joblib.readthedocs.io + + **Download:** https://pypi.python.org/pypi/joblib#downloads + + **Source code:** https://github.com/joblib/joblib + + **Report issues:** https://github.com/joblib/joblib/issues + ==================== =============================================== + + +Vision +-------- + +The vision is to provide tools to easily achieve better performance and +reproducibility when working with long running jobs. + + * **Avoid computing the same thing twice**: code is often rerun again and + again, for instance when prototyping computational-heavy jobs (as in + scientific development), but hand-crafted solutions to alleviate this + issue are error-prone and often lead to unreproducible results. + + * **Persist to disk transparently**: efficiently persisting + arbitrary objects containing large data is hard. Using + joblib's caching mechanism avoids hand-written persistence and + implicitly links the file on disk to the execution context of + the original Python object. As a result, joblib's persistence is + good for resuming an application status or computational job, eg + after a crash. + +Joblib addresses these problems while **leaving your code and your flow +control as unmodified as possible** (no framework, no new paradigms). + +Main features +------------------ + +1) **Transparent and fast disk-caching of output value:** a memoize or + make-like functionality for Python functions that works well for + arbitrary Python objects, including very large numpy arrays. Separate + persistence and flow-execution logic from domain logic or algorithmic + code by writing the operations as a set of steps with well-defined + inputs and outputs: Python functions. Joblib can save their + computation to disk and rerun it only if necessary:: + + >>> from joblib import Memory + >>> cachedir = 'your_cache_dir_goes_here' + >>> mem = Memory(cachedir) + >>> import numpy as np + >>> a = np.vander(np.arange(3)).astype(float) + >>> square = mem.cache(np.square) + >>> b = square(a) # doctest: +ELLIPSIS + ______________________________________________________________________... + [Memory] Calling square... 
+ square(array([[0., 0., 1.], + [1., 1., 1.], + [4., 2., 1.]])) + _________________________________________________...square - ...s, 0.0min + + >>> c = square(a) + >>> # The above call did not trigger an evaluation + +2) **Embarrassingly parallel helper:** to make it easy to write readable + parallel code and debug it quickly:: + + >>> from joblib import Parallel, delayed + >>> from math import sqrt + >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + + +3) **Fast compressed Persistence**: a replacement for pickle to work + efficiently on Python objects containing large data ( + *joblib.dump* & *joblib.load* ). + +.. + >>> import shutil ; shutil.rmtree(cachedir) + +""" + +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Generic release markers: +# X.Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.YaN # Alpha release +# X.YbN # Beta release +# X.YrcN # Release Candidate +# X.Y # Final release +# +# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. +# 'X.Y.dev0' is the canonical version of 'X.Y.dev' +# +__version__ = '1.4.0' + + +import os + +from .memory import Memory +from .memory import MemorizedResult +from .memory import register_store_backend +from .memory import expires_after + +from .logger import PrintTime +from .logger import Logger + +from .hashing import hash + +from .numpy_pickle import dump +from .numpy_pickle import load + +from .compressor import register_compressor + +from .parallel import Parallel +from .parallel import delayed +from .parallel import cpu_count +from .parallel import register_parallel_backend +from .parallel import parallel_backend +from .parallel import parallel_config +from .parallel import effective_n_jobs +from ._cloudpickle_wrapper import wrap_non_picklable_objects + + +__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump', + 'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs', + 'register_parallel_backend', 'parallel_backend', 'expires_after', + 'register_store_backend', 'register_compressor', + 'wrap_non_picklable_objects', 'parallel_config'] + + +# Workaround issue discovered in intel-openmp 2019.5: +# https://github.com/ContinuumIO/anaconda-issues/issues/11294 +os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee767efbd3e02cb6adbeba8d79d78b7ba60ccf70 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7aed205ade73550963334e298d5165804234209 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1c9f68574ca5988f2dc009c339e22936917a8d06 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98d6adbbbecc57daf6469686b7f4932386e09e2f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a8cdf83335d25525c19901346a9474b432c2270 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ec15351b1b7131717d04d6f28e44b3b1d29f6e4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28ee1d8cb3166c8bcec7b547ffbe7b7a40ff06c1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aeaafbd574b033f9f76c0f7f249c2347823bb36 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..632fee1361843cd0ae9ba46f72685028edc6a488 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ff04e77ea65f7e23b175c4d05e4aa3b38074c1f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a46c7c07ea87cb061dcaa62ae6001af5c65d0ab Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py b/env-llmeval/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..daf899d04ff513e1ce9c9c41871adcfd72c8fcf7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/_cloudpickle_wrapper.py @@ -0,0 +1,19 @@ +""" +Small shim of loky's cloudpickle_wrapper to avoid failure when +multiprocessing is not available. +""" + + +from ._multiprocessing_helpers import mp + + +def _my_wrap_non_picklable_objects(obj, keep_wrapper=True): + return obj + + +if mp is not None: + from .externals.loky import wrap_non_picklable_objects +else: + wrap_non_picklable_objects = _my_wrap_non_picklable_objects + +__all__ = ["wrap_non_picklable_objects"] diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/_dask.py b/env-llmeval/lib/python3.10/site-packages/joblib/_dask.py new file mode 100644 index 0000000000000000000000000000000000000000..4288ed05cd9290a1db19044ea9ceb4ec28974082 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/_dask.py @@ -0,0 +1,379 @@ +from __future__ import print_function, division, absolute_import + +import asyncio +import concurrent.futures +import contextlib + +import time +from uuid import uuid4 +import weakref + +from .parallel import parallel_config +from .parallel import AutoBatchingMixin, ParallelBackendBase + +from ._utils import ( + _TracebackCapturingWrapper, + _retrieve_traceback_capturing_wrapped_call +) + +try: + import dask + import distributed +except ImportError: + dask = None + distributed = None + +if dask is not None and distributed is not None: + from dask.utils import funcname + from dask.sizeof import sizeof + from dask.distributed import ( + Client, + as_completed, + get_client, + secede, + rejoin, + ) + from distributed.utils import thread_state + + try: + # asyncio.TimeoutError, Python3-only error thrown by recent versions of + # distributed + from distributed.utils import TimeoutError as _TimeoutError + except ImportError: + from tornado.gen import TimeoutError as _TimeoutError + + +def is_weakrefable(obj): + try: + weakref.ref(obj) + return True + except TypeError: + return False + + +class _WeakKeyDictionary: + """A variant of weakref.WeakKeyDictionary for unhashable objects. + + This datastructure is used to store futures for broadcasted data objects + such as large numpy arrays or pandas dataframes that are not hashable and + therefore cannot be used as keys of traditional python dicts. + + Furthermore using a dict with id(array) as key is not safe because the + Python is likely to reuse id of recently collected arrays. + """ + + def __init__(self): + self._data = {} + + def __getitem__(self, obj): + ref, val = self._data[id(obj)] + if ref() is not obj: + # In case of a race condition with on_destroy. + raise KeyError(obj) + return val + + def __setitem__(self, obj, value): + key = id(obj) + try: + ref, _ = self._data[key] + if ref() is not obj: + # In case of race condition with on_destroy. + raise KeyError(obj) + except KeyError: + # Insert the new entry in the mapping along with a weakref + # callback to automatically delete the entry from the mapping + # as soon as the object used as key is garbage collected. 
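+            # Note (descriptive comment): the weakref callback receives the
+            # dead weakref object itself; only the captured ``key`` (the id
+            # of ``obj``) is needed to drop the stale entry from ``self._data``.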
+ def on_destroy(_): + del self._data[key] + ref = weakref.ref(obj, on_destroy) + self._data[key] = ref, value + + def __len__(self): + return len(self._data) + + def clear(self): + self._data.clear() + + +def _funcname(x): + try: + if isinstance(x, list): + x = x[0][0] + except Exception: + pass + return funcname(x) + + +def _make_tasks_summary(tasks): + """Summarize of list of (func, args, kwargs) function calls""" + unique_funcs = {func for func, args, kwargs in tasks} + + if len(unique_funcs) == 1: + mixed = False + else: + mixed = True + return len(tasks), mixed, _funcname(tasks) + + +class Batch: + """dask-compatible wrapper that executes a batch of tasks""" + def __init__(self, tasks): + # collect some metadata from the tasks to ease Batch calls + # introspection when debugging + self._num_tasks, self._mixed, self._funcname = _make_tasks_summary( + tasks + ) + + def __call__(self, tasks=None): + results = [] + with parallel_config(backend='dask'): + for func, args, kwargs in tasks: + results.append(func(*args, **kwargs)) + return results + + def __repr__(self): + descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls" + if self._mixed: + descr = "mixed_" + descr + return descr + + +def _joblib_probe_task(): + # Noop used by the joblib connector to probe when workers are ready. + pass + + +class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase): + MIN_IDEAL_BATCH_DURATION = 0.2 + MAX_IDEAL_BATCH_DURATION = 1.0 + supports_retrieve_callback = True + default_n_jobs = -1 + + def __init__(self, scheduler_host=None, scatter=None, + client=None, loop=None, wait_for_workers_timeout=10, + **submit_kwargs): + super().__init__() + + if distributed is None: + msg = ("You are trying to use 'dask' as a joblib parallel backend " + "but dask is not installed. 
Please install dask " + "to fix this error.") + raise ValueError(msg) + + if client is None: + if scheduler_host: + client = Client(scheduler_host, loop=loop, + set_as_default=False) + else: + try: + client = get_client() + except ValueError as e: + msg = ("To use Joblib with Dask first create a Dask Client" + "\n\n" + " from dask.distributed import Client\n" + " client = Client()\n" + "or\n" + " client = Client('scheduler-address:8786')") + raise ValueError(msg) from e + + self.client = client + + if scatter is not None and not isinstance(scatter, (list, tuple)): + raise TypeError("scatter must be a list/tuple, got " + "`%s`" % type(scatter).__name__) + + if scatter is not None and len(scatter) > 0: + # Keep a reference to the scattered data to keep the ids the same + self._scatter = list(scatter) + scattered = self.client.scatter(scatter, broadcast=True) + self.data_futures = {id(x): f for x, f in zip(scatter, scattered)} + else: + self._scatter = [] + self.data_futures = {} + self.wait_for_workers_timeout = wait_for_workers_timeout + self.submit_kwargs = submit_kwargs + self.waiting_futures = as_completed( + [], + loop=client.loop, + with_results=True, + raise_errors=False + ) + self._results = {} + self._callbacks = {} + + async def _collect(self): + while self._continue: + async for future, result in self.waiting_futures: + cf_future = self._results.pop(future) + callback = self._callbacks.pop(future) + if future.status == "error": + typ, exc, tb = result + cf_future.set_exception(exc) + else: + cf_future.set_result(result) + callback(result) + await asyncio.sleep(0.01) + + def __reduce__(self): + return (DaskDistributedBackend, ()) + + def get_nested_backend(self): + return DaskDistributedBackend(client=self.client), -1 + + def configure(self, n_jobs=1, parallel=None, **backend_args): + self.parallel = parallel + return self.effective_n_jobs(n_jobs) + + def start_call(self): + self._continue = True + self.client.loop.add_callback(self._collect) + self.call_data_futures = _WeakKeyDictionary() + + def stop_call(self): + # The explicit call to clear is required to break a cycling reference + # to the futures. + self._continue = False + # wait for the future collection routine (self._backend._collect) to + # finish in order to limit asyncio warnings due to aborting _collect + # during a following backend termination call + time.sleep(0.01) + self.call_data_futures.clear() + + def effective_n_jobs(self, n_jobs): + effective_n_jobs = sum(self.client.ncores().values()) + if effective_n_jobs != 0 or not self.wait_for_workers_timeout: + return effective_n_jobs + + # If there is no worker, schedule a probe task to wait for the workers + # to come up and be available. If the dask cluster is in adaptive mode + # task might cause the cluster to provision some workers. + try: + self.client.submit(_joblib_probe_task).result( + timeout=self.wait_for_workers_timeout + ) + except _TimeoutError as e: + error_msg = ( + "DaskDistributedBackend has no worker after {} seconds. 
" + "Make sure that workers are started and can properly connect " + "to the scheduler and increase the joblib/dask connection " + "timeout with:\n\n" + "parallel_config(backend='dask', wait_for_workers_timeout={})" + ).format(self.wait_for_workers_timeout, + max(10, 2 * self.wait_for_workers_timeout)) + raise TimeoutError(error_msg) from e + return sum(self.client.ncores().values()) + + async def _to_func_args(self, func): + itemgetters = dict() + + # Futures that are dynamically generated during a single call to + # Parallel.__call__. + call_data_futures = getattr(self, 'call_data_futures', None) + + async def maybe_to_futures(args): + out = [] + for arg in args: + arg_id = id(arg) + if arg_id in itemgetters: + out.append(itemgetters[arg_id]) + continue + + f = self.data_futures.get(arg_id, None) + if f is None and call_data_futures is not None: + try: + f = await call_data_futures[arg] + except KeyError: + pass + if f is None: + if is_weakrefable(arg) and sizeof(arg) > 1e3: + # Automatically scatter large objects to some of + # the workers to avoid duplicated data transfers. + # Rely on automated inter-worker data stealing if + # more workers need to reuse this data + # concurrently. + # set hash=False - nested scatter calls (i.e + # calling client.scatter inside a dask worker) + # using hash=True often raise CancelledError, + # see dask/distributed#3703 + _coro = self.client.scatter( + arg, + asynchronous=True, + hash=False + ) + # Centralize the scattering of identical arguments + # between concurrent apply_async callbacks by + # exposing the running coroutine in + # call_data_futures before it completes. + t = asyncio.Task(_coro) + call_data_futures[arg] = t + + f = await t + + if f is not None: + out.append(f) + else: + out.append(arg) + return out + + tasks = [] + for f, args, kwargs in func.items: + args = list(await maybe_to_futures(args)) + kwargs = dict(zip(kwargs.keys(), + await maybe_to_futures(kwargs.values()))) + tasks.append((f, args, kwargs)) + + return (Batch(tasks), tasks) + + def apply_async(self, func, callback=None): + + cf_future = concurrent.futures.Future() + cf_future.get = cf_future.result # achieve AsyncResult API + + async def f(func, callback): + batch, tasks = await self._to_func_args(func) + key = f'{repr(batch)}-{uuid4().hex}' + + dask_future = self.client.submit( + _TracebackCapturingWrapper(batch), + tasks=tasks, + key=key, + **self.submit_kwargs + ) + self.waiting_futures.add(dask_future) + self._callbacks[dask_future] = callback + self._results[dask_future] = cf_future + + self.client.loop.add_callback(f, func, callback) + + return cf_future + + def retrieve_result_callback(self, out): + return _retrieve_traceback_capturing_wrapped_call(out) + + def abort_everything(self, ensure_ready=True): + """ Tell the client to cancel any task submitted via this instance + + joblib.Parallel will never access those results + """ + with self.waiting_futures.lock: + self.waiting_futures.futures.clear() + while not self.waiting_futures.queue.empty(): + self.waiting_futures.queue.get() + + @contextlib.contextmanager + def retrieval_context(self): + """Override ParallelBackendBase.retrieval_context to avoid deadlocks. + + This removes thread from the worker's thread pool (using 'secede'). + Seceding avoids deadlock in nested parallelism settings. + """ + # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how + # this is used. + if hasattr(thread_state, 'execution_state'): + # we are in a worker. Secede to avoid deadlock. 
+ secede() + + yield + + if hasattr(thread_state, 'execution_state'): + rejoin() diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/_memmapping_reducer.py b/env-llmeval/lib/python3.10/site-packages/joblib/_memmapping_reducer.py new file mode 100644 index 0000000000000000000000000000000000000000..13f5c4a17ef0794dc965ca3e3a3ef216125e2946 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/_memmapping_reducer.py @@ -0,0 +1,657 @@ +""" +Reducer using memory mapping for numpy arrays +""" +# Author: Thomas Moreau +# Copyright: 2017, Thomas Moreau +# License: BSD 3 clause + +from mmap import mmap +import errno +import os +import stat +import threading +import atexit +import tempfile +import time +import warnings +import weakref +from uuid import uuid4 +from multiprocessing import util + +from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError + +try: + WindowsError +except NameError: + WindowsError = type(None) + +try: + import numpy as np + from numpy.lib.stride_tricks import as_strided +except ImportError: + np = None + +from .numpy_pickle import dump, load, load_temporary_memmap +from .backports import make_memmap +from .disk import delete_folder +from .externals.loky.backend import resource_tracker + +# Some system have a ramdisk mounted by default, we can use it instead of /tmp +# as the default folder to dump big arrays to share with subprocesses. +SYSTEM_SHARED_MEM_FS = '/dev/shm' + +# Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using +# it as the default folder to dump big arrays to share with subprocesses. +SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9) + +# Folder and file permissions to chmod temporary files generated by the +# memmapping pool. Only the owner of the Python process can access the +# temporary files and folder. +FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR +FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR + +# Set used in joblib workers, referencing the filenames of temporary memmaps +# created by joblib to speed up data communication. In child processes, we add +# a finalizer to these memmaps that sends a maybe_unlink call to the +# resource_tracker, in order to free main memory as fast as possible. +JOBLIB_MMAPS = set() + + +def _log_and_unlink(filename): + from .externals.loky.backend.resource_tracker import _resource_tracker + util.debug( + "[FINALIZER CALL] object mapping to {} about to be deleted," + " decrementing the refcount of the file (pid: {})".format( + os.path.basename(filename), os.getpid())) + _resource_tracker.maybe_unlink(filename, "file") + + +def add_maybe_unlink_finalizer(memmap): + util.debug( + "[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid {})" + "".format(type(memmap), id(memmap), os.path.basename(memmap.filename), + os.getpid())) + weakref.finalize(memmap, _log_and_unlink, memmap.filename) + + +def unlink_file(filename): + """Wrapper around os.unlink with a retry mechanism. + + The retry mechanism has been implemented primarily to overcome a race + condition happening during the finalizer of a np.memmap: when a process + holding the last reference to a mmap-backed np.memmap/np.array is about to + delete this array (and close the reference), it sends a maybe_unlink + request to the resource_tracker. This request can be processed faster than + it takes for the last reference of the memmap to be closed, yielding (on + Windows) a PermissionError in the resource_tracker loop. 
+ """ + NUM_RETRIES = 10 + for retry_no in range(1, NUM_RETRIES + 1): + try: + os.unlink(filename) + break + except PermissionError: + util.debug( + '[ResourceTracker] tried to unlink {}, got ' + 'PermissionError'.format(filename) + ) + if retry_no == NUM_RETRIES: + raise + else: + time.sleep(.2) + except FileNotFoundError: + # In case of a race condition when deleting the temporary folder, + # avoid noisy FileNotFoundError exception in the resource tracker. + pass + + +resource_tracker._CLEANUP_FUNCS['file'] = unlink_file + + +class _WeakArrayKeyMap: + """A variant of weakref.WeakKeyDictionary for unhashable numpy arrays. + + This datastructure will be used with numpy arrays as obj keys, therefore we + do not use the __get__ / __set__ methods to avoid any conflict with the + numpy fancy indexing syntax. + """ + + def __init__(self): + self._data = {} + + def get(self, obj): + ref, val = self._data[id(obj)] + if ref() is not obj: + # In case of race condition with on_destroy: could never be + # triggered by the joblib tests with CPython. + raise KeyError(obj) + return val + + def set(self, obj, value): + key = id(obj) + try: + ref, _ = self._data[key] + if ref() is not obj: + # In case of race condition with on_destroy: could never be + # triggered by the joblib tests with CPython. + raise KeyError(obj) + except KeyError: + # Insert the new entry in the mapping along with a weakref + # callback to automatically delete the entry from the mapping + # as soon as the object used as key is garbage collected. + def on_destroy(_): + del self._data[key] + ref = weakref.ref(obj, on_destroy) + self._data[key] = ref, value + + def __getstate__(self): + raise PicklingError("_WeakArrayKeyMap is not pickleable") + + +############################################################################### +# Support for efficient transient pickling of numpy data structures + + +def _get_backing_memmap(a): + """Recursively look up the original np.memmap instance base if any.""" + b = getattr(a, 'base', None) + if b is None: + # TODO: check scipy sparse datastructure if scipy is installed + # a nor its descendants do not have a memmap base + return None + + elif isinstance(b, mmap): + # a is already a real memmap instance. + return a + + else: + # Recursive exploration of the base ancestry + return _get_backing_memmap(b) + + +def _get_temp_dir(pool_folder_name, temp_folder=None): + """Get the full path to a subfolder inside the temporary folder. + + Parameters + ---------- + pool_folder_name : str + Sub-folder name used for the serialization of a pool instance. + + temp_folder: str, optional + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + + - a folder pointed by the JOBLIB_TEMP_FOLDER environment + variable, + - /dev/shm if the folder exists and is writable: this is a + RAMdisk filesystem available by default on modern Linux + distributions, + - the default system temporary folder that can be + overridden with TMP, TMPDIR or TEMP environment + variables, typically /tmp under Unix operating systems. + + Returns + ------- + pool_folder : str + full path to the temporary folder + use_shared_mem : bool + whether the temporary folder is written to the system shared memory + folder or some other temporary folder. 
+ """ + use_shared_mem = False + if temp_folder is None: + temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None) + if temp_folder is None: + if os.path.exists(SYSTEM_SHARED_MEM_FS) and hasattr(os, 'statvfs'): + try: + shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS) + available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail + if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE: + # Try to see if we have write access to the shared mem + # folder only if it is reasonably large (that is 2GB or + # more). + temp_folder = SYSTEM_SHARED_MEM_FS + pool_folder = os.path.join(temp_folder, pool_folder_name) + if not os.path.exists(pool_folder): + os.makedirs(pool_folder) + use_shared_mem = True + except (IOError, OSError): + # Missing rights in the /dev/shm partition, fallback to regular + # temp folder. + temp_folder = None + if temp_folder is None: + # Fallback to the default tmp folder, typically /tmp + temp_folder = tempfile.gettempdir() + temp_folder = os.path.abspath(os.path.expanduser(temp_folder)) + pool_folder = os.path.join(temp_folder, pool_folder_name) + return pool_folder, use_shared_mem + + +def has_shareable_memory(a): + """Return True if a is backed by some mmap buffer directly or not.""" + return _get_backing_memmap(a) is not None + + +def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides, + total_buffer_len, unlink_on_gc_collect): + """Reconstruct an array view on a memory mapped file.""" + if mode == 'w+': + # Do not zero the original data when unpickling + mode = 'r+' + + if strides is None: + # Simple, contiguous memmap + return make_memmap( + filename, dtype=dtype, shape=shape, mode=mode, offset=offset, + order=order, unlink_on_gc_collect=unlink_on_gc_collect + ) + else: + # For non-contiguous data, memmap the total enclosing buffer and then + # extract the non-contiguous view with the stride-tricks API + base = make_memmap( + filename, dtype=dtype, shape=total_buffer_len, offset=offset, + mode=mode, order=order, unlink_on_gc_collect=unlink_on_gc_collect + ) + return as_strided(base, shape=shape, strides=strides) + + +def _reduce_memmap_backed(a, m): + """Pickling reduction for memmap backed arrays. + + a is expected to be an instance of np.ndarray (or np.memmap) + m is expected to be an instance of np.memmap on the top of the ``base`` + attribute ancestry of a. ``m.base`` should be the real python mmap object. + """ + # offset that comes from the striding differences between a and m + util.debug('[MEMMAP REDUCE] reducing a memmap-backed array ' + '(shape, {}, pid: {})'.format(a.shape, os.getpid())) + try: + from numpy.lib.array_utils import byte_bounds + except (ModuleNotFoundError, ImportError): + # Backward-compat for numpy < 2.0 + from numpy import byte_bounds + a_start, a_end = byte_bounds(a) + m_start = byte_bounds(m)[0] + offset = a_start - m_start + + # offset from the backing memmap + offset += m.offset + + if m.flags['F_CONTIGUOUS']: + order = 'F' + else: + # The backing memmap buffer is necessarily contiguous hence C if not + # Fortran + order = 'C' + + if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']: + # If the array is a contiguous view, no need to pass the strides + strides = None + total_buffer_len = None + else: + # Compute the total number of items to map from which the strided + # view will be extracted. 
+ strides = a.strides + total_buffer_len = (a_end - a_start) // a.itemsize + + return (_strided_from_memmap, + (m.filename, a.dtype, m.mode, offset, order, a.shape, strides, + total_buffer_len, False)) + + +def reduce_array_memmap_backward(a): + """reduce a np.array or a np.memmap from a child process""" + m = _get_backing_memmap(a) + if isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS: + # if a is backed by a memmaped file, reconstruct a using the + # memmaped file. + return _reduce_memmap_backed(a, m) + else: + # a is either a regular (not memmap-backed) numpy array, or an array + # backed by a shared temporary file created by joblib. In the latter + # case, in order to limit the lifespan of these temporary files, we + # serialize the memmap as a regular numpy array, and decref the + # file backing the memmap (done implicitly in a previously registered + # finalizer, see ``unlink_on_gc_collect`` for more details) + return ( + loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL), ) + ) + + +class ArrayMemmapForwardReducer(object): + """Reducer callable to dump large arrays to memmap files. + + Parameters + ---------- + max_nbytes: int + Threshold to trigger memmapping of large arrays to files created + a folder. + temp_folder_resolver: callable + An callable in charge of resolving a temporary folder name where files + for backing memmapped arrays are created. + mmap_mode: 'r', 'r+' or 'c' + Mode for the created memmap datastructure. See the documentation of + numpy.memmap for more details. Note: 'w+' is coerced to 'r+' + automatically to avoid zeroing the data on unpickling. + verbose: int, optional, 0 by default + If verbose > 0, memmap creations are logged. + If verbose > 1, both memmap creations, reuse and array pickling are + logged. + prewarm: bool, optional, False by default. + Force a read on newly memmapped array to make sure that OS pre-cache it + memory. This can be useful to avoid concurrent disk access when the + same data array is passed to different worker processes. + """ + + def __init__(self, max_nbytes, temp_folder_resolver, mmap_mode, + unlink_on_gc_collect, verbose=0, prewarm=True): + self._max_nbytes = max_nbytes + self._temp_folder_resolver = temp_folder_resolver + self._mmap_mode = mmap_mode + self.verbose = int(verbose) + if prewarm == "auto": + self._prewarm = not self._temp_folder.startswith( + SYSTEM_SHARED_MEM_FS + ) + else: + self._prewarm = prewarm + self._prewarm = prewarm + self._memmaped_arrays = _WeakArrayKeyMap() + self._temporary_memmaped_filenames = set() + self._unlink_on_gc_collect = unlink_on_gc_collect + + @property + def _temp_folder(self): + return self._temp_folder_resolver() + + def __reduce__(self): + # The ArrayMemmapForwardReducer is passed to the children processes: it + # needs to be pickled but the _WeakArrayKeyMap need to be skipped as + # it's only guaranteed to be consistent with the parent process memory + # garbage collection. + # Although this reducer is pickled, it is not needed in its destination + # process (child processes), as we only use this reducer to send + # memmaps from the parent process to the children processes. For this + # reason, we can afford skipping the resolver, (which would otherwise + # be unpicklable), and pass it as None instead. 
+ args = (self._max_nbytes, None, self._mmap_mode, + self._unlink_on_gc_collect) + kwargs = { + 'verbose': self.verbose, + 'prewarm': self._prewarm, + } + return ArrayMemmapForwardReducer, args, kwargs + + def __call__(self, a): + m = _get_backing_memmap(a) + if m is not None and isinstance(m, np.memmap): + # a is already backed by a memmap file, let's reuse it directly + return _reduce_memmap_backed(a, m) + + if (not a.dtype.hasobject and self._max_nbytes is not None and + a.nbytes > self._max_nbytes): + # check that the folder exists (lazily create the pool temp folder + # if required) + try: + os.makedirs(self._temp_folder) + os.chmod(self._temp_folder, FOLDER_PERMISSIONS) + except OSError as e: + if e.errno != errno.EEXIST: + raise e + + try: + basename = self._memmaped_arrays.get(a) + except KeyError: + # Generate a new unique random filename. The process and thread + # ids are only useful for debugging purpose and to make it + # easier to cleanup orphaned files in case of hard process + # kill (e.g. by "kill -9" or segfault). + basename = "{}-{}-{}.pkl".format( + os.getpid(), id(threading.current_thread()), uuid4().hex) + self._memmaped_arrays.set(a, basename) + filename = os.path.join(self._temp_folder, basename) + + # In case the same array with the same content is passed several + # times to the pool subprocess children, serialize it only once + + is_new_memmap = filename not in self._temporary_memmaped_filenames + + # add the memmap to the list of temporary memmaps created by joblib + self._temporary_memmaped_filenames.add(filename) + + if self._unlink_on_gc_collect: + # Bump reference count of the memmap by 1 to account for + # shared usage of the memmap by a child process. The + # corresponding decref call will be executed upon calling + # resource_tracker.maybe_unlink, registered as a finalizer in + # the child. + # the incref/decref calls here are only possible when the child + # and the parent share the same resource_tracker. It is not the + # case for the multiprocessing backend, but it does not matter + # because unlinking a memmap from a child process is only + # useful to control the memory usage of long-lasting child + # processes, while the multiprocessing-based pools terminate + # their workers at the end of a map() call. + resource_tracker.register(filename, "file") + + if is_new_memmap: + # Incref each temporary memmap created by joblib one extra + # time. This means that these memmaps will only be deleted + # once an extra maybe_unlink() is called, which is done once + # all the jobs have completed (or been canceled) in the + # Parallel._terminate_backend() method. + resource_tracker.register(filename, "file") + + if not os.path.exists(filename): + util.debug( + "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) " + "creating a new memmap at {}".format( + a.shape, a.dtype, filename)) + for dumped_filename in dump(a, filename): + os.chmod(dumped_filename, FILE_PERMISSIONS) + + if self._prewarm: + # Warm up the data by accessing it. This operation ensures + # that the disk access required to create the memmapping + # file are performed in the reducing process and avoids + # concurrent memmap creation in multiple children + # processes. 
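+                    # .max() touches every element, so the whole memmap is
+                    # read once here in the parent (reducing) process.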
+ load(filename, mmap_mode=self._mmap_mode).max() + + else: + util.debug( + "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) " + "reusing memmap file: {}".format( + a.shape, a.dtype, os.path.basename(filename))) + + # The worker process will use joblib.load to memmap the data + return ( + (load_temporary_memmap, (filename, self._mmap_mode, + self._unlink_on_gc_collect)) + ) + else: + # do not convert a into memmap, let pickler do its usual copy with + # the default system pickler + util.debug( + '[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, ' + ' dtype={}).'.format(a.shape, a.dtype)) + return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),)) + + +def get_memmapping_reducers( + forward_reducers=None, backward_reducers=None, + temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0, + prewarm=False, unlink_on_gc_collect=True, **kwargs): + """Construct a pair of memmapping reducer linked to a tmpdir. + + This function manage the creation and the clean up of the temporary folders + underlying the memory maps and should be use to get the reducers necessary + to construct joblib pool or executor. + """ + if forward_reducers is None: + forward_reducers = dict() + if backward_reducers is None: + backward_reducers = dict() + + if np is not None: + # Register smart numpy.ndarray reducers that detects memmap backed + # arrays and that is also able to dump to memmap large in-memory + # arrays over the max_nbytes threshold + forward_reduce_ndarray = ArrayMemmapForwardReducer( + max_nbytes, temp_folder_resolver, mmap_mode, unlink_on_gc_collect, + verbose, prewarm=prewarm) + forward_reducers[np.ndarray] = forward_reduce_ndarray + forward_reducers[np.memmap] = forward_reduce_ndarray + + # Communication from child process to the parent process always + # pickles in-memory numpy.ndarray without dumping them as memmap + # to avoid confusing the caller and make it tricky to collect the + # temporary folder + backward_reducers[np.ndarray] = reduce_array_memmap_backward + backward_reducers[np.memmap] = reduce_array_memmap_backward + + return forward_reducers, backward_reducers + + +class TemporaryResourcesManager(object): + """Stateful object able to manage temporary folder and pickles + + It exposes: + - a per-context folder name resolving API that memmap-based reducers will + rely on to know where to pickle the temporary memmaps + - a temporary file/folder management API that internally uses the + resource_tracker. + """ + + def __init__(self, temp_folder_root=None, context_id=None): + self._current_temp_folder = None + self._temp_folder_root = temp_folder_root + self._use_shared_mem = None + self._cached_temp_folders = dict() + self._id = uuid4().hex + self._finalizers = {} + if context_id is None: + # It would be safer to not assign a default context id (less silent + # bugs), but doing this while maintaining backward compatibility + # with the previous, context-unaware version get_memmaping_executor + # exposes too many low-level details. + context_id = uuid4().hex + self.set_current_context(context_id) + + def set_current_context(self, context_id): + self._current_context_id = context_id + self.register_new_context(context_id) + + def register_new_context(self, context_id): + # Prepare a sub-folder name specific to a context (usually a unique id + # generated by each instance of the Parallel class). Do not create in + # advance to spare FS write access if no array is to be dumped). 
+ if context_id in self._cached_temp_folders: + return + else: + # During its lifecycle, one Parallel object can have several + # executors associated to it (for instance, if a loky worker raises + # an exception, joblib shutdowns the executor and instantly + # recreates a new one before raising the error - see + # ``ensure_ready``. Because we don't want two executors tied to + # the same Parallel object (and thus the same context id) to + # register/use/delete the same folder, we also add an id specific + # to the current Manager (and thus specific to its associated + # executor) to the folder name. + new_folder_name = ( + "joblib_memmapping_folder_{}_{}_{}".format( + os.getpid(), self._id, context_id) + ) + new_folder_path, _ = _get_temp_dir( + new_folder_name, self._temp_folder_root + ) + self.register_folder_finalizer(new_folder_path, context_id) + self._cached_temp_folders[context_id] = new_folder_path + + def resolve_temp_folder_name(self): + """Return a folder name specific to the currently activated context""" + return self._cached_temp_folders[self._current_context_id] + + # resource management API + + def register_folder_finalizer(self, pool_subfolder, context_id): + # Register the garbage collector at program exit in case caller forgets + # to call terminate explicitly: note we do not pass any reference to + # ensure that this callback won't prevent garbage collection of + # parallel instance and related file handler resources such as POSIX + # semaphores and pipes + pool_module_name = whichmodule(delete_folder, 'delete_folder') + resource_tracker.register(pool_subfolder, "folder") + + def _cleanup(): + # In some cases the Python runtime seems to set delete_folder to + # None just before exiting when accessing the delete_folder + # function from the closure namespace. So instead we reimport + # the delete_folder function explicitly. + # https://github.com/joblib/joblib/issues/328 + # We cannot just use from 'joblib.pool import delete_folder' + # because joblib should only use relative imports to allow + # easy vendoring. + delete_folder = __import__( + pool_module_name, fromlist=['delete_folder'] + ).delete_folder + try: + delete_folder(pool_subfolder, allow_non_empty=True) + resource_tracker.unregister(pool_subfolder, "folder") + except OSError: + warnings.warn("Failed to delete temporary folder: {}" + .format(pool_subfolder)) + + self._finalizers[context_id] = atexit.register(_cleanup) + + def _clean_temporary_resources(self, context_id=None, force=False, + allow_non_empty=False): + """Clean temporary resources created by a process-based pool""" + if context_id is None: + # Iterates over a copy of the cache keys to avoid Error due to + # iterating over a changing size dictionary. + for context_id in list(self._cached_temp_folders): + self._clean_temporary_resources( + context_id, force=force, allow_non_empty=allow_non_empty + ) + else: + temp_folder = self._cached_temp_folders.get(context_id) + if temp_folder and os.path.exists(temp_folder): + for filename in os.listdir(temp_folder): + if force: + # Some workers have failed and the ref counted might + # be off. The workers should have shut down by this + # time so forcefully clean up the files. + resource_tracker.unregister( + os.path.join(temp_folder, filename), "file" + ) + else: + resource_tracker.maybe_unlink( + os.path.join(temp_folder, filename), "file" + ) + + # When forcing clean-up, try to delete the folder even if some + # files are still in it. 
Otherwise, try to delete the folder + allow_non_empty |= force + + # Clean up the folder if possible, either if it is empty or + # if none of the files in it are in used and allow_non_empty. + try: + delete_folder( + temp_folder, allow_non_empty=allow_non_empty + ) + # Forget the folder once it has been deleted + self._cached_temp_folders.pop(context_id, None) + resource_tracker.unregister(temp_folder, "folder") + + # Also cancel the finalizers that gets triggered at gc. + finalizer = self._finalizers.pop(context_id, None) + if finalizer is not None: + atexit.unregister(finalizer) + + except OSError: + # Temporary folder cannot be deleted right now. + # This folder will be cleaned up by an atexit + # finalizer registered by the memmapping_reducer. + pass diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py b/env-llmeval/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..bde4bc1905311cdc4cd337e9b72e3b24f50a3ed5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/_multiprocessing_helpers.py @@ -0,0 +1,53 @@ +"""Helper module to factorize the conditional multiprocessing import logic + +We use a distinct module to simplify import statements and avoid introducing +circular dependencies (for instance for the assert_spawning name). +""" +import os +import warnings + + +# Obtain possible configuration from the environment, assuming 1 (on) +# by default, upon 0 set to None. Should instructively fail if some non +# 0/1 value is set. +mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None +if mp: + try: + import multiprocessing as mp + import _multiprocessing # noqa + except ImportError: + mp = None + +# 2nd stage: validate that locking is available on the system and +# issue a warning if not +if mp is not None: + try: + # try to create a named semaphore using SemLock to make sure they are + # available on this platform. We use the low level object + # _multiprocessing.SemLock to avoid spawning a resource tracker on + # Unix system or changing the default backend. + import tempfile + from _multiprocessing import SemLock + + _rand = tempfile._RandomNameSequence() + for i in range(100): + try: + name = '/joblib-{}-{}' .format( + os.getpid(), next(_rand)) + _sem = SemLock(0, 0, 1, name=name, unlink=True) + del _sem # cleanup + break + except FileExistsError as e: # pragma: no cover + if i >= 99: + raise FileExistsError( + 'cannot find name for semaphore') from e + except (FileExistsError, AttributeError, ImportError, OSError) as e: + mp = None + warnings.warn('%s. joblib will operate in serial mode' % (e,)) + + +# 3rd stage: backward compat for the assert_spawning helper +if mp is not None: + from multiprocessing.context import assert_spawning +else: + assert_spawning = None diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/_parallel_backends.py b/env-llmeval/lib/python3.10/site-packages/joblib/_parallel_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..8201c96bcf61b5c6e61821e7073a0babf6510268 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/_parallel_backends.py @@ -0,0 +1,649 @@ +""" +Backends for embarrassingly parallel code. 
+""" + +import gc +import os +import warnings +import threading +import contextlib +from abc import ABCMeta, abstractmethod + +from ._utils import ( + _TracebackCapturingWrapper, + _retrieve_traceback_capturing_wrapped_call +) + +from ._multiprocessing_helpers import mp + +if mp is not None: + from .pool import MemmappingPool + from multiprocessing.pool import ThreadPool + from .executor import get_memmapping_executor + + # Import loky only if multiprocessing is present + from .externals.loky import process_executor, cpu_count + from .externals.loky.process_executor import ShutdownExecutorError + + +class ParallelBackendBase(metaclass=ABCMeta): + """Helper abc which defines all methods a ParallelBackend must implement""" + + supports_inner_max_num_threads = False + supports_retrieve_callback = False + default_n_jobs = 1 + + @property + def supports_return_generator(self): + return self.supports_retrieve_callback + + @property + def supports_timeout(self): + return self.supports_retrieve_callback + + nesting_level = None + + def __init__(self, nesting_level=None, inner_max_num_threads=None, + **kwargs): + super().__init__(**kwargs) + self.nesting_level = nesting_level + self.inner_max_num_threads = inner_max_num_threads + + MAX_NUM_THREADS_VARS = [ + 'OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS', + 'BLIS_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS', 'NUMBA_NUM_THREADS', + 'NUMEXPR_NUM_THREADS', + ] + + TBB_ENABLE_IPC_VAR = "ENABLE_IPC" + + @abstractmethod + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs that can actually run in parallel + + n_jobs is the number of workers requested by the callers. Passing + n_jobs=-1 means requesting all available workers for instance matching + the number of CPU cores on the worker host(s). + + This method should return a guesstimate of the number of workers that + can actually perform work concurrently. The primary use case is to make + it possible for the caller to know in how many chunks to slice the + work. + + In general working on larger data chunks is more efficient (less + scheduling overhead and better use of CPU cache prefetching heuristics) + as long as all the workers have enough work to do. + """ + + @abstractmethod + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + + def retrieve_result_callback(self, out): + """Called within the callback function passed in apply_async. + + The argument of this function is the argument given to a callback in + the considered backend. It is supposed to return the outcome of a task + if it succeeded or raise the exception if it failed. + """ + + def configure(self, n_jobs=1, parallel=None, prefer=None, require=None, + **backend_args): + """Reconfigure the backend and return the number of workers. + + This makes it possible to reuse an existing backend instance for + successive independent calls to Parallel with different parameters. 
+ """ + self.parallel = parallel + return self.effective_n_jobs(n_jobs) + + def start_call(self): + """Call-back method called at the beginning of a Parallel call""" + + def stop_call(self): + """Call-back method called at the end of a Parallel call""" + + def terminate(self): + """Shutdown the workers and free the shared memory.""" + + def compute_batch_size(self): + """Determine the optimal batch size""" + return 1 + + def batch_completed(self, batch_size, duration): + """Callback indicate how long it took to run a batch""" + + def get_exceptions(self): + """List of exception types to be captured.""" + return [] + + def abort_everything(self, ensure_ready=True): + """Abort any running tasks + + This is called when an exception has been raised when executing a task + and all the remaining tasks will be ignored and can therefore be + aborted to spare computation resources. + + If ensure_ready is True, the backend should be left in an operating + state as future tasks might be re-submitted via that same backend + instance. + + If ensure_ready is False, the implementer of this method can decide + to leave the backend in a closed / terminated state as no new task + are expected to be submitted to this backend. + + Setting ensure_ready to False is an optimization that can be leveraged + when aborting tasks via killing processes from a local process pool + managed by the backend it-self: if we expect no new tasks, there is no + point in re-creating new workers. + """ + # Does nothing by default: to be overridden in subclasses when + # canceling tasks is possible. + pass + + def get_nested_backend(self): + """Backend instance to be used by nested Parallel calls. + + By default a thread-based backend is used for the first level of + nesting. Beyond, switch to sequential backend to avoid spawning too + many threads on the host. + """ + nesting_level = getattr(self, 'nesting_level', 0) + 1 + if nesting_level > 1: + return SequentialBackend(nesting_level=nesting_level), None + else: + return ThreadingBackend(nesting_level=nesting_level), None + + @contextlib.contextmanager + def retrieval_context(self): + """Context manager to manage an execution context. + + Calls to Parallel.retrieve will be made inside this context. + + By default, this does nothing. It may be useful for subclasses to + handle nested parallelism. In particular, it may be required to avoid + deadlocks if a backend manages a fixed number of workers, when those + workers may be asked to do nested Parallel calls. Without + 'retrieval_context' this could lead to deadlock, as all the workers + managed by the backend may be "busy" waiting for the nested parallel + calls to finish, but the backend has no free workers to execute those + tasks. + """ + yield + + def _prepare_worker_env(self, n_jobs): + """Return environment variables limiting threadpools in external libs. + + This function return a dict containing environment variables to pass + when creating a pool of process. These environment variables limit the + number of threads to `n_threads` for OpenMP, MKL, Accelerated and + OpenBLAS libraries in the child processes. + """ + explicit_n_threads = self.inner_max_num_threads + default_n_threads = max(cpu_count() // n_jobs, 1) + + # Set the inner environment variables to self.inner_max_num_threads if + # it is given. Else, default to cpu_count // n_jobs unless the variable + # is already present in the parent process environment. 
+ env = {} + for var in self.MAX_NUM_THREADS_VARS: + if explicit_n_threads is None: + var_value = os.environ.get(var, default_n_threads) + else: + var_value = explicit_n_threads + + env[var] = str(var_value) + + if self.TBB_ENABLE_IPC_VAR not in os.environ: + # To avoid over-subscription when using TBB, let the TBB schedulers + # use Inter Process Communication to coordinate: + env[self.TBB_ENABLE_IPC_VAR] = "1" + return env + + @staticmethod + def in_main_thread(): + return isinstance(threading.current_thread(), threading._MainThread) + + +class SequentialBackend(ParallelBackendBase): + """A ParallelBackend which will execute all batches sequentially. + + Does not use/create any threading objects, and hence has minimal + overhead. Used when n_jobs == 1. + """ + + uses_threads = True + supports_timeout = False + supports_retrieve_callback = False + supports_sharedmem = True + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError('n_jobs == 0 in Parallel has no meaning') + return 1 + + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + raise RuntimeError("Should never be called for SequentialBackend.") + + def retrieve_result_callback(self, out): + raise RuntimeError("Should never be called for SequentialBackend.") + + def get_nested_backend(self): + # import is not top level to avoid cyclic import errors. + from .parallel import get_active_backend + + # SequentialBackend should neither change the nesting level, the + # default backend or the number of jobs. Just return the current one. + return get_active_backend() + + +class PoolManagerMixin(object): + """A helper class for managing pool of workers.""" + + _pool = None + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError('n_jobs == 0 in Parallel has no meaning') + elif mp is None or n_jobs is None: + # multiprocessing is not available or disabled, fallback + # to sequential mode + return 1 + elif n_jobs < 0: + n_jobs = max(cpu_count() + 1 + n_jobs, 1) + return n_jobs + + def terminate(self): + """Shutdown the process or thread pool""" + if self._pool is not None: + self._pool.close() + self._pool.terminate() # terminate does a join() + self._pool = None + + def _get_pool(self): + """Used by apply_async to make it possible to implement lazy init""" + return self._pool + + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + # Here, we need a wrapper to avoid crashes on KeyboardInterruptErrors. + # We also call the callback on error, to make sure the pool does not + # wait on crashed jobs. + return self._get_pool().apply_async( + _TracebackCapturingWrapper(func), (), + callback=callback, error_callback=callback + ) + + def retrieve_result_callback(self, out): + """Mimic concurrent.futures results, raising an error if needed.""" + return _retrieve_traceback_capturing_wrapped_call(out) + + def abort_everything(self, ensure_ready=True): + """Shutdown the pool and restart a new one with the same parameters""" + self.terminate() + if ensure_ready: + self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel, + **self.parallel._backend_args) + + +class AutoBatchingMixin(object): + """A helper class for automagically batching jobs.""" + + # In seconds, should be big enough to hide multiprocessing dispatching + # overhead. 
+    # This setting was found by running benchmarks/bench_auto_batching.py
+    # with various parameters on various platforms.
+    MIN_IDEAL_BATCH_DURATION = .2
+
+    # Should not be too high to avoid stragglers: long jobs running alone
+    # on a single worker while other workers have no work to process any more.
+    MAX_IDEAL_BATCH_DURATION = 2
+
+    # Batching counters default values
+    _DEFAULT_EFFECTIVE_BATCH_SIZE = 1
+    _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
+        self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+    def compute_batch_size(self):
+        """Determine the optimal batch size"""
+        old_batch_size = self._effective_batch_size
+        batch_duration = self._smoothed_batch_duration
+        if (batch_duration > 0 and
+                batch_duration < self.MIN_IDEAL_BATCH_DURATION):
+            # The current batch size is too small: the duration of the
+            # processing of a batch of tasks is not large enough to hide
+            # the scheduling overhead.
+            ideal_batch_size = int(old_batch_size *
+                                   self.MIN_IDEAL_BATCH_DURATION /
+                                   batch_duration)
+            # Multiply by two to limit oscillations between min and max.
+            ideal_batch_size *= 2
+
+            # Don't increase the batch size too fast, to avoid huge batch
+            # sizes that could leave some workers starving.
+            batch_size = min(2 * old_batch_size, ideal_batch_size)
+
+            batch_size = max(batch_size, 1)
+
+            self._effective_batch_size = batch_size
+            if self.parallel.verbose >= 10:
+                self.parallel._print(
+                    f"Batch computation too fast ({batch_duration}s.) "
+                    f"Setting batch_size={batch_size}."
+                )
+        elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and
+              old_batch_size >= 2):
+            # The current batch size is too big. If we schedule overly long
+            # running batches, some CPUs might wait with nothing left to do
+            # while a couple of CPUs are left processing a few long running
+            # batches. Better reduce the batch size a bit to limit the
+            # likelihood of scheduling such stragglers.
+
+            # Decrease the batch size quickly to limit potential starvation.
+            ideal_batch_size = int(
+                old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
+            )
+            # Multiply by two to limit oscillations between min and max.
+            batch_size = max(2 * ideal_batch_size, 1)
+            self._effective_batch_size = batch_size
+            if self.parallel.verbose >= 10:
+                self.parallel._print(
+                    f"Batch computation too slow ({batch_duration}s.) "
+                    f"Setting batch_size={batch_size}."
+                )
+        else:
+            # No batch size adjustment
+            batch_size = old_batch_size
+
+        if batch_size != old_batch_size:
+            # Reset the estimate of the smoothed mean batch duration: this
+            # estimate is updated in the multiprocessing apply_async
+            # callback as long as the batch_size is constant. Therefore
+            # we need to reset the estimate whenever we re-tune the batch
+            # size.
+            self._smoothed_batch_duration = \
+                self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+        return batch_size
+
+    def batch_completed(self, batch_size, duration):
+        """Callback indicating how long it took to run a batch"""
+        if batch_size == self._effective_batch_size:
+            # Update the smoothed streaming estimate of the duration of a
+            # batch from dispatch to completion
+            old_duration = self._smoothed_batch_duration
+            if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
+                # First record of duration for this batch size after the last
+                # reset.
+                new_duration = duration
+            else:
+                # Update the exponentially weighted average of the duration
+                # of a batch for the current effective size.
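+                # Worked example (illustrative, not in the original source):
+                # with a previous smoothed estimate of 1.0s and a newly
+                # observed batch duration of 0.5s, the update below gives
+                # 0.8 * 1.0 + 0.2 * 0.5 = 0.9s, i.e. an exponentially
+                # weighted moving average with smoothing factor 0.2 that
+                # tracks trends while damping noise from individual batches.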
+ new_duration = 0.8 * old_duration + 0.2 * duration + self._smoothed_batch_duration = new_duration + + def reset_batch_stats(self): + """Reset batch statistics to default values. + + This avoids interferences with future jobs. + """ + self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE + self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION + + +class ThreadingBackend(PoolManagerMixin, ParallelBackendBase): + """A ParallelBackend which will use a thread pool to execute batches in. + + This is a low-overhead backend but it suffers from the Python Global + Interpreter Lock if the called function relies a lot on Python objects. + Mostly useful when the execution bottleneck is a compiled extension that + explicitly releases the GIL (for instance a Cython loop wrapped in a "with + nogil" block or an expensive call to a library such as NumPy). + + The actual thread pool is lazily initialized: the actual thread pool + construction is delayed to the first call to apply_async. + + ThreadingBackend is used as the default backend for nested calls. + """ + + supports_retrieve_callback = True + uses_threads = True + supports_sharedmem = True + + def configure(self, n_jobs=1, parallel=None, **backend_args): + """Build a process or thread pool and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + # Avoid unnecessary overhead and use sequential backend instead. + raise FallbackToBackend( + SequentialBackend(nesting_level=self.nesting_level)) + self.parallel = parallel + self._n_jobs = n_jobs + return n_jobs + + def _get_pool(self): + """Lazily initialize the thread pool + + The actual pool of worker threads is only initialized at the first + call to apply_async. + """ + if self._pool is None: + self._pool = ThreadPool(self._n_jobs) + return self._pool + + +class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin, + ParallelBackendBase): + """A ParallelBackend which will use a multiprocessing.Pool. + + Will introduce some communication and memory overhead when exchanging + input and output data with the with the worker Python processes. + However, does not suffer from the Python Global Interpreter Lock. + """ + + supports_retrieve_callback = True + supports_return_generator = False + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel. + + This also checks if we are attempting to create a nested parallel + loop. + """ + if mp is None: + return 1 + + if mp.current_process().daemon: + # Daemonic processes cannot have children + if n_jobs != 1: + if inside_dask_worker(): + msg = ( + "Inside a Dask worker with daemon=True, " + "setting n_jobs=1.\nPossible work-arounds:\n" + "- dask.config.set(" + "{'distributed.worker.daemon': False})" + "- set the environment variable " + "DASK_DISTRIBUTED__WORKER__DAEMON=False\n" + "before creating your Dask cluster." 
+ ) + else: + msg = ( + 'Multiprocessing-backed parallel loops ' + 'cannot be nested, setting n_jobs=1' + ) + warnings.warn(msg, stacklevel=3) + return 1 + + if process_executor._CURRENT_DEPTH > 0: + # Mixing loky and multiprocessing in nested loop is not supported + if n_jobs != 1: + warnings.warn( + 'Multiprocessing-backed parallel loops cannot be nested,' + ' below loky, setting n_jobs=1', + stacklevel=3) + return 1 + + elif not (self.in_main_thread() or self.nesting_level == 0): + # Prevent posix fork inside in non-main posix threads + if n_jobs != 1: + warnings.warn( + 'Multiprocessing-backed parallel loops cannot be nested' + ' below threads, setting n_jobs=1', + stacklevel=3) + return 1 + + return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs) + + def configure(self, n_jobs=1, parallel=None, prefer=None, require=None, + **memmappingpool_args): + """Build a process or thread pool and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + raise FallbackToBackend( + SequentialBackend(nesting_level=self.nesting_level)) + + # Make sure to free as much memory as possible before forking + gc.collect() + self._pool = MemmappingPool(n_jobs, **memmappingpool_args) + self.parallel = parallel + return n_jobs + + def terminate(self): + """Shutdown the process or thread pool""" + super(MultiprocessingBackend, self).terminate() + self.reset_batch_stats() + + +class LokyBackend(AutoBatchingMixin, ParallelBackendBase): + """Managing pool of workers with loky instead of multiprocessing.""" + + supports_retrieve_callback = True + supports_inner_max_num_threads = True + + def configure(self, n_jobs=1, parallel=None, prefer=None, require=None, + idle_worker_timeout=300, **memmappingexecutor_args): + """Build a process executor and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + raise FallbackToBackend( + SequentialBackend(nesting_level=self.nesting_level)) + + self._workers = get_memmapping_executor( + n_jobs, timeout=idle_worker_timeout, + env=self._prepare_worker_env(n_jobs=n_jobs), + context_id=parallel._id, **memmappingexecutor_args) + self.parallel = parallel + return n_jobs + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError('n_jobs == 0 in Parallel has no meaning') + elif mp is None or n_jobs is None: + # multiprocessing is not available or disabled, fallback + # to sequential mode + return 1 + elif mp.current_process().daemon: + # Daemonic processes cannot have children + if n_jobs != 1: + if inside_dask_worker(): + msg = ( + "Inside a Dask worker with daemon=True, " + "setting n_jobs=1.\nPossible work-arounds:\n" + "- dask.config.set(" + "{'distributed.worker.daemon': False})\n" + "- set the environment variable " + "DASK_DISTRIBUTED__WORKER__DAEMON=False\n" + "before creating your Dask cluster." 
+ ) + else: + msg = ( + 'Loky-backed parallel loops cannot be called in a' + ' multiprocessing, setting n_jobs=1' + ) + warnings.warn(msg, stacklevel=3) + + return 1 + elif not (self.in_main_thread() or self.nesting_level == 0): + # Prevent posix fork inside in non-main posix threads + if n_jobs != 1: + warnings.warn( + 'Loky-backed parallel loops cannot be nested below ' + 'threads, setting n_jobs=1', + stacklevel=3) + return 1 + elif n_jobs < 0: + n_jobs = max(cpu_count() + 1 + n_jobs, 1) + return n_jobs + + def apply_async(self, func, callback=None): + """Schedule a func to be run""" + future = self._workers.submit(func) + if callback is not None: + future.add_done_callback(callback) + return future + + def retrieve_result_callback(self, out): + try: + return out.result() + except ShutdownExecutorError: + raise RuntimeError( + "The executor underlying Parallel has been shutdown. " + "This is likely due to the garbage collection of a previous " + "generator from a call to Parallel with return_as='generator'." + " Make sure the generator is not garbage collected when " + "submitting a new job or that it is first properly exhausted." + ) + + def terminate(self): + if self._workers is not None: + # Don't terminate the workers as we want to reuse them in later + # calls, but cleanup the temporary resources that the Parallel call + # created. This 'hack' requires a private, low-level operation. + self._workers._temp_folder_manager._clean_temporary_resources( + context_id=self.parallel._id, force=False + ) + self._workers = None + + self.reset_batch_stats() + + def abort_everything(self, ensure_ready=True): + """Shutdown the workers and restart a new one with the same parameters + """ + self._workers.terminate(kill_workers=True) + self._workers = None + + if ensure_ready: + self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel) + + +class FallbackToBackend(Exception): + """Raised when configuration should fallback to another backend""" + + def __init__(self, backend): + self.backend = backend + + +def inside_dask_worker(): + """Check whether the current function is executed inside a Dask worker. + """ + # This function can not be in joblib._dask because there would be a + # circular import: + # _dask imports _parallel_backend that imports _dask ... + try: + from distributed import get_worker + except ImportError: + return False + + try: + get_worker() + return True + except ValueError: + return False diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/_store_backends.py b/env-llmeval/lib/python3.10/site-packages/joblib/_store_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..68e207c17d452faf7539fe0185dc0739a569774b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/_store_backends.py @@ -0,0 +1,474 @@ +"""Storage providers backends for Memory caching.""" + +from pickle import PicklingError +import re +import os +import os.path +import datetime +import json +import shutil +import time +import warnings +import collections +import operator +import threading +from abc import ABCMeta, abstractmethod + +from .backports import concurrency_safe_rename +from .disk import mkdirp, memstr_to_bytes, rm_subdirs +from .logger import format_time +from . 
import numpy_pickle + +CacheItemInfo = collections.namedtuple('CacheItemInfo', + 'path size last_access') + + +class CacheWarning(Warning): + """Warning to capture dump failures except for PicklingError.""" + pass + + +def concurrency_safe_write(object_to_write, filename, write_func): + """Writes an object into a unique file in a concurrency-safe way.""" + thread_id = id(threading.current_thread()) + temporary_filename = '{}.thread-{}-pid-{}'.format( + filename, thread_id, os.getpid()) + write_func(object_to_write, temporary_filename) + + return temporary_filename + + +class StoreBackendBase(metaclass=ABCMeta): + """Helper Abstract Base Class which defines all methods that + a StorageBackend must implement.""" + + location = None + + @abstractmethod + def _open_item(self, f, mode): + """Opens an item on the store and return a file-like object. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + f: a file-like object + The file-like object where an item is stored and retrieved + mode: string, optional + the mode in which the file-like object is opened allowed valued are + 'rb', 'wb' + + Returns + ------- + a file-like object + """ + + @abstractmethod + def _item_exists(self, location): + """Checks if an item location exists in the store. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + location: string + The location of an item. On a filesystem, this corresponds to the + absolute path, including the filename, of a file. + + Returns + ------- + True if the item exists, False otherwise + """ + + @abstractmethod + def _move_item(self, src, dst): + """Moves an item from src to dst in the store. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + src: string + The source location of an item + dst: string + The destination location of an item + """ + + @abstractmethod + def create_location(self, location): + """Creates a location on the store. + + Parameters + ---------- + location: string + The location in the store. On a filesystem, this corresponds to a + directory. + """ + + @abstractmethod + def clear_location(self, location): + """Clears a location on the store. + + Parameters + ---------- + location: string + The location in the store. On a filesystem, this corresponds to a + directory or a filename absolute path + """ + + @abstractmethod + def get_items(self): + """Returns the whole list of items available in the store. + + Returns + ------- + The list of items identified by their ids (e.g filename in a + filesystem). + """ + + @abstractmethod + def configure(self, location, verbose=0, backend_options=dict()): + """Configures the store. + + Parameters + ---------- + location: string + The base location used by the store. On a filesystem, this + corresponds to a directory. + verbose: int + The level of verbosity of the store + backend_options: dict + Contains a dictionary of named parameters used to configure the + store backend. + """ + + +class StoreBackendMixin(object): + """Class providing all logic for managing the store in a generic way. + + The StoreBackend subclass has to implement 3 methods: create_location, + clear_location and configure. The StoreBackend also has to provide + a private _open_item, _item_exists and _move_item methods. The _open_item + method has to have the same signature as the builtin open and return a + file-like object. 
+ """ + + def load_item(self, call_id, verbose=1, timestamp=None, metadata=None): + """Load an item from the store given its id as a list of str.""" + full_path = os.path.join(self.location, *call_id) + + if verbose > 1: + ts_string = ('{: <16}'.format(format_time(time.time() - timestamp)) + if timestamp is not None else '') + signature = os.path.basename(call_id[0]) + if metadata is not None and 'input_args' in metadata: + kwargs = ', '.join('{}={}'.format(*item) + for item in metadata['input_args'].items()) + signature += '({})'.format(kwargs) + msg = '[Memory]{}: Loading {}'.format(ts_string, signature) + if verbose < 10: + print('{0}...'.format(msg)) + else: + print('{0} from {1}'.format(msg, full_path)) + + mmap_mode = (None if not hasattr(self, 'mmap_mode') + else self.mmap_mode) + + filename = os.path.join(full_path, 'output.pkl') + if not self._item_exists(filename): + raise KeyError("Non-existing item (may have been " + "cleared).\nFile %s does not exist" % filename) + + # file-like object cannot be used when mmap_mode is set + if mmap_mode is None: + with self._open_item(filename, "rb") as f: + item = numpy_pickle.load(f) + else: + item = numpy_pickle.load(filename, mmap_mode=mmap_mode) + return item + + def dump_item(self, call_id, item, verbose=1): + """Dump an item in the store at the id given as a list of str.""" + try: + item_path = os.path.join(self.location, *call_id) + if not self._item_exists(item_path): + self.create_location(item_path) + filename = os.path.join(item_path, 'output.pkl') + if verbose > 10: + print('Persisting in %s' % item_path) + + def write_func(to_write, dest_filename): + with self._open_item(dest_filename, "wb") as f: + try: + numpy_pickle.dump(to_write, f, compress=self.compress) + except PicklingError as e: + # TODO(1.5) turn into error + warnings.warn( + "Unable to cache to disk: failed to pickle " + "output. In version 1.5 this will raise an " + f"exception. Exception: {e}.", + FutureWarning + ) + + self._concurrency_safe_write(item, filename, write_func) + except Exception as e: # noqa: E722 + warnings.warn( + "Unable to cache to disk. Possibly a race condition in the " + f"creation of the directory. 
Exception: {e}.", + CacheWarning + ) + + def clear_item(self, call_id): + """Clear the item at the id, given as a list of str.""" + item_path = os.path.join(self.location, *call_id) + if self._item_exists(item_path): + self.clear_location(item_path) + + def contains_item(self, call_id): + """Check if there is an item at the id, given as a list of str.""" + item_path = os.path.join(self.location, *call_id) + filename = os.path.join(item_path, 'output.pkl') + + return self._item_exists(filename) + + def get_item_info(self, call_id): + """Return information about item.""" + return {'location': os.path.join(self.location, *call_id)} + + def get_metadata(self, call_id): + """Return actual metadata of an item.""" + try: + item_path = os.path.join(self.location, *call_id) + filename = os.path.join(item_path, 'metadata.json') + with self._open_item(filename, 'rb') as f: + return json.loads(f.read().decode('utf-8')) + except: # noqa: E722 + return {} + + def store_metadata(self, call_id, metadata): + """Store metadata of a computation.""" + try: + item_path = os.path.join(self.location, *call_id) + self.create_location(item_path) + filename = os.path.join(item_path, 'metadata.json') + + def write_func(to_write, dest_filename): + with self._open_item(dest_filename, "wb") as f: + f.write(json.dumps(to_write).encode('utf-8')) + + self._concurrency_safe_write(metadata, filename, write_func) + except: # noqa: E722 + pass + + def contains_path(self, call_id): + """Check cached function is available in store.""" + func_path = os.path.join(self.location, *call_id) + return self.object_exists(func_path) + + def clear_path(self, call_id): + """Clear all items with a common path in the store.""" + func_path = os.path.join(self.location, *call_id) + if self._item_exists(func_path): + self.clear_location(func_path) + + def store_cached_func_code(self, call_id, func_code=None): + """Store the code of the cached function.""" + func_path = os.path.join(self.location, *call_id) + if not self._item_exists(func_path): + self.create_location(func_path) + + if func_code is not None: + filename = os.path.join(func_path, "func_code.py") + with self._open_item(filename, 'wb') as f: + f.write(func_code.encode('utf-8')) + + def get_cached_func_code(self, call_id): + """Store the code of the cached function.""" + filename = os.path.join(self.location, *call_id, 'func_code.py') + try: + with self._open_item(filename, 'rb') as f: + return f.read().decode('utf-8') + except: # noqa: E722 + raise + + def get_cached_func_info(self, call_id): + """Return information related to the cached function if it exists.""" + return {'location': os.path.join(self.location, *call_id)} + + def clear(self): + """Clear the whole store content.""" + self.clear_location(self.location) + + def enforce_store_limits( + self, bytes_limit, items_limit=None, age_limit=None + ): + """ + Remove the store's oldest files to enforce item, byte, and age limits. + """ + items_to_delete = self._get_items_to_delete( + bytes_limit, items_limit, age_limit + ) + + for item in items_to_delete: + if self.verbose > 10: + print('Deleting item {0}'.format(item)) + try: + self.clear_location(item.path) + except OSError: + # Even with ignore_errors=True shutil.rmtree can raise OSError + # with: + # [Errno 116] Stale file handle if another process has deleted + # the folder already. + pass + + def _get_items_to_delete( + self, bytes_limit, items_limit=None, age_limit=None + ): + """ + Get items to delete to keep the store under size, file, & age limits. 
+ """ + if isinstance(bytes_limit, str): + bytes_limit = memstr_to_bytes(bytes_limit) + + items = self.get_items() + if not items: + return [] + + size = sum(item.size for item in items) + + if bytes_limit is not None: + to_delete_size = size - bytes_limit + else: + to_delete_size = 0 + + if items_limit is not None: + to_delete_items = len(items) - items_limit + else: + to_delete_items = 0 + + if age_limit is not None: + older_item = min(item.last_access for item in items) + deadline = datetime.datetime.now() - age_limit + else: + deadline = None + + if ( + to_delete_size <= 0 and to_delete_items <= 0 + and (deadline is None or older_item > deadline) + ): + return [] + + # We want to delete first the cache items that were accessed a + # long time ago + items.sort(key=operator.attrgetter('last_access')) + + items_to_delete = [] + size_so_far = 0 + items_so_far = 0 + + for item in items: + if ( + (size_so_far >= to_delete_size) + and items_so_far >= to_delete_items + and (deadline is None or deadline < item.last_access) + ): + break + + items_to_delete.append(item) + size_so_far += item.size + items_so_far += 1 + + return items_to_delete + + def _concurrency_safe_write(self, to_write, filename, write_func): + """Writes an object into a file in a concurrency-safe way.""" + temporary_filename = concurrency_safe_write(to_write, + filename, write_func) + self._move_item(temporary_filename, filename) + + def __repr__(self): + """Printable representation of the store location.""" + return '{class_name}(location="{location}")'.format( + class_name=self.__class__.__name__, location=self.location) + + +class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin): + """A StoreBackend used with local or network file systems.""" + + _open_item = staticmethod(open) + _item_exists = staticmethod(os.path.exists) + _move_item = staticmethod(concurrency_safe_rename) + + def clear_location(self, location): + """Delete location on store.""" + if (location == self.location): + rm_subdirs(location) + else: + shutil.rmtree(location, ignore_errors=True) + + def create_location(self, location): + """Create object location on store""" + mkdirp(location) + + def get_items(self): + """Returns the whole list of items available in the store.""" + items = [] + + for dirpath, _, filenames in os.walk(self.location): + is_cache_hash_dir = re.match('[a-f0-9]{32}', + os.path.basename(dirpath)) + + if is_cache_hash_dir: + output_filename = os.path.join(dirpath, 'output.pkl') + try: + last_access = os.path.getatime(output_filename) + except OSError: + try: + last_access = os.path.getatime(dirpath) + except OSError: + # The directory has already been deleted + continue + + last_access = datetime.datetime.fromtimestamp(last_access) + try: + full_filenames = [os.path.join(dirpath, fn) + for fn in filenames] + dirsize = sum(os.path.getsize(fn) + for fn in full_filenames) + except OSError: + # Either output_filename or one of the files in + # dirpath does not exist any more. We assume this + # directory is being cleaned by another process already + continue + + items.append(CacheItemInfo(dirpath, dirsize, + last_access)) + + return items + + def configure(self, location, verbose=1, backend_options=None): + """Configure the store backend. 
+ + For this backend, valid store options are 'compress' and 'mmap_mode' + """ + if backend_options is None: + backend_options = {} + + # setup location directory + self.location = location + if not os.path.exists(self.location): + mkdirp(self.location) + + # item can be stored compressed for faster I/O + self.compress = backend_options.get('compress', False) + + # FileSystemStoreBackend can be used with mmap_mode options under + # certain conditions. + mmap_mode = backend_options.get('mmap_mode') + if self.compress and mmap_mode is not None: + warnings.warn('Compressed items cannot be memmapped in a ' + 'filesystem store. Option will be ignored.', + stacklevel=2) + + self.mmap_mode = mmap_mode + self.verbose = verbose diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/_utils.py b/env-llmeval/lib/python3.10/site-packages/joblib/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b7cc64ee51695fe18e2fc8a819696e0246b54f8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/_utils.py @@ -0,0 +1,83 @@ +# Adapted from https://stackoverflow.com/a/9558001/2536294 + +import ast +from dataclasses import dataclass +import operator as op + + +from ._multiprocessing_helpers import mp + +if mp is not None: + from .externals.loky.process_executor import _ExceptionWithTraceback + + +# supported operators +operators = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.FloorDiv: op.floordiv, + ast.Mod: op.mod, + ast.Pow: op.pow, + ast.USub: op.neg, +} + + +def eval_expr(expr): + """ + >>> eval_expr('2*6') + 12 + >>> eval_expr('2**6') + 64 + >>> eval_expr('1 + 2*3**(4) / (6 + -7)') + -161.0 + """ + try: + return eval_(ast.parse(expr, mode="eval").body) + except (TypeError, SyntaxError, KeyError) as e: + raise ValueError( + f"{expr!r} is not a valid or supported arithmetic expression." 
+ ) from e + + +def eval_(node): + if isinstance(node, ast.Constant): # + return node.value + elif isinstance(node, ast.BinOp): # + return operators[type(node.op)](eval_(node.left), eval_(node.right)) + elif isinstance(node, ast.UnaryOp): # e.g., -1 + return operators[type(node.op)](eval_(node.operand)) + else: + raise TypeError(node) + + +@dataclass(frozen=True) +class _Sentinel: + """A sentinel to mark a parameter as not explicitly set""" + default_value: object + + def __repr__(self): + return f"default({self.default_value!r})" + + +class _TracebackCapturingWrapper: + """Protect function call and return error with traceback.""" + + def __init__(self, func): + self.func = func + + def __call__(self, **kwargs): + try: + return self.func(**kwargs) + except BaseException as e: + return _ExceptionWithTraceback(e) + + +def _retrieve_traceback_capturing_wrapped_call(out): + if isinstance(out, _ExceptionWithTraceback): + rebuild, args = out.__reduce__() + out = rebuild(*args) + if isinstance(out, BaseException): + raise out + return out diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/compressor.py b/env-llmeval/lib/python3.10/site-packages/joblib/compressor.py new file mode 100644 index 0000000000000000000000000000000000000000..0d9e2618a48339e1af8cf2da573fd0af8c96f0b0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/compressor.py @@ -0,0 +1,570 @@ +"""Classes and functions for managing compressors.""" + +import io +import zlib +from joblib.backports import LooseVersion + +try: + from threading import RLock +except ImportError: + from dummy_threading import RLock + +try: + import bz2 +except ImportError: + bz2 = None + +try: + import lz4 + from lz4.frame import LZ4FrameFile +except ImportError: + lz4 = None + +try: + import lzma +except ImportError: + lzma = None + + +LZ4_NOT_INSTALLED_ERROR = ('LZ4 is not installed. Install it with pip: ' + 'https://python-lz4.readthedocs.io/') + +# Registered compressors +_COMPRESSORS = {} + +# Magic numbers of supported compression file formats. +_ZFILE_PREFIX = b'ZF' # used with pickle files created before 0.9.3. +_ZLIB_PREFIX = b'\x78' +_GZIP_PREFIX = b'\x1f\x8b' +_BZ2_PREFIX = b'BZ' +_XZ_PREFIX = b'\xfd\x37\x7a\x58\x5a' +_LZMA_PREFIX = b'\x5d\x00' +_LZ4_PREFIX = b'\x04\x22\x4D\x18' + + +def register_compressor(compressor_name, compressor, + force=False): + """Register a new compressor. + + Parameters + ---------- + compressor_name: str. + The name of the compressor. + compressor: CompressorWrapper + An instance of a 'CompressorWrapper'. + """ + global _COMPRESSORS + if not isinstance(compressor_name, str): + raise ValueError("Compressor name should be a string, " + "'{}' given.".format(compressor_name)) + + if not isinstance(compressor, CompressorWrapper): + raise ValueError("Compressor should implement the CompressorWrapper " + "interface, '{}' given.".format(compressor)) + + if (compressor.fileobj_factory is not None and + (not hasattr(compressor.fileobj_factory, 'read') or + not hasattr(compressor.fileobj_factory, 'write') or + not hasattr(compressor.fileobj_factory, 'seek') or + not hasattr(compressor.fileobj_factory, 'tell'))): + raise ValueError("Compressor 'fileobj_factory' attribute should " + "implement the file object interface, '{}' given." + .format(compressor.fileobj_factory)) + + if compressor_name in _COMPRESSORS and not force: + raise ValueError("Compressor '{}' already registered." 
+ .format(compressor_name)) + + _COMPRESSORS[compressor_name] = compressor + + +class CompressorWrapper(): + """A wrapper around a compressor file object. + + Attributes + ---------- + obj: a file-like object + The object must implement the buffer interface and will be used + internally to compress/decompress the data. + prefix: bytestring + A bytestring corresponding to the magic number that identifies the + file format associated to the compressor. + extension: str + The file extension used to automatically select this compressor during + a dump to a file. + """ + + def __init__(self, obj, prefix=b'', extension=''): + self.fileobj_factory = obj + self.prefix = prefix + self.extension = extension + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return self.fileobj_factory(fileobj, 'rb') + + +class BZ2CompressorWrapper(CompressorWrapper): + + prefix = _BZ2_PREFIX + extension = '.bz2' + + def __init__(self): + if bz2 is not None: + self.fileobj_factory = bz2.BZ2File + else: + self.fileobj_factory = None + + def _check_versions(self): + if bz2 is None: + raise ValueError('bz2 module is not compiled on your python ' + 'standard library.') + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + fileobj = self.fileobj_factory(fileobj, 'rb') + return fileobj + + +class LZMACompressorWrapper(CompressorWrapper): + + prefix = _LZMA_PREFIX + extension = '.lzma' + _lzma_format_name = 'FORMAT_ALONE' + + def __init__(self): + if lzma is not None: + self.fileobj_factory = lzma.LZMAFile + self._lzma_format = getattr(lzma, self._lzma_format_name) + else: + self.fileobj_factory = None + + def _check_versions(self): + if lzma is None: + raise ValueError('lzma module is not compiled on your python ' + 'standard library.') + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb', + format=self._lzma_format) + else: + return self.fileobj_factory(fileobj, 'wb', + format=self._lzma_format, + preset=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return lzma.LZMAFile(fileobj, 'rb') + + +class XZCompressorWrapper(LZMACompressorWrapper): + + prefix = _XZ_PREFIX + extension = '.xz' + _lzma_format_name = 'FORMAT_XZ' + + +class LZ4CompressorWrapper(CompressorWrapper): + + prefix = _LZ4_PREFIX + extension = '.lz4' + + def __init__(self): + if lz4 is not None: + self.fileobj_factory = LZ4FrameFile + else: + self.fileobj_factory = None + + def _check_versions(self): + if lz4 is None: + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + lz4_version = lz4.__version__ + if lz4_version.startswith("v"): + lz4_version = lz4_version[1:] + if LooseVersion(lz4_version) < LooseVersion('0.19'): + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + + def 
compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, 'wb') + else: + return self.fileobj_factory(fileobj, 'wb', + compression_level=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + return self.fileobj_factory(fileobj, 'rb') + + +############################################################################### +# base file compression/decompression object definition +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_READ_EOF = 2 +_MODE_WRITE = 3 +_BUFFER_SIZE = 8192 + + +class BinaryZlibFile(io.BufferedIOBase): + """A file object providing transparent zlib (de)compression. + + TODO python2_drop: is it still needed since we dropped Python 2 support A + BinaryZlibFile can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + Note that BinaryZlibFile provides only a *binary* file interface: data read + is returned as bytes, and data to be written should be given as bytes. + + This object is an adaptation of the BZ2File object and is compatible with + versions of python >= 2.7. + + If filename is a str or bytes object, it gives the name + of the file to be opened. Otherwise, it should be a file object, + which will be used to read or write the compressed data. + + mode can be 'rb' for reading (default) or 'wb' for (over)writing + + If mode is 'wb', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 produces the most compression. 3 is the default. + """ + + wbits = zlib.MAX_WBITS + + def __init__(self, filename, mode="rb", compresslevel=3): + # This lock must be recursive, so that BufferedIOBase's + # readline(), readlines() and writelines() don't deadlock. + self._lock = RLock() + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._pos = 0 + self._size = -1 + self.compresslevel = compresslevel + + if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9): + raise ValueError("'compresslevel' must be an integer " + "between 1 and 9. You provided 'compresslevel={}'" + .format(compresslevel)) + + if mode == "rb": + self._mode = _MODE_READ + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + elif mode == "wb": + self._mode = _MODE_WRITE + self._compressor = zlib.compressobj(self.compresslevel, + zlib.DEFLATED, self.wbits, + zlib.DEF_MEM_LEVEL, 0) + else: + raise ValueError("Invalid mode: %r" % (mode,)) + + if isinstance(filename, str): + self._fp = io.open(filename, mode) + self._closefp = True + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + else: + raise TypeError("filename must be a str or bytes object, " + "or a file") + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. 
+ """ + with self._lock: + if self._mode == _MODE_CLOSED: + return + try: + if self._mode in (_MODE_READ, _MODE_READ_EOF): + self._decompressor = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._buffer = b"" + self._buffer_offset = 0 + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._fp.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode in (_MODE_READ, _MODE_READ_EOF) + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + # Mode-checking helper functions. + + def _check_not_closed(self): + if self.closed: + fname = getattr(self._fp, 'name', None) + msg = "I/O operation on closed file" + if fname is not None: + msg += " {}".format(fname) + msg += "." + raise ValueError(msg) + + def _check_can_read(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if self._mode != _MODE_WRITE: + self._check_not_closed() + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + if not self._fp.seekable(): + raise io.UnsupportedOperation("The underlying file object " + "does not support seeking") + + # Fill the readahead buffer if it is empty. Returns False on EOF. + def _fill_buffer(self): + if self._mode == _MODE_READ_EOF: + return False + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. + while self._buffer_offset == len(self._buffer): + try: + rawblock = (self._decompressor.unused_data or + self._fp.read(_BUFFER_SIZE)) + if not rawblock: + raise EOFError + except EOFError: + # End-of-stream marker and end of file. We're good. + self._mode = _MODE_READ_EOF + self._size = self._pos + return False + else: + self._buffer = self._decompressor.decompress(rawblock) + self._buffer_offset = 0 + return True + + # Read data until EOF. + # If return_data is false, consume the data without returning it. + def _read_all(self, return_data=True): + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset:] + self._buffer_offset = 0 + + blocks = [] + while self._fill_buffer(): + if return_data: + blocks.append(self._buffer) + self._pos += len(self._buffer) + self._buffer = b"" + if return_data: + return b"".join(blocks) + + # Read a block of up to n bytes. + # If return_data is false, consume the data without returning it. + def _read_block(self, n_bytes, return_data=True): + # If we have enough data buffered, return immediately. 
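+        # Illustrative example (not in the original source): with
+        # _buffer_offset == 0, a 100-byte _buffer and n_bytes == 10, the
+        # slice _buffer[0:10] is returned directly and _pos advances by 10,
+        # without calling the decompressor again.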
+ end = self._buffer_offset + n_bytes + if end <= len(self._buffer): + data = self._buffer[self._buffer_offset: end] + self._buffer_offset = end + self._pos += len(data) + return data if return_data else None + + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset:] + self._buffer_offset = 0 + + blocks = [] + while n_bytes > 0 and self._fill_buffer(): + if n_bytes < len(self._buffer): + data = self._buffer[:n_bytes] + self._buffer_offset = n_bytes + else: + data = self._buffer + self._buffer = b"" + if return_data: + blocks.append(data) + self._pos += len(data) + n_bytes -= len(data) + if return_data: + return b"".join(blocks) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. + """ + with self._lock: + self._check_can_read() + if size == 0: + return b"" + elif size < 0: + return self._read_all() + else: + return self._read_block(size) + + def readinto(self, b): + """Read up to len(b) bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + with self._lock: + return io.BufferedIOBase.readinto(self, b) + + def write(self, data): + """Write a byte string to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + """ + with self._lock: + self._check_can_write() + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += len(data) + return len(data) + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0, 0) + self._mode = _MODE_READ + self._pos = 0 + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + + def seek(self, offset, whence=0): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Values for whence are: + + 0: start of stream (default); offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + with self._lock: + self._check_can_seek() + + # Recalculate offset as an absolute file position. + if whence == 0: + pass + elif whence == 1: + offset = self._pos + offset + elif whence == 2: + # Seeking relative to EOF - we need to know the file's size. + if self._size < 0: + self._read_all(return_data=False) + offset = self._size + offset + else: + raise ValueError("Invalid value for whence: %s" % (whence,)) + + # Make it so that offset is the number of bytes to skip forward. + if offset < self._pos: + self._rewind() + else: + offset -= self._pos + + # Read and discard data until we reach the desired position. 
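+            # Illustrative example (not in the original source): seeking
+            # backwards, e.g. from position 4096 to 1024, triggers _rewind()
+            # above and then decompresses the first 1024 bytes again, whereas
+            # a forward seek only decompresses and discards `offset` bytes
+            # (already converted above to the number of bytes to skip).
+            # This is why seeking in a compressed stream can be very slow.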
+ self._read_block(offset, return_data=False) + + return self._pos + + def tell(self): + """Return the current file position.""" + with self._lock: + self._check_not_closed() + return self._pos + + +class ZlibCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryZlibFile, + prefix=_ZLIB_PREFIX, extension='.z') + + +class BinaryGzipFile(BinaryZlibFile): + """A file object providing transparent gzip (de)compression. + + If filename is a str or bytes object, it gives the name + of the file to be opened. Otherwise, it should be a file object, + which will be used to read or write the compressed data. + + mode can be 'rb' for reading (default) or 'wb' for (over)writing + + If mode is 'wb', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 produces the most compression. 3 is the default. + """ + + wbits = 31 # zlib compressor/decompressor wbits value for gzip format. + + +class GzipCompressorWrapper(CompressorWrapper): + + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryGzipFile, + prefix=_GZIP_PREFIX, extension='.gz') diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/disk.py b/env-llmeval/lib/python3.10/site-packages/joblib/disk.py new file mode 100644 index 0000000000000000000000000000000000000000..32fbb89f6dc6c9c7df532c5fefa14934f16321f6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/disk.py @@ -0,0 +1,136 @@ +""" +Disk management utilities. +""" + +# Authors: Gael Varoquaux +# Lars Buitinck +# Copyright (c) 2010 Gael Varoquaux +# License: BSD Style, 3 clauses. + + +import os +import sys +import time +import errno +import shutil + +from multiprocessing import util + + +try: + WindowsError +except NameError: + WindowsError = OSError + + +def disk_used(path): + """ Return the disk usage in a directory.""" + size = 0 + for file in os.listdir(path) + ['.']: + stat = os.stat(os.path.join(path, file)) + if hasattr(stat, 'st_blocks'): + size += stat.st_blocks * 512 + else: + # on some platform st_blocks is not available (e.g., Windows) + # approximate by rounding to next multiple of 512 + size += (stat.st_size // 512 + 1) * 512 + # We need to convert to int to avoid having longs on some systems (we + # don't want longs to avoid problems we SQLite) + return int(size / 1024.) + + +def memstr_to_bytes(text): + """ Convert a memory text to its value in bytes. + """ + kilo = 1024 + units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3) + try: + size = int(units[text[-1]] * float(text[:-1])) + except (KeyError, ValueError) as e: + raise ValueError( + "Invalid literal for size give: %s (type %s) should be " + "alike '10G', '500M', '50K'." % (text, type(text))) from e + return size + + +def mkdirp(d): + """Ensure directory d exists (like mkdir -p on Unix) + No guarantee that the directory is writable. + """ + try: + os.makedirs(d) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs), +# then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the +# exception. this mechanism ensures that the sub-process gc have the time to +# collect and close the memmaps before we fail. +RM_SUBDIRS_RETRY_TIME = 0.1 +RM_SUBDIRS_N_RETRY = 10 + + +def rm_subdirs(path, onerror=None): + """Remove all subdirectories in this path. + + The directory indicated by `path` is left in place, and its subdirectories + are erased. 
+ + If onerror is set, it is called to handle the error with arguments (func, + path, exc_info) where func is os.listdir, os.remove, or os.rmdir; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If onerror is None, + an exception is raised. + """ + + # NOTE this code is adapted from the one in shutil.rmtree, and is + # just as fast + + names = [] + try: + names = os.listdir(path) + except os.error: + if onerror is not None: + onerror(os.listdir, path, sys.exc_info()) + else: + raise + + for name in names: + fullname = os.path.join(path, name) + delete_folder(fullname, onerror=onerror) + + +def delete_folder(folder_path, onerror=None, allow_non_empty=True): + """Utility function to cleanup a temporary folder if it still exists.""" + if os.path.isdir(folder_path): + if onerror is not None: + shutil.rmtree(folder_path, False, onerror) + else: + # allow the rmtree to fail once, wait and re-try. + # if the error is raised again, fail + err_count = 0 + while True: + files = os.listdir(folder_path) + try: + if len(files) == 0 or allow_non_empty: + shutil.rmtree( + folder_path, ignore_errors=False, onerror=None + ) + util.debug( + "Successfully deleted {}".format(folder_path)) + break + else: + raise OSError( + "Expected empty folder {} but got {} " + "files.".format(folder_path, len(files)) + ) + except (OSError, WindowsError): + err_count += 1 + if err_count > RM_SUBDIRS_N_RETRY: + # the folder cannot be deleted right now. It maybe + # because some temporary files have not been deleted + # yet. + raise + time.sleep(RM_SUBDIRS_RETRY_TIME) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/executor.py b/env-llmeval/lib/python3.10/site-packages/joblib/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..6837a7d147411cd74034a078ff98cab916ec36ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/executor.py @@ -0,0 +1,117 @@ +"""Utility function to construct a loky.ReusableExecutor with custom pickler. + +This module provides efficient ways of working with data stored in +shared memory with numpy.memmap arrays without inducing any memory +copy between the parent and child processes. +""" +# Author: Thomas Moreau +# Copyright: 2017, Thomas Moreau +# License: BSD 3 clause + +from ._memmapping_reducer import get_memmapping_reducers +from ._memmapping_reducer import TemporaryResourcesManager +from .externals.loky.reusable_executor import _ReusablePoolExecutor + + +_executor_args = None + + +def get_memmapping_executor(n_jobs, **kwargs): + return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs) + + +class MemmappingExecutor(_ReusablePoolExecutor): + + @classmethod + def get_memmapping_executor(cls, n_jobs, timeout=300, initializer=None, + initargs=(), env=None, temp_folder=None, + context_id=None, **backend_args): + """Factory for ReusableExecutor with automatic memmapping for large + numpy arrays. + """ + global _executor_args + # Check if we can reuse the executor here instead of deferring the test + # to loky as the reducers are objects that changes at each call. 
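+        # Illustrative note (not in the original source): the executor is
+        # reused only when the arguments of this call exactly match those of
+        # the previous call, e.g. calling get_memmapping_executor(4) twice in
+        # a row reuses the worker processes, whereas changing timeout, env,
+        # initializer or any backend_args makes `reuse` False below and
+        # spawns a fresh executor.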
+ executor_args = backend_args.copy() + executor_args.update(env if env else {}) + executor_args.update(dict( + timeout=timeout, initializer=initializer, initargs=initargs)) + reuse = _executor_args is None or _executor_args == executor_args + _executor_args = executor_args + + manager = TemporaryResourcesManager(temp_folder) + + # reducers access the temporary folder in which to store temporary + # pickles through a call to manager.resolve_temp_folder_name. resolving + # the folder name dynamically is useful to use different folders across + # calls of a same reusable executor + job_reducers, result_reducers = get_memmapping_reducers( + unlink_on_gc_collect=True, + temp_folder_resolver=manager.resolve_temp_folder_name, + **backend_args) + _executor, executor_is_reused = super().get_reusable_executor( + n_jobs, job_reducers=job_reducers, result_reducers=result_reducers, + reuse=reuse, timeout=timeout, initializer=initializer, + initargs=initargs, env=env + ) + + if not executor_is_reused: + # Only set a _temp_folder_manager for new executors. Reused + # executors already have a _temporary_folder_manager that must not + # be re-assigned like that because it is referenced in various + # places in the reducing machinery of the executor. + _executor._temp_folder_manager = manager + + if context_id is not None: + # Only register the specified context once we know which manager + # the current executor is using, in order to not register an atexit + # finalizer twice for the same folder. + _executor._temp_folder_manager.register_new_context(context_id) + + return _executor + + def terminate(self, kill_workers=False): + + self.shutdown(kill_workers=kill_workers) + + # When workers are killed in a brutal manner, they cannot execute the + # finalizer of their shared memmaps. The refcount of those memmaps may + # be off by an unknown number, so instead of decref'ing them, we force + # delete the whole temporary folder, and unregister them. There is no + # risk of PermissionError at folder deletion because at this + # point, all child processes are dead, so all references to temporary + # memmaps are closed. Otherwise, just try to delete as much as possible + # with allow_non_empty=True but if we can't, it will be clean up later + # on by the resource_tracker. + with self._submit_resize_lock: + self._temp_folder_manager._clean_temporary_resources( + force=kill_workers, allow_non_empty=True + ) + + @property + def _temp_folder(self): + # Legacy property in tests. could be removed if we refactored the + # memmapping tests. SHOULD ONLY BE USED IN TESTS! + # We cache this property because it is called late in the tests - at + # this point, all context have been unregistered, and + # resolve_temp_folder_name raises an error. + if getattr(self, '_cached_temp_folder', None) is not None: + return self._cached_temp_folder + else: + self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa + return self._cached_temp_folder + + +class _TestingMemmappingExecutor(MemmappingExecutor): + """Wrapper around ReusableExecutor to ease memmapping testing with Pool + and Executor. This is only for testing purposes. 
+ + """ + def apply_async(self, func, args): + """Schedule a func to be run""" + future = self.submit(func, *args) + future.get = future.result + return future + + def map(self, f, *args): + return list(super().map(f, *args)) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e964b4501babdc001b8227c4bb0481f6ccda34f0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58a8d086ff616b2ef75ab0d788d990e749f96e8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py @@ -0,0 +1,18 @@ +from . import cloudpickle +from .cloudpickle import * # noqa + +__doc__ = cloudpickle.__doc__ + +__version__ = "3.0.0" + +__all__ = [ # noqa + "__version__", + "Pickler", + "CloudPickler", + "dumps", + "loads", + "dump", + "load", + "register_pickle_by_value", + "unregister_pickle_by_value", +] diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9722971ce2d0f61c4597b7abc0e0af4e7ddc0b9b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea90e390f1951c3c0af55e05c3bc7111178aa644 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2389dcaa949f8c8c8311a87920a62ed33faf42e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py new file mode 100644 index 0000000000000000000000000000000000000000..eb43a9676bbb11bdecf187e7f6cde51f793ff3fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py @@ -0,0 +1,1487 @@ +"""Pickler class to extend the standard pickle.Pickler functionality + +The main objective is to make it natural to perform distributed computing on +clusters (such as PySpark, Dask, Ray...) with interactively defined code +(functions, classes, ...) 
written in notebooks or console. + +In particular this pickler adds the following features: +- serialize interactively-defined or locally-defined functions, classes, + enums, typevars, lambdas and nested functions to compiled byte code; +- deal with some other non-serializable objects in an ad-hoc manner where + applicable. + +This pickler is therefore meant to be used for the communication between short +lived Python processes running the same version of Python and libraries. In +particular, it is not meant to be used for long term storage of Python objects. + +It does not include an unpickler, as standard Python unpickling suffices. + +This module was extracted from the `cloud` package, developed by `PiCloud, Inc. +`_. + +Copyright (c) 2012-now, CloudPickle developers and contributors. +Copyright (c) 2012, Regents of the University of California. +Copyright (c) 2009 `PiCloud, Inc. `_. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the University of California, Berkeley nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import _collections_abc +from collections import ChainMap, OrderedDict +import abc +import builtins +import copyreg +import dataclasses +import dis +from enum import Enum +import io +import itertools +import logging +import opcode +import pickle +from pickle import _getattribute +import platform +import struct +import sys +import threading +import types +import typing +import uuid +import warnings +import weakref + +# The following import is required to be imported in the cloudpickle +# namespace to be able to load pickle files generated with older versions of +# cloudpickle. See: tests/test_backward_compat.py +from types import CellType # noqa: F401 + + +# cloudpickle is meant for inter process communication: we expect all +# communicating processes to run the same Python version hence we favor +# communication speed over compatibility: +DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL + +# Names of modules whose resources should be treated as dynamic. 
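+# A quick illustration of the behaviour described in the module docstring (an
+# editorial sketch, not part of the upstream test suite): interactively or
+# locally defined callables such as lambdas are pickled by value and can be
+# loaded back with the standard unpickler.
+# >>> import pickle
+# >>> from joblib.externals.cloudpickle import dumps
+# >>> payload = dumps(lambda x: x * x)
+# >>> pickle.loads(payload)(3)
+# 9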
+_PICKLE_BY_VALUE_MODULES = set() + +# Track the provenance of reconstructed dynamic classes to make it possible to +# reconstruct instances from the matching singleton class definition when +# appropriate and preserve the usual "isinstance" semantics of Python objects. +_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary() +_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary() +_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock() + +PYPY = platform.python_implementation() == "PyPy" + +builtin_code_type = None +if PYPY: + # builtin-code objects only exist in pypy + builtin_code_type = type(float.__new__.__code__) + +_extract_code_globals_cache = weakref.WeakKeyDictionary() + + +def _get_or_create_tracker_id(class_def): + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def) + if class_tracker_id is None: + class_tracker_id = uuid.uuid4().hex + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def + return class_tracker_id + + +def _lookup_class_or_track(class_tracker_id, class_def): + if class_tracker_id is not None: + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault( + class_tracker_id, class_def + ) + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + return class_def + + +def register_pickle_by_value(module): + """Register a module to make it functions and classes picklable by value. + + By default, functions and classes that are attributes of an importable + module are to be pickled by reference, that is relying on re-importing + the attribute from the module at load time. + + If `register_pickle_by_value(module)` is called, all its functions and + classes are subsequently to be pickled by value, meaning that they can + be loaded in Python processes where the module is not importable. + + This is especially useful when developing a module in a distributed + execution environment: restarting the client Python process with the new + source code is enough: there is no need to re-install the new version + of the module on all the worker nodes nor to restart the workers. + + Note: this feature is considered experimental. See the cloudpickle + README.md file for more details and limitations. + """ + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + # In the future, cloudpickle may need a way to access any module registered + # for pickling by value in order to introspect relative imports inside + # functions pickled by value. (see + # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633). + # This access can be ensured by checking that module is present in + # sys.modules at registering time and assuming that it will still be in + # there when accessed during pickling. Another alternative would be to + # store a weakref to the module. Even though cloudpickle does not implement + # this introspection yet, in order to avoid a possible breaking change + # later, we still enforce the presence of module inside sys.modules. + if module.__name__ not in sys.modules: + raise ValueError( + f"{module} was not imported correctly, have you used an " + "`import` statement to access it?" 
+ ) + _PICKLE_BY_VALUE_MODULES.add(module.__name__) + + +def unregister_pickle_by_value(module): + """Unregister that the input module should be pickled by value.""" + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + if module.__name__ not in _PICKLE_BY_VALUE_MODULES: + raise ValueError(f"{module} is not registered for pickle by value") + else: + _PICKLE_BY_VALUE_MODULES.remove(module.__name__) + + +def list_registry_pickle_by_value(): + return _PICKLE_BY_VALUE_MODULES.copy() + + +def _is_registered_pickle_by_value(module): + module_name = module.__name__ + if module_name in _PICKLE_BY_VALUE_MODULES: + return True + while True: + parent_name = module_name.rsplit(".", 1)[0] + if parent_name == module_name: + break + if parent_name in _PICKLE_BY_VALUE_MODULES: + return True + module_name = parent_name + return False + + +def _whichmodule(obj, name): + """Find the module an object belongs to. + + This function differs from ``pickle.whichmodule`` in two ways: + - it does not mangle the cases where obj's module is __main__ and obj was + not found in any module. + - Errors arising during module introspection are ignored, as those errors + are considered unwanted side effects. + """ + module_name = getattr(obj, "__module__", None) + + if module_name is not None: + return module_name + # Protect the iteration by using a copy of sys.modules against dynamic + # modules that trigger imports of other modules upon calls to getattr or + # other threads importing at the same time. + for module_name, module in sys.modules.copy().items(): + # Some modules such as coverage can inject non-module objects inside + # sys.modules + if ( + module_name == "__main__" + or module is None + or not isinstance(module, types.ModuleType) + ): + continue + try: + if _getattribute(module, name)[0] is obj: + return module_name + except Exception: + pass + return None + + +def _should_pickle_by_reference(obj, name=None): + """Test whether an function or a class should be pickled by reference + + Pickling by reference means by that the object (typically a function or a + class) is an attribute of a module that is assumed to be importable in the + target Python environment. Loading will therefore rely on importing the + module and then calling `getattr` on it to access the function or class. + + Pickling by reference is the only option to pickle functions and classes + in the standard library. In cloudpickle the alternative option is to + pickle by value (for instance for interactively or locally defined + functions and classes or for attributes of modules that have been + explicitly registered to be pickled by value. + """ + if isinstance(obj, types.FunctionType) or issubclass(type(obj), type): + module_and_name = _lookup_module_and_qualname(obj, name=name) + if module_and_name is None: + return False + module, name = module_and_name + return not _is_registered_pickle_by_value(module) + + elif isinstance(obj, types.ModuleType): + # We assume that sys.modules is primarily used as a cache mechanism for + # the Python import machinery. Checking if a module has been added in + # is sys.modules therefore a cheap and simple heuristic to tell us + # whether we can assume that a given module could be imported by name + # in another Python process. 
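+ # Editorial illustration of the resulting behaviour: an importable module
+ # is pickled by reference, while a dynamically created module (or one
+ # registered with register_pickle_by_value) is pickled by value.
+ # >>> import math, types
+ # >>> _should_pickle_by_reference(math)
+ # True
+ # >>> _should_pickle_by_reference(types.ModuleType("dynamic_mod"))
+ # False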
+ if _is_registered_pickle_by_value(obj): + return False + return obj.__name__ in sys.modules + else: + raise TypeError( + "cannot check importability of {} instances".format(type(obj).__name__) + ) + + +def _lookup_module_and_qualname(obj, name=None): + if name is None: + name = getattr(obj, "__qualname__", None) + if name is None: # pragma: no cover + # This used to be needed for Python 2.7 support but is probably not + # needed anymore. However we keep the __name__ introspection in case + # users of cloudpickle rely on this old behavior for unknown reasons. + name = getattr(obj, "__name__", None) + + module_name = _whichmodule(obj, name) + + if module_name is None: + # In this case, obj.__module__ is None AND obj was not found in any + # imported module. obj is thus treated as dynamic. + return None + + if module_name == "__main__": + return None + + # Note: if module_name is in sys.modules, the corresponding module is + # assumed importable at unpickling time. See #357 + module = sys.modules.get(module_name, None) + if module is None: + # The main reason why obj's module would not be imported is that this + # module has been dynamically created, using for example + # types.ModuleType. The other possibility is that module was removed + # from sys.modules after obj was created/imported. But this case is not + # supported, as the standard pickle does not support it either. + return None + + try: + obj2, parent = _getattribute(module, name) + except AttributeError: + # obj was not found inside the module it points to + return None + if obj2 is not obj: + return None + return module, name + + +def _extract_code_globals(co): + """Find all globals names read or written to by codeblock co.""" + out_names = _extract_code_globals_cache.get(co) + if out_names is None: + # We use a dict with None values instead of a set to get a + # deterministic order and avoid introducing non-deterministic pickle + # bytes as a results. + out_names = {name: None for name in _walk_global_ops(co)} + + # Declaring a function inside another one using the "def ..." syntax + # generates a constant code object corresponding to the one of the + # nested function's As the nested function may itself need global + # variables, we need to introspect its code, extract its globals, (look + # for code object in it's co_consts attribute..) and add the result to + # code_globals + if co.co_consts: + for const in co.co_consts: + if isinstance(const, types.CodeType): + out_names.update(_extract_code_globals(const)) + + _extract_code_globals_cache[co] = out_names + + return out_names + + +def _find_imported_submodules(code, top_level_dependencies): + """Find currently imported submodules used by a function. + + Submodules used by a function need to be detected and referenced for the + function to work correctly at depickling time. Because submodules can be + referenced as attribute of their parent package (``package.submodule``), we + need a special introspection technique that does not rely on GLOBAL-related + opcodes to find references of them in a code object. + + Example: + ``` + import concurrent.futures + import cloudpickle + def func(): + x = concurrent.futures.ThreadPoolExecutor + if __name__ == '__main__': + cloudpickle.dumps(func) + ``` + The globals extracted by cloudpickle in the function's state include the + concurrent package, but not its submodule (here, concurrent.futures), which + is the module used by func. Find_imported_submodules will detect the usage + of concurrent.futures. 
Saving this module alongside with func will ensure + that calling func once depickled does not fail due to concurrent.futures + not being imported + """ + + subimports = [] + # check if any known dependency is an imported package + for x in top_level_dependencies: + if ( + isinstance(x, types.ModuleType) + and hasattr(x, "__package__") + and x.__package__ + ): + # check if the package has any currently loaded sub-imports + prefix = x.__name__ + "." + # A concurrent thread could mutate sys.modules, + # make sure we iterate over a copy to avoid exceptions + for name in list(sys.modules): + # Older versions of pytest will add a "None" module to + # sys.modules. + if name is not None and name.startswith(prefix): + # check whether the function can address the sub-module + tokens = set(name[len(prefix) :].split(".")) + if not tokens - set(code.co_names): + subimports.append(sys.modules[name]) + return subimports + + +# relevant opcodes +STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"] +DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"] +LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"] +GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL) +HAVE_ARGUMENT = dis.HAVE_ARGUMENT +EXTENDED_ARG = dis.EXTENDED_ARG + + +_BUILTIN_TYPE_NAMES = {} +for k, v in types.__dict__.items(): + if type(v) is type: + _BUILTIN_TYPE_NAMES[v] = k + + +def _builtin_type(name): + if name == "ClassType": # pragma: no cover + # Backward compat to load pickle files generated with cloudpickle + # < 1.3 even if loading pickle files from older versions is not + # officially supported. + return type + return getattr(types, name) + + +def _walk_global_ops(code): + """Yield referenced name for global-referencing instructions in code.""" + for instr in dis.get_instructions(code): + op = instr.opcode + if op in GLOBAL_OPS: + yield instr.argval + + +def _extract_class_dict(cls): + """Retrieve a copy of the dict of a class without the inherited method.""" + clsdict = dict(cls.__dict__) # copy dict proxy to a dict + if len(cls.__bases__) == 1: + inherited_dict = cls.__bases__[0].__dict__ + else: + inherited_dict = {} + for base in reversed(cls.__bases__): + inherited_dict.update(base.__dict__) + to_remove = [] + for name, value in clsdict.items(): + try: + base_value = inherited_dict[name] + if value is base_value: + to_remove.append(name) + except KeyError: + pass + for name in to_remove: + clsdict.pop(name) + return clsdict + + +def is_tornado_coroutine(func): + """Return whether `func` is a Tornado coroutine function. + + Running coroutines are not supported. + """ + warnings.warn( + "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be " + "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function " + "directly instead.", + category=DeprecationWarning, + ) + if "tornado.gen" not in sys.modules: + return False + gen = sys.modules["tornado.gen"] + if not hasattr(gen, "is_coroutine_function"): + # Tornado version is too old + return False + return gen.is_coroutine_function(func) + + +def subimport(name): + # We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is + # the name of a submodule, __import__ will return the top-level root module + # of this submodule. For instance, __import__('os.path') returns the `os` + # module. 
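+ # Editorial illustration of the difference (assumes `os.path` has been
+ # imported, so both modules are present in sys.modules):
+ # >>> import sys
+ # >>> import os.path
+ # >>> __import__("os.path") is sys.modules["os"]
+ # True
+ # >>> subimport("os.path") is sys.modules["os.path"]
+ # True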
+ __import__(name) + return sys.modules[name] + + +def dynamic_subimport(name, vars): + mod = types.ModuleType(name) + mod.__dict__.update(vars) + mod.__dict__["__builtins__"] = builtins.__dict__ + return mod + + +def _get_cell_contents(cell): + try: + return cell.cell_contents + except ValueError: + # Handle empty cells explicitly with a sentinel value. + return _empty_cell_value + + +def instance(cls): + """Create a new instance of a class. + + Parameters + ---------- + cls : type + The class to create an instance of. + + Returns + ------- + instance : cls + A new instance of ``cls``. + """ + return cls() + + +@instance +class _empty_cell_value: + """Sentinel for empty closures.""" + + @classmethod + def __reduce__(cls): + return cls.__name__ + + +def _make_function(code, globals, name, argdefs, closure): + # Setting __builtins__ in globals is needed for nogil CPython. + globals["__builtins__"] = __builtins__ + return types.FunctionType(code, globals, name, argdefs, closure) + + +def _make_empty_cell(): + if False: + # trick the compiler into creating an empty cell in our lambda + cell = None + raise AssertionError("this route should not be executed") + + return (lambda: cell).__closure__[0] + + +def _make_cell(value=_empty_cell_value): + cell = _make_empty_cell() + if value is not _empty_cell_value: + cell.cell_contents = value + return cell + + +def _make_skeleton_class( + type_constructor, name, bases, type_kwargs, class_tracker_id, extra +): + """Build dynamic class with an empty __dict__ to be filled once memoized + + If class_tracker_id is not None, try to lookup an existing class definition + matching that id. If none is found, track a newly reconstructed class + definition under that id so that other instances stemming from the same + class id will also reuse this class definition. + + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. + """ + skeleton_class = types.new_class( + name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs) + ) + return _lookup_class_or_track(class_tracker_id, skeleton_class) + + +def _make_skeleton_enum( + bases, name, qualname, members, module, class_tracker_id, extra +): + """Build dynamic enum with an empty __dict__ to be filled once memoized + + The creation of the enum class is inspired by the code of + EnumMeta._create_. + + If class_tracker_id is not None, try to lookup an existing enum definition + matching that id. If none is found, track a newly reconstructed enum + definition under that id so that other instances stemming from the same + class id will also reuse this enum definition. + + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. 
+ """ + # enums always inherit from their base Enum class at the last position in + # the list of base classes: + enum_base = bases[-1] + metacls = enum_base.__class__ + classdict = metacls.__prepare__(name, bases) + + for member_name, member_value in members.items(): + classdict[member_name] = member_value + enum_class = metacls.__new__(metacls, name, bases, classdict) + enum_class.__module__ = module + enum_class.__qualname__ = qualname + + return _lookup_class_or_track(class_tracker_id, enum_class) + + +def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id): + tv = typing.TypeVar( + name, + *constraints, + bound=bound, + covariant=covariant, + contravariant=contravariant, + ) + return _lookup_class_or_track(class_tracker_id, tv) + + +def _decompose_typevar(obj): + return ( + obj.__name__, + obj.__bound__, + obj.__constraints__, + obj.__covariant__, + obj.__contravariant__, + _get_or_create_tracker_id(obj), + ) + + +def _typevar_reduce(obj): + # TypeVar instances require the module information hence why we + # are not using the _should_pickle_by_reference directly + module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__) + + if module_and_name is None: + return (_make_typevar, _decompose_typevar(obj)) + elif _is_registered_pickle_by_value(module_and_name[0]): + return (_make_typevar, _decompose_typevar(obj)) + + return (getattr, module_and_name) + + +def _get_bases(typ): + if "__orig_bases__" in getattr(typ, "__dict__", {}): + # For generic types (see PEP 560) + # Note that simply checking `hasattr(typ, '__orig_bases__')` is not + # correct. Subclasses of a fully-parameterized generic class does not + # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')` + # will return True because it's defined in the base class. + bases_attr = "__orig_bases__" + else: + # For regular class objects + bases_attr = "__bases__" + return getattr(typ, bases_attr) + + +def _make_dict_keys(obj, is_ordered=False): + if is_ordered: + return OrderedDict.fromkeys(obj).keys() + else: + return dict.fromkeys(obj).keys() + + +def _make_dict_values(obj, is_ordered=False): + if is_ordered: + return OrderedDict((i, _) for i, _ in enumerate(obj)).values() + else: + return {i: _ for i, _ in enumerate(obj)}.values() + + +def _make_dict_items(obj, is_ordered=False): + if is_ordered: + return OrderedDict(obj).items() + else: + return obj.items() + + +# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS +# ------------------------------------------------- + + +def _class_getnewargs(obj): + type_kwargs = {} + if "__module__" in obj.__dict__: + type_kwargs["__module__"] = obj.__module__ + + __dict__ = obj.__dict__.get("__dict__", None) + if isinstance(__dict__, property): + type_kwargs["__dict__"] = __dict__ + + return ( + type(obj), + obj.__name__, + _get_bases(obj), + type_kwargs, + _get_or_create_tracker_id(obj), + None, + ) + + +def _enum_getnewargs(obj): + members = {e.name: e.value for e in obj} + return ( + obj.__bases__, + obj.__name__, + obj.__qualname__, + members, + obj.__module__, + _get_or_create_tracker_id(obj), + None, + ) + + +# COLLECTION OF OBJECTS RECONSTRUCTORS +# ------------------------------------ +def _file_reconstructor(retval): + return retval + + +# COLLECTION OF OBJECTS STATE GETTERS +# ----------------------------------- + + +def _function_getstate(func): + # - Put func's dynamic attributes (stored in func.__dict__) in state. 
These + # attributes will be restored at unpickling time using + # f.__dict__.update(state) + # - Put func's members into slotstate. Such attributes will be restored at + # unpickling time by iterating over slotstate and calling setattr(func, + # slotname, slotvalue) + slotstate = { + "__name__": func.__name__, + "__qualname__": func.__qualname__, + "__annotations__": func.__annotations__, + "__kwdefaults__": func.__kwdefaults__, + "__defaults__": func.__defaults__, + "__module__": func.__module__, + "__doc__": func.__doc__, + "__closure__": func.__closure__, + } + + f_globals_ref = _extract_code_globals(func.__code__) + f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__} + + if func.__closure__ is not None: + closure_values = list(map(_get_cell_contents, func.__closure__)) + else: + closure_values = () + + # Extract currently-imported submodules used by func. Storing these modules + # in a smoke _cloudpickle_subimports attribute of the object's state will + # trigger the side effect of importing these modules at unpickling time + # (which is necessary for func to work correctly once depickled) + slotstate["_cloudpickle_submodules"] = _find_imported_submodules( + func.__code__, itertools.chain(f_globals.values(), closure_values) + ) + slotstate["__globals__"] = f_globals + + state = func.__dict__ + return state, slotstate + + +def _class_getstate(obj): + clsdict = _extract_class_dict(obj) + clsdict.pop("__weakref__", None) + + if issubclass(type(obj), abc.ABCMeta): + # If obj is an instance of an ABCMeta subclass, don't pickle the + # cache/negative caches populated during isinstance/issubclass + # checks, but pickle the list of registered subclasses of obj. + clsdict.pop("_abc_cache", None) + clsdict.pop("_abc_negative_cache", None) + clsdict.pop("_abc_negative_cache_version", None) + registry = clsdict.pop("_abc_registry", None) + if registry is None: + # The abc caches and registered subclasses of a + # class are bundled into the single _abc_impl attribute + clsdict.pop("_abc_impl", None) + (registry, _, _, _) = abc._get_dump(obj) + + clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] + else: + # In the above if clause, registry is a set of weakrefs -- in + # this case, registry is a WeakSet + clsdict["_abc_impl"] = [type_ for type_ in registry] + + if "__slots__" in clsdict: + # pickle string length optimization: member descriptors of obj are + # created automatically from obj's __slots__ attribute, no need to + # save them in obj's state + if isinstance(obj.__slots__, str): + clsdict.pop(obj.__slots__) + else: + for k in obj.__slots__: + clsdict.pop(k, None) + + clsdict.pop("__dict__", None) # unpicklable property object + + return (clsdict, {}) + + +def _enum_getstate(obj): + clsdict, slotstate = _class_getstate(obj) + + members = {e.name: e.value for e in obj} + # Cleanup the clsdict that will be passed to _make_skeleton_enum: + # Those attributes are already handled by the metaclass. + for attrname in [ + "_generate_next_value_", + "_member_names_", + "_member_map_", + "_member_type_", + "_value2member_map_", + ]: + clsdict.pop(attrname, None) + for member in members: + clsdict.pop(member) + # Special handling of Enum subclasses + return clsdict, slotstate + + +# COLLECTIONS OF OBJECTS REDUCERS +# ------------------------------- +# A reducer is a function taking a single argument (obj), and that returns a +# tuple with all the necessary data to re-construct obj. 
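+ # For illustration (an editorial sketch built around a hypothetical Point
+ # class used only here), a reducer returns a callable plus the arguments
+ # needed to rebuild an equivalent object:
+ # >>> class Point:
+ # ...     def __init__(self, x, y):
+ # ...         self.x, self.y = x, y
+ # >>> def _point_reduce(obj):
+ # ...     return Point, (obj.x, obj.y)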
Apart from a few +# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to +# correctly pickle an object. +# While many built-in objects (Exceptions objects, instances of the "object" +# class, etc), are shipped with their own built-in reducer (invoked using +# obj.__reduce__), some do not. The following methods were created to "fill +# these holes". + + +def _code_reduce(obj): + """code object reducer.""" + # If you are not sure about the order of arguments, take a look at help + # of the specific type from types, for example: + # >>> from types import CodeType + # >>> help(CodeType) + if hasattr(obj, "co_exceptiontable"): + # Python 3.11 and later: there are some new attributes + # related to the enhanced exceptions. + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_qualname, + obj.co_firstlineno, + obj.co_linetable, + obj.co_exceptiontable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_linetable"): + # Python 3.10 and later: obj.co_lnotab is deprecated and constructor + # expects obj.co_linetable instead. + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_linetable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_nmeta"): # pragma: no cover + # "nogil" Python: modified attributes from 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_framesize, + obj.co_ndefaultargs, + obj.co_nmeta, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_lnotab, + obj.co_exc_handlers, + obj.co_jump_table, + obj.co_freevars, + obj.co_cellvars, + obj.co_free2reg, + obj.co_cell2reg, + ) + else: + # Backward compat for 3.8 and 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + return types.CodeType, args + + +def _cell_reduce(obj): + """Cell (containing values of a function's free variables) reducer.""" + try: + obj.cell_contents + except ValueError: # cell is empty + return _make_empty_cell, () + else: + return _make_cell, (obj.cell_contents,) + + +def _classmethod_reduce(obj): + orig_func = obj.__func__ + return type(obj), (orig_func,) + + +def _file_reduce(obj): + """Save a file.""" + import io + + if not hasattr(obj, "name") or not hasattr(obj, "mode"): + raise pickle.PicklingError( + "Cannot pickle files that do not map to an actual file" + ) + if obj is sys.stdout: + return getattr, (sys, "stdout") + if obj is sys.stderr: + return getattr, (sys, "stderr") + if obj is sys.stdin: + raise pickle.PicklingError("Cannot pickle standard input") + if obj.closed: + raise pickle.PicklingError("Cannot pickle closed files") + if hasattr(obj, "isatty") and obj.isatty(): + raise pickle.PicklingError("Cannot pickle files that map to tty objects") + if "r" not in obj.mode and "+" not in obj.mode: + raise pickle.PicklingError( + "Cannot pickle files that are not 
opened for reading: %s" % obj.mode + ) + + name = obj.name + + retval = io.StringIO() + + try: + # Read the whole file + curloc = obj.tell() + obj.seek(0) + contents = obj.read() + obj.seek(curloc) + except OSError as e: + raise pickle.PicklingError( + "Cannot pickle file %s as it cannot be read" % name + ) from e + retval.write(contents) + retval.seek(curloc) + + retval.name = name + return _file_reconstructor, (retval,) + + +def _getset_descriptor_reduce(obj): + return getattr, (obj.__objclass__, obj.__name__) + + +def _mappingproxy_reduce(obj): + return types.MappingProxyType, (dict(obj),) + + +def _memoryview_reduce(obj): + return bytes, (obj.tobytes(),) + + +def _module_reduce(obj): + if _should_pickle_by_reference(obj): + return subimport, (obj.__name__,) + else: + # Some external libraries can populate the "__builtins__" entry of a + # module's `__dict__` with unpicklable objects (see #316). For that + # reason, we do not attempt to pickle the "__builtins__" entry, and + # restore a default value for it at unpickling time. + state = obj.__dict__.copy() + state.pop("__builtins__", None) + return dynamic_subimport, (obj.__name__, state) + + +def _method_reduce(obj): + return (types.MethodType, (obj.__func__, obj.__self__)) + + +def _logger_reduce(obj): + return logging.getLogger, (obj.name,) + + +def _root_logger_reduce(obj): + return logging.getLogger, () + + +def _property_reduce(obj): + return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__) + + +def _weakset_reduce(obj): + return weakref.WeakSet, (list(obj),) + + +def _dynamic_class_reduce(obj): + """Save a class that can't be referenced as a module attribute. + + This method is used to serialize classes that are defined inside + functions, or that otherwise can't be serialized as attribute lookups + from importable modules. 
+ """ + if Enum is not None and issubclass(obj, Enum): + return ( + _make_skeleton_enum, + _enum_getnewargs(obj), + _enum_getstate(obj), + None, + None, + _class_setstate, + ) + else: + return ( + _make_skeleton_class, + _class_getnewargs(obj), + _class_getstate(obj), + None, + None, + _class_setstate, + ) + + +def _class_reduce(obj): + """Select the reducer depending on the dynamic nature of the class obj.""" + if obj is type(None): # noqa + return type, (None,) + elif obj is type(Ellipsis): + return type, (Ellipsis,) + elif obj is type(NotImplemented): + return type, (NotImplemented,) + elif obj in _BUILTIN_TYPE_NAMES: + return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],) + elif not _should_pickle_by_reference(obj): + return _dynamic_class_reduce(obj) + return NotImplemented + + +def _dict_keys_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_keys, (list(obj),) + + +def _dict_values_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_values, (list(obj),) + + +def _dict_items_reduce(obj): + return _make_dict_items, (dict(obj),) + + +def _odict_keys_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_keys, (list(obj), True) + + +def _odict_values_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_values, (list(obj), True) + + +def _odict_items_reduce(obj): + return _make_dict_items, (dict(obj), True) + + +def _dataclass_field_base_reduce(obj): + return _get_dataclass_field_type_sentinel, (obj.name,) + + +# COLLECTIONS OF OBJECTS STATE SETTERS +# ------------------------------------ +# state setters are called at unpickling time, once the object is created and +# it has to be updated to how it was at unpickling time. + + +def _function_setstate(obj, state): + """Update the state of a dynamic function. + + As __closure__ and __globals__ are readonly attributes of a function, we + cannot rely on the native setstate routine of pickle.load_build, that calls + setattr on items of the slotstate. Instead, we have to modify them inplace. + """ + state, slotstate = state + obj.__dict__.update(state) + + obj_globals = slotstate.pop("__globals__") + obj_closure = slotstate.pop("__closure__") + # _cloudpickle_subimports is a set of submodules that must be loaded for + # the pickled function to work correctly at unpickling time. 
Now that these + # submodules are depickled (hence imported), they can be removed from the + # object's state (the object state only served as a reference holder to + # these submodules) + slotstate.pop("_cloudpickle_submodules") + + obj.__globals__.update(obj_globals) + obj.__globals__["__builtins__"] = __builtins__ + + if obj_closure is not None: + for i, cell in enumerate(obj_closure): + try: + value = cell.cell_contents + except ValueError: # cell is empty + continue + obj.__closure__[i].cell_contents = value + + for k, v in slotstate.items(): + setattr(obj, k, v) + + +def _class_setstate(obj, state): + state, slotstate = state + registry = None + for attrname, attr in state.items(): + if attrname == "_abc_impl": + registry = attr + else: + setattr(obj, attrname, attr) + if registry is not None: + for subclass in registry: + obj.register(subclass) + + return obj + + +# COLLECTION OF DATACLASS UTILITIES +# --------------------------------- +# There are some internal sentinel values whose identity must be preserved when +# unpickling dataclass fields. Each sentinel value has a unique name that we can +# use to retrieve its identity at unpickling time. + + +_DATACLASSE_FIELD_TYPE_SENTINELS = { + dataclasses._FIELD.name: dataclasses._FIELD, + dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR, + dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR, +} + + +def _get_dataclass_field_type_sentinel(name): + return _DATACLASSE_FIELD_TYPE_SENTINELS[name] + + +class Pickler(pickle.Pickler): + # set of reducers defined and used by cloudpickle (private) + _dispatch_table = {} + _dispatch_table[classmethod] = _classmethod_reduce + _dispatch_table[io.TextIOWrapper] = _file_reduce + _dispatch_table[logging.Logger] = _logger_reduce + _dispatch_table[logging.RootLogger] = _root_logger_reduce + _dispatch_table[memoryview] = _memoryview_reduce + _dispatch_table[property] = _property_reduce + _dispatch_table[staticmethod] = _classmethod_reduce + _dispatch_table[CellType] = _cell_reduce + _dispatch_table[types.CodeType] = _code_reduce + _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce + _dispatch_table[types.ModuleType] = _module_reduce + _dispatch_table[types.MethodType] = _method_reduce + _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce + _dispatch_table[weakref.WeakSet] = _weakset_reduce + _dispatch_table[typing.TypeVar] = _typevar_reduce + _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce + _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce + _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce + _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce + _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce + _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce + _dispatch_table[abc.abstractmethod] = _classmethod_reduce + _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce + _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce + _dispatch_table[abc.abstractproperty] = _property_reduce + _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce + + dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table) + + # function reducers are defined as instance methods of cloudpickle.Pickler + # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref) + def _dynamic_function_reduce(self, func): + """Reduce a function that is not pickleable via attribute lookup.""" + newargs = self._function_getnewargs(func) + state = 
_function_getstate(func) + return (_make_function, newargs, state, None, None, _function_setstate) + + def _function_reduce(self, obj): + """Reducer for function objects. + + If obj is a top-level attribute of a file-backed module, this reducer + returns NotImplemented, making the cloudpickle.Pickler fall back to + traditional pickle.Pickler routines to save obj. Otherwise, it reduces + obj using a custom cloudpickle reducer designed specifically to handle + dynamic functions. + """ + if _should_pickle_by_reference(obj): + return NotImplemented + else: + return self._dynamic_function_reduce(obj) + + def _function_getnewargs(self, func): + code = func.__code__ + + # base_globals represents the future global namespace of func at + # unpickling time. Looking it up and storing it in + # cloudpickle.Pickler.globals_ref allow functions sharing the same + # globals at pickling time to also share them once unpickled, at one + # condition: since globals_ref is an attribute of a cloudpickle.Pickler + # instance, and that a new cloudpickle.Pickler is created each time + # cloudpickle.dump or cloudpickle.dumps is called, functions also need + # to be saved within the same invocation of + # cloudpickle.dump/cloudpickle.dumps (for example: + # cloudpickle.dumps([f1, f2])). There is no such limitation when using + # cloudpickle.Pickler.dump, as long as the multiple invocations are + # bound to the same cloudpickle.Pickler instance. + base_globals = self.globals_ref.setdefault(id(func.__globals__), {}) + + if base_globals == {}: + # Add module attributes used to resolve relative imports + # instructions inside func. + for k in ["__package__", "__name__", "__path__", "__file__"]: + if k in func.__globals__: + base_globals[k] = func.__globals__[k] + + # Do not bind the free variables before the function is created to + # avoid infinite recursion. + if func.__closure__ is None: + closure = None + else: + closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars))) + + return code, base_globals, None, None, closure + + def dump(self, obj): + try: + return super().dump(obj) + except RuntimeError as e: + if len(e.args) > 0 and "recursion" in e.args[0]: + msg = "Could not pickle object as excessively deep recursion required." + raise pickle.PicklingError(msg) from e + else: + raise + + def __init__(self, file, protocol=None, buffer_callback=None): + if protocol is None: + protocol = DEFAULT_PROTOCOL + super().__init__(file, protocol=protocol, buffer_callback=buffer_callback) + # map functions __globals__ attribute ids, to ensure that functions + # sharing the same global namespace at pickling time also share + # their global namespace at unpickling time. + self.globals_ref = {} + self.proto = int(protocol) + + if not PYPY: + # pickle.Pickler is the C implementation of the CPython pickler and + # therefore we rely on reduce_override method to customize the pickler + # behavior. + + # `cloudpickle.Pickler.dispatch` is only left for backward + # compatibility - note that when using protocol 5, + # `cloudpickle.Pickler.dispatch` is not an extension of + # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler` + # subclasses the C-implemented `pickle.Pickler`, which does not expose + # a `dispatch` attribute. 
Earlier versions of `cloudpickle.Pickler` + # used `cloudpickle.Pickler.dispatch` as a class-level attribute + # storing all reducers implemented by cloudpickle, but the attribute + # name was not a great choice given because it would collide with a + # similarly named attribute in the pure-Python `pickle._Pickler` + # implementation in the standard library. + dispatch = dispatch_table + + # Implementation of the reducer_override callback, in order to + # efficiently serialize dynamic functions and classes by subclassing + # the C-implemented `pickle.Pickler`. + # TODO: decorrelate reducer_override (which is tied to CPython's + # implementation - would it make sense to backport it to pypy? - and + # pickle's protocol 5 which is implementation agnostic. Currently, the + # availability of both notions coincide on CPython's pickle, but it may + # not be the case anymore when pypy implements protocol 5. + + def reducer_override(self, obj): + """Type-agnostic reducing callback for function and classes. + + For performance reasons, subclasses of the C `pickle.Pickler` class + cannot register custom reducers for functions and classes in the + dispatch_table attribute. Reducers for such types must instead + implemented via the special `reducer_override` method. + + Note that this method will be called for any object except a few + builtin-types (int, lists, dicts etc.), which differs from reducers + in the Pickler's dispatch_table, each of them being invoked for + objects of a specific type only. + + This property comes in handy for classes: although most classes are + instances of the ``type`` metaclass, some of them can be instances + of other custom metaclasses (such as enum.EnumMeta for example). In + particular, the metaclass will likely not be known in advance, and + thus cannot be special-cased using an entry in the dispatch_table. + reducer_override, among other things, allows us to register a + reducer that will be called for any class, independently of its + type. + + Notes: + + * reducer_override has the priority over dispatch_table-registered + reducers. + * reducer_override can be used to fix other limitations of + cloudpickle for other types that suffered from type-specific + reducers, such as Exceptions. See + https://github.com/cloudpipe/cloudpickle/issues/248 + """ + t = type(obj) + try: + is_anyclass = issubclass(t, type) + except TypeError: # t is not a class (old Boost; see SF #502085) + is_anyclass = False + + if is_anyclass: + return _class_reduce(obj) + elif isinstance(obj, types.FunctionType): + return self._function_reduce(obj) + else: + # fallback to save_global, including the Pickler's + # dispatch_table + return NotImplemented + + else: + # When reducer_override is not available, hack the pure-Python + # Pickler's types.FunctionType and type savers. Note: the type saver + # must override Pickler.save_global, because pickle.py contains a + # hard-coded call to save_global when pickling meta-classes. + dispatch = pickle.Pickler.dispatch.copy() + + def _save_reduce_pickle5( + self, + func, + args, + state=None, + listitems=None, + dictitems=None, + state_setter=None, + obj=None, + ): + save = self.save + write = self.write + self.save_reduce( + func, + args, + state=None, + listitems=listitems, + dictitems=dictitems, + obj=obj, + ) + # backport of the Python 3.8 state_setter pickle operations + save(state_setter) + save(obj) # simple BINGET opcode as obj is already memoized. + save(state) + write(pickle.TUPLE2) + # Trigger a state_setter(obj, state) function call. 
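+ # At this point the pickle stack conceptually holds
+ # ``state_setter, (obj, state)`` (the TUPLE2 above grouped obj and state),
+ # so the REDUCE opcode emitted below calls ``state_setter(obj, state)`` and
+ # leaves its return value on the stack for the POP that follows.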
+ write(pickle.REDUCE) + # The purpose of state_setter is to carry-out an + # inplace modification of obj. We do not care about what the + # method might return, so its output is eventually removed from + # the stack. + write(pickle.POP) + + def save_global(self, obj, name=None, pack=struct.pack): + """Main dispatch method. + + The name of this method is somewhat misleading: all types get + dispatched here. + """ + if obj is type(None): # noqa + return self.save_reduce(type, (None,), obj=obj) + elif obj is type(Ellipsis): + return self.save_reduce(type, (Ellipsis,), obj=obj) + elif obj is type(NotImplemented): + return self.save_reduce(type, (NotImplemented,), obj=obj) + elif obj in _BUILTIN_TYPE_NAMES: + return self.save_reduce( + _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj + ) + + if name is not None: + super().save_global(obj, name=name) + elif not _should_pickle_by_reference(obj, name=name): + self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj) + else: + super().save_global(obj, name=name) + + dispatch[type] = save_global + + def save_function(self, obj, name=None): + """Registered with the dispatch to handle all function types. + + Determines what kind of function obj is (e.g. lambda, defined at + interactive prompt, etc) and handles the pickling appropriately. + """ + if _should_pickle_by_reference(obj, name=name): + return super().save_global(obj, name=name) + elif PYPY and isinstance(obj.__code__, builtin_code_type): + return self.save_pypy_builtin_func(obj) + else: + return self._save_reduce_pickle5( + *self._dynamic_function_reduce(obj), obj=obj + ) + + def save_pypy_builtin_func(self, obj): + """Save pypy equivalent of builtin functions. + + PyPy does not have the concept of builtin-functions. Instead, + builtin-functions are simple function instances, but with a + builtin-code attribute. + Most of the time, builtin functions should be pickled by attribute. + But PyPy has flaky support for __qualname__, so some builtin + functions such as float.__new__ will be classified as dynamic. For + this reason only, we created this special routine. Because + builtin-functions are not expected to have closure or globals, + there is no additional hack (compared the one already implemented + in pickle) to protect ourselves from reference cycles. A simple + (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note + also that PyPy improved their support for __qualname__ in v3.6, so + this routing should be removed when cloudpickle supports only PyPy + 3.6 and later. + """ + rv = ( + types.FunctionType, + (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__), + obj.__dict__, + ) + self.save_reduce(*rv, obj=obj) + + dispatch[types.FunctionType] = save_function + + +# Shorthands similar to pickle.dump/pickle.dumps + + +def dump(obj, file, protocol=None, buffer_callback=None): + """Serialize obj as bytes streamed into file + + protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to + pickle.HIGHEST_PROTOCOL. This setting favors maximum communication + speed between processes running the same Python version. + + Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure + compatibility with older versions of Python (although this is not always + guaranteed to work because cloudpickle relies on some internal + implementation details that can change from one Python version to the + next). 
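+
+ A minimal usage sketch (the file name below is illustrative)::
+
+ >>> with open("payload.pkl", "wb") as f: # doctest: +SKIP
+ ... dump(lambda x: x + 1, f)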
+ """ + Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj) + + +def dumps(obj, protocol=None, buffer_callback=None): + """Serialize obj as a string of bytes allocated in memory + + protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to + pickle.HIGHEST_PROTOCOL. This setting favors maximum communication + speed between processes running the same Python version. + + Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure + compatibility with older versions of Python (although this is not always + guaranteed to work because cloudpickle relies on some internal + implementation details that can change from one Python version to the + next). + """ + with io.BytesIO() as file: + cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback) + cp.dump(obj) + return file.getvalue() + + +# Include pickles unloading functions in this namespace for convenience. +load, loads = pickle.load, pickle.loads + +# Backward compat alias. +CloudPickler = Pickler diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..52d6732e44ebcc0053b24969943f7c3b742268bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py @@ -0,0 +1,13 @@ +"""Compatibility module. + +It can be necessary to load files generated by previous versions of cloudpickle +that rely on symbols being defined under the `cloudpickle.cloudpickle_fast` +namespace. + +See: tests/test_backward_compat.py +""" +from . import cloudpickle + + +def __getattr__(name): + return getattr(cloudpickle, name) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e312207ed81b87ca26d3c33fe1747e1aa2fc430 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fffbf70d89481ddbab1f40e925adfc51877aaf38 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aafd7f5a8b681c6d0b7e97d6d06b566ad8f3420c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab4e03b81e7c6cdaaffa930ba46085d80d45f1e2 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d339aa644599cf5728394200abdfa19a1256aa02 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py @@ -0,0 +1,14 @@ +import os +from multiprocessing import synchronize + +from .context import get_context + + +def _make_name(): + return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}" + + +# monkey patch the name creation for multiprocessing +synchronize.SemLock._make_name = staticmethod(_make_name) + +__all__ = ["get_context"] diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa394397a40c0f913a924c8d3a4c0e9f39a17b46 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..603653d29c7170f09c0d07903e115fd77a23aa56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71bde06f72250352ea1096ba603fb7284283d06b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4ab4682b6f1db6cf4331e0a3df79f080c6b6f31 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..4b800ec07ff26af38174097a194e24413bf6fc2d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py @@ -0,0 +1,67 @@ +############################################################################### +# Extra reducers for Unix based system and connections objects +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/reduction.py (17/02/2017) +# * Add adapted reduction for 
LokyProcesses and socket/Connection +# +import os +import socket +import _socket +from multiprocessing.connection import Connection +from multiprocessing.context import get_spawning_popen + +from .reduction import register + +HAVE_SEND_HANDLE = ( + hasattr(socket, "CMSG_LEN") + and hasattr(socket, "SCM_RIGHTS") + and hasattr(socket.socket, "sendmsg") +) + + +def _mk_inheritable(fd): + os.set_inheritable(fd, True) + return fd + + +def DupFd(fd): + """Return a wrapper for an fd.""" + popen_obj = get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from multiprocessing import resource_sharer + + return resource_sharer.DupFd(fd) + else: + raise TypeError( + "Cannot pickle connection object. This object can only be " + "passed when spawning a new process" + ) + + +def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + + +def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.fromfd(fd, family, type, proto) + + +def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + + +def reduce_connection(conn): + df = DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + + +register(socket.socket, _reduce_socket) +register(_socket.socket, _reduce_socket) +register(Connection, reduce_connection) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..506d0ecba7c8951ddeaa05b48eb1bdadc8d5ff46 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py @@ -0,0 +1,18 @@ +############################################################################### +# Extra reducers for Windows system and connections objects +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/reduction.py (17/02/2017) +# * Add adapted reduction for LokyProcesses and socket/PipeConnection +# +import socket +from multiprocessing import connection +from multiprocessing.reduction import _reduce_socket + +from .reduction import register + +# register reduction for win32 communication objects +register(socket.socket, _reduce_socket) +register(connection.Connection, connection.reduce_connection) +register(connection.PipeConnection, connection.reduce_pipe_connection) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py new file mode 100644 index 0000000000000000000000000000000000000000..d0f590317e75752fdd0b4962b9f3ecbbbaf50b37 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py @@ -0,0 +1,378 @@ +############################################################################### +# Basic context management with LokyContext +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/context.py +# * Create a context ensuring loky uses only objects that are compatible +# * Add LokyContext to the list of context of multiprocessing so loky can be +# used with multiprocessing.set_start_method +# * Implement a CFS-aware amd physical-core aware cpu_count function. 
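+#
+# A typical call, for illustration (editorial sketch; the import path matches
+# the copy vendored inside joblib):
+# >>> from joblib.externals.loky.backend.context import cpu_count
+# >>> cpu_count() # logical CPUs usable by the current process
+# >>> cpu_count(only_physical_cores=True) # physical cores, when detectable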
+# +import os +import sys +import math +import subprocess +import traceback +import warnings +import multiprocessing as mp +from multiprocessing import get_context as mp_get_context +from multiprocessing.context import BaseContext + + +from .process import LokyProcess, LokyInitMainProcess + +# Apparently, on older Python versions, loky cannot work 61 workers on Windows +# but instead 60: ¯\_(ツ)_/¯ +if sys.version_info >= (3, 8): + from concurrent.futures.process import _MAX_WINDOWS_WORKERS + + if sys.version_info < (3, 10): + _MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1 +else: + # compat for versions before 3.8 which do not define this. + _MAX_WINDOWS_WORKERS = 60 + +START_METHODS = ["loky", "loky_init_main", "spawn"] +if sys.platform != "win32": + START_METHODS += ["fork", "forkserver"] + +_DEFAULT_START_METHOD = None + +# Cache for the number of physical cores to avoid repeating subprocess calls. +# It should not change during the lifetime of the program. +physical_cores_cache = None + + +def get_context(method=None): + # Try to overload the default context + method = method or _DEFAULT_START_METHOD or "loky" + if method == "fork": + # If 'fork' is explicitly requested, warn user about potential issues. + warnings.warn( + "`fork` start method should not be used with " + "`loky` as it does not respect POSIX. Try using " + "`spawn` or `loky` instead.", + UserWarning, + ) + try: + return mp_get_context(method) + except ValueError: + raise ValueError( + f"Unknown context '{method}'. Value should be in " + f"{START_METHODS}." + ) + + +def set_start_method(method, force=False): + global _DEFAULT_START_METHOD + if _DEFAULT_START_METHOD is not None and not force: + raise RuntimeError("context has already been set") + assert method is None or method in START_METHODS, ( + f"'{method}' is not a valid start_method. It should be in " + f"{START_METHODS}" + ) + + _DEFAULT_START_METHOD = method + + +def get_start_method(): + return _DEFAULT_START_METHOD + + +def cpu_count(only_physical_cores=False): + """Return the number of CPUs the current process can use. + + The returned number of CPUs accounts for: + * the number of CPUs in the system, as given by + ``multiprocessing.cpu_count``; + * the CPU affinity settings of the current process + (available on some Unix systems); + * Cgroup CPU bandwidth limit (available on Linux only, typically + set by docker and similar container orchestration systems); + * the value of the LOKY_MAX_CPU_COUNT environment variable if defined. + and is given as the minimum of these constraints. + + If ``only_physical_cores`` is True, return the number of physical cores + instead of the number of logical cores (hyperthreading / SMT). Note that + this option is not enforced if the number of usable cores is controlled in + any other way such as: process affinity, Cgroup restricted CPU bandwidth + or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical + cores is not found, return the number of logical cores. + + Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for + Python < 3.10), see: + https://bugs.python.org/issue26903. + + It is also always larger or equal to 1. + """ + # Note: os.cpu_count() is allowed to return None in its docstring + os_cpu_count = os.cpu_count() or 1 + if sys.platform == "win32": + # On Windows, attempting to use more than 61 CPUs would result in a + # OS-level error. See https://bugs.python.org/issue26903. 
According to + # https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups + # it might be possible to go beyond with a lot of extra work but this + # does not look easy. + os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS) + + cpu_count_user = _cpu_count_user(os_cpu_count) + aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1) + + if not only_physical_cores: + return aggregate_cpu_count + + if cpu_count_user < os_cpu_count: + # Respect user setting + return max(cpu_count_user, 1) + + cpu_count_physical, exception = _count_physical_cores() + if cpu_count_physical != "not found": + return cpu_count_physical + + # Fallback to default behavior + if exception is not None: + # warns only the first time + warnings.warn( + "Could not find the number of physical cores for the " + f"following reason:\n{exception}\n" + "Returning the number of logical cores instead. You can " + "silence this warning by setting LOKY_MAX_CPU_COUNT to " + "the number of cores you want to use." + ) + traceback.print_tb(exception.__traceback__) + + return aggregate_cpu_count + + +def _cpu_count_cgroup(os_cpu_count): + # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel + cpu_max_fname = "/sys/fs/cgroup/cpu.max" + cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + if os.path.exists(cpu_max_fname): + # cgroup v2 + # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html + with open(cpu_max_fname) as fh: + cpu_quota_us, cpu_period_us = fh.read().strip().split() + elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname): + # cgroup v1 + # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management + with open(cfs_quota_fname) as fh: + cpu_quota_us = fh.read().strip() + with open(cfs_period_fname) as fh: + cpu_period_us = fh.read().strip() + else: + # No Cgroup CPU bandwidth limit (e.g. non-Linux platform) + cpu_quota_us = "max" + cpu_period_us = 100_000 # unused, for consistency with default values + + if cpu_quota_us == "max": + # No active Cgroup quota on a Cgroup-capable platform + return os_cpu_count + else: + cpu_quota_us = int(cpu_quota_us) + cpu_period_us = int(cpu_period_us) + if cpu_quota_us > 0 and cpu_period_us > 0: + return math.ceil(cpu_quota_us / cpu_period_us) + else: # pragma: no cover + # Setting a negative cpu_quota_us value is a valid way to disable + # cgroup CPU bandwith limits + return os_cpu_count + + +def _cpu_count_affinity(os_cpu_count): + # Number of available CPUs given affinity settings + if hasattr(os, "sched_getaffinity"): + try: + return len(os.sched_getaffinity(0)) + except NotImplementedError: + pass + + # On PyPy and possibly other platforms, os.sched_getaffinity does not exist + # or raises NotImplementedError, let's try with the psutil if installed. + try: + import psutil + + p = psutil.Process() + if hasattr(p, "cpu_affinity"): + return len(p.cpu_affinity()) + + except ImportError: # pragma: no cover + if ( + sys.platform == "linux" + and os.environ.get("LOKY_MAX_CPU_COUNT") is None + ): + # PyPy does not implement os.sched_getaffinity on Linux which + # can cause severe oversubscription problems. Better warn the + # user in this particularly pathological case which can wreck + # havoc, typically on CI workers. + warnings.warn( + "Failed to inspect CPU affinity constraints on this system. " + "Please install psutil or explictly set LOKY_MAX_CPU_COUNT." 
+ ) + + # This can happen for platforms that do not implement any kind of CPU + # infinity such as macOS-based platforms. + return os_cpu_count + + +def _cpu_count_user(os_cpu_count): + """Number of user defined available CPUs""" + cpu_count_affinity = _cpu_count_affinity(os_cpu_count) + + cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count) + + # User defined soft-limit passed as a loky specific environment variable. + cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count)) + + return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky) + + +def _count_physical_cores(): + """Return a tuple (number of physical cores, exception) + + If the number of physical cores is found, exception is set to None. + If it has not been found, return ("not found", exception). + + The number of physical cores is cached to avoid repeating subprocess calls. + """ + exception = None + + # First check if the value is cached + global physical_cores_cache + if physical_cores_cache is not None: + return physical_cores_cache, exception + + # Not cached yet, find it + try: + if sys.platform == "linux": + cpu_info = subprocess.run( + "lscpu --parse=core".split(), capture_output=True, text=True + ) + cpu_info = cpu_info.stdout.splitlines() + cpu_info = {line for line in cpu_info if not line.startswith("#")} + cpu_count_physical = len(cpu_info) + elif sys.platform == "win32": + cpu_info = subprocess.run( + "wmic CPU Get NumberOfCores /Format:csv".split(), + capture_output=True, + text=True, + ) + cpu_info = cpu_info.stdout.splitlines() + cpu_info = [ + l.split(",")[1] + for l in cpu_info + if (l and l != "Node,NumberOfCores") + ] + cpu_count_physical = sum(map(int, cpu_info)) + elif sys.platform == "darwin": + cpu_info = subprocess.run( + "sysctl -n hw.physicalcpu".split(), + capture_output=True, + text=True, + ) + cpu_info = cpu_info.stdout + cpu_count_physical = int(cpu_info) + else: + raise NotImplementedError(f"unsupported platform: {sys.platform}") + + # if cpu_count_physical < 1, we did not find a valid value + if cpu_count_physical < 1: + raise ValueError(f"found {cpu_count_physical} physical cores < 1") + + except Exception as e: + exception = e + cpu_count_physical = "not found" + + # Put the result in cache + physical_cores_cache = cpu_count_physical + + return cpu_count_physical, exception + + +class LokyContext(BaseContext): + """Context relying on the LokyProcess.""" + + _name = "loky" + Process = LokyProcess + cpu_count = staticmethod(cpu_count) + + def Queue(self, maxsize=0, reducers=None): + """Returns a queue object""" + from .queues import Queue + + return Queue(maxsize, reducers=reducers, ctx=self.get_context()) + + def SimpleQueue(self, reducers=None): + """Returns a queue object""" + from .queues import SimpleQueue + + return SimpleQueue(reducers=reducers, ctx=self.get_context()) + + if sys.platform != "win32": + """For Unix platform, use our custom implementation of synchronize + ensuring that we use the loky.backend.resource_tracker to clean-up + the semaphores in case of a worker crash. 
+ """ + + def Semaphore(self, value=1): + """Returns a semaphore object""" + from .synchronize import Semaphore + + return Semaphore(value=value) + + def BoundedSemaphore(self, value): + """Returns a bounded semaphore object""" + from .synchronize import BoundedSemaphore + + return BoundedSemaphore(value) + + def Lock(self): + """Returns a lock object""" + from .synchronize import Lock + + return Lock() + + def RLock(self): + """Returns a recurrent lock object""" + from .synchronize import RLock + + return RLock() + + def Condition(self, lock=None): + """Returns a condition object""" + from .synchronize import Condition + + return Condition(lock) + + def Event(self): + """Returns an event object""" + from .synchronize import Event + + return Event() + + +class LokyInitMainContext(LokyContext): + """Extra context with LokyProcess, which does load the main module + + This context is used for compatibility in the case ``cloudpickle`` is not + present on the running system. This permits to load functions defined in + the ``main`` module, using proper safeguards. The declaration of the + ``executor`` should be protected by ``if __name__ == "__main__":`` and the + functions and variable used from main should be out of this block. + + This mimics the default behavior of multiprocessing under Windows and the + behavior of the ``spawn`` start method on a posix system. + For more details, see the end of the following section of python doc + https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming + """ + + _name = "loky_init_main" + Process = LokyInitMainProcess + + +# Register loky context so it works with multiprocessing.get_context +ctx_loky = LokyContext() +mp.context._concrete_contexts["loky"] = ctx_loky +mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext() diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py new file mode 100644 index 0000000000000000000000000000000000000000..2353c42f51a6e6c558ce70e35e1b7405e22d70ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py @@ -0,0 +1,43 @@ +############################################################################### +# Launch a subprocess using forkexec and make sure only the needed fd are +# shared in the two process. 
+# +# author: Thomas Moreau and Olivier Grisel +# +import os +import sys + + +def close_fds(keep_fds): # pragma: no cover + """Close all the file descriptors except those in keep_fds.""" + + # Make sure to keep stdout and stderr open for logging purpose + keep_fds = {*keep_fds, 1, 2} + + # We try to retrieve all the open fds + try: + open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")} + except FileNotFoundError: + import resource + + max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0] + open_fds = {*range(max_nfds)} + + for i in open_fds - keep_fds: + try: + os.close(i) + except OSError: + pass + + +def fork_exec(cmd, keep_fds, env=None): + # copy the environment variables to set in the child process + env = env or {} + child_env = {**os.environ, **env} + + pid = os.fork() + if pid == 0: # pragma: no cover + close_fds(keep_fds) + os.execve(sys.executable, cmd, child_env) + else: + return pid diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py new file mode 100644 index 0000000000000000000000000000000000000000..74395be0757f0a07ef92a7b0efe1e1ea4ecdac77 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py @@ -0,0 +1,193 @@ +############################################################################### +# Popen for LokyProcess. +# +# author: Thomas Moreau and Olivier Grisel +# +import os +import sys +import signal +import pickle +from io import BytesIO +from multiprocessing import util, process +from multiprocessing.connection import wait +from multiprocessing.context import set_spawning_popen + +from . import reduction, resource_tracker, spawn + + +__all__ = ["Popen"] + + +# +# Wrapper for an fd used while launching a process +# + + +class _DupFd: + def __init__(self, fd): + self.fd = reduction._mk_inheritable(fd) + + def detach(self): + return self.fd + + +# +# Start child process using subprocess.Popen +# + + +class Popen: + method = "loky" + DupFd = _DupFd + + def __init__(self, process_obj): + sys.stdout.flush() + sys.stderr.flush() + self.returncode = None + self._fds = [] + self._launch(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return reduction._mk_inheritable(fd) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + while True: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. See #1731717 + # e.errno == errno.ECHILD == 10 + return None + else: + break + if pid == self.pid: + if os.WIFSIGNALED(sts): + self.returncode = -os.WTERMSIG(sts) + else: + assert os.WIFEXITED(sts) + self.returncode = os.WEXITSTATUS(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. 
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def terminate(self): + if self.returncode is None: + try: + os.kill(self.pid, signal.SIGTERM) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def _launch(self, process_obj): + + tracker_fd = resource_tracker._resource_tracker.getfd() + + fp = BytesIO() + set_spawning_popen(self) + try: + prep_data = spawn.get_preparation_data( + process_obj._name, + getattr(process_obj, "init_main_module", True), + ) + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + + finally: + set_spawning_popen(None) + + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + # for fd in self._fds: + # _mk_inheritable(fd) + + cmd_python = [sys.executable] + cmd_python += ["-m", self.__module__] + cmd_python += ["--process-name", str(process_obj.name)] + cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))] + reduction._mk_inheritable(child_w) + reduction._mk_inheritable(tracker_fd) + self._fds += [child_r, child_w, tracker_fd] + if sys.version_info >= (3, 8) and os.name == "posix": + mp_tracker_fd = prep_data["mp_tracker_args"]["fd"] + self.duplicate_for_child(mp_tracker_fd) + + from .fork_exec import fork_exec + + pid = fork_exec(cmd_python, self._fds, env=process_obj.env) + util.debug( + f"launched python with pid {pid} and cmd:\n{cmd_python}" + ) + self.sentinel = parent_r + + method = "getbuffer" + if not hasattr(fp, method): + method = "getvalue" + with os.fdopen(parent_w, "wb") as f: + f.write(getattr(fp, method)()) + self.pid = pid + finally: + if parent_r is not None: + util.Finalize(self, os.close, (parent_r,)) + for fd in (child_r, child_w): + if fd is not None: + os.close(fd) + + @staticmethod + def thread_is_spawning(): + return True + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser("Command line parser") + parser.add_argument( + "--pipe", type=int, required=True, help="File handle for the pipe" + ) + parser.add_argument( + "--process-name", + type=str, + default=None, + help="Identifier for debugging purpose", + ) + + args = parser.parse_args() + + info = {} + exitcode = 1 + try: + with os.fdopen(args.pipe, "rb") as from_parent: + process.current_process()._inheriting = True + try: + prep_data = pickle.load(from_parent) + spawn.prepare(prep_data) + process_obj = pickle.load(from_parent) + finally: + del process.current_process()._inheriting + + exitcode = process_obj._bootstrap() + except Exception: + print("\n\n" + "-" * 80) + print(f"{args.process_name} failed with traceback: ") + print("-" * 80) + import traceback + + print(traceback.format_exc()) + print("\n" + "-" * 80) + finally: + if from_parent is not None: + from_parent.close() + + sys.exit(exitcode) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py new file mode 100644 index 0000000000000000000000000000000000000000..4f85f65df5e22bc2342f44c4a59b5e2ece63a81f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py @@ -0,0 +1,173 @@ +import os +import sys +import msvcrt +import _winapi +from pickle import load +from multiprocessing import process, util +from multiprocessing.context import set_spawning_popen +from multiprocessing.popen_spawn_win32 import Popen as _Popen + +from . 
import reduction, spawn + + +__all__ = ["Popen"] + +# +# +# + + +def _path_eq(p1, p2): + return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) + + +WINENV = hasattr(sys, "_base_executable") and not _path_eq( + sys.executable, sys._base_executable +) + + +def _close_handles(*handles): + for handle in handles: + _winapi.CloseHandle(handle) + + +# +# We define a Popen class similar to the one from subprocess, but +# whose constructor takes a process object as its argument. +# + + +class Popen(_Popen): + """ + Start a subprocess to run the code of a process object. + + We differ from cpython implementation with the way we handle environment + variables, in order to be able to modify then in the child processes before + importing any library, in order to control the number of threads in C-level + threadpools. + + We also use the loky preparation data, in particular to handle main_module + inits and the loky resource tracker. + """ + + method = "loky" + + def __init__(self, process_obj): + prep_data = spawn.get_preparation_data( + process_obj._name, getattr(process_obj, "init_main_module", True) + ) + + # read end of pipe will be duplicated by the child process + # -- see spawn_main() in spawn.py. + # + # bpo-33929: Previously, the read end of pipe was "stolen" by the child + # process, but it leaked a handle if the child process had been + # terminated before it could steal the handle from the parent process. + rhandle, whandle = _winapi.CreatePipe(None, 0) + wfd = msvcrt.open_osfhandle(whandle, 0) + cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) + + python_exe = spawn.get_executable() + + # copy the environment variables to set in the child process + child_env = {**os.environ, **process_obj.env} + + # bpo-35797: When running in a venv, we bypass the redirect + # executor and launch our base Python. 
+ if WINENV and _path_eq(python_exe, sys.executable): + cmd[0] = python_exe = sys._base_executable + child_env["__PYVENV_LAUNCHER__"] = sys.executable + + cmd = " ".join(f'"{x}"' for x in cmd) + + with open(wfd, "wb") as to_child: + # start process + try: + hp, ht, pid, _ = _winapi.CreateProcess( + python_exe, + cmd, + None, + None, + False, + 0, + child_env, + None, + None, + ) + _winapi.CloseHandle(ht) + except BaseException: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + self.finalizer = util.Finalize( + self, _close_handles, (self.sentinel, int(rhandle)) + ) + + # send information to child + set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + set_spawning_popen(None) + + +def get_command_line(pipe_handle, parent_pid, **kwds): + """Returns prefix of command line used for spawning a child process.""" + if getattr(sys, "frozen", False): + return [sys.executable, "--multiprocessing-fork", pipe_handle] + else: + prog = ( + "from joblib.externals.loky.backend.popen_loky_win32 import main; " + f"main(pipe_handle={pipe_handle}, parent_pid={parent_pid})" + ) + opts = util._args_from_interpreter_flags() + return [ + spawn.get_executable(), + *opts, + "-c", + prog, + "--multiprocessing-fork", + ] + + +def is_forking(argv): + """Return whether commandline indicates we are forking.""" + if len(argv) >= 2 and argv[1] == "--multiprocessing-fork": + return True + else: + return False + + +def main(pipe_handle, parent_pid=None): + """Run code specified by data received over pipe.""" + assert is_forking(sys.argv), "Not forking" + + if parent_pid is not None: + source_process = _winapi.OpenProcess( + _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid + ) + else: + source_process = None + new_handle = reduction.duplicate( + pipe_handle, source_process=source_process + ) + fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) + parent_sentinel = source_process + + with os.fdopen(fd, "rb", closefd=True) as from_parent: + process.current_process()._inheriting = True + try: + preparation_data = load(from_parent) + spawn.prepare(preparation_data, parent_sentinel) + self = load(from_parent) + finally: + del process.current_process()._inheriting + + exitcode = self._bootstrap(parent_sentinel) + sys.exit(exitcode) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py new file mode 100644 index 0000000000000000000000000000000000000000..356255094b7647be8de6998a8752dd7807b25e10 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py @@ -0,0 +1,85 @@ +############################################################################### +# LokyProcess implementation +# +# authors: Thomas Moreau and Olivier Grisel +# +# based on multiprocessing/process.py (17/02/2017) +# +import sys +from multiprocessing.context import assert_spawning +from multiprocessing.process import BaseProcess + + +class LokyProcess(BaseProcess): + _start_method = "loky" + + def __init__( + self, + group=None, + target=None, + name=None, + args=(), + kwargs={}, + daemon=None, + init_main_module=False, + env=None, + ): + super().__init__( + group=group, + target=target, + name=name, + args=args, + kwargs=kwargs, + daemon=daemon, + ) + self.env = {} if env is None else env + self.authkey = 
self.authkey + self.init_main_module = init_main_module + + @staticmethod + def _Popen(process_obj): + if sys.platform == "win32": + from .popen_loky_win32 import Popen + else: + from .popen_loky_posix import Popen + return Popen(process_obj) + + +class LokyInitMainProcess(LokyProcess): + _start_method = "loky_init_main" + + def __init__( + self, + group=None, + target=None, + name=None, + args=(), + kwargs={}, + daemon=None, + ): + super().__init__( + group=group, + target=target, + name=name, + args=args, + kwargs=kwargs, + daemon=daemon, + init_main_module=True, + ) + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + + +class AuthenticationKey(bytes): + def __reduce__(self): + try: + assert_spawning(self) + except RuntimeError: + raise TypeError( + "Pickling an AuthenticationKey object is " + "disallowed for security reasons" + ) + return AuthenticationKey, (bytes(self),) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..5afd99b420fbc480ed5eb743333a687110a90e49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py @@ -0,0 +1,236 @@ +############################################################################### +# Queue and SimpleQueue implementation for loky +# +# authors: Thomas Moreau, Olivier Grisel +# +# based on multiprocessing/queues.py (16/02/2017) +# * Add some custom reducers for the Queues/SimpleQueue to tweak the +# pickling process. (overload Queue._feed/SimpleQueue.put) +# +import os +import sys +import errno +import weakref +import threading +from multiprocessing import util +from multiprocessing.queues import ( + Full, + Queue as mp_Queue, + SimpleQueue as mp_SimpleQueue, + _sentinel, +) +from multiprocessing.context import assert_spawning + +from .reduction import dumps + + +__all__ = ["Queue", "SimpleQueue", "Full"] + + +class Queue(mp_Queue): + def __init__(self, maxsize=0, reducers=None, ctx=None): + super().__init__(maxsize=maxsize, ctx=ctx) + self._reducers = reducers + + # Use custom queue set/get state to be able to reduce the custom reducers + def __getstate__(self): + assert_spawning(self) + return ( + self._ignore_epipe, + self._maxsize, + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + self._sem, + self._opid, + ) + + def __setstate__(self, state): + ( + self._ignore_epipe, + self._maxsize, + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + self._sem, + self._opid, + ) = state + if sys.version_info >= (3, 9): + self._reset() + else: + self._after_fork() + + # Overload _start_thread to correctly call our custom _feed + def _start_thread(self): + util.debug("Queue._start_thread()") + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=( + self._buffer, + self._notempty, + self._send_bytes, + self._wlock, + self._writer.close, + self._reducers, + self._ignore_epipe, + self._on_queue_feeder_error, + self._sem, + ), + name="QueueFeederThread", + ) + self._thread.daemon = True + + util.debug("doing self._thread.start()") + self._thread.start() + util.debug("... done self._thread.start()") + + # On process exit we will wait for data to be flushed to pipe. 
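# A short sketch of the reducers hook that this Queue/SimpleQueue pair adds
# (assuming the backend is importable as joblib.externals.loky): reducers map
# a type to a reduce callback, and every object written to the pipe is
# serialized with them; reduce_complex below is a toy example.
if __name__ == "__main__":
    from joblib.externals.loky.backend.context import get_context
    from joblib.externals.loky.backend.queues import SimpleQueue

    def reduce_complex(z):
        # ship complex numbers as a (real, imag) pair
        return complex, (z.real, z.imag)

    ctx = get_context("loky")
    q = SimpleQueue(reducers={complex: reduce_complex}, ctx=ctx)
    q.put(3 + 4j)
    print(q.get())                     # (3+4j), rebuilt from the reduced form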
+ # + # However, if this process created the queue then all + # processes which use the queue will be descendants of this + # process. Therefore waiting for the queue to be flushed + # is pointless once all the child processes have been joined. + created_by_this_process = self._opid == os.getpid() + if not self._joincancelled and not created_by_this_process: + self._jointhread = util.Finalize( + self._thread, + Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5, + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = util.Finalize( + self, + Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10, + ) + + # Overload the _feed methods to use our custom pickling strategy. + @staticmethod + def _feed( + buffer, + notempty, + send_bytes, + writelock, + close, + reducers, + ignore_epipe, + onerror, + queue_sem, + ): + util.debug("starting thread to feed data to pipe") + nacquire = notempty.acquire + nrelease = notempty.release + nwait = notempty.wait + bpopleft = buffer.popleft + sentinel = _sentinel + if sys.platform != "win32": + wacquire = writelock.acquire + wrelease = writelock.release + else: + wacquire = None + + while True: + try: + nacquire() + try: + if not buffer: + nwait() + finally: + nrelease() + try: + while True: + obj = bpopleft() + if obj is sentinel: + util.debug("feeder thread got sentinel -- exiting") + close() + return + + # serialize the data before acquiring the lock + obj_ = dumps(obj, reducers=reducers) + if wacquire is None: + send_bytes(obj_) + else: + wacquire() + try: + send_bytes(obj_) + finally: + wrelease() + # Remove references early to avoid leaking memory + del obj, obj_ + except IndexError: + pass + except BaseException as e: + if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE: + return + # Since this runs in a daemon thread the resources it uses + # may be become unusable while the process is cleaning up. + # We ignore errors which happen after the process has + # started to cleanup. + if util.is_exiting(): + util.info(f"error in queue thread: {e}") + return + else: + queue_sem.release() + onerror(e, obj) + + def _on_queue_feeder_error(self, e, obj): + """ + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. 
+ """ + import traceback + + traceback.print_exc() + + +class SimpleQueue(mp_SimpleQueue): + def __init__(self, reducers=None, ctx=None): + super().__init__(ctx=ctx) + + # Add possiblity to use custom reducers + self._reducers = reducers + + def close(self): + self._reader.close() + self._writer.close() + + # Use custom queue set/get state to be able to reduce the custom reducers + def __getstate__(self): + assert_spawning(self) + return ( + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + ) + + def __setstate__(self, state): + ( + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + ) = state + + # Overload put to use our customizable reducer + def put(self, obj): + # serialize the data before acquiring the lock + obj = dumps(obj, reducers=self._reducers) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..bed32ba9e18f7d0fccab7ead6095996d27f448e2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py @@ -0,0 +1,224 @@ +############################################################################### +# Customizable Pickler with some basic reducers +# +# author: Thomas Moreau +# +# adapted from multiprocessing/reduction.py (17/02/2017) +# * Replace the ForkingPickler with a similar _LokyPickler, +# * Add CustomizableLokyPickler to allow customizing pickling process +# on the fly. +# +import copyreg +import io +import functools +import types +import sys +import os + +from multiprocessing import util +from pickle import loads, HIGHEST_PROTOCOL + +############################################################################### +# Enable custom pickling in Loky. + +_dispatch_table = {} + + +def register(type_, reduce_function): + _dispatch_table[type_] = reduce_function + + +############################################################################### +# Registers extra pickling routines to improve picklization for loky + + +# make methods picklable +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) + + +class _C: + def f(self): + pass + + @classmethod + def h(cls): + pass + + +register(type(_C().f), _reduce_method) +register(type(_C.h), _reduce_method) + + +if not hasattr(sys, "pypy_version_info"): + # PyPy uses functions instead of method_descriptors and wrapper_descriptors + def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) + + register(type(list.append), _reduce_method_descriptor) + register(type(int.__add__), _reduce_method_descriptor) + + +# Make partial func pickable +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) + + +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) + + +register(functools.partial, _reduce_partial) + +if sys.platform != "win32": + from ._posix_reduction import _mk_inheritable # noqa: F401 +else: + from . 
import _win_reduction # noqa: F401 + +# global variable to change the pickler behavior +try: + from joblib.externals import cloudpickle # noqa: F401 + + DEFAULT_ENV = "cloudpickle" +except ImportError: + # If cloudpickle is not present, fallback to pickle + DEFAULT_ENV = "pickle" + +ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV) +_LokyPickler = None +_loky_pickler_name = None + + +def set_loky_pickler(loky_pickler=None): + global _LokyPickler, _loky_pickler_name + + if loky_pickler is None: + loky_pickler = ENV_LOKY_PICKLER + + loky_pickler_cls = None + + # The default loky_pickler is cloudpickle + if loky_pickler in ["", None]: + loky_pickler = "cloudpickle" + + if loky_pickler == _loky_pickler_name: + return + + if loky_pickler == "cloudpickle": + from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls + else: + try: + from importlib import import_module + + module_pickle = import_module(loky_pickler) + loky_pickler_cls = module_pickle.Pickler + except (ImportError, AttributeError) as e: + extra_info = ( + "\nThis error occurred while setting loky_pickler to" + f" '{loky_pickler}', as required by the env variable " + "LOKY_PICKLER or the function set_loky_pickler." + ) + e.args = (e.args[0] + extra_info,) + e.args[1:] + e.msg = e.args[0] + raise e + + util.debug( + f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for " + "serialization." + ) + + class CustomizablePickler(loky_pickler_cls): + _loky_pickler_cls = loky_pickler_cls + + def _set_dispatch_table(self, dispatch_table): + for ancestor_class in self._loky_pickler_cls.mro(): + dt_attribute = getattr(ancestor_class, "dispatch_table", None) + if isinstance(dt_attribute, types.MemberDescriptorType): + # Ancestor class (typically _pickle.Pickler) has a + # member_descriptor for its "dispatch_table" attribute. Use + # it to set the dispatch_table as a member instead of a + # dynamic attribute in the __dict__ of the instance, + # otherwise it will not be taken into account by the C + # implementation of the dump method if a subclass defines a + # class-level dispatch_table attribute as was done in + # cloudpickle 1.6.0: + # https://github.com/joblib/loky/pull/260 + dt_attribute.__set__(self, dispatch_table) + break + + # On top of member descriptor set, also use setattr such that code + # that directly access self.dispatch_table gets a consistent view + # of the same table. + self.dispatch_table = dispatch_table + + def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): + loky_pickler_cls.__init__(self, writer, protocol=protocol) + if reducers is None: + reducers = {} + + if hasattr(self, "dispatch_table"): + # Force a copy that we will update without mutating the + # any class level defined dispatch_table. + loky_dt = dict(self.dispatch_table) + else: + # Use standard reducers as bases + loky_dt = copyreg.dispatch_table.copy() + + # Register loky specific reducers + loky_dt.update(_dispatch_table) + + # Set the new dispatch table, taking care of the fact that we + # need to use the member_descriptor when we inherit from a + # subclass of the C implementation of the Pickler base class + # with an class level dispatch_table attribute. 
+ self._set_dispatch_table(loky_dt) + + # Register the reducers + for type, reduce_func in reducers.items(): + self.register(type, reduce_func) + + def register(self, type, reduce_func): + """Attach a reducer function to a given type in the dispatch table.""" + self.dispatch_table[type] = reduce_func + + _LokyPickler = CustomizablePickler + _loky_pickler_name = loky_pickler + + +def get_loky_pickler_name(): + global _loky_pickler_name + return _loky_pickler_name + + +def get_loky_pickler(): + global _LokyPickler + return _LokyPickler + + +# Set it to its default value +set_loky_pickler() + + +def dump(obj, file, reducers=None, protocol=None): + """Replacement for pickle.dump() using _LokyPickler.""" + global _LokyPickler + _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj) + + +def dumps(obj, reducers=None, protocol=None): + global _LokyPickler + + buf = io.BytesIO() + dump(obj, buf, reducers=reducers, protocol=protocol) + return buf.getbuffer() + + +__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"] + +if sys.platform == "win32": + from multiprocessing.reduction import duplicate + + __all__ += ["duplicate"] diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..25204a7a729d4d5f295070cd050c17a4ed9d49b7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py @@ -0,0 +1,378 @@ +############################################################################### +# Server process to keep track of unlinked resources, like folders and +# semaphores and clean them. +# +# author: Thomas Moreau +# +# adapted from multiprocessing/semaphore_tracker.py (17/02/2017) +# * include custom spawnv_passfds to start the process +# * add some VERBOSE logging +# +# TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so +# once loky drops support for Python 3.7 it might be possible to stop +# maintaining this loky-specific fork. As a consequence, it might also be +# possible to stop maintaining the loky.backend.synchronize fork of +# multiprocessing.synchronize. + +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. The resource_tracker implements a reference counting scheme: each time +# a Python process anticipates the shared usage of a resource by another +# process, it signals the resource_tracker of this shared usage, and in return, +# the resource_tracker increments the resource's reference count by 1. +# Similarly, when access to a resource is closed by a Python process, the +# process notifies the resource_tracker by asking it to decrement the +# resource's reference count by 1. When the reference count drops to 0, the +# resource_tracker attempts to clean up the underlying resource. + +# Finally, every other process connected to the resource tracker has a copy of +# the writable end of the pipe used to communicate with it, so the resource +# tracker gets EOF when all other processes have exited. Then the +# resource_tracker process unlinks any remaining leaked resources (with +# reference count above 0) + +# For semaphores, this is important because the system only supports a limited +# number of named semaphores, and they will not be automatically removed till +# the next reboot. 
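# A minimal sketch of the refcounting protocol described in this header
# (assuming the backend is importable as joblib.externals.loky): a resource is
# registered with a type string, and the tracker process deletes it once its
# refcount drops to zero, even if the registering process dies first.
if __name__ == "__main__":
    import tempfile
    from joblib.externals.loky.backend import resource_tracker

    folder = tempfile.mkdtemp(prefix="loky_demo_")
    resource_tracker.register(folder, "folder")      # refcount 0 -> 1
    # ... the folder can now be shared with child processes ...
    resource_tracker.maybe_unlink(folder, "folder")  # refcount 1 -> 0, rmtree by the tracker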
Without this resource tracker process, "killall python" +# would probably leave unlinked semaphores. + +# Note that this behavior differs from CPython's resource_tracker, which only +# implements list of shared resources, and not a proper refcounting scheme. +# Also, CPython's resource tracker will only attempt to cleanup those shared +# resources once all procsses connected to the resouce tracker have exited. + + +import os +import shutil +import sys +import signal +import warnings +import threading +from _multiprocessing import sem_unlink +from multiprocessing import util + +from . import spawn + +if sys.platform == "win32": + import _winapi + import msvcrt + from multiprocessing.reduction import duplicate + + +__all__ = ["ensure_running", "register", "unregister"] + +_HAVE_SIGMASK = hasattr(signal, "pthread_sigmask") +_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) + +_CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink} + +if os.name == "posix": + _CLEANUP_FUNCS["semlock"] = sem_unlink + + +VERBOSE = False + + +class ResourceTracker: + def __init__(self): + self._lock = threading.Lock() + self._fd = None + self._pid = None + + def getfd(self): + self.ensure_running() + return self._fd + + def ensure_running(self): + """Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.""" + with self._lock: + if self._fd is not None: + # resource tracker was launched before, is it still running? + if self._check_alive(): + # => still alive + return + # => dead, launch it again + os.close(self._fd) + if os.name == "posix": + try: + # At this point, the resource_tracker process has been + # killed or crashed. Let's remove the process entry + # from the process table to avoid zombie processes. + os.waitpid(self._pid, 0) + except OSError: + # The process was terminated or is a child from an + # ancestor of the current process. + pass + self._fd = None + self._pid = None + + warnings.warn( + "resource_tracker: process died unexpectedly, " + "relaunching. Some folders/sempahores might " + "leak." + ) + + fds_to_pass = [] + try: + fds_to_pass.append(sys.stderr.fileno()) + except Exception: + pass + + r, w = os.pipe() + if sys.platform == "win32": + _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True) + os.close(r) + r = _r + + cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})" + try: + fds_to_pass.append(r) + # process will out live us, so no need to wait on pid + exe = spawn.get_executable() + args = [exe, *util._args_from_interpreter_flags(), "-c", cmd] + util.debug(f"launching resource tracker: {args}") + # bpo-33613: Register a signal mask that will block the + # signals. This signal mask will be inherited by the child + # that is going to be spawned and will protect the child from a + # race condition that can make the child die before it + # registers signal handlers for SIGINT and SIGTERM. The mask is + # unregistered after spawning the child. 
+ try: + if _HAVE_SIGMASK: + signal.pthread_sigmask( + signal.SIG_BLOCK, _IGNORED_SIGNALS + ) + pid = spawnv_passfds(exe, args, fds_to_pass) + finally: + if _HAVE_SIGMASK: + signal.pthread_sigmask( + signal.SIG_UNBLOCK, _IGNORED_SIGNALS + ) + except BaseException: + os.close(w) + raise + else: + self._fd = w + self._pid = pid + finally: + if sys.platform == "win32": + _winapi.CloseHandle(r) + else: + os.close(r) + + def _check_alive(self): + """Check for the existence of the resource tracker process.""" + try: + self._send("PROBE", "", "") + except BrokenPipeError: + return False + else: + return True + + def register(self, name, rtype): + """Register a named resource, and increment its refcount.""" + self.ensure_running() + self._send("REGISTER", name, rtype) + + def unregister(self, name, rtype): + """Unregister a named resource with resource tracker.""" + self.ensure_running() + self._send("UNREGISTER", name, rtype) + + def maybe_unlink(self, name, rtype): + """Decrement the refcount of a resource, and delete it if it hits 0""" + self.ensure_running() + self._send("MAYBE_UNLINK", name, rtype) + + def _send(self, cmd, name, rtype): + if len(name) > 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF >= 512 + raise ValueError("name too long") + msg = f"{cmd}:{name}:{rtype}\n".encode("ascii") + nbytes = os.write(self._fd, msg) + assert nbytes == len(msg) + + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +maybe_unlink = _resource_tracker.maybe_unlink +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + + +def main(fd, verbose=0): + """Run resource tracker.""" + # protect the process from ^C and "killall python" etc + if verbose: + util.log_to_stderr(level=util.DEBUG) + + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + if verbose: + util.debug("Main resource tracker is running") + + registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()} + try: + # keep track of registered/unregistered resources + if sys.platform == "win32": + fd = msvcrt.open_osfhandle(fd, os.O_RDONLY) + with open(fd, "rb") as f: + while True: + line = f.readline() + if line == b"": # EOF + break + try: + splitted = line.strip().decode("ascii").split(":") + # name can potentially contain separator symbols (for + # instance folders on Windows) + cmd, name, rtype = ( + splitted[0], + ":".join(splitted[1:-1]), + splitted[-1], + ) + + if cmd == "PROBE": + continue + + if rtype not in _CLEANUP_FUNCS: + raise ValueError( + f"Cannot register {name} for automatic cleanup: " + f"unknown resource type ({rtype}). 
Resource type " + "should be one of the following: " + f"{list(_CLEANUP_FUNCS.keys())}" + ) + + if cmd == "REGISTER": + if name not in registry[rtype]: + registry[rtype][name] = 1 + else: + registry[rtype][name] += 1 + + if verbose: + util.debug( + "[ResourceTracker] incremented refcount of " + f"{rtype} {name} " + f"(current {registry[rtype][name]})" + ) + elif cmd == "UNREGISTER": + del registry[rtype][name] + if verbose: + util.debug( + f"[ResourceTracker] unregister {name} {rtype}: " + f"registry({len(registry)})" + ) + elif cmd == "MAYBE_UNLINK": + registry[rtype][name] -= 1 + if verbose: + util.debug( + "[ResourceTracker] decremented refcount of " + f"{rtype} {name} " + f"(current {registry[rtype][name]})" + ) + + if registry[rtype][name] == 0: + del registry[rtype][name] + try: + if verbose: + util.debug( + f"[ResourceTracker] unlink {name}" + ) + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + warnings.warn( + f"resource_tracker: {name}: {e!r}" + ) + + else: + raise RuntimeError(f"unrecognized command {cmd!r}") + except BaseException: + try: + sys.excepthook(*sys.exc_info()) + except BaseException: + pass + finally: + # all processes have terminated; cleanup any remaining resources + def _unlink_resources(rtype_registry, rtype): + if rtype_registry: + try: + warnings.warn( + "resource_tracker: There appear to be " + f"{len(rtype_registry)} leaked {rtype} objects to " + "clean up at shutdown" + ) + except Exception: + pass + for name in rtype_registry: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore clean it up. + try: + _CLEANUP_FUNCS[rtype](name) + if verbose: + util.debug(f"[ResourceTracker] unlink {name}") + except Exception as e: + warnings.warn(f"resource_tracker: {name}: {e!r}") + + for rtype, rtype_registry in registry.items(): + if rtype == "folder": + continue + else: + _unlink_resources(rtype_registry, rtype) + + # The default cleanup routine for folders deletes everything inside + # those folders recursively, which can include other resources tracked + # by the resource tracker). To limit the risk of the resource tracker + # attempting to delete twice a resource (once as part of a tracked + # folder, and once as a resource), we delete the folders after all + # other resource types. 
+ if "folder" in registry: + _unlink_resources(registry["folder"], "folder") + + if verbose: + util.debug("resource tracker shut down") + + +# +# Start a program with only specified fds kept open +# + + +def spawnv_passfds(path, args, passfds): + passfds = sorted(passfds) + if sys.platform != "win32": + errpipe_read, errpipe_write = os.pipe() + try: + from .reduction import _mk_inheritable + from .fork_exec import fork_exec + + _pass = [_mk_inheritable(fd) for fd in passfds] + return fork_exec(args, _pass) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + else: + cmd = " ".join(f'"{x}"' for x in args) + try: + _, ht, pid, _ = _winapi.CreateProcess( + path, cmd, None, None, True, 0, None, None, None + ) + _winapi.CloseHandle(ht) + except BaseException: + pass + return pid diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..d011c398035f4e013ef36615a56e3bf0d8519d07 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py @@ -0,0 +1,250 @@ +############################################################################### +# Prepares and processes the data to setup the new process environment +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/spawn.py (17/02/2017) +# * Improve logging data +# +import os +import sys +import runpy +import textwrap +import types +from multiprocessing import process, util + + +if sys.platform != "win32": + WINEXE = False + WINSERVICE = False +else: + import msvcrt + from multiprocessing.reduction import duplicate + + WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, "python.exe") +else: + _python_exe = sys.executable + + +def get_executable(): + return _python_exe + + +def _check_not_importing_main(): + if getattr(process.current_process(), "_inheriting", False): + raise RuntimeError( + textwrap.dedent( + """\ + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... 
+ + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.""" + ) + ) + + +def get_preparation_data(name, init_main_module=True): + """Return info about parent needed by child to unpickle process object.""" + _check_not_importing_main() + d = dict( + log_to_stderr=util._log_to_stderr, + authkey=bytes(process.current_process().authkey), + name=name, + sys_argv=sys.argv, + orig_dir=process.ORIGINAL_DIR, + dir=os.getcwd(), + ) + + # Send sys_path and make sure the current directory will not be changed + d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path] + + # Make sure to pass the information if the multiprocessing logger is active + if util._logger is not None: + d["log_level"] = util._logger.getEffectiveLevel() + if util._logger.handlers: + h = util._logger.handlers[0] + d["log_fmt"] = h.formatter._fmt + + # Tell the child how to communicate with the resource_tracker + from .resource_tracker import _resource_tracker + + _resource_tracker.ensure_running() + d["tracker_args"] = {"pid": _resource_tracker._pid} + if sys.platform == "win32": + d["tracker_args"]["fh"] = msvcrt.get_osfhandle(_resource_tracker._fd) + else: + d["tracker_args"]["fd"] = _resource_tracker._fd + + if sys.version_info >= (3, 8) and os.name == "posix": + # joblib/loky#242: allow loky processes to retrieve the resource + # tracker of their parent in case the child processes depickles + # shared_memory objects, that are still tracked by multiprocessing's + # resource_tracker by default. + # XXX: this is a workaround that may be error prone: in the future, it + # would be better to have loky subclass multiprocessing's shared_memory + # to force registration of shared_memory segments via loky's + # resource_tracker. 
+ from multiprocessing.resource_tracker import ( + _resource_tracker as mp_resource_tracker, + ) + + # multiprocessing's resource_tracker must be running before loky + # process is created (othewise the child won't be able to use it if it + # is created later on) + mp_resource_tracker.ensure_running() + d["mp_tracker_args"] = { + "fd": mp_resource_tracker._fd, + "pid": mp_resource_tracker._pid, + } + + # Figure out whether to initialise main in the subprocess as a module + # or through direct execution (or to leave it alone entirely) + if init_main_module: + main_module = sys.modules["__main__"] + try: + main_mod_name = getattr(main_module.__spec__, "name", None) + except BaseException: + main_mod_name = None + if main_mod_name is not None: + d["init_main_from_name"] = main_mod_name + elif sys.platform != "win32" or (not WINEXE and not WINSERVICE): + main_path = getattr(main_module, "__file__", None) + if main_path is not None: + if ( + not os.path.isabs(main_path) + and process.ORIGINAL_DIR is not None + ): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d["init_main_from_path"] = os.path.normpath(main_path) + + return d + + +# +# Prepare current process +# +old_main_modules = [] + + +def prepare(data, parent_sentinel=None): + """Try to get current process ready to unpickle process object.""" + if "name" in data: + process.current_process().name = data["name"] + + if "authkey" in data: + process.current_process().authkey = data["authkey"] + + if "log_to_stderr" in data and data["log_to_stderr"]: + util.log_to_stderr() + + if "log_level" in data: + util.get_logger().setLevel(data["log_level"]) + + if "log_fmt" in data: + import logging + + util.get_logger().handlers[0].setFormatter( + logging.Formatter(data["log_fmt"]) + ) + + if "sys_path" in data: + sys.path = data["sys_path"] + + if "sys_argv" in data: + sys.argv = data["sys_argv"] + + if "dir" in data: + os.chdir(data["dir"]) + + if "orig_dir" in data: + process.ORIGINAL_DIR = data["orig_dir"] + + if "mp_tracker_args" in data: + from multiprocessing.resource_tracker import ( + _resource_tracker as mp_resource_tracker, + ) + + mp_resource_tracker._fd = data["mp_tracker_args"]["fd"] + mp_resource_tracker._pid = data["mp_tracker_args"]["pid"] + if "tracker_args" in data: + from .resource_tracker import _resource_tracker + + _resource_tracker._pid = data["tracker_args"]["pid"] + if sys.platform == "win32": + handle = data["tracker_args"]["fh"] + handle = duplicate(handle, source_process=parent_sentinel) + _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) + else: + _resource_tracker._fd = data["tracker_args"]["fd"] + + if "init_main_from_name" in data: + _fixup_main_from_name(data["init_main_from_name"]) + elif "init_main_from_path" in data: + _fixup_main_from_path(data["init_main_from_path"]) + + +# Multiprocessing module helpers to fix up the main module in +# spawned subprocesses +def _fixup_main_from_name(mod_name): + # __main__.py files for packages, directories, zip archives, etc, run + # their "main only" code unconditionally, so we don't even try to + # populate anything in __main__, nor do we make any changes to + # __main__ attributes + current_main = sys.modules["__main__"] + if mod_name == "__main__" or mod_name.endswith(".__main__"): + return + + # If this process was forked, __main__ may already be populated + if getattr(current_main.__spec__, "name", None) == mod_name: + return + + # Otherwise, __main__ may contain some non-main code where we need to + # support unpickling it properly. 
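# A sketch of the guarded-main idiom that this main-module fixup supports (the
# work() helper is illustrative, not part of loky): callables used by workers
# live at module level so the child can re-import them, while process creation
# stays under the __main__ guard.
from joblib.externals.loky.backend.context import get_context

def work():
    print("hello from a loky child")   # importable again after the fixup

if __name__ == "__main__":
    ctx = get_context("loky_init_main")    # child re-runs __main__ safely
    p = ctx.Process(target=work)
    p.start()
    p.join()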
We rerun it as __mp_main__ and make + # the normal __main__ an alias to that + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_module( + mod_name, run_name="__mp_main__", alter_sys=True + ) + main_module.__dict__.update(main_content) + sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module + + +def _fixup_main_from_path(main_path): + # If this process was forked, __main__ may already be populated + current_main = sys.modules["__main__"] + + # Unfortunately, the main ipython launch script historically had no + # "if __name__ == '__main__'" guard, so we work around that + # by treating it like a __main__.py file + # See https://github.com/ipython/ipython/issues/4698 + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == "ipython": + return + + # Otherwise, if __file__ already has the setting we expect, + # there's nothing more to do + if getattr(current_main, "__file__", None) == main_path: + return + + # If the parent process has sent a path through rather than a module + # name we assume it is an executable script that may contain + # non-main code that needs to be executed + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_path(main_path, run_name="__mp_main__") + main_module.__dict__.update(main_content) + sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py new file mode 100644 index 0000000000000000000000000000000000000000..18db3e34db979240b4a4a943ea6931db3091321d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py @@ -0,0 +1,409 @@ +############################################################################### +# Synchronization primitives based on our SemLock implementation +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/synchronize.py (17/02/2017) +# * Remove ctx argument for compatibility reason +# * Registers a cleanup function with the loky resource_tracker to remove the +# semaphore when the process dies instead. +# +# TODO: investigate which Python version is required to be able to use +# multiprocessing.resource_tracker and therefore multiprocessing.synchronize +# instead of a loky-specific fork. + +import os +import sys +import tempfile +import threading +import _multiprocessing +from time import time as _time +from multiprocessing import process, util +from multiprocessing.context import assert_spawning + +from . import resource_tracker + +__all__ = [ + "Lock", + "RLock", + "Semaphore", + "BoundedSemaphore", + "Condition", + "Event", +] +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. +# See issue 3770 +try: + from _multiprocessing import SemLock as _SemLock + from _multiprocessing import sem_unlink +except ImportError: + raise ImportError( + "This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770." 
+ ) + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = range(2) +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + + +class SemLock: + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, name=None): + # unlink_now is only used on win32 or when we are using fork. + unlink_now = False + if name is None: + # Try to find an unused name for the SemLock instance. + for _ in range(100): + try: + self._semlock = _SemLock( + kind, value, maxvalue, SemLock._make_name(), unlink_now + ) + except FileExistsError: # pragma: no cover + pass + else: + break + else: # pragma: no cover + raise FileExistsError("cannot find name for semaphore") + else: + self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now) + self.name = name + util.debug( + f"created semlock with handle {self._semlock.handle} and name " + f'"{self.name}"' + ) + + self._make_methods() + + def _after_fork(obj): + obj._semlock._after_fork() + + util.register_after_fork(self, _after_fork) + + # When the object is garbage collected or the + # process shuts down we unlink the semaphore name + resource_tracker.register(self._semlock.name, "semlock") + util.Finalize( + self, SemLock._cleanup, (self._semlock.name,), exitpriority=0 + ) + + @staticmethod + def _cleanup(name): + try: + sem_unlink(name) + except FileNotFoundError: + # Already unlinked, possibly by user code: ignore and make sure to + # unregister the semaphore from the resource tracker. + pass + finally: + resource_tracker.unregister(name, "semlock") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.acquire() + + def __exit__(self, *args): + return self._semlock.release() + + def __getstate__(self): + assert_spawning(self) + sl = self._semlock + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _SemLock._rebuild(*state) + util.debug( + f'recreated blocker with handle {state[0]!r} and name "{state[3]}"' + ) + self._make_methods() + + @staticmethod + def _make_name(): + # OSX does not support long names for semaphores + return f"/loky-{os.getpid()}-{next(SemLock._rand)}" + + +# +# Semaphore +# + + +class Semaphore(SemLock): + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX) + + def get_value(self): + if sys.platform == "darwin": + raise NotImplementedError("OSX does not implement sem_getvalue") + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = "unknown" + return f"<{self.__class__.__name__}(value={value})>" + + +# +# Bounded semaphore +# + + +class BoundedSemaphore(Semaphore): + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, value) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = "unknown" + return ( + f"<{self.__class__.__name__}(value={value}, " + f"maxvalue={self._semlock.maxvalue})>" + ) + + +# +# Non-recursive lock +# + + +class Lock(SemLock): + def __init__(self): + super().__init__(SEMAPHORE, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != "MainThread": + name = f"{name}|{threading.current_thread().name}" + elif self._semlock._get_value() == 1: + name = "None" + elif self._semlock._count() > 
0: + name = "SomeOtherThread" + else: + name = "SomeOtherProcess" + except Exception: + name = "unknown" + return f"<{self.__class__.__name__}(owner={name})>" + + +# +# Recursive lock +# + + +class RLock(SemLock): + def __init__(self): + super().__init__(RECURSIVE_MUTEX, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != "MainThread": + name = f"{name}|{threading.current_thread().name}" + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = "None", 0 + elif self._semlock._count() > 0: + name, count = "SomeOtherThread", "nonzero" + else: + name, count = "SomeOtherProcess", "nonzero" + except Exception: + name, count = "unknown", "unknown" + return f"<{self.__class__.__name__}({name}, {count})>" + + +# +# Condition variable +# + + +class Condition: + def __init__(self, lock=None): + self._lock = lock or RLock() + self._sleeping_count = Semaphore(0) + self._woken_count = Semaphore(0) + self._wait_semaphore = Semaphore(0) + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return ( + self._lock, + self._sleeping_count, + self._woken_count, + self._wait_semaphore, + ) + + def __setstate__(self, state): + ( + self._lock, + self._sleeping_count, + self._woken_count, + self._wait_semaphore, + ) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = ( + self._sleeping_count._semlock._get_value() + - self._woken_count._semlock._get_value() + ) + except Exception: + num_waiters = "unknown" + return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>" + + def wait(self, timeout=None): + assert ( + self._lock._semlock._is_mine() + ), "must acquire() condition before using wait()" + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for _ in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for _ in range(count): + self._lock.acquire() + + def notify(self): + assert self._lock._semlock._is_mine(), "lock is not owned" + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + if self._sleeping_count.acquire(False): # try grabbing a sleeper + self._wait_semaphore.release() # wake up one sleeper + self._woken_count.acquire() # wait for the sleeper to wake + + # rezero _wait_semaphore in case a timeout just happened + self._wait_semaphore.acquire(False) + + def notify_all(self): + assert self._lock._semlock._is_mine(), "lock is not owned" + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + sleepers = 0 + while self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one 
sleeper + sleepers += 1 + + if sleepers: + for _ in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = _time() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - _time() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +# +# Event +# + + +class Event: + def __init__(self): + self._cond = Condition(Lock()) + self._flag = Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aa089f7a1bf9b577455775f6d6249baf4bd430de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py @@ -0,0 +1,181 @@ +import os +import sys +import time +import errno +import signal +import warnings +import subprocess +import traceback + +try: + import psutil +except ImportError: + psutil = None + + +def kill_process_tree(process, use_psutil=True): + """Terminate process and its descendants with SIGKILL""" + if use_psutil and psutil is not None: + _kill_process_tree_with_psutil(process) + else: + _kill_process_tree_without_psutil(process) + + +def recursive_terminate(process, use_psutil=True): + warnings.warn( + "recursive_terminate is deprecated in loky 3.2, use kill_process_tree" + "instead", + DeprecationWarning, + ) + kill_process_tree(process, use_psutil=use_psutil) + + +def _kill_process_tree_with_psutil(process): + try: + descendants = psutil.Process(process.pid).children(recursive=True) + except psutil.NoSuchProcess: + return + + # Kill the descendants in reverse order to avoid killing the parents before + # the descendant in cases where there are more processes nested. + for descendant in descendants[::-1]: + try: + descendant.kill() + except psutil.NoSuchProcess: + pass + + try: + psutil.Process(process.pid).kill() + except psutil.NoSuchProcess: + pass + process.join() + + +def _kill_process_tree_without_psutil(process): + """Terminate a process and its descendants.""" + try: + if sys.platform == "win32": + _windows_taskkill_process_tree(process.pid) + else: + _posix_recursive_kill(process.pid) + except Exception: # pragma: no cover + details = traceback.format_exc() + warnings.warn( + "Failed to kill subprocesses on this platform. Please install" + "psutil: https://github.com/giampaolo/psutil\n" + f"Details:\n{details}" + ) + # In case we cannot introspect or kill the descendants, we fall back to + # only killing the main process. 
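+    # Illustrative usage sketch of kill_process_tree (assuming the vendored
+    # import path; a standalone loky install exposes the same helper under
+    # loky.backend.utils):
+    #
+    #     import time
+    #     from multiprocessing import Process
+    #     from joblib.externals.loky.backend.utils import kill_process_tree
+    #
+    #     def sleep_forever():
+    #         time.sleep(3600)
+    #
+    #     if __name__ == "__main__":
+    #         p = Process(target=sleep_forever)
+    #         p.start()
+    #         kill_process_tree(p)   # terminates p and its children, then joins p
+    #         print(p.exitcode)      # negative signal number on POSIX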
+ # + # Note: on Windows, process.kill() is an alias for process.terminate() + # which in turns calls the Win32 API function TerminateProcess(). + process.kill() + process.join() + + +def _windows_taskkill_process_tree(pid): + # On windows, the taskkill function with option `/T` terminate a given + # process pid and its children. + try: + subprocess.check_output( + ["taskkill", "/F", "/T", "/PID", str(pid)], stderr=None + ) + except subprocess.CalledProcessError as e: + # In Windows, taskkill returns 128, 255 for no process found. + if e.returncode not in [128, 255]: + # Let's raise to let the caller log the error details in a + # warning and only kill the root process. + raise # pragma: no cover + + +def _kill(pid): + # Not all systems (e.g. Windows) have a SIGKILL, but the C specification + # mandates a SIGTERM signal. While Windows is handled specifically above, + # let's try to be safe for other hypothetic platforms that only have + # SIGTERM without SIGKILL. + kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM) + try: + os.kill(pid, kill_signal) + except OSError as e: + # if OSError is raised with [Errno 3] no such process, the process + # is already terminated, else, raise the error and let the top + # level function raise a warning and retry to kill the process. + if e.errno != errno.ESRCH: + raise # pragma: no cover + + +def _posix_recursive_kill(pid): + """Recursively kill the descendants of a process before killing it.""" + try: + children_pids = subprocess.check_output( + ["pgrep", "-P", str(pid)], stderr=None, text=True + ) + except subprocess.CalledProcessError as e: + # `ps` returns 1 when no child process has been found + if e.returncode == 1: + children_pids = "" + else: + raise # pragma: no cover + + # Decode the result, split the cpid and remove the trailing line + for cpid in children_pids.splitlines(): + cpid = int(cpid) + _posix_recursive_kill(cpid) + + _kill(pid) + + +def get_exitcodes_terminated_worker(processes): + """Return a formatted string with the exitcodes of terminated workers. + + If necessary, wait (up to .25s) for the system to correctly set the + exitcode of one terminated worker. + """ + patience = 5 + + # Catch the exitcode of the terminated workers. There should at least be + # one. If not, wait a bit for the system to correctly set the exitcode of + # the terminated worker. + exitcodes = [ + p.exitcode for p in list(processes.values()) if p.exitcode is not None + ] + while not exitcodes and patience > 0: + patience -= 1 + exitcodes = [ + p.exitcode + for p in list(processes.values()) + if p.exitcode is not None + ] + time.sleep(0.05) + + return _format_exitcodes(exitcodes) + + +def _format_exitcodes(exitcodes): + """Format a list of exit code with names of the signals if possible""" + str_exitcodes = [ + f"{_get_exitcode_name(e)}({e})" for e in exitcodes if e is not None + ] + return "{" + ", ".join(str_exitcodes) + "}" + + +def _get_exitcode_name(exitcode): + if sys.platform == "win32": + # The exitcode are unreliable on windows (see bpo-31863). + # For this case, return UNKNOWN + return "UNKNOWN" + + if exitcode < 0: + try: + import signal + + return signal.Signals(-exitcode).name + except ValueError: + return "UNKNOWN" + elif exitcode != 255: + # The exitcode are unreliable on forkserver were 255 is always returned + # (see bpo-30589). 
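+        # Illustrative note: when a worker dies from a signal, its exitcode is
+        # the negative signal number, which the branch above maps to a name:
+        #
+        #     >>> import signal
+        #     >>> signal.Signals(9).name
+        #     'SIGKILL'
+        #
+        # so _format_exitcodes reports such a worker as "SIGKILL(-9)".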
For this case, return UNKNOWN + return "EXIT" + + return "UNKNOWN" diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/func_inspect.py b/env-llmeval/lib/python3.10/site-packages/joblib/func_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..3f8094614b90abed5804723908ddf5eb109901b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/func_inspect.py @@ -0,0 +1,369 @@ +""" +My own variation on function-specific inspect-like features. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import inspect +import warnings +import re +import os +import collections + +from itertools import islice +from tokenize import open as open_py_source + +from .logger import pformat + +full_argspec_fields = ('args varargs varkw defaults kwonlyargs ' + 'kwonlydefaults annotations') +full_argspec_type = collections.namedtuple('FullArgSpec', full_argspec_fields) + + +def get_func_code(func): + """ Attempts to retrieve a reliable function code hash. + + The reason we don't use inspect.getsource is that it caches the + source, whereas we want this to be modified on the fly when the + function is modified. + + Returns + ------- + func_code: string + The function code + source_file: string + The path to the file in which the function is defined. + first_line: int + The first line of the code in the source file. + + Notes + ------ + This function does a bit more magic than inspect, and is thus + more robust. + """ + source_file = None + try: + code = func.__code__ + source_file = code.co_filename + if not os.path.exists(source_file): + # Use inspect for lambda functions and functions defined in an + # interactive shell, or in doctests + source_code = ''.join(inspect.getsourcelines(func)[0]) + line_no = 1 + if source_file.startswith('', source_file).groups() + line_no = int(line_no) + source_file = '' % source_file + return source_code, source_file, line_no + # Try to retrieve the source code. + with open_py_source(source_file) as source_file_obj: + first_line = code.co_firstlineno + # All the lines after the function definition: + source_lines = list(islice(source_file_obj, first_line - 1, None)) + return ''.join(inspect.getblock(source_lines)), source_file, first_line + except: # noqa: E722 + # If the source code fails, we use the hash. This is fragile and + # might change from one session to another. + if hasattr(func, '__code__'): + # Python 3.X + return str(func.__code__.__hash__()), source_file, -1 + else: + # Weird objects like numpy ufunc don't have __code__ + # This is fragile, as quite often the id of the object is + # in the repr, so it might not persist across sessions, + # however it will work for ufuncs. + return repr(func), source_file, -1 + + +def _clean_win_chars(string): + """Windows cannot encode some characters in filename.""" + import urllib + if hasattr(urllib, 'quote'): + quote = urllib.quote + else: + # In Python 3, quote is elsewhere + import urllib.parse + quote = urllib.parse.quote + for char in ('<', '>', '!', ':', '\\'): + string = string.replace(char, quote(char)) + return string + + +def get_func_name(func, resolv_alias=True, win_characters=True): + """ Return the function import path (as a list of module names), and + a name for the function. + + Parameters + ---------- + func: callable + The func to inspect + resolv_alias: boolean, optional + If true, possible local aliases are indicated. 
+ win_characters: boolean, optional + If true, substitute special characters using urllib.quote + This is useful in Windows, as it cannot encode some filenames + """ + if hasattr(func, '__module__'): + module = func.__module__ + else: + try: + module = inspect.getmodule(func) + except TypeError: + if hasattr(func, '__class__'): + module = func.__class__.__module__ + else: + module = 'unknown' + if module is None: + # Happens in doctests, eg + module = '' + if module == '__main__': + try: + filename = os.path.abspath(inspect.getsourcefile(func)) + except: # noqa: E722 + filename = None + if filename is not None: + # mangling of full path to filename + parts = filename.split(os.sep) + if parts[-1].startswith(', where: + # - N is the cell number where the function was defined + # - XYZ is a hash representing the function's code (and name). + # It will be consistent across sessions and kernel restarts, + # and will change if the function's code/name changes + # We remove N so that cache is properly hit if the cell where + # the func is defined is re-exectuted. + # The XYZ hash should avoid collisions between functions with + # the same name, both within the same notebook but also across + # notebooks + splitted = parts[-1].split('-') + parts[-1] = '-'.join(splitted[:2] + splitted[3:]) + elif len(parts) > 2 and parts[-2].startswith('ipykernel_'): + # In a notebook session (ipykernel). Filename seems to be 'xyz' + # of above. parts[-2] has the structure ipykernel_XXXXXX where + # XXXXXX is a six-digit number identifying the current run (?). + # If we split it off, the function again has the same + # identifier across runs. + parts[-2] = 'ipykernel' + filename = '-'.join(parts) + if filename.endswith('.py'): + filename = filename[:-3] + module = module + '-' + filename + module = module.split('.') + if hasattr(func, 'func_name'): + name = func.func_name + elif hasattr(func, '__name__'): + name = func.__name__ + else: + name = 'unknown' + # Hack to detect functions not defined at the module-level + if resolv_alias: + # TODO: Maybe add a warning here? + if hasattr(func, 'func_globals') and name in func.func_globals: + if not func.func_globals[name] is func: + name = '%s-alias' % name + if hasattr(func, '__qualname__') and func.__qualname__ != name: + # Extend the module name in case of nested functions to avoid + # (module, name) collisions + module.extend(func.__qualname__.split(".")[:-1]) + if inspect.ismethod(func): + # We need to add the name of the class + if hasattr(func, 'im_class'): + klass = func.im_class + module.append(klass.__name__) + if os.name == 'nt' and win_characters: + # Windows can't encode certain characters in filenames + name = _clean_win_chars(name) + module = [_clean_win_chars(s) for s in module] + return module, name + + +def _signature_str(function_name, arg_sig): + """Helper function to output a function signature""" + return '{}{}'.format(function_name, arg_sig) + + +def _function_called_str(function_name, args, kwargs): + """Helper function to output a function call""" + template_str = '{0}({1}, {2})' + + args_str = repr(args)[1:-1] + kwargs_str = ', '.join('%s=%s' % (k, v) + for k, v in kwargs.items()) + return template_str.format(function_name, args_str, + kwargs_str) + + +def filter_args(func, ignore_lst, args=(), kwargs=dict()): + """ Filters the given args and kwargs using a list of arguments to + ignore, and a function specification. 
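+
+    A minimal illustrative sketch (the returned dict follows the order of the
+    function signature)::
+
+        >>> from joblib.func_inspect import filter_args
+        >>> def f(x, y=0):
+        ...     return x + y
+        >>> filter_args(f, [], args=(1,), kwargs=dict(y=2))
+        {'x': 1, 'y': 2}
+        >>> filter_args(f, ['y'], args=(1,), kwargs=dict(y=2))
+        {'x': 1}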
+ + Parameters + ---------- + func: callable + Function giving the argument specification + ignore_lst: list of strings + List of arguments to ignore (either a name of an argument + in the function spec, or '*', or '**') + *args: list + Positional arguments passed to the function. + **kwargs: dict + Keyword arguments passed to the function + + Returns + ------- + filtered_args: list + List of filtered positional and keyword arguments. + """ + args = list(args) + if isinstance(ignore_lst, str): + # Catch a common mistake + raise ValueError( + 'ignore_lst must be a list of parameters to ignore ' + '%s (type %s) was given' % (ignore_lst, type(ignore_lst))) + # Special case for functools.partial objects + if (not inspect.ismethod(func) and not inspect.isfunction(func)): + if ignore_lst: + warnings.warn('Cannot inspect object %s, ignore list will ' + 'not work.' % func, stacklevel=2) + return {'*': args, '**': kwargs} + arg_sig = inspect.signature(func) + arg_names = [] + arg_defaults = [] + arg_kwonlyargs = [] + arg_varargs = None + arg_varkw = None + for param in arg_sig.parameters.values(): + if param.kind is param.POSITIONAL_OR_KEYWORD: + arg_names.append(param.name) + elif param.kind is param.KEYWORD_ONLY: + arg_names.append(param.name) + arg_kwonlyargs.append(param.name) + elif param.kind is param.VAR_POSITIONAL: + arg_varargs = param.name + elif param.kind is param.VAR_KEYWORD: + arg_varkw = param.name + if param.default is not param.empty: + arg_defaults.append(param.default) + if inspect.ismethod(func): + # First argument is 'self', it has been removed by Python + # we need to add it back: + args = [func.__self__, ] + args + # func is an instance method, inspect.signature(func) does not + # include self, we need to fetch it from the class method, i.e + # func.__func__ + class_method_sig = inspect.signature(func.__func__) + self_name = next(iter(class_method_sig.parameters)) + arg_names = [self_name] + arg_names + # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such + # as on ndarrays. + + _, name = get_func_name(func, resolv_alias=False) + arg_dict = dict() + arg_position = -1 + for arg_position, arg_name in enumerate(arg_names): + if arg_position < len(args): + # Positional argument or keyword argument given as positional + if arg_name not in arg_kwonlyargs: + arg_dict[arg_name] = args[arg_position] + else: + raise ValueError( + "Keyword-only parameter '%s' was passed as " + 'positional parameter for %s:\n' + ' %s was called.' + % (arg_name, + _signature_str(name, arg_sig), + _function_called_str(name, args, kwargs)) + ) + + else: + position = arg_position - len(arg_names) + if arg_name in kwargs: + arg_dict[arg_name] = kwargs[arg_name] + else: + try: + arg_dict[arg_name] = arg_defaults[position] + except (IndexError, KeyError) as e: + # Missing argument + raise ValueError( + 'Wrong number of arguments for %s:\n' + ' %s was called.' 
+ % (_signature_str(name, arg_sig), + _function_called_str(name, args, kwargs)) + ) from e + + varkwargs = dict() + for arg_name, arg_value in sorted(kwargs.items()): + if arg_name in arg_dict: + arg_dict[arg_name] = arg_value + elif arg_varkw is not None: + varkwargs[arg_name] = arg_value + else: + raise TypeError("Ignore list for %s() contains an unexpected " + "keyword argument '%s'" % (name, arg_name)) + + if arg_varkw is not None: + arg_dict['**'] = varkwargs + if arg_varargs is not None: + varargs = args[arg_position + 1:] + arg_dict['*'] = varargs + + # Now remove the arguments to be ignored + for item in ignore_lst: + if item in arg_dict: + arg_dict.pop(item) + else: + raise ValueError("Ignore list: argument '%s' is not defined for " + "function %s" + % (item, + _signature_str(name, arg_sig)) + ) + # XXX: Return a sorted list of pairs? + return arg_dict + + +def _format_arg(arg): + formatted_arg = pformat(arg, indent=2) + if len(formatted_arg) > 1500: + formatted_arg = '%s...' % formatted_arg[:700] + return formatted_arg + + +def format_signature(func, *args, **kwargs): + # XXX: Should this use inspect.formatargvalues/formatargspec? + module, name = get_func_name(func) + module = [m for m in module if m] + if module: + module.append(name) + module_path = '.'.join(module) + else: + module_path = name + arg_str = list() + previous_length = 0 + for arg in args: + formatted_arg = _format_arg(arg) + if previous_length > 80: + formatted_arg = '\n%s' % formatted_arg + previous_length = len(formatted_arg) + arg_str.append(formatted_arg) + arg_str.extend(['%s=%s' % (v, _format_arg(i)) for v, i in kwargs.items()]) + arg_str = ', '.join(arg_str) + + signature = '%s(%s)' % (name, arg_str) + return module_path, signature + + +def format_call(func, args, kwargs, object_name="Memory"): + """ Returns a nicely formatted statement displaying the function + call with the given arguments. + """ + path, signature = format_signature(func, *args, **kwargs) + msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name, + path, signature) + return msg + # XXX: Not using logging framework + # self.debug(msg) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/hashing.py b/env-llmeval/lib/python3.10/site-packages/joblib/hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..6c081f06997ae3b6c18fc76b286ef95e008f23cc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/hashing.py @@ -0,0 +1,265 @@ +""" +Fast cryptographic hash of Python objects, with a special case for fast +hashing of numpy arrays. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import pickle +import hashlib +import sys +import types +import struct +import io +import decimal + + +Pickler = pickle._Pickler + + +class _ConsistentSet(object): + """ Class used to ensure the hash of Sets is preserved + whatever the order of its items. + """ + def __init__(self, set_sequence): + # Forces order of elements in set to ensure consistent hash. + try: + # Trying first to order the set assuming the type of elements is + # consistent and orderable. + # This fails on python 3 when elements are unorderable + # but we keep it in a try as it's faster. + self._sequence = sorted(set_sequence) + except (TypeError, decimal.InvalidOperation): + # If elements are unorderable, sorting them using their hash. + # This is slower but works in any case. 
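+            # Illustrative effect, through the public joblib.hashing.hash
+            # helper (imported under an alias to avoid shadowing the builtin):
+            #
+            #     >>> from joblib.hashing import hash as joblib_hash
+            #     >>> joblib_hash({1, 'a'}) == joblib_hash({'a', 1})
+            #     True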
+ self._sequence = sorted((hash(e) for e in set_sequence)) + + +class _MyHash(object): + """ Class used to hash objects that won't normally pickle """ + + def __init__(self, *args): + self.args = args + + +class Hasher(Pickler): + """ A subclass of pickler, to do cryptographic hashing, rather than + pickling. + """ + + def __init__(self, hash_name='md5'): + self.stream = io.BytesIO() + # By default we want a pickle protocol that only changes with + # the major python version and not the minor one + protocol = 3 + Pickler.__init__(self, self.stream, protocol=protocol) + # Initialise the hash obj + self._hash = hashlib.new(hash_name) + + def hash(self, obj, return_digest=True): + try: + self.dump(obj) + except pickle.PicklingError as e: + e.args += ('PicklingError while hashing %r: %r' % (obj, e),) + raise + dumps = self.stream.getvalue() + self._hash.update(dumps) + if return_digest: + return self._hash.hexdigest() + + def save(self, obj): + if isinstance(obj, (types.MethodType, type({}.pop))): + # the Pickler cannot pickle instance methods; here we decompose + # them into components that make them uniquely identifiable + if hasattr(obj, '__func__'): + func_name = obj.__func__.__name__ + else: + func_name = obj.__name__ + inst = obj.__self__ + if type(inst) is type(pickle): + obj = _MyHash(func_name, inst.__name__) + elif inst is None: + # type(None) or type(module) do not pickle + obj = _MyHash(func_name, inst) + else: + cls = obj.__self__.__class__ + obj = _MyHash(func_name, inst, cls) + Pickler.save(self, obj) + + def memoize(self, obj): + # We want hashing to be sensitive to value instead of reference. + # For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]] + # to hash to the same value and that's why we disable memoization + # for strings + if isinstance(obj, (bytes, str)): + return + Pickler.memoize(self, obj) + + # The dispatch table of the pickler is not accessible in Python + # 3, as these lines are only bugware for IPython, we skip them. + def save_global(self, obj, name=None, pack=struct.pack): + # We have to override this method in order to deal with objects + # defined interactively in IPython that are not injected in + # __main__ + kwargs = dict(name=name, pack=pack) + del kwargs['pack'] + try: + Pickler.save_global(self, obj, **kwargs) + except pickle.PicklingError: + Pickler.save_global(self, obj, **kwargs) + module = getattr(obj, "__module__", None) + if module == '__main__': + my_name = name + if my_name is None: + my_name = obj.__name__ + mod = sys.modules[module] + if not hasattr(mod, my_name): + # IPython doesn't inject the variables define + # interactively in __main__ + setattr(mod, my_name, obj) + + dispatch = Pickler.dispatch.copy() + # builtin + dispatch[type(len)] = save_global + # type + dispatch[type(object)] = save_global + # classobj + dispatch[type(Pickler)] = save_global + # function + dispatch[type(pickle.dump)] = save_global + + def _batch_setitems(self, items): + # forces order of keys in dict to ensure consistent hash. + try: + # Trying first to compare dict assuming the type of keys is + # consistent and orderable. + # This fails on python 3 when keys are unorderable + # but we keep it in a try as it's faster. + Pickler._batch_setitems(self, iter(sorted(items))) + except TypeError: + # If keys are unorderable, sorting them using their hash. This is + # slower but works in any case. 
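+            # Illustrative effect, through the public joblib.hashing.hash
+            # helper -- the hash depends on the dict contents, not on the
+            # insertion order of its keys:
+            #
+            #     >>> from joblib.hashing import hash as joblib_hash
+            #     >>> joblib_hash({'a': 1, 'b': 2}) == joblib_hash({'b': 2, 'a': 1})
+            #     True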
+ Pickler._batch_setitems(self, iter(sorted((hash(k), v) + for k, v in items))) + + def save_set(self, set_items): + # forces order of items in Set to ensure consistent hash + Pickler.save(self, _ConsistentSet(set_items)) + + dispatch[type(set())] = save_set + + +class NumpyHasher(Hasher): + """ Special case the hasher for when numpy is loaded. + """ + + def __init__(self, hash_name='md5', coerce_mmap=False): + """ + Parameters + ---------- + hash_name: string + The hash algorithm to be used + coerce_mmap: boolean + Make no difference between np.memmap and np.ndarray + objects. + """ + self.coerce_mmap = coerce_mmap + Hasher.__init__(self, hash_name=hash_name) + # delayed import of numpy, to avoid tight coupling + import numpy as np + self.np = np + if hasattr(np, 'getbuffer'): + self._getbuffer = np.getbuffer + else: + self._getbuffer = memoryview + + def save(self, obj): + """ Subclass the save method, to hash ndarray subclass, rather + than pickling them. Off course, this is a total abuse of + the Pickler class. + """ + if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject: + # Compute a hash of the object + # The update function of the hash requires a c_contiguous buffer. + if obj.shape == (): + # 0d arrays need to be flattened because viewing them as bytes + # raises a ValueError exception. + obj_c_contiguous = obj.flatten() + elif obj.flags.c_contiguous: + obj_c_contiguous = obj + elif obj.flags.f_contiguous: + obj_c_contiguous = obj.T + else: + # Cater for non-single-segment arrays: this creates a + # copy, and thus alleviates this issue. + # XXX: There might be a more efficient way of doing this + obj_c_contiguous = obj.flatten() + + # memoryview is not supported for some dtypes, e.g. datetime64, see + # https://github.com/numpy/numpy/issues/4983. The + # workaround is to view the array as bytes before + # taking the memoryview. + self._hash.update( + self._getbuffer(obj_c_contiguous.view(self.np.uint8))) + + # We store the class, to be able to distinguish between + # Objects with the same binary content, but different + # classes. + if self.coerce_mmap and isinstance(obj, self.np.memmap): + # We don't make the difference between memmap and + # normal ndarrays, to be able to reload previously + # computed results with memmap. + klass = self.np.ndarray + else: + klass = obj.__class__ + # We also return the dtype and the shape, to distinguish + # different views on the same data with different dtypes. + + # The object will be pickled by the pickler hashed at the end. + obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides)) + elif isinstance(obj, self.np.dtype): + # numpy.dtype consistent hashing is tricky to get right. This comes + # from the fact that atomic np.dtype objects are interned: + # ``np.dtype('f4') is np.dtype('f4')``. The situation is + # complicated by the fact that this interning does not resist a + # simple pickle.load/dump roundtrip: + # ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not + # np.dtype('f4') Because pickle relies on memoization during + # pickling, it is easy to + # produce different hashes for seemingly identical objects, such as + # ``[np.dtype('f4'), np.dtype('f4')]`` + # and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``. + # To prevent memoization from interfering with hashing, we isolate + # the serialization (and thus the pickle memoization) of each dtype + # using each time a different ``pickle.dumps`` call unrelated to + # the current Hasher instance. 
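+            # Illustrative consequence of the workaround below (requires
+            # numpy); this mirrors the scenario described in the comment above:
+            #
+            #     >>> import pickle
+            #     >>> import numpy as np
+            #     >>> from joblib.hashing import hash as joblib_hash
+            #     >>> dt = np.dtype('f4')
+            #     >>> joblib_hash([dt, dt]) == joblib_hash(
+            #     ...     [dt, pickle.loads(pickle.dumps(dt))])
+            #     True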
+ self._hash.update("_HASHED_DTYPE".encode('utf-8')) + self._hash.update(pickle.dumps(obj)) + return + Hasher.save(self, obj) + + +def hash(obj, hash_name='md5', coerce_mmap=False): + """ Quick calculation of a hash to identify uniquely Python objects + containing numpy arrays. + + Parameters + ---------- + hash_name: 'md5' or 'sha1' + Hashing algorithm used. sha1 is supposedly safer, but md5 is + faster. + coerce_mmap: boolean + Make no difference between np.memmap and np.ndarray + """ + valid_hash_names = ('md5', 'sha1') + if hash_name not in valid_hash_names: + raise ValueError("Valid options for 'hash_name' are {}. " + "Got hash_name={!r} instead." + .format(valid_hash_names, hash_name)) + if 'numpy' in sys.modules: + hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap) + else: + hasher = Hasher(hash_name=hash_name) + return hasher.hash(obj) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/logger.py b/env-llmeval/lib/python3.10/site-packages/joblib/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9d258011f3c581334a93ef3ccdac7dfb19b25e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/logger.py @@ -0,0 +1,162 @@ +""" +Helpers for logging. + +This module needs much love to become useful. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2008 Gael Varoquaux +# License: BSD Style, 3 clauses. + +from __future__ import print_function + +import time +import sys +import os +import shutil +import logging +import pprint + +from .disk import mkdirp + + +def _squeeze_time(t): + """Remove .1s to the time under Windows: this is the time it take to + stat files. This is needed to make results similar to timings under + Unix, for tests + """ + if sys.platform.startswith('win'): + return max(0, t - .1) + else: + return t + + +def format_time(t): + t = _squeeze_time(t) + return "%.1fs, %.1fmin" % (t, t / 60.) + + +def short_format_time(t): + t = _squeeze_time(t) + if t > 60: + return "%4.1fmin" % (t / 60.) + else: + return " %5.1fs" % (t) + + +def pformat(obj, indent=0, depth=3): + if 'numpy' in sys.modules: + import numpy as np + print_options = np.get_printoptions() + np.set_printoptions(precision=6, threshold=64, edgeitems=1) + else: + print_options = None + out = pprint.pformat(obj, depth=depth, indent=indent) + if print_options: + np.set_printoptions(**print_options) + return out + + +############################################################################### +# class `Logger` +############################################################################### +class Logger(object): + """ Base class for logging messages. + """ + + def __init__(self, depth=3, name=None): + """ + Parameters + ---------- + depth: int, optional + The depth of objects printed. + name: str, optional + The namespace to log to. If None, defaults to joblib. 
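+
+        A minimal illustrative sketch (the emitted output depends on the
+        logging configuration of the host application)::
+
+            >>> from joblib.logger import Logger
+            >>> class MyTask(Logger):
+            ...     def __repr__(self):
+            ...         return 'MyTask'
+            >>> MyTask(name='myapp').warn('low disk space')  # doctest: +SKIP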
+ """ + self.depth = depth + self._name = name if name else 'joblib' + + def warn(self, msg): + logging.getLogger(self._name).warning("[%s]: %s" % (self, msg)) + + def info(self, msg): + logging.info("[%s]: %s" % (self, msg)) + + def debug(self, msg): + # XXX: This conflicts with the debug flag used in children class + logging.getLogger(self._name).debug("[%s]: %s" % (self, msg)) + + def format(self, obj, indent=0): + """Return the formatted representation of the object.""" + return pformat(obj, indent=indent, depth=self.depth) + + +############################################################################### +# class `PrintTime` +############################################################################### +class PrintTime(object): + """ Print and log messages while keeping track of time. + """ + + def __init__(self, logfile=None, logdir=None): + if logfile is not None and logdir is not None: + raise ValueError('Cannot specify both logfile and logdir') + # XXX: Need argument docstring + self.last_time = time.time() + self.start_time = self.last_time + if logdir is not None: + logfile = os.path.join(logdir, 'joblib.log') + self.logfile = logfile + if logfile is not None: + mkdirp(os.path.dirname(logfile)) + if os.path.exists(logfile): + # Rotate the logs + for i in range(1, 9): + try: + shutil.move(logfile + '.%i' % i, + logfile + '.%i' % (i + 1)) + except: # noqa: E722 + "No reason failing here" + # Use a copy rather than a move, so that a process + # monitoring this file does not get lost. + try: + shutil.copy(logfile, logfile + '.1') + except: # noqa: E722 + "No reason failing here" + try: + with open(logfile, 'w') as logfile: + logfile.write('\nLogging joblib python script\n') + logfile.write('\n---%s---\n' % time.ctime(self.last_time)) + except: # noqa: E722 + """ Multiprocessing writing to files can create race + conditions. Rather fail silently than crash the + computation. + """ + # XXX: We actually need a debug flag to disable this + # silent failure. + + def __call__(self, msg='', total=False): + """ Print the time elapsed between the last call and the current + call, with an optional message. + """ + if not total: + time_lapse = time.time() - self.last_time + full_msg = "%s: %s" % (msg, format_time(time_lapse)) + else: + # FIXME: Too much logic duplicated + time_lapse = time.time() - self.start_time + full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse, + time_lapse / 60) + print(full_msg, file=sys.stderr) + if self.logfile is not None: + try: + with open(self.logfile, 'a') as f: + print(full_msg, file=f) + except: # noqa: E722 + """ Multiprocessing writing to files can create race + conditions. Rather fail silently than crash the + calculation. + """ + # XXX: We actually need a debug flag to disable this + # silent failure. + self.last_time = time.time() diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/memory.py b/env-llmeval/lib/python3.10/site-packages/joblib/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..6f87f803947a4060f0e64fe1bec1a3d0bc35672b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/memory.py @@ -0,0 +1,1160 @@ +""" +A context object for caching a function's return value each time it +is called with the same input arguments. + +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. 
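+
+# A minimal usage sketch of the public API defined in this module
+# (illustrative; the cache directory name is arbitrary):
+#
+#     >>> from joblib import Memory
+#     >>> memory = Memory('/tmp/joblib_example_cache', verbose=0)
+#     >>> @memory.cache
+#     ... def expensive(x):
+#     ...     print('computing', x)
+#     ...     return x ** 2
+#     >>> expensive(3)   # first call: computed and persisted to disk
+#     computing 3
+#     9
+#     >>> expensive(3)   # second call: loaded from the cache
+#     9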
+ + +import asyncio +import datetime +import functools +import inspect +import logging +import os +import pathlib +import pydoc +import re +import textwrap +import time +import tokenize +import traceback +import warnings +import weakref + +from . import hashing +from ._store_backends import CacheWarning # noqa +from ._store_backends import FileSystemStoreBackend, StoreBackendBase +from .func_inspect import (filter_args, format_call, format_signature, + get_func_code, get_func_name) +from .logger import Logger, format_time, pformat + +FIRST_LINE_TEXT = "# first line:" + +# TODO: The following object should have a data store object as a sub +# object, and the interface to persist and query should be separated in +# the data store. +# +# This would enable creating 'Memory' objects with a different logic for +# pickling that would simply span a MemorizedFunc with the same +# store (or do we want to copy it to avoid cross-talks?), for instance to +# implement HDF5 pickling. + +# TODO: Same remark for the logger, and probably use the Python logging +# mechanism. + + +def extract_first_line(func_code): + """ Extract the first line information from the function code + text if available. + """ + if func_code.startswith(FIRST_LINE_TEXT): + func_code = func_code.split('\n') + first_line = int(func_code[0][len(FIRST_LINE_TEXT):]) + func_code = '\n'.join(func_code[1:]) + else: + first_line = -1 + return func_code, first_line + + +class JobLibCollisionWarning(UserWarning): + """ Warn that there might be a collision between names of functions. + """ + + +_STORE_BACKENDS = {'local': FileSystemStoreBackend} + + +def register_store_backend(backend_name, backend): + """Extend available store backends. + + The Memory, MemorizeResult and MemorizeFunc objects are designed to be + agnostic to the type of store used behind. By default, the local file + system is used but this function gives the possibility to extend joblib's + memory pattern with other types of storage such as cloud storage (S3, GCS, + OpenStack, HadoopFS, etc) or blob DBs. + + Parameters + ---------- + backend_name: str + The name identifying the store backend being registered. For example, + 'local' is used with FileSystemStoreBackend. + backend: StoreBackendBase subclass + The name of a class that implements the StoreBackendBase interface. + + """ + if not isinstance(backend_name, str): + raise ValueError("Store backend name should be a string, " + "'{0}' given.".format(backend_name)) + if backend is None or not issubclass(backend, StoreBackendBase): + raise ValueError("Store backend should inherit " + "StoreBackendBase, " + "'{0}' given.".format(backend)) + + _STORE_BACKENDS[backend_name] = backend + + +def _store_backend_factory(backend, location, verbose=0, backend_options=None): + """Return the correct store object for the given location.""" + if backend_options is None: + backend_options = {} + + if isinstance(location, pathlib.Path): + location = str(location) + + if isinstance(location, StoreBackendBase): + return location + elif isinstance(location, str): + obj = None + location = os.path.expanduser(location) + # The location is not a local file system, we look in the + # registered backends if there's one matching the given backend + # name. + for backend_key, backend_obj in _STORE_BACKENDS.items(): + if backend == backend_key: + obj = backend_obj() + + # By default, we assume the FileSystemStoreBackend can be used if no + # matching backend could be found. 
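+        # Illustrative sketch of how a new backend name becomes resolvable in
+        # the loop above (``MyStoreBackend`` is a hypothetical subclass, not
+        # part of joblib):
+        #
+        #     from joblib import register_store_backend
+        #     from joblib._store_backends import FileSystemStoreBackend
+        #
+        #     class MyStoreBackend(FileSystemStoreBackend):
+        #         pass
+        #
+        #     register_store_backend('my_backend', MyStoreBackend)
+        #     # Memory(location='...', backend='my_backend') will now use it.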
+ if obj is None: + raise TypeError('Unknown location {0} or backend {1}'.format( + location, backend)) + + # The store backend is configured with the extra named parameters, + # some of them are specific to the underlying store backend. + obj.configure(location, verbose=verbose, + backend_options=backend_options) + return obj + elif location is not None: + warnings.warn( + "Instantiating a backend using a {} as a location is not " + "supported by joblib. Returning None instead.".format( + location.__class__.__name__), UserWarning) + + return None + + +def _build_func_identifier(func): + """Build a roughly unique identifier for the cached function.""" + modules, funcname = get_func_name(func) + # We reuse historical fs-like way of building a function identifier + return os.path.join(*modules, funcname) + + +# An in-memory store to avoid looking at the disk-based function +# source code to check if a function definition has changed +_FUNCTION_HASHES = weakref.WeakKeyDictionary() + + +############################################################################### +# class `MemorizedResult` +############################################################################### +class MemorizedResult(Logger): + """Object representing a cached value. + + Attributes + ---------- + location: str + The location of joblib cache. Depends on the store backend used. + + func: function or str + function whose output is cached. The string case is intended only for + instantiation based on the output of repr() on another instance. + (namely eval(repr(memorized_instance)) works). + + argument_hash: str + hash of the function arguments. + + backend: str + Type of store backend for reading/writing cache files. + Default is 'local'. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'} + The memmapping mode used when loading from cache numpy arrays. See + numpy.load for the meaning of the different values. + + verbose: int + verbosity level (0 means no message). + + timestamp, metadata: string + for internal use only. + """ + def __init__(self, location, call_id, backend='local', mmap_mode=None, + verbose=0, timestamp=None, metadata=None): + Logger.__init__(self) + self._call_id = call_id + self.store_backend = _store_backend_factory(backend, location, + verbose=verbose) + self.mmap_mode = mmap_mode + + if metadata is not None: + self.metadata = metadata + else: + self.metadata = self.store_backend.get_metadata(self._call_id) + + self.duration = self.metadata.get('duration', None) + self.verbose = verbose + self.timestamp = timestamp + + @property + def func(self): + return self.func_id + + @property + def func_id(self): + return self._call_id[0] + + @property + def args_id(self): + return self._call_id[1] + + @property + def argument_hash(self): + warnings.warn( + "The 'argument_hash' attribute has been deprecated in version " + "0.12 and will be removed in version 0.14.\n" + "Use `args_id` attribute instead.", + DeprecationWarning, stacklevel=2) + return self.args_id + + def get(self): + """Read value from cache and return it.""" + try: + return self.store_backend.load_item( + self._call_id, + timestamp=self.timestamp, + metadata=self.metadata, + verbose=self.verbose + ) + except ValueError as exc: + new_exc = KeyError( + "Error while trying to load a MemorizedResult's value. 
" + "It seems that this folder is corrupted : {}".format( + os.path.join(self.store_backend.location, *self._call_id))) + raise new_exc from exc + + def clear(self): + """Clear value from cache""" + self.store_backend.clear_item(self._call_id) + + def __repr__(self): + return '{}(location="{}", func="{}", args_id="{}")'.format( + self.__class__.__name__, self.store_backend.location, + *self._call_id + ) + + def __getstate__(self): + state = self.__dict__.copy() + state['timestamp'] = None + return state + + +class NotMemorizedResult(object): + """Class representing an arbitrary value. + + This class is a replacement for MemorizedResult when there is no cache. + """ + __slots__ = ('value', 'valid') + + def __init__(self, value): + self.value = value + self.valid = True + + def get(self): + if self.valid: + return self.value + else: + raise KeyError("No value stored.") + + def clear(self): + self.valid = False + self.value = None + + def __repr__(self): + if self.valid: + return ('{class_name}({value})' + .format(class_name=self.__class__.__name__, + value=pformat(self.value))) + else: + return self.__class__.__name__ + ' with no value' + + # __getstate__ and __setstate__ are required because of __slots__ + def __getstate__(self): + return {"valid": self.valid, "value": self.value} + + def __setstate__(self, state): + self.valid = state["valid"] + self.value = state["value"] + + +############################################################################### +# class `NotMemorizedFunc` +############################################################################### +class NotMemorizedFunc(object): + """No-op object decorating a function. + + This class replaces MemorizedFunc when there is no cache. It provides an + identical API but does not write anything on disk. + + Attributes + ---------- + func: callable + Original undecorated function. + """ + # Should be a light as possible (for speed) + def __init__(self, func): + self.func = func + + def __call__(self, *args, **kwargs): + return self.func(*args, **kwargs) + + def call_and_shelve(self, *args, **kwargs): + return NotMemorizedResult(self.func(*args, **kwargs)) + + def __repr__(self): + return '{0}(func={1})'.format(self.__class__.__name__, self.func) + + def clear(self, warn=True): + # Argument "warn" is for compatibility with MemorizedFunc.clear + pass + + def call(self, *args, **kwargs): + return self.func(*args, **kwargs) + + def check_call_in_cache(self, *args, **kwargs): + return False + + +############################################################################### +# class `AsyncNotMemorizedFunc` +############################################################################### +class AsyncNotMemorizedFunc(NotMemorizedFunc): + async def call_and_shelve(self, *args, **kwargs): + return NotMemorizedResult(await self.func(*args, **kwargs)) + + +############################################################################### +# class `MemorizedFunc` +############################################################################### +class MemorizedFunc(Logger): + """Callable object decorating a function for caching its return value + each time it is called. + + Methods are provided to inspect the cache or clean it. + + Attributes + ---------- + func: callable + The original, undecorated, function. + + location: string + The location of joblib cache. Depends on the store backend used. + + backend: str + Type of store backend for reading/writing cache files. 
+ Default is 'local', in which case the location is the path to a + disk storage. + + ignore: list or None + List of variable names to ignore when choosing whether to + recompute. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'} + The memmapping mode used when loading from cache + numpy arrays. See numpy.load for the meaning of the different + values. + + compress: boolean, or integer + Whether to zip the stored data on disk. If an integer is + given, it should be between 1 and 9, and sets the amount + of compression. Note that compressed arrays cannot be + read by memmapping. + + verbose: int, optional + The verbosity flag, controls messages that are issued as + the function is evaluated. + + cache_validation_callback: callable, optional + Callable to check if a result in cache is valid or is to be recomputed. + When the function is called with arguments for which a cache exists, + the callback is called with the cache entry's metadata as its sole + argument. If it returns True, the cached result is returned, else the + cache for these arguments is cleared and the result is recomputed. + """ + # ------------------------------------------------------------------------ + # Public interface + # ------------------------------------------------------------------------ + + def __init__(self, func, location, backend='local', ignore=None, + mmap_mode=None, compress=False, verbose=1, timestamp=None, + cache_validation_callback=None): + Logger.__init__(self) + self.mmap_mode = mmap_mode + self.compress = compress + self.func = func + self.cache_validation_callback = cache_validation_callback + self.func_id = _build_func_identifier(func) + self.ignore = ignore if ignore is not None else [] + self._verbose = verbose + + # retrieve store object from backend type and location. + self.store_backend = _store_backend_factory(backend, location, + verbose=verbose, + backend_options=dict( + compress=compress, + mmap_mode=mmap_mode), + ) + if self.store_backend is not None: + # Create func directory on demand. + self.store_backend.store_cached_func_code([self.func_id]) + + self.timestamp = timestamp if timestamp is not None else time.time() + try: + functools.update_wrapper(self, func) + except Exception: + pass # Objects like ufunc don't like that + if inspect.isfunction(func): + doc = pydoc.TextDoc().document(func) + # Remove blank line + doc = doc.replace('\n', '\n\n', 1) + # Strip backspace-overprints for compatibility with autodoc + doc = re.sub('\x08.', '', doc) + else: + # Pydoc does a poor job on other objects + doc = func.__doc__ + self.__doc__ = 'Memoized version of %s' % doc + + self._func_code_info = None + self._func_code_id = None + + def _is_in_cache_and_valid(self, call_id): + """Check if the function call is cached and valid for given arguments. + + - Compare the function code with the one from the cached function, + asserting if it has changed. + - Check if the function call is present in the cache. + - Call `cache_validation_callback` for user define cache validation. + + Returns True if the function call is in cache and can be used, and + returns False otherwise. 
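+
+        An illustrative sketch of the user-level hook checked here, using the
+        ``expires_after`` helper exported by joblib (the cache location is
+        arbitrary)::
+
+            from joblib import Memory, expires_after
+
+            memory = Memory('/tmp/joblib_example_cache', verbose=0)
+
+            @memory.cache(cache_validation_callback=expires_after(seconds=3600))
+            def fetch(url):
+                ...
+
+            # Cached results older than one hour fail the callback and are
+            # recomputed on the next call.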
+ """ + # Check if the code of the function has changed + if not self._check_previous_func_code(stacklevel=4): + return False + + # Check if this specific call is in the cache + if not self.store_backend.contains_item(call_id): + return False + + # Call the user defined cache validation callback + metadata = self.store_backend.get_metadata(call_id) + if (self.cache_validation_callback is not None and + not self.cache_validation_callback(metadata)): + self.store_backend.clear_item(call_id) + return False + + return True + + def _cached_call(self, args, kwargs, shelving): + """Call wrapped function and cache result, or read cache if available. + + This function returns the wrapped function output or a reference to + the cached result. + + Arguments: + ---------- + + args, kwargs: list and dict + input arguments for wrapped function + + shelving: bool + True when called via the call_and_shelve function. + + + Returns + ------- + Output of the wrapped function if shelving is false, or a + MemorizedResult reference to the value if shelving is true. + """ + args_id = self._get_args_id(*args, **kwargs) + call_id = (self.func_id, args_id) + _, func_name = get_func_name(self.func) + func_info = self.store_backend.get_cached_func_info([self.func_id]) + location = func_info['location'] + + if self._verbose >= 20: + logging.basicConfig(level=logging.INFO) + _, signature = format_signature(self.func, *args, **kwargs) + self.info( + textwrap.dedent( + f""" + Querying {func_name} with signature + {signature}. + + (argument hash {args_id}) + + The store location is {location}. + """ + ) + ) + + # Compare the function code with the previous to see if the + # function code has changed and check if the results are present in + # the cache. + if self._is_in_cache_and_valid(call_id): + if shelving: + return self._get_memorized_result(call_id) + + try: + start_time = time.time() + output = self._load_item(call_id) + if self._verbose > 4: + self._print_duration(time.time() - start_time, + context='cache loaded ') + return output + except Exception: + # XXX: Should use an exception logger + _, signature = format_signature(self.func, *args, **kwargs) + self.warn('Exception while loading results for ' + '{}\n {}'.format(signature, traceback.format_exc())) + + if self._verbose > 10: + self.warn( + f"Computing func {func_name}, argument hash {args_id} " + f"in location {location}" + ) + + return self._call(call_id, args, kwargs, shelving) + + @property + def func_code_info(self): + # 3-tuple property containing: the function source code, source file, + # and first line of the code inside the source file + if hasattr(self.func, '__code__'): + if self._func_code_id is None: + self._func_code_id = id(self.func.__code__) + elif id(self.func.__code__) != self._func_code_id: + # Be robust to dynamic reassignments of self.func.__code__ + self._func_code_info = None + + if self._func_code_info is None: + # Cache the source code of self.func . Provided that get_func_code + # (which should be called once on self) gets called in the process + # in which self.func was defined, this caching mechanism prevents + # undesired cache clearing when the cached function is called in + # an environment where the introspection utilities get_func_code + # relies on do not work (typically, in joblib child processes). + # See #1035 for more info + # TODO (pierreglaser): do the same with get_func_name? 
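+            # Illustrative reminder of the two public entry points served by
+            # _cached_call above, for a memoized function ``cached_f``
+            # (hypothetical name):
+            #
+            #     value = cached_f(3)                # shelving=False: the value
+            #     ref = cached_f.call_and_shelve(3)  # shelving=True: MemorizedResult
+            #     value = ref.get()                  # load the stored value later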
+ self._func_code_info = get_func_code(self.func) + return self._func_code_info + + def call_and_shelve(self, *args, **kwargs): + """Call wrapped function, cache result and return a reference. + + This method returns a reference to the cached result instead of the + result itself. The reference object is small and pickeable, allowing + to send or store it easily. Call .get() on reference object to get + result. + + Returns + ------- + cached_result: MemorizedResult or NotMemorizedResult + reference to the value returned by the wrapped function. The + class "NotMemorizedResult" is used when there is no cache + activated (e.g. location=None in Memory). + """ + return self._cached_call(args, kwargs, shelving=True) + + def __call__(self, *args, **kwargs): + return self._cached_call(args, kwargs, shelving=False) + + def __getstate__(self): + # Make sure self.func's source is introspected prior to being pickled - + # code introspection utilities typically do not work inside child + # processes + _ = self.func_code_info + + # We don't store the timestamp when pickling, to avoid the hash + # depending from it. + state = self.__dict__.copy() + state['timestamp'] = None + + # Invalidate the code id as id(obj) will be different in the child + state['_func_code_id'] = None + + return state + + def check_call_in_cache(self, *args, **kwargs): + """Check if function call is in the memory cache. + + Does not call the function or do any work besides func inspection + and arg hashing. + + Returns + ------- + is_call_in_cache: bool + Whether or not the result of the function has been cached + for the input arguments that have been passed. + """ + call_id = (self.func_id, self._get_args_id(*args, **kwargs)) + return self.store_backend.contains_item(call_id) + + # ------------------------------------------------------------------------ + # Private interface + # ------------------------------------------------------------------------ + + def _get_args_id(self, *args, **kwargs): + """Return the input parameter hash of a result.""" + return hashing.hash(filter_args(self.func, self.ignore, args, kwargs), + coerce_mmap=self.mmap_mode is not None) + + def _hash_func(self): + """Hash a function to key the online cache""" + func_code_h = hash(getattr(self.func, '__code__', None)) + return id(self.func), hash(self.func), func_code_h + + def _write_func_code(self, func_code, first_line): + """ Write the function code and the filename to a file. + """ + # We store the first line because the filename and the function + # name is not always enough to identify a function: people + # sometimes have several functions named the same way in a + # file. This is bad practice, but joblib should be robust to bad + # practice. + func_code = u'%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code) + self.store_backend.store_cached_func_code([self.func_id], func_code) + + # Also store in the in-memory store of function hashes + is_named_callable = (hasattr(self.func, '__name__') and + self.func.__name__ != '') + if is_named_callable: + # Don't do this for lambda functions or strange callable + # objects, as it ends up being too fragile + func_hash = self._hash_func() + try: + _FUNCTION_HASHES[self.func] = func_hash + except TypeError: + # Some callable are not hashable + pass + + def _check_previous_func_code(self, stacklevel=2): + """ + stacklevel is the depth a which this function is called, to + issue useful warnings to the user. + """ + # First check if our function is in the in-memory store. 
+ # Using the in-memory store not only makes things faster, but it + # also renders us robust to variations of the files when the + # in-memory version of the code does not vary + try: + if self.func in _FUNCTION_HASHES: + # We use as an identifier the id of the function and its + # hash. This is more likely to falsely change than have hash + # collisions, thus we are on the safe side. + func_hash = self._hash_func() + if func_hash == _FUNCTION_HASHES[self.func]: + return True + except TypeError: + # Some callables are not hashable + pass + + # Here, we go through some effort to be robust to dynamically + # changing code and collision. We cannot inspect.getsource + # because it is not reliable when using IPython's magic "%run". + func_code, source_file, first_line = self.func_code_info + try: + old_func_code, old_first_line = extract_first_line( + self.store_backend.get_cached_func_code([self.func_id])) + except (IOError, OSError): # some backend can also raise OSError + self._write_func_code(func_code, first_line) + return False + if old_func_code == func_code: + return True + + # We have differing code, is this because we are referring to + # different functions, or because the function we are referring to has + # changed? + + _, func_name = get_func_name(self.func, resolv_alias=False, + win_characters=False) + if old_first_line == first_line == -1 or func_name == '': + if not first_line == -1: + func_description = ("{0} ({1}:{2})" + .format(func_name, source_file, + first_line)) + else: + func_description = func_name + warnings.warn(JobLibCollisionWarning( + "Cannot detect name collisions for function '{0}'" + .format(func_description)), stacklevel=stacklevel) + + # Fetch the code at the old location and compare it. If it is the + # same than the code store, we have a collision: the code in the + # file has not changed, but the name we have is pointing to a new + # code block. + if not old_first_line == first_line and source_file is not None: + if os.path.exists(source_file): + _, func_name = get_func_name(self.func, resolv_alias=False) + num_lines = len(func_code.split('\n')) + with tokenize.open(source_file) as f: + on_disk_func_code = f.readlines()[ + old_first_line - 1:old_first_line - 1 + num_lines - 1] + on_disk_func_code = ''.join(on_disk_func_code) + possible_collision = (on_disk_func_code.rstrip() == + old_func_code.rstrip()) + else: + possible_collision = source_file.startswith(' 10: + _, func_name = get_func_name(self.func, resolv_alias=False) + self.warn("Function {0} (identified by {1}) has changed" + ".".format(func_name, self.func_id)) + self.clear(warn=True) + return False + + def clear(self, warn=True): + """Empty the function's cache.""" + func_id = self.func_id + if self._verbose > 0 and warn: + self.warn("Clearing function cache identified by %s" % func_id) + self.store_backend.clear_path([func_id, ]) + + func_code, _, first_line = self.func_code_info + self._write_func_code(func_code, first_line) + + def call(self, *args, **kwargs): + """Force the execution of the function with the given arguments. + + The output values will be persisted, i.e., the cache will be updated + with any new values. + + Parameters + ---------- + *args: arguments + The arguments. + **kwargs: keyword arguments + Keyword arguments. + + Returns + ------- + output : object + The output of the function call. 
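+
+        Notes
+        -----
+        The wrapped function is always executed, even when a valid cached
+        result already exists for these arguments; the corresponding cache
+        entry is then overwritten with the fresh output.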
+ """ + call_id = (self.func_id, self._get_args_id(*args, **kwargs)) + return self._call(call_id, args, kwargs) + + def _call(self, call_id, args, kwargs, shelving=False): + self._before_call(args, kwargs) + start_time = time.time() + output = self.func(*args, **kwargs) + return self._after_call(call_id, args, kwargs, shelving, + output, start_time) + + def _before_call(self, args, kwargs): + if self._verbose > 0: + print(format_call(self.func, args, kwargs)) + + def _after_call(self, call_id, args, kwargs, shelving, output, start_time): + self.store_backend.dump_item(call_id, output, verbose=self._verbose) + duration = time.time() - start_time + if self._verbose > 0: + self._print_duration(duration) + metadata = self._persist_input(duration, call_id, args, kwargs) + if shelving: + return self._get_memorized_result(call_id, metadata) + + if self.mmap_mode is not None: + # Memmap the output at the first call to be consistent with + # later calls + output = self._load_item(call_id, metadata) + return output + + def _persist_input(self, duration, call_id, args, kwargs, + this_duration_limit=0.5): + """ Save a small summary of the call using json format in the + output directory. + + output_dir: string + directory where to write metadata. + + duration: float + time taken by hashing input arguments, calling the wrapped + function and persisting its output. + + args, kwargs: list and dict + input arguments for wrapped function + + this_duration_limit: float + Max execution time for this function before issuing a warning. + """ + start_time = time.time() + argument_dict = filter_args(self.func, self.ignore, + args, kwargs) + + input_repr = dict((k, repr(v)) for k, v in argument_dict.items()) + # This can fail due to race-conditions with multiple + # concurrent joblibs removing the file or the directory + metadata = { + "duration": duration, "input_args": input_repr, "time": start_time, + } + + self.store_backend.store_metadata(call_id, metadata) + + this_duration = time.time() - start_time + if this_duration > this_duration_limit: + # This persistence should be fast. It will not be if repr() takes + # time and its output is large, because json.dump will have to + # write a large file. This should not be an issue with numpy arrays + # for which repr() always output a short representation, but can + # be with complex dictionaries. Fixing the problem should be a + # matter of replacing repr() above by something smarter. + warnings.warn("Persisting input arguments took %.2fs to run." + "If this happens often in your code, it can cause " + "performance problems " + "(results will be correct in all cases). " + "The reason for this is probably some large input " + "arguments for a wrapped function." 
+ % this_duration, stacklevel=5) + return metadata + + def _get_memorized_result(self, call_id, metadata=None): + return MemorizedResult(self.store_backend, call_id, + metadata=metadata, timestamp=self.timestamp, + verbose=self._verbose - 1) + + def _load_item(self, call_id, metadata=None): + return self.store_backend.load_item(call_id, metadata=metadata, + timestamp=self.timestamp, + verbose=self._verbose) + + def _print_duration(self, duration, context=''): + _, name = get_func_name(self.func) + msg = f"{name} {context}- {format_time(duration)}" + print(max(0, (80 - len(msg))) * '_' + msg) + + # ------------------------------------------------------------------------ + # Private `object` interface + # ------------------------------------------------------------------------ + + def __repr__(self): + return '{class_name}(func={func}, location={location})'.format( + class_name=self.__class__.__name__, + func=self.func, + location=self.store_backend.location,) + + +############################################################################### +# class `AsyncMemorizedFunc` +############################################################################### +class AsyncMemorizedFunc(MemorizedFunc): + async def __call__(self, *args, **kwargs): + out = super().__call__(*args, **kwargs) + return await out if asyncio.iscoroutine(out) else out + + async def call_and_shelve(self, *args, **kwargs): + out = super().call_and_shelve(*args, **kwargs) + return await out if asyncio.iscoroutine(out) else out + + async def call(self, *args, **kwargs): + out = super().call(*args, **kwargs) + return await out if asyncio.iscoroutine(out) else out + + async def _call(self, call_id, args, kwargs, shelving=False): + self._before_call(args, kwargs) + start_time = time.time() + output = await self.func(*args, **kwargs) + return self._after_call(call_id, args, kwargs, shelving, + output, start_time) + + +############################################################################### +# class `Memory` +############################################################################### +class Memory(Logger): + """ A context object for caching a function's return value each time it + is called with the same input arguments. + + All values are cached on the filesystem, in a deep directory + structure. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + location: str, pathlib.Path or None + The path of the base directory to use as a data store + or None. If None is given, no caching is done and + the Memory object is completely transparent. This option + replaces cachedir since version 0.12. + + backend: str, optional + Type of store backend for reading/writing cache files. + Default: 'local'. + The 'local' backend is using regular filesystem operations to + manipulate data (open, mv, etc) in the backend. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + The memmapping mode used when loading from cache + numpy arrays. See numpy.load for the meaning of the + arguments. + + compress: boolean, or integer, optional + Whether to zip the stored data on disk. If an integer is + given, it should be between 1 and 9, and sets the amount + of compression. Note that compressed arrays cannot be + read by memmapping. + + verbose: int, optional + Verbosity flag, controls the debug messages that are issued + as functions are evaluated. + + bytes_limit: int | str, optional + Limit in bytes of the size of the cache. By default, the size of + the cache is unlimited. 
When reducing the size of the cache, + ``joblib`` keeps the most recently accessed items first. If a + str is passed, it is converted to a number of bytes using units + { K | M | G} for kilo, mega, giga. + + **Note:** You need to call :meth:`joblib.Memory.reduce_size` to + actually reduce the cache size to be less than ``bytes_limit``. + + **Note:** This argument has been deprecated. One should give the + value of ``bytes_limit`` directly in + :meth:`joblib.Memory.reduce_size`. + + backend_options: dict, optional + Contains a dictionary of named parameters used to configure + the store backend. + """ + # ------------------------------------------------------------------------ + # Public interface + # ------------------------------------------------------------------------ + + def __init__(self, location=None, backend='local', + mmap_mode=None, compress=False, verbose=1, bytes_limit=None, + backend_options=None): + Logger.__init__(self) + self._verbose = verbose + self.mmap_mode = mmap_mode + self.timestamp = time.time() + if bytes_limit is not None: + warnings.warn( + "bytes_limit argument has been deprecated. It will be removed " + "in version 1.5. Please pass its value directly to " + "Memory.reduce_size.", + category=DeprecationWarning + ) + self.bytes_limit = bytes_limit + self.backend = backend + self.compress = compress + if backend_options is None: + backend_options = {} + self.backend_options = backend_options + + if compress and mmap_mode is not None: + warnings.warn('Compressed results cannot be memmapped', + stacklevel=2) + + self.location = location + if isinstance(location, str): + location = os.path.join(location, 'joblib') + + self.store_backend = _store_backend_factory( + backend, location, verbose=self._verbose, + backend_options=dict(compress=compress, mmap_mode=mmap_mode, + **backend_options)) + + def cache(self, func=None, ignore=None, verbose=None, mmap_mode=False, + cache_validation_callback=None): + """ Decorates the given function func to only compute its return + value for input arguments not cached on disk. + + Parameters + ---------- + func: callable, optional + The function to be decorated + ignore: list of strings + A list of arguments name to ignore in the hashing + verbose: integer, optional + The verbosity mode of the function. By default that + of the memory object is used. + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + The memmapping mode used when loading from cache + numpy arrays. See numpy.load for the meaning of the + arguments. By default that of the memory object is used. + cache_validation_callback: callable, optional + Callable to validate whether or not the cache is valid. When + the cached function is called with arguments for which a cache + exists, this callable is called with the metadata of the cached + result as its sole argument. If it returns True, then the + cached result is returned, else the cache for these arguments + is cleared and recomputed. + + Returns + ------- + decorated_func: MemorizedFunc object + The returned object is a MemorizedFunc object, that is + callable (behaves like a function), but offers extra + methods for cache lookup and management. See the + documentation for :class:`joblib.memory.MemorizedFunc`. + """ + if (cache_validation_callback is not None and + not callable(cache_validation_callback)): + raise ValueError( + "cache_validation_callback needs to be callable. " + f"Got {cache_validation_callback}." 
+ ) + if func is None: + # Partial application, to be able to specify extra keyword + # arguments in decorators + return functools.partial( + self.cache, ignore=ignore, + mmap_mode=mmap_mode, + verbose=verbose, + cache_validation_callback=cache_validation_callback + ) + if self.store_backend is None: + cls = (AsyncNotMemorizedFunc + if asyncio.iscoroutinefunction(func) + else NotMemorizedFunc) + return cls(func) + if verbose is None: + verbose = self._verbose + if mmap_mode is False: + mmap_mode = self.mmap_mode + if isinstance(func, MemorizedFunc): + func = func.func + cls = (AsyncMemorizedFunc + if asyncio.iscoroutinefunction(func) + else MemorizedFunc) + return cls( + func, location=self.store_backend, backend=self.backend, + ignore=ignore, mmap_mode=mmap_mode, compress=self.compress, + verbose=verbose, timestamp=self.timestamp, + cache_validation_callback=cache_validation_callback + ) + + def clear(self, warn=True): + """ Erase the complete cache directory. + """ + if warn: + self.warn('Flushing completely the cache') + if self.store_backend is not None: + self.store_backend.clear() + + # As the cache is completely clear, make sure the _FUNCTION_HASHES + # cache is also reset. Else, for a function that is present in this + # table, results cached after this clear will be have cache miss + # as the function code is not re-written. + _FUNCTION_HASHES.clear() + + def reduce_size(self, bytes_limit=None, items_limit=None, age_limit=None): + """Remove cache elements to make the cache fit its limits. + + The limitation can impose that the cache size fits in ``bytes_limit``, + that the number of cache items is no more than ``items_limit``, and + that all files in cache are not older than ``age_limit``. + + Parameters + ---------- + bytes_limit: int | str, optional + Limit in bytes of the size of the cache. By default, the size of + the cache is unlimited. When reducing the size of the cache, + ``joblib`` keeps the most recently accessed items first. If a + str is passed, it is converted to a number of bytes using units + { K | M | G} for kilo, mega, giga. + + items_limit: int, optional + Number of items to limit the cache to. By default, the number of + items in the cache is unlimited. When reducing the size of the + cache, ``joblib`` keeps the most recently accessed items first. + + age_limit: datetime.timedelta, optional + Maximum age of items to limit the cache to. When reducing the size + of the cache, any items last accessed more than the given length of + time ago are deleted. + """ + if bytes_limit is None: + bytes_limit = self.bytes_limit + + if self.store_backend is None: + # No cached results, this function does nothing. + return + + if bytes_limit is None and items_limit is None and age_limit is None: + # No limitation to impose, returning + return + + # Defers the actual limits enforcing to the store backend. + self.store_backend.enforce_store_limits( + bytes_limit, items_limit, age_limit + ) + + def eval(self, func, *args, **kwargs): + """ Eval function func with arguments `*args` and `**kwargs`, + in the context of the memory. + + This method works similarly to the builtin `apply`, except + that the function is called only if the cache is not + up to date. 
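+
+        A minimal usage sketch (the cache directory below is only an
+        example)::
+
+            from joblib import Memory
+            mem = Memory('/tmp/joblib_eval_example', verbose=0)
+
+            def add(a, b):
+                return a + b
+
+            mem.eval(add, 2, 3)  # computes 5 and stores it in the cache
+            mem.eval(add, 2, 3)  # returns the cached 5 without recomputing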
+ + """ + if self.store_backend is None: + return func(*args, **kwargs) + return self.cache(func)(*args, **kwargs) + + # ------------------------------------------------------------------------ + # Private `object` interface + # ------------------------------------------------------------------------ + + def __repr__(self): + return '{class_name}(location={location})'.format( + class_name=self.__class__.__name__, + location=(None if self.store_backend is None + else self.store_backend.location)) + + def __getstate__(self): + """ We don't store the timestamp when pickling, to avoid the hash + depending from it. + """ + state = self.__dict__.copy() + state['timestamp'] = None + return state + + +############################################################################### +# cache_validation_callback helpers +############################################################################### + +def expires_after(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, + hours=0, weeks=0): + """Helper cache_validation_callback to force recompute after a duration. + + Parameters + ---------- + days, seconds, microseconds, milliseconds, minutes, hours, weeks: numbers + argument passed to a timedelta. + """ + delta = datetime.timedelta( + days=days, seconds=seconds, microseconds=microseconds, + milliseconds=milliseconds, minutes=minutes, hours=hours, weeks=weeks + ) + + def cache_validation_callback(metadata): + computation_age = time.time() - metadata['time'] + return computation_age < delta.total_seconds() + + return cache_validation_callback diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle.py b/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..bf83bb0914571dfa978bbe41a6d0e3a44a9cb947 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle.py @@ -0,0 +1,659 @@ +"""Utilities for fast persistence of big data, with optional compression.""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import pickle +import os +import warnings +import io +from pathlib import Path + +from .compressor import lz4, LZ4_NOT_INSTALLED_ERROR +from .compressor import _COMPRESSORS, register_compressor, BinaryZlibFile +from .compressor import (ZlibCompressorWrapper, GzipCompressorWrapper, + BZ2CompressorWrapper, LZMACompressorWrapper, + XZCompressorWrapper, LZ4CompressorWrapper) +from .numpy_pickle_utils import Unpickler, Pickler +from .numpy_pickle_utils import _read_fileobject, _write_fileobject +from .numpy_pickle_utils import _read_bytes, BUFFER_SIZE +from .numpy_pickle_utils import _ensure_native_byte_order +from .numpy_pickle_compat import load_compatibility +from .numpy_pickle_compat import NDArrayWrapper +# For compatibility with old versions of joblib, we need ZNDArrayWrapper +# to be visible in the current namespace. +# Explicitly skipping next line from flake8 as it triggers an F401 warning +# which we don't care. 
+from .numpy_pickle_compat import ZNDArrayWrapper # noqa +from .backports import make_memmap + +# Register supported compressors +register_compressor('zlib', ZlibCompressorWrapper()) +register_compressor('gzip', GzipCompressorWrapper()) +register_compressor('bz2', BZ2CompressorWrapper()) +register_compressor('lzma', LZMACompressorWrapper()) +register_compressor('xz', XZCompressorWrapper()) +register_compressor('lz4', LZ4CompressorWrapper()) + + +############################################################################### +# Utility objects for persistence. + +# For convenience, 16 bytes are used to be sure to cover all the possible +# dtypes' alignments. For reference, see: +# https://numpy.org/devdocs/dev/alignment.html +NUMPY_ARRAY_ALIGNMENT_BYTES = 16 + + +class NumpyArrayWrapper(object): + """An object to be persisted instead of numpy arrays. + + This object is used to hack into the pickle machinery and read numpy + array data from our custom persistence format. + More precisely, this object is used for: + * carrying the information of the persisted array: subclass, shape, order, + dtype. Those ndarray metadata are used to correctly reconstruct the array + with low level numpy functions. + * determining if memmap is allowed on the array. + * reading the array bytes from a file. + * reading the array using memorymap from a file. + * writing the array bytes to a file. + + Attributes + ---------- + subclass: numpy.ndarray subclass + Determine the subclass of the wrapped array. + shape: numpy.ndarray shape + Determine the shape of the wrapped array. + order: {'C', 'F'} + Determine the order of wrapped array data. 'C' is for C order, 'F' is + for fortran order. + dtype: numpy.ndarray dtype + Determine the data type of the wrapped array. + allow_mmap: bool + Determine if memory mapping is allowed on the wrapped array. + Default: False. + """ + + def __init__(self, subclass, shape, order, dtype, allow_mmap=False, + numpy_array_alignment_bytes=NUMPY_ARRAY_ALIGNMENT_BYTES): + """Constructor. Store the useful information for later.""" + self.subclass = subclass + self.shape = shape + self.order = order + self.dtype = dtype + self.allow_mmap = allow_mmap + # We make numpy_array_alignment_bytes an instance attribute to allow us + # to change our mind about the default alignment and still load the old + # pickles (with the previous alignment) correctly + self.numpy_array_alignment_bytes = numpy_array_alignment_bytes + + def safe_get_numpy_array_alignment_bytes(self): + # NumpyArrayWrapper instances loaded from joblib <= 1.1 pickles don't + # have an numpy_array_alignment_bytes attribute + return getattr(self, 'numpy_array_alignment_bytes', None) + + def write_array(self, array, pickler): + """Write array bytes to pickler file handle. + + This function is an adaptation of the numpy write_array function + available in version 1.10.1 in numpy/lib/format.py. + """ + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) + if array.dtype.hasobject: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out with version 2 of the + # pickle protocol. 
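+            # Such object arrays are also written with allow_mmap=False, so
+            # they are never memory-mapped back at load time.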
+ pickle.dump(array, pickler.file_handle, protocol=2) + else: + numpy_array_alignment_bytes = \ + self.safe_get_numpy_array_alignment_bytes() + if numpy_array_alignment_bytes is not None: + current_pos = pickler.file_handle.tell() + pos_after_padding_byte = current_pos + 1 + padding_length = numpy_array_alignment_bytes - ( + pos_after_padding_byte % numpy_array_alignment_bytes) + # A single byte is written that contains the padding length in + # bytes + padding_length_byte = int.to_bytes( + padding_length, length=1, byteorder='little') + pickler.file_handle.write(padding_length_byte) + + if padding_length != 0: + padding = b'\xff' * padding_length + pickler.file_handle.write(padding) + + for chunk in pickler.np.nditer(array, + flags=['external_loop', + 'buffered', + 'zerosize_ok'], + buffersize=buffersize, + order=self.order): + pickler.file_handle.write(chunk.tobytes('C')) + + def read_array(self, unpickler): + """Read array from unpickler file handle. + + This function is an adaptation of the numpy read_array function + available in version 1.10.1 in numpy/lib/format.py. + """ + if len(self.shape) == 0: + count = 1 + else: + # joblib issue #859: we cast the elements of self.shape to int64 to + # prevent a potential overflow when computing their product. + shape_int64 = [unpickler.np.int64(x) for x in self.shape] + count = unpickler.np.multiply.reduce(shape_int64) + # Now read the actual data. + if self.dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + array = pickle.load(unpickler.file_handle) + else: + numpy_array_alignment_bytes = \ + self.safe_get_numpy_array_alignment_bytes() + if numpy_array_alignment_bytes is not None: + padding_byte = unpickler.file_handle.read(1) + padding_length = int.from_bytes( + padding_byte, byteorder='little') + if padding_length != 0: + unpickler.file_handle.read(padding_length) + + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, + self.dtype.itemsize) + + array = unpickler.np.empty(count, dtype=self.dtype) + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * self.dtype.itemsize) + data = _read_bytes(unpickler.file_handle, + read_size, "array data") + array[i:i + read_count] = \ + unpickler.np.frombuffer(data, dtype=self.dtype, + count=read_count) + del data + + if self.order == 'F': + array.shape = self.shape[::-1] + array = array.transpose() + else: + array.shape = self.shape + + # Detect byte order mismatch and swap as needed. 
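+        # (for instance, an array dumped on a big-endian host is swapped to
+        # the native byte order of the host doing the loading).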
+ return _ensure_native_byte_order(array) + + def read_mmap(self, unpickler): + """Read an array using numpy memmap.""" + current_pos = unpickler.file_handle.tell() + offset = current_pos + numpy_array_alignment_bytes = \ + self.safe_get_numpy_array_alignment_bytes() + + if numpy_array_alignment_bytes is not None: + padding_byte = unpickler.file_handle.read(1) + padding_length = int.from_bytes(padding_byte, byteorder='little') + # + 1 is for the padding byte + offset += padding_length + 1 + + if unpickler.mmap_mode == 'w+': + unpickler.mmap_mode = 'r+' + + marray = make_memmap(unpickler.filename, + dtype=self.dtype, + shape=self.shape, + order=self.order, + mode=unpickler.mmap_mode, + offset=offset) + # update the offset so that it corresponds to the end of the read array + unpickler.file_handle.seek(offset + marray.nbytes) + + if (numpy_array_alignment_bytes is None and + current_pos % NUMPY_ARRAY_ALIGNMENT_BYTES != 0): + message = ( + f'The memmapped array {marray} loaded from the file ' + f'{unpickler.file_handle.name} is not byte aligned. ' + 'This may cause segmentation faults if this memmapped array ' + 'is used in some libraries like BLAS or PyTorch. ' + 'To get rid of this warning, regenerate your pickle file ' + 'with joblib >= 1.2.0. ' + 'See https://github.com/joblib/joblib/issues/563 ' + 'for more details' + ) + warnings.warn(message) + + return _ensure_native_byte_order(marray) + + def read(self, unpickler): + """Read the array corresponding to this wrapper. + + Use the unpickler to get all information to correctly read the array. + + Parameters + ---------- + unpickler: NumpyUnpickler + + Returns + ------- + array: numpy.ndarray + + """ + # When requested, only use memmap mode if allowed. + if unpickler.mmap_mode is not None and self.allow_mmap: + array = self.read_mmap(unpickler) + else: + array = self.read_array(unpickler) + + # Manage array subclass case + if (hasattr(array, '__array_prepare__') and + self.subclass not in (unpickler.np.ndarray, + unpickler.np.memmap)): + # We need to reconstruct another subclass + new_array = unpickler.np.core.multiarray._reconstruct( + self.subclass, (0,), 'b') + return new_array.__array_prepare__(array) + else: + return array + +############################################################################### +# Pickler classes + + +class NumpyPickler(Pickler): + """A pickler to persist big data efficiently. + + The main features of this object are: + * persistence of numpy arrays in a single file. + * optional compression with a special care on avoiding memory copies. + + Attributes + ---------- + fp: file + File object handle used for serializing the input object. + protocol: int, optional + Pickle protocol used. Default is pickle.DEFAULT_PROTOCOL. 
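+
+    Note that the produced file embeds raw numpy array buffers and does not
+    follow the standard pickle format: it should be read back with
+    ``joblib.load``, not ``pickle.load``.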
+ """ + + dispatch = Pickler.dispatch.copy() + + def __init__(self, fp, protocol=None): + self.file_handle = fp + self.buffered = isinstance(self.file_handle, BinaryZlibFile) + + # By default we want a pickle protocol that only changes with + # the major python version and not the minor one + if protocol is None: + protocol = pickle.DEFAULT_PROTOCOL + + Pickler.__init__(self, self.file_handle, protocol=protocol) + # delayed import of numpy, to avoid tight coupling + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def _create_array_wrapper(self, array): + """Create and returns a numpy array wrapper from a numpy array.""" + order = 'F' if (array.flags.f_contiguous and + not array.flags.c_contiguous) else 'C' + allow_mmap = not self.buffered and not array.dtype.hasobject + + kwargs = {} + try: + self.file_handle.tell() + except io.UnsupportedOperation: + kwargs = {'numpy_array_alignment_bytes': None} + + wrapper = NumpyArrayWrapper(type(array), + array.shape, order, array.dtype, + allow_mmap=allow_mmap, + **kwargs) + + return wrapper + + def save(self, obj): + """Subclass the Pickler `save` method. + + This is a total abuse of the Pickler class in order to use the numpy + persistence function `save` instead of the default pickle + implementation. The numpy array is replaced by a custom wrapper in the + pickle persistence stack and the serialized array is written right + after in the file. Warning: the file produced does not follow the + pickle format. As such it can not be read with `pickle.load`. + """ + if self.np is not None and type(obj) in (self.np.ndarray, + self.np.matrix, + self.np.memmap): + if type(obj) is self.np.memmap: + # Pickling doesn't work with memmapped arrays + obj = self.np.asanyarray(obj) + + # The array wrapper is pickled instead of the real array. + wrapper = self._create_array_wrapper(obj) + Pickler.save(self, wrapper) + + # A framer was introduced with pickle protocol 4 and we want to + # ensure the wrapper object is written before the numpy array + # buffer in the pickle file. + # See https://www.python.org/dev/peps/pep-3154/#framing to get + # more information on the framer behavior. + if self.proto >= 4: + self.framer.commit_frame(force=True) + + # And then array bytes are written right after the wrapper. + wrapper.write_array(obj, self) + return + + return Pickler.save(self, obj) + + +class NumpyUnpickler(Unpickler): + """A subclass of the Unpickler to unpickle our numpy pickles. + + Attributes + ---------- + mmap_mode: str + The memorymap mode to use for reading numpy arrays. + file_handle: file_like + File object to unpickle from. + filename: str + Name of the file to unpickle from. It should correspond to file_handle. + This parameter is required when using mmap_mode. + np: module + Reference to numpy module if numpy is installed else None. + + """ + + dispatch = Unpickler.dispatch.copy() + + def __init__(self, filename, file_handle, mmap_mode=None): + # The next line is for backward compatibility with pickle generated + # with joblib versions less than 0.10. + self._dirname = os.path.dirname(filename) + + self.mmap_mode = mmap_mode + self.file_handle = file_handle + # filename is required for numpy mmap mode. + self.filename = filename + self.compat_mode = False + Unpickler.__init__(self, self.file_handle) + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def load_build(self): + """Called to set the state of a newly created object. 
+ + We capture it to replace our place-holder objects, NDArrayWrapper or + NumpyArrayWrapper, by the array we are interested in. We + replace them directly in the stack of pickler. + NDArrayWrapper is used for backward compatibility with joblib <= 0.9. + """ + Unpickler.load_build(self) + + # For backward compatibility, we support NDArrayWrapper objects. + if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)): + if self.np is None: + raise ImportError("Trying to unpickle an ndarray, " + "but numpy didn't import correctly") + array_wrapper = self.stack.pop() + # If any NDArrayWrapper is found, we switch to compatibility mode, + # this will be used to raise a DeprecationWarning to the user at + # the end of the unpickling. + if isinstance(array_wrapper, NDArrayWrapper): + self.compat_mode = True + self.stack.append(array_wrapper.read(self)) + + # Be careful to register our new method. + dispatch[pickle.BUILD[0]] = load_build + + +############################################################################### +# Utility functions + +def dump(value, filename, compress=0, protocol=None, cache_size=None): + """Persist an arbitrary Python object into one file. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + value: any Python object + The object to store to disk. + filename: str, pathlib.Path, or file object. + The file object or path of the file in which it is to be stored. + The compression method corresponding to one of the supported filename + extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used + automatically. + compress: int from 0 to 9 or bool or 2-tuple, optional + Optional compression level for the data. 0 or False is no compression. + Higher value means more compression, but also slower read and + write times. Using a value of 3 is often a good compromise. + See the notes for more details. + If compress is True, the compression level used is 3. + If compress is a 2-tuple, the first element must correspond to a string + between supported compressors (e.g 'zlib', 'gzip', 'bz2', 'lzma' + 'xz'), the second element must be an integer from 0 to 9, corresponding + to the compression level. + protocol: int, optional + Pickle protocol, see pickle.dump documentation for more details. + cache_size: positive int, optional + This option is deprecated in 0.10 and has no effect. + + Returns + ------- + filenames: list of strings + The list of file names in which the data is stored. If + compress is false, each array is stored in a different file. + + See Also + -------- + joblib.load : corresponding loader + + Notes + ----- + Memmapping on load cannot be used for compressed files. Thus + using compression can significantly slow down loading. In + addition, compressed files take up extra memory during + dump and load. + + """ + + if Path is not None and isinstance(filename, Path): + filename = str(filename) + + is_filename = isinstance(filename, str) + is_fileobj = hasattr(filename, "write") + + compress_method = 'zlib' # zlib is the default compression method. + if compress is True: + # By default, if compress is enabled, we want the default compress + # level of the compressor. 
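+        # (a level of None lets the chosen compressor use its own default,
+        # i.e. level 3 for the default zlib method, as documented above).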
+ compress_level = None + elif isinstance(compress, tuple): + # a 2-tuple was set in compress + if len(compress) != 2: + raise ValueError( + 'Compress argument tuple should contain exactly 2 elements: ' + '(compress method, compress level), you passed {}' + .format(compress)) + compress_method, compress_level = compress + elif isinstance(compress, str): + compress_method = compress + compress_level = None # Use default compress level + compress = (compress_method, compress_level) + else: + compress_level = compress + + if compress_method == 'lz4' and lz4 is None: + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + + if (compress_level is not None and + compress_level is not False and + compress_level not in range(10)): + # Raising an error if a non valid compress level is given. + raise ValueError( + 'Non valid compress level given: "{}". Possible values are ' + '{}.'.format(compress_level, list(range(10)))) + + if compress_method not in _COMPRESSORS: + # Raising an error if an unsupported compression method is given. + raise ValueError( + 'Non valid compression method given: "{}". Possible values are ' + '{}.'.format(compress_method, _COMPRESSORS)) + + if not is_filename and not is_fileobj: + # People keep inverting arguments, and the resulting error is + # incomprehensible + raise ValueError( + 'Second argument should be a filename or a file-like object, ' + '%s (type %s) was given.' + % (filename, type(filename)) + ) + + if is_filename and not isinstance(compress, tuple): + # In case no explicit compression was requested using both compression + # method and level in a tuple and the filename has an explicit + # extension, we select the corresponding compressor. + + # unset the variable to be sure no compression level is set afterwards. + compress_method = None + for name, compressor in _COMPRESSORS.items(): + if filename.endswith(compressor.extension): + compress_method = name + + if compress_method in _COMPRESSORS and compress_level == 0: + # we choose the default compress_level in case it was not given + # as an argument (using compress). + compress_level = None + + if cache_size is not None: + # Cache size is deprecated starting from version 0.10 + warnings.warn("Please do not set 'cache_size' in joblib.dump, " + "this parameter has no effect and will be removed. " + "You used 'cache_size={}'".format(cache_size), + DeprecationWarning, stacklevel=2) + + if compress_level != 0: + with _write_fileobject(filename, compress=(compress_method, + compress_level)) as f: + NumpyPickler(f, protocol=protocol).dump(value) + elif is_filename: + with open(filename, 'wb') as f: + NumpyPickler(f, protocol=protocol).dump(value) + else: + NumpyPickler(filename, protocol=protocol).dump(value) + + # If the target container is a file object, nothing is returned. + if is_fileobj: + return + + # For compatibility, the list of created filenames (e.g with one element + # after 0.10.0) is returned by default. + return [filename] + + +def _unpickle(fobj, filename="", mmap_mode=None): + """Internal unpickling function.""" + # We are careful to open the file handle early and keep it open to + # avoid race-conditions on renames. + # That said, if data is stored in companion files, which can be + # the case with the old persistence format, moving the directory + # will create a race when joblib tries to access the companion + # files. 
+ unpickler = NumpyUnpickler(filename, fobj, mmap_mode=mmap_mode) + obj = None + try: + obj = unpickler.load() + if unpickler.compat_mode: + warnings.warn("The file '%s' has been generated with a " + "joblib version less than 0.10. " + "Please regenerate this pickle file." + % filename, + DeprecationWarning, stacklevel=3) + except UnicodeDecodeError as exc: + # More user-friendly error message + new_exc = ValueError( + 'You may be trying to read with ' + 'python 3 a joblib pickle generated with python 2. ' + 'This feature is not supported by joblib.') + new_exc.__cause__ = exc + raise new_exc + return obj + + +def load_temporary_memmap(filename, mmap_mode, unlink_on_gc_collect): + from ._memmapping_reducer import JOBLIB_MMAPS, add_maybe_unlink_finalizer + obj = load(filename, mmap_mode) + JOBLIB_MMAPS.add(obj.filename) + if unlink_on_gc_collect: + add_maybe_unlink_finalizer(obj) + return obj + + +def load(filename, mmap_mode=None): + """Reconstruct a Python object from a file persisted with joblib.dump. + + Read more in the :ref:`User Guide `. + + WARNING: joblib.load relies on the pickle module and can therefore + execute arbitrary Python code. It should therefore never be used + to load files from untrusted sources. + + Parameters + ---------- + filename: str, pathlib.Path, or file object. + The file object or path of the file from which to load the object + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, the arrays are memory-mapped from the disk. This + mode has no effect for compressed files. Note that in this + case the reconstructed object might no longer match exactly + the originally pickled object. + + Returns + ------- + result: any Python object + The object stored in the file. + + See Also + -------- + joblib.dump : function to save an object + + Notes + ----- + + This function can load numpy array files saved separately during the + dump. If the mmap_mode argument is given, it is passed to np.load and + arrays are loaded as memmaps. As a consequence, the reconstructed + object might not match the original pickled object. Note that if the + file was saved with compression, the arrays cannot be memmapped. + """ + if Path is not None and isinstance(filename, Path): + filename = str(filename) + + if hasattr(filename, "read"): + fobj = filename + filename = getattr(fobj, 'name', '') + with _read_fileobject(fobj, filename, mmap_mode) as fobj: + obj = _unpickle(fobj) + else: + with open(filename, 'rb') as f: + with _read_fileobject(f, filename, mmap_mode) as fobj: + if isinstance(fobj, str): + # if the returned file object is a string, this means we + # try to load a pickle file generated with an version of + # Joblib so we load it with joblib compatibility function. 
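+                    # (this is the pre-0.10 "zfile" format handled by
+                    # numpy_pickle_compat.load_compatibility).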
+ return load_compatibility(fobj) + + obj = _unpickle(fobj, filename, mmap_mode) + return obj diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py b/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..32612849bdb8320990f0433d3ded42898046c913 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle_compat.py @@ -0,0 +1,244 @@ +"""Numpy pickle compatibility functions.""" + +import pickle +import os +import zlib +import inspect + +from io import BytesIO + +from .numpy_pickle_utils import _ZFILE_PREFIX +from .numpy_pickle_utils import Unpickler +from .numpy_pickle_utils import _ensure_native_byte_order + + +def hex_str(an_int): + """Convert an int to an hexadecimal string.""" + return '{:#x}'.format(an_int) + + +def asbytes(s): + if isinstance(s, bytes): + return s + return s.encode('latin1') + + +_MAX_LEN = len(hex_str(2 ** 64)) +_CHUNK_SIZE = 64 * 1024 + + +def read_zfile(file_handle): + """Read the z-file and return the content as a string. + + Z-files are raw data compressed with zlib used internally by joblib + for persistence. Backward compatibility is not guaranteed. Do not + use for external purposes. + """ + file_handle.seek(0) + header_length = len(_ZFILE_PREFIX) + _MAX_LEN + length = file_handle.read(header_length) + length = length[len(_ZFILE_PREFIX):] + length = int(length, 16) + + # With python2 and joblib version <= 0.8.4 compressed pickle header is one + # character wider so we need to ignore an additional space if present. + # Note: the first byte of the zlib data is guaranteed not to be a + # space according to + # https://tools.ietf.org/html/rfc6713#section-2.1 + next_byte = file_handle.read(1) + if next_byte != b' ': + # The zlib compressed data has started and we need to go back + # one byte + file_handle.seek(header_length) + + # We use the known length of the data to tell Zlib the size of the + # buffer to allocate. + data = zlib.decompress(file_handle.read(), 15, length) + assert len(data) == length, ( + "Incorrect data length while decompressing %s." + "The file could be corrupted." % file_handle) + return data + + +def write_zfile(file_handle, data, compress=1): + """Write the data in the given file as a Z-file. + + Z-files are raw data compressed with zlib used internally by joblib + for persistence. Backward compatibility is not guaranteed. Do not + use for external purposes. + """ + file_handle.write(_ZFILE_PREFIX) + length = hex_str(len(data)) + # Store the length of the data + file_handle.write(asbytes(length.ljust(_MAX_LEN))) + file_handle.write(zlib.compress(asbytes(data), compress)) + +############################################################################### +# Utility objects for persistence. + + +class NDArrayWrapper(object): + """An object to be persisted instead of numpy arrays. + + The only thing this object does, is to carry the filename in which + the array has been persisted, and the array subclass. + """ + + def __init__(self, filename, subclass, allow_mmap=True): + """Constructor. 
Store the useful information for later.""" + self.filename = filename + self.subclass = subclass + self.allow_mmap = allow_mmap + + def read(self, unpickler): + """Reconstruct the array.""" + filename = os.path.join(unpickler._dirname, self.filename) + # Load the array from the disk + # use getattr instead of self.allow_mmap to ensure backward compat + # with NDArrayWrapper instances pickled with joblib < 0.9.0 + allow_mmap = getattr(self, 'allow_mmap', True) + kwargs = {} + if allow_mmap: + kwargs['mmap_mode'] = unpickler.mmap_mode + if "allow_pickle" in inspect.signature(unpickler.np.load).parameters: + # Required in numpy 1.16.3 and later to aknowledge the security + # risk. + kwargs["allow_pickle"] = True + array = unpickler.np.load(filename, **kwargs) + + # Detect byte order mismatch and swap as needed. + array = _ensure_native_byte_order(array) + + # Reconstruct subclasses. This does not work with old + # versions of numpy + if (hasattr(array, '__array_prepare__') and + self.subclass not in (unpickler.np.ndarray, + unpickler.np.memmap)): + # We need to reconstruct another subclass + new_array = unpickler.np.core.multiarray._reconstruct( + self.subclass, (0,), 'b') + return new_array.__array_prepare__(array) + else: + return array + + +class ZNDArrayWrapper(NDArrayWrapper): + """An object to be persisted instead of numpy arrays. + + This object store the Zfile filename in which + the data array has been persisted, and the meta information to + retrieve it. + The reason that we store the raw buffer data of the array and + the meta information, rather than array representation routine + (tobytes) is that it enables us to use completely the strided + model to avoid memory copies (a and a.T store as fast). In + addition saving the heavy information separately can avoid + creating large temporary buffers when unpickling data with + large arrays. + """ + + def __init__(self, filename, init_args, state): + """Constructor. Store the useful information for later.""" + self.filename = filename + self.state = state + self.init_args = init_args + + def read(self, unpickler): + """Reconstruct the array from the meta-information and the z-file.""" + # Here we a simply reproducing the unpickling mechanism for numpy + # arrays + filename = os.path.join(unpickler._dirname, self.filename) + array = unpickler.np.core.multiarray._reconstruct(*self.init_args) + with open(filename, 'rb') as f: + data = read_zfile(f) + state = self.state + (data,) + array.__setstate__(state) + return array + + +class ZipNumpyUnpickler(Unpickler): + """A subclass of the Unpickler to unpickle our numpy pickles.""" + + dispatch = Unpickler.dispatch.copy() + + def __init__(self, filename, file_handle, mmap_mode=None): + """Constructor.""" + self._filename = os.path.basename(filename) + self._dirname = os.path.dirname(filename) + self.mmap_mode = mmap_mode + self.file_handle = self._open_pickle(file_handle) + Unpickler.__init__(self, self.file_handle) + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def _open_pickle(self, file_handle): + return BytesIO(read_zfile(file_handle)) + + def load_build(self): + """Set the state of a newly created object. + + We capture it to replace our place-holder objects, + NDArrayWrapper, by the array we are interested in. We + replace them directly in the stack of pickler. 
+ """ + Unpickler.load_build(self) + if isinstance(self.stack[-1], NDArrayWrapper): + if self.np is None: + raise ImportError("Trying to unpickle an ndarray, " + "but numpy didn't import correctly") + nd_array_wrapper = self.stack.pop() + array = nd_array_wrapper.read(self) + self.stack.append(array) + + dispatch[pickle.BUILD[0]] = load_build + + +def load_compatibility(filename): + """Reconstruct a Python object from a file persisted with joblib.dump. + + This function ensures the compatibility with joblib old persistence format + (<= 0.9.3). + + Parameters + ---------- + filename: string + The name of the file from which to load the object + + Returns + ------- + result: any Python object + The object stored in the file. + + See Also + -------- + joblib.dump : function to save an object + + Notes + ----- + + This function can load numpy array files saved separately during the + dump. + """ + with open(filename, 'rb') as file_handle: + # We are careful to open the file handle early and keep it open to + # avoid race-conditions on renames. That said, if data is stored in + # companion files, moving the directory will create a race when + # joblib tries to access the companion files. + unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle) + try: + obj = unpickler.load() + except UnicodeDecodeError as exc: + # More user-friendly error message + new_exc = ValueError( + 'You may be trying to read with ' + 'python 3 a joblib pickle generated with python 2. ' + 'This feature is not supported by joblib.') + new_exc.__cause__ = exc + raise new_exc + finally: + if hasattr(unpickler, 'file_handle'): + unpickler.file_handle.close() + return obj diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py b/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..23cfb34ecb19161a2eca6bc85f29c3996162572c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/numpy_pickle_utils.py @@ -0,0 +1,253 @@ +"""Utilities for fast persistence of big data, with optional compression.""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import pickle +import io +import sys +import warnings +import contextlib + +from .compressor import _ZFILE_PREFIX +from .compressor import _COMPRESSORS + +try: + import numpy as np +except ImportError: + np = None + +Unpickler = pickle._Unpickler +Pickler = pickle._Pickler +xrange = range + + +try: + # The python standard library can be built without bz2 so we make bz2 + # usage optional. + # see https://github.com/scikit-learn/scikit-learn/issues/7526 for more + # details. + import bz2 +except ImportError: + bz2 = None + +# Buffer size used in io.BufferedReader and io.BufferedWriter +_IO_BUFFER_SIZE = 1024 ** 2 + + +def _is_raw_file(fileobj): + """Check if fileobj is a raw file object, e.g created with open.""" + fileobj = getattr(fileobj, 'raw', fileobj) + return isinstance(fileobj, io.FileIO) + + +def _get_prefixes_max_len(): + # Compute the max prefix len of registered compressors. 
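+    # This is how many leading bytes _detect_compressor needs to peek (or
+    # read) from a file object to identify its compression format.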
+ prefixes = [len(compressor.prefix) for compressor in _COMPRESSORS.values()] + prefixes += [len(_ZFILE_PREFIX)] + return max(prefixes) + + +def _is_numpy_array_byte_order_mismatch(array): + """Check if numpy array is having byte order mismatch""" + return ((sys.byteorder == 'big' and + (array.dtype.byteorder == '<' or + (array.dtype.byteorder == '|' and array.dtype.fields and + all(e[0].byteorder == '<' + for e in array.dtype.fields.values())))) or + (sys.byteorder == 'little' and + (array.dtype.byteorder == '>' or + (array.dtype.byteorder == '|' and array.dtype.fields and + all(e[0].byteorder == '>' + for e in array.dtype.fields.values()))))) + + +def _ensure_native_byte_order(array): + """Use the byte order of the host while preserving values + + Does nothing if array already uses the system byte order. + """ + if _is_numpy_array_byte_order_mismatch(array): + array = array.byteswap().view(array.dtype.newbyteorder('=')) + return array + + +############################################################################### +# Cache file utilities +def _detect_compressor(fileobj): + """Return the compressor matching fileobj. + + Parameters + ---------- + fileobj: file object + + Returns + ------- + str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'} + """ + # Read the magic number in the first bytes of the file. + max_prefix_len = _get_prefixes_max_len() + if hasattr(fileobj, 'peek'): + # Peek allows to read those bytes without moving the cursor in the + # file whic. + first_bytes = fileobj.peek(max_prefix_len) + else: + # Fallback to seek if the fileobject is not peekable. + first_bytes = fileobj.read(max_prefix_len) + fileobj.seek(0) + + if first_bytes.startswith(_ZFILE_PREFIX): + return "compat" + else: + for name, compressor in _COMPRESSORS.items(): + if first_bytes.startswith(compressor.prefix): + return name + + return "not-compressed" + + +def _buffered_read_file(fobj): + """Return a buffered version of a read file object.""" + return io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE) + + +def _buffered_write_file(fobj): + """Return a buffered version of a write file object.""" + return io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE) + + +@contextlib.contextmanager +def _read_fileobject(fileobj, filename, mmap_mode=None): + """Utility function opening the right fileobject from a filename. + + The magic number is used to choose between the type of file object to open: + * regular file object (default) + * zlib file object + * gzip file object + * bz2 file object + * lzma file object (for xz and lzma compressor) + + Parameters + ---------- + fileobj: file object + compressor: str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', + 'not-compressed'} + filename: str + filename path corresponding to the fileobj parameter. + mmap_mode: str + memory map mode that should be used to open the pickle file. This + parameter is useful to verify that the user is not trying to one with + compression. Default: None. + + Returns + ------- + a file like object + + """ + # Detect if the fileobj contains compressed data. + compressor = _detect_compressor(fileobj) + + if compressor == 'compat': + # Compatibility with old pickle mode: simply return the input + # filename "as-is" and let the compatibility function be called by the + # caller. + warnings.warn("The file '%s' has been generated with a joblib " + "version less than 0.10. " + "Please regenerate this pickle file." 
% filename, + DeprecationWarning, stacklevel=2) + yield filename + else: + if compressor in _COMPRESSORS: + # based on the compressor detected in the file, we open the + # correct decompressor file object, wrapped in a buffer. + compressor_wrapper = _COMPRESSORS[compressor] + inst = compressor_wrapper.decompressor_file(fileobj) + fileobj = _buffered_read_file(inst) + + # Checking if incompatible load parameters with the type of file: + # mmap_mode cannot be used with compressed file or in memory buffers + # such as io.BytesIO. + if mmap_mode is not None: + if isinstance(fileobj, io.BytesIO): + warnings.warn('In memory persistence is not compatible with ' + 'mmap_mode "%(mmap_mode)s" flag passed. ' + 'mmap_mode option will be ignored.' + % locals(), stacklevel=2) + elif compressor != 'not-compressed': + warnings.warn('mmap_mode "%(mmap_mode)s" is not compatible ' + 'with compressed file %(filename)s. ' + '"%(mmap_mode)s" flag will be ignored.' + % locals(), stacklevel=2) + elif not _is_raw_file(fileobj): + warnings.warn('"%(fileobj)r" is not a raw file, mmap_mode ' + '"%(mmap_mode)s" flag will be ignored.' + % locals(), stacklevel=2) + + yield fileobj + + +def _write_fileobject(filename, compress=("zlib", 3)): + """Return the right compressor file object in write mode.""" + compressmethod = compress[0] + compresslevel = compress[1] + + if compressmethod in _COMPRESSORS.keys(): + file_instance = _COMPRESSORS[compressmethod].compressor_file( + filename, compresslevel=compresslevel) + return _buffered_write_file(file_instance) + else: + file_instance = _COMPRESSORS['zlib'].compressor_file( + filename, compresslevel=compresslevel) + return _buffered_write_file(file_instance) + + +# Utility functions/variables from numpy required for writing arrays. +# We need at least the functions introduced in version 1.9 of numpy. Here, +# we use the ones from numpy 1.10.2. +BUFFER_SIZE = 2 ** 18 # size of buffer for reading npz files in bytes + + +def _read_bytes(fp, size, error_template="ran out of data"): + """Read from file-like object until size bytes are read. + + TODO python2_drop: is it still needed? The docstring mentions python 2.6 + and it looks like this can be at least simplified ... + + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + + This function was taken from numpy/lib/format.py in version 1.10.2. + + Parameters + ---------- + fp: file-like object + size: int + error_template: str + + Returns + ------- + a bytes object + The data read in bytes. + + """ + data = bytes() + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. 
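These private helpers sit behind the public dump/load pair: the compress tuple accepted by joblib.dump is ultimately handled by _write_fileobject, and _detect_compressor picks the matching decompressor on load. A hedged usage sketch; the filename data.pkl.z and the array are illustrative::

    import numpy as np
    from joblib import dump, load

    obj = {"weights": np.arange(10.0)}
    dump(obj, "data.pkl.z", compress=("zlib", 3))   # zlib compressor, level 3
    restored = load("data.pkl.z")                   # compressor inferred from magic bytes
    assert (restored["weights"] == obj["weights"]).all()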
note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except io.BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/parallel.py b/env-llmeval/lib/python3.10/site-packages/joblib/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..e42385df86cc8348d6b1e30b6c8dc5aecff9a8a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/parallel.py @@ -0,0 +1,2010 @@ +""" +Helpers for embarrassingly parallel code. +""" +# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org > +# Copyright: 2010, Gael Varoquaux +# License: BSD 3 clause + +from __future__ import division + +import os +import sys +from math import sqrt +import functools +import collections +import time +import threading +import itertools +from uuid import uuid4 +from numbers import Integral +import warnings +import queue +import weakref +from contextlib import nullcontext + +from multiprocessing import TimeoutError + +from ._multiprocessing_helpers import mp + +from .logger import Logger, short_format_time +from .disk import memstr_to_bytes +from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend, + ThreadingBackend, SequentialBackend, + LokyBackend) +from ._utils import eval_expr, _Sentinel + +# Make sure that those two classes are part of the public joblib.parallel API +# so that 3rd party backend implementers can import them from here. +from ._parallel_backends import AutoBatchingMixin # noqa +from ._parallel_backends import ParallelBackendBase # noqa + + +IS_PYPY = hasattr(sys, "pypy_version_info") + + +BACKENDS = { + 'threading': ThreadingBackend, + 'sequential': SequentialBackend, +} +# name of the backend used by default by Parallel outside of any context +# managed by ``parallel_config`` or ``parallel_backend``. 
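_read_bytes guards against short reads from file-like objects. A stand-alone sketch of the same read-exactly-n-bytes loop, without the non-blocking handling; read_exact is an illustrative name::

    import io

    def read_exact(fp, size):
        """Read exactly `size` bytes from fp or raise ValueError."""
        data = b""
        while len(data) < size:
            chunk = fp.read(size - len(data))
            if not chunk:                      # premature EOF
                raise ValueError(f"expected {size} bytes, got {len(data)}")
            data += chunk
        return data

    assert read_exact(io.BytesIO(b"abcdefgh"), 4) == b"abcd"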
+ +# threading is the only backend that is always everywhere +DEFAULT_BACKEND = 'threading' + +MAYBE_AVAILABLE_BACKENDS = {'multiprocessing', 'loky'} + +# if multiprocessing is available, so is loky, we set it as the default +# backend +if mp is not None: + BACKENDS['multiprocessing'] = MultiprocessingBackend + from .externals import loky + BACKENDS['loky'] = LokyBackend + DEFAULT_BACKEND = 'loky' + + +DEFAULT_THREAD_BACKEND = 'threading' + + +# Thread local value that can be overridden by the ``parallel_config`` context +# manager +_backend = threading.local() + + +def _register_dask(): + """Register Dask Backend if called with parallel_config(backend="dask")""" + try: + from ._dask import DaskDistributedBackend + register_parallel_backend('dask', DaskDistributedBackend) + except ImportError as e: + msg = ("To use the dask.distributed backend you must install both " + "the `dask` and distributed modules.\n\n" + "See https://dask.pydata.org/en/latest/install.html for more " + "information.") + raise ImportError(msg) from e + + +EXTERNAL_BACKENDS = { + 'dask': _register_dask, +} + + +# Sentinels for the default values of the Parallel constructor and +# the parallel_config and parallel_backend context managers +default_parallel_config = { + "backend": _Sentinel(default_value=None), + "n_jobs": _Sentinel(default_value=None), + "verbose": _Sentinel(default_value=0), + "temp_folder": _Sentinel(default_value=None), + "max_nbytes": _Sentinel(default_value="1M"), + "mmap_mode": _Sentinel(default_value="r"), + "prefer": _Sentinel(default_value=None), + "require": _Sentinel(default_value=None), +} + + +VALID_BACKEND_HINTS = ('processes', 'threads', None) +VALID_BACKEND_CONSTRAINTS = ('sharedmem', None) + + +def _get_config_param(param, context_config, key): + """Return the value of a parallel config parameter + + Explicitly setting it in Parallel has priority over setting in a + parallel_(config/backend) context manager. 
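_get_config_param below implements a simple precedence rule: an argument passed explicitly to Parallel wins over the surrounding parallel_config context, which wins over the library default. A small sketch using only public API; the values 2 and 1 are arbitrary::

    from joblib import Parallel, delayed, parallel_config

    with parallel_config(n_jobs=2, verbose=0):
        # The explicit n_jobs=1 overrides the context value of 2, which in
        # turn would have overridden the library default.
        out = Parallel(n_jobs=1)(delayed(abs)(-i) for i in range(3))
    print(out)   # [0, 1, 2]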
+ """ + if param is not default_parallel_config[key]: + # param is explicitely set, return it + return param + + if context_config[key] is not default_parallel_config[key]: + # there's a context manager and the key is set, return it + return context_config[key] + + # Otherwise, we are in the default_parallel_config, + # return the default value + return param.default_value + + +def get_active_backend( + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + verbose=default_parallel_config["verbose"], +): + """Return the active default backend""" + backend, config = _get_active_backend(prefer, require, verbose) + n_jobs = _get_config_param( + default_parallel_config['n_jobs'], config, "n_jobs" + ) + return backend, n_jobs + + +def _get_active_backend( + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + verbose=default_parallel_config["verbose"], +): + """Return the active default backend""" + + backend_config = getattr(_backend, "config", default_parallel_config) + + backend = _get_config_param( + default_parallel_config['backend'], backend_config, "backend" + ) + prefer = _get_config_param(prefer, backend_config, "prefer") + require = _get_config_param(require, backend_config, "require") + verbose = _get_config_param(verbose, backend_config, "verbose") + + if prefer not in VALID_BACKEND_HINTS: + raise ValueError( + f"prefer={prefer} is not a valid backend hint, " + f"expected one of {VALID_BACKEND_HINTS}" + ) + if require not in VALID_BACKEND_CONSTRAINTS: + raise ValueError( + f"require={require} is not a valid backend constraint, " + f"expected one of {VALID_BACKEND_CONSTRAINTS}" + ) + if prefer == 'processes' and require == 'sharedmem': + raise ValueError( + "prefer == 'processes' and require == 'sharedmem'" + " are inconsistent settings" + ) + + explicit_backend = True + if backend is None: + + # We are either outside of the scope of any parallel_(config/backend) + # context manager or the context manager did not set a backend. + # create the default backend instance now. + backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0) + explicit_backend = False + + # Try to use the backend set by the user with the context manager. + + nesting_level = backend.nesting_level + uses_threads = getattr(backend, 'uses_threads', False) + supports_sharedmem = getattr(backend, 'supports_sharedmem', False) + # Force to use thread-based backend if the provided backend does not + # match the shared memory constraint or if the backend is not explicitely + # given and threads are prefered. + force_threads = (require == 'sharedmem' and not supports_sharedmem) + force_threads |= ( + not explicit_backend and prefer == 'threads' and not uses_threads + ) + if force_threads: + # This backend does not match the shared memory constraint: + # fallback to the default thead-based backend. + sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND]( + nesting_level=nesting_level + ) + # Warn the user if we forced the backend to thread-based, while the + # user explicitely specified a non-thread-based backend. + if verbose >= 10 and explicit_backend: + print( + f"Using {sharedmem_backend.__class__.__name__} as " + f"joblib backend instead of {backend.__class__.__name__} " + "as the latter does not provide shared memory semantics." 
+ ) + # Force to n_jobs=1 by default + thread_config = backend_config.copy() + thread_config['n_jobs'] = 1 + return sharedmem_backend, thread_config + + return backend, backend_config + + +class parallel_config: + """Set the default backend or configuration for :class:`~joblib.Parallel`. + + This is an alternative to directly passing keyword arguments to the + :class:`~joblib.Parallel` class constructor. It is particularly useful when + calling into library code that uses joblib internally but does not expose + the various parallel configuration arguments in its own API. + + Parameters + ---------- + backend: str or ParallelBackendBase instance, default=None + If ``backend`` is a string it must match a previously registered + implementation using the :func:`~register_parallel_backend` function. + + By default the following backends are available: + + - 'loky': single-host, process-based parallelism (used by default), + - 'threading': single-host, thread-based parallelism, + - 'multiprocessing': legacy single-host, process-based parallelism. + + 'loky' is recommended to run functions that manipulate Python objects. + 'threading' is a low-overhead alternative that is most efficient for + functions that release the Global Interpreter Lock: e.g. I/O-bound + code or CPU-bound code in a few calls to native code that explicitly + releases the GIL. Note that on some rare systems (such as pyodide), + multiprocessing and loky may not be available, in which case joblib + defaults to threading. + + In addition, if the ``dask`` and ``distributed`` Python packages are + installed, it is possible to use the 'dask' backend for better + scheduling of nested parallel calls without over-subscription and + potentially distribute parallel calls over a networked cluster of + several hosts. + + It is also possible to use the distributed 'ray' backend for + distributing the workload to a cluster of nodes. See more details + in the Examples section below. + + Alternatively the backend can be passed directly as an instance. + + n_jobs: int, default=None + The maximum number of concurrently running jobs, such as the number + of Python worker processes when ``backend="loky"`` or the size of the + thread-pool when ``backend="threading"``. + This argument is converted to an integer, rounded below for float. + If -1 is given, `joblib` tries to use all CPUs. The number of CPUs + ``n_cpus`` is obtained with :func:`~cpu_count`. + For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance, + using ``n_jobs=-2`` will result in all CPUs but one being used. + This argument can also go above ``n_cpus``, which will cause + oversubscription. In some cases, slight oversubscription can be + beneficial, e.g., for tasks with large I/O operations. + If 1 is given, no parallel computing code is used at all, and the + behavior amounts to a simple python `for` loop. This mode is not + compatible with `timeout`. + None is a marker for 'unset' that will be interpreted as n_jobs=1 + unless the call is performed under a :func:`~parallel_config` + context manager that sets another value for ``n_jobs``. + If n_jobs = 0 then a ValueError is raised. + + verbose: int, default=0 + The verbosity level: if non zero, progress messages are + printed. Above 50, the output is sent to stdout. + The frequency of the messages increases with the verbosity level. + If it more than 10, all iterations are reported. 
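The prefer hint documented below is only a soft preference: it selects the thread-based backend when no backend is named explicitly. A minimal sketch; the strings and n_jobs value are arbitrary::

    from joblib import Parallel, delayed, parallel_config

    with parallel_config(prefer='threads'):
        print(Parallel(n_jobs=2)(delayed(len)(s) for s in ('a', 'bb', 'ccc')))
    # [1, 2, 3]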
+ + temp_folder: str or None, default=None + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + + - a folder pointed by the ``JOBLIB_TEMP_FOLDER`` environment + variable, + - ``/dev/shm`` if the folder exists and is writable: this is a + RAM disk filesystem available by default on modern Linux + distributions, + - the default system temporary folder that can be + overridden with ``TMP``, ``TMPDIR`` or ``TEMP`` environment + variables, typically ``/tmp`` under Unix operating systems. + + max_nbytes int, str, or None, optional, default='1M' + Threshold on the size of arrays passed to the workers that + triggers automated memory mapping in temp_folder. Can be an int + in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte. + Use None to disable memmapping of large arrays. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r' + Memmapping mode for numpy arrays passed to workers. None will + disable memmapping, other modes defined in the numpy.memmap doc: + https://numpy.org/doc/stable/reference/generated/numpy.memmap.html + Also, see 'max_nbytes' parameter documentation for more details. + + prefer: str in {'processes', 'threads'} or None, default=None + Soft hint to choose the default backend. + The default process-based backend is 'loky' and the default + thread-based backend is 'threading'. Ignored if the ``backend`` + parameter is specified. + + require: 'sharedmem' or None, default=None + Hard constraint to select the backend. If set to 'sharedmem', + the selected backend will be single-host and thread-based. + + inner_max_num_threads: int, default=None + If not None, overwrites the limit set on the number of threads + usable in some third-party library threadpools like OpenBLAS, + MKL or OpenMP. This is only used with the ``loky`` backend. + + backend_params: dict + Additional parameters to pass to the backend constructor when + backend is a string. + + Notes + ----- + Joblib tries to limit the oversubscription by limiting the number of + threads usable in some third-party library threadpools like OpenBLAS, MKL + or OpenMP. The default limit in each worker is set to + ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be + overwritten with the ``inner_max_num_threads`` argument which will be used + to set this limit in the child processes. + + .. versionadded:: 1.3 + + Examples + -------- + >>> from operator import neg + >>> with parallel_config(backend='threading'): + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + ... + [-1, -2, -3, -4, -5] + + To use the 'ray' joblib backend add the following lines: + + >>> from ray.util.joblib import register_ray # doctest: +SKIP + >>> register_ray() # doctest: +SKIP + >>> with parallel_config(backend="ray"): # doctest: +SKIP + ... 
print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + [-1, -2, -3, -4, -5] + + """ + def __init__( + self, + backend=default_parallel_config["backend"], + *, + n_jobs=default_parallel_config["n_jobs"], + verbose=default_parallel_config["verbose"], + temp_folder=default_parallel_config["temp_folder"], + max_nbytes=default_parallel_config["max_nbytes"], + mmap_mode=default_parallel_config["mmap_mode"], + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + inner_max_num_threads=None, + **backend_params + ): + # Save the parallel info and set the active parallel config + self.old_parallel_config = getattr( + _backend, "config", default_parallel_config + ) + + backend = self._check_backend( + backend, inner_max_num_threads, **backend_params + ) + + new_config = { + "n_jobs": n_jobs, + "verbose": verbose, + "temp_folder": temp_folder, + "max_nbytes": max_nbytes, + "mmap_mode": mmap_mode, + "prefer": prefer, + "require": require, + "backend": backend + } + self.parallel_config = self.old_parallel_config.copy() + self.parallel_config.update({ + k: v for k, v in new_config.items() + if not isinstance(v, _Sentinel) + }) + + setattr(_backend, "config", self.parallel_config) + + def _check_backend(self, backend, inner_max_num_threads, **backend_params): + if backend is default_parallel_config['backend']: + if inner_max_num_threads is not None or len(backend_params) > 0: + raise ValueError( + "inner_max_num_threads and other constructor " + "parameters backend_params are only supported " + "when backend is not None." + ) + return backend + + if isinstance(backend, str): + # Handle non-registered or missing backends + if backend not in BACKENDS: + if backend in EXTERNAL_BACKENDS: + register = EXTERNAL_BACKENDS[backend] + register() + elif backend in MAYBE_AVAILABLE_BACKENDS: + warnings.warn( + f"joblib backend '{backend}' is not available on " + f"your system, falling back to {DEFAULT_BACKEND}.", + UserWarning, + stacklevel=2 + ) + BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND] + else: + raise ValueError( + f"Invalid backend: {backend}, expected one of " + f"{sorted(BACKENDS.keys())}" + ) + + backend = BACKENDS[backend](**backend_params) + + if inner_max_num_threads is not None: + msg = ( + f"{backend.__class__.__name__} does not accept setting the " + "inner_max_num_threads argument." + ) + assert backend.supports_inner_max_num_threads, msg + backend.inner_max_num_threads = inner_max_num_threads + + # If the nesting_level of the backend is not set previously, use the + # nesting level from the previous active_backend to set it + if backend.nesting_level is None: + parent_backend = self.old_parallel_config['backend'] + if parent_backend is default_parallel_config['backend']: + nesting_level = 0 + else: + nesting_level = parent_backend.nesting_level + backend.nesting_level = nesting_level + + return backend + + def __enter__(self): + return self.parallel_config + + def __exit__(self, type, value, traceback): + self.unregister() + + def unregister(self): + setattr(_backend, "config", self.old_parallel_config) + + +class parallel_backend(parallel_config): + """Change the default backend used by Parallel inside a with block. + + .. warning:: + It is advised to use the :class:`~joblib.parallel_config` context + manager instead, which allows more fine-grained control over the + backend configuration. + + If ``backend`` is a string it must match a previously registered + implementation using the :func:`~register_parallel_backend` function. 
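__enter__/__exit__ above make parallel_config restore the previous configuration when the with-block ends. A sketch of that round-trip, assuming get_active_backend is imported from joblib.parallel; the name printed after the block depends on the host's default backend::

    from joblib import parallel_config
    from joblib.parallel import get_active_backend

    with parallel_config(backend='threading', n_jobs=2):
        backend, n_jobs = get_active_backend()
        print(type(backend).__name__, n_jobs)   # ThreadingBackend 2

    backend, n_jobs = get_active_backend()
    print(type(backend).__name__)               # previous default restored on exit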
+ + By default the following backends are available: + + - 'loky': single-host, process-based parallelism (used by default), + - 'threading': single-host, thread-based parallelism, + - 'multiprocessing': legacy single-host, process-based parallelism. + + 'loky' is recommended to run functions that manipulate Python objects. + 'threading' is a low-overhead alternative that is most efficient for + functions that release the Global Interpreter Lock: e.g. I/O-bound code or + CPU-bound code in a few calls to native code that explicitly releases the + GIL. Note that on some rare systems (such as Pyodide), + multiprocessing and loky may not be available, in which case joblib + defaults to threading. + + You can also use the `Dask `_ joblib + backend to distribute work across machines. This works well with + scikit-learn estimators with the ``n_jobs`` parameter, for example:: + + >>> import joblib # doctest: +SKIP + >>> from sklearn.model_selection import GridSearchCV # doctest: +SKIP + >>> from dask.distributed import Client, LocalCluster # doctest: +SKIP + + >>> # create a local Dask cluster + >>> cluster = LocalCluster() # doctest: +SKIP + >>> client = Client(cluster) # doctest: +SKIP + >>> grid_search = GridSearchCV(estimator, param_grid, n_jobs=-1) + ... # doctest: +SKIP + >>> with joblib.parallel_backend("dask", scatter=[X, y]): # doctest: +SKIP + ... grid_search.fit(X, y) + + It is also possible to use the distributed 'ray' backend for distributing + the workload to a cluster of nodes. To use the 'ray' joblib backend add + the following lines:: + + >>> from ray.util.joblib import register_ray # doctest: +SKIP + >>> register_ray() # doctest: +SKIP + >>> with parallel_backend("ray"): # doctest: +SKIP + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + [-1, -2, -3, -4, -5] + + Alternatively the backend can be passed directly as an instance. + + By default all available workers will be used (``n_jobs=-1``) unless the + caller passes an explicit value for the ``n_jobs`` parameter. + + This is an alternative to passing a ``backend='backend_name'`` argument to + the :class:`~Parallel` class constructor. It is particularly useful when + calling into library code that uses joblib internally but does not expose + the backend argument in its own API. + + >>> from operator import neg + >>> with parallel_backend('threading'): + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + ... + [-1, -2, -3, -4, -5] + + Joblib also tries to limit the oversubscription by limiting the number of + threads usable in some third-party library threadpools like OpenBLAS, MKL + or OpenMP. The default limit in each worker is set to + ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be + overwritten with the ``inner_max_num_threads`` argument which will be used + to set this limit in the child processes. + + .. versionadded:: 0.10 + + See Also + -------- + joblib.parallel_config: context manager to change the backend + configuration. 
+ """ + def __init__(self, backend, n_jobs=-1, inner_max_num_threads=None, + **backend_params): + + super().__init__( + backend=backend, + n_jobs=n_jobs, + inner_max_num_threads=inner_max_num_threads, + **backend_params + ) + + if self.old_parallel_config is None: + self.old_backend_and_jobs = None + else: + self.old_backend_and_jobs = ( + self.old_parallel_config["backend"], + self.old_parallel_config["n_jobs"], + ) + self.new_backend_and_jobs = ( + self.parallel_config["backend"], + self.parallel_config["n_jobs"], + ) + + def __enter__(self): + return self.new_backend_and_jobs + + +# Under Linux or OS X the default start method of multiprocessing +# can cause third party libraries to crash. Under Python 3.4+ it is possible +# to set an environment variable to switch the default start method from +# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost +# of causing semantic changes and some additional pool instantiation overhead. +DEFAULT_MP_CONTEXT = None +if hasattr(mp, 'get_context'): + method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None + if method is not None: + DEFAULT_MP_CONTEXT = mp.get_context(method=method) + + +class BatchedCalls(object): + """Wrap a sequence of (func, args, kwargs) tuples as a single callable""" + + def __init__(self, iterator_slice, backend_and_jobs, reducer_callback=None, + pickle_cache=None): + self.items = list(iterator_slice) + self._size = len(self.items) + self._reducer_callback = reducer_callback + if isinstance(backend_and_jobs, tuple): + self._backend, self._n_jobs = backend_and_jobs + else: + # this is for backward compatibility purposes. Before 0.12.6, + # nested backends were returned without n_jobs indications. + self._backend, self._n_jobs = backend_and_jobs, None + self._pickle_cache = pickle_cache if pickle_cache is not None else {} + + def __call__(self): + # Set the default nested backend to self._backend but do not set the + # change the default number of processes to -1 + with parallel_config(backend=self._backend, n_jobs=self._n_jobs): + return [func(*args, **kwargs) + for func, args, kwargs in self.items] + + def __reduce__(self): + if self._reducer_callback is not None: + self._reducer_callback() + # no need to pickle the callback. + return ( + BatchedCalls, + (self.items, (self._backend, self._n_jobs), None, + self._pickle_cache) + ) + + def __len__(self): + return self._size + + +# Possible exit status for a task +TASK_DONE = "Done" +TASK_ERROR = "Error" +TASK_PENDING = "Pending" + + +############################################################################### +# CPU count that works also when multiprocessing has been disabled via +# the JOBLIB_MULTIPROCESSING environment variable +def cpu_count(only_physical_cores=False): + """Return the number of CPUs. + + This delegates to loky.cpu_count that takes into account additional + constraints such as Linux CFS scheduler quotas (typically set by container + runtimes such as docker) and CPU affinity (for instance using the taskset + command on Linux). + + If only_physical_cores is True, do not take hyperthreading / SMT logical + cores into account. + """ + if mp is None: + return 1 + + return loky.cpu_count(only_physical_cores=only_physical_cores) + + +############################################################################### +# For verbosity + +def _verbosity_filter(index, verbose): + """ Returns False for indices increasingly apart, the distance + depending on the value of verbose. 
+ + We use a lag increasing as the square of index + """ + if not verbose: + return True + elif verbose > 10: + return False + if index == 0: + return False + verbose = .5 * (11 - verbose) ** 2 + scale = sqrt(index / verbose) + next_scale = sqrt((index + 1) / verbose) + return (int(next_scale) == int(scale)) + + +############################################################################### +def delayed(function): + """Decorator used to capture the arguments of a function.""" + + def delayed_function(*args, **kwargs): + return function, args, kwargs + try: + delayed_function = functools.wraps(function)(delayed_function) + except AttributeError: + " functools.wraps fails on some callable objects " + return delayed_function + + +############################################################################### +class BatchCompletionCallBack(object): + """Callback to keep track of completed results and schedule the next tasks. + + This callable is executed by the parent process whenever a worker process + has completed a batch of tasks. + + It is used for progress reporting, to update estimate of the batch + processing duration and to schedule the next batch of tasks to be + processed. + + It is assumed that this callback will always be triggered by the backend + right after the end of a task, in case of success as well as in case of + failure. + """ + + ########################################################################## + # METHODS CALLED BY THE MAIN THREAD # + ########################################################################## + def __init__(self, dispatch_timestamp, batch_size, parallel): + self.dispatch_timestamp = dispatch_timestamp + self.batch_size = batch_size + self.parallel = parallel + self.parallel_call_id = parallel._call_id + + # Internals to keep track of the status and outcome of the task. + + # Used to hold a reference to the future-like object returned by the + # backend after launching this task + # This will be set later when calling `register_job`, as it is only + # created once the task has been submitted. + self.job = None + + if not parallel._backend.supports_retrieve_callback: + # The status is only used for asynchronous result retrieval in the + # callback. + self.status = None + else: + # The initial status for the job is TASK_PENDING. + # Once it is done, it will be either TASK_DONE, or TASK_ERROR. + self.status = TASK_PENDING + + def register_job(self, job): + """Register the object returned by `apply_async`.""" + self.job = job + + def get_result(self, timeout): + """Returns the raw result of the task that was submitted. + + If the task raised an exception rather than returning, this same + exception will be raised instead. + + If the backend supports the retrieval callback, it is assumed that this + method is only called after the result has been registered. It is + ensured by checking that `self.status(timeout)` does not return + TASK_PENDING. In this case, `get_result` directly returns the + registered result (or raise the registered exception). + + For other backends, there are no such assumptions, but `get_result` + still needs to synchronously retrieve the result before it can + return it or raise. It will block at most `self.timeout` seconds + waiting for retrieval to complete, after that it raises a TimeoutError. + """ + + backend = self.parallel._backend + + if backend.supports_retrieve_callback: + # We assume that the result has already been retrieved by the + # callback thread, and is stored internally. It's just waiting to + # be returned. 
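delayed does nothing more than capture the call: it returns a (function, args, kwargs) triple that Parallel later executes. A quick sketch; sqrt(16) is arbitrary::

    from math import sqrt
    from joblib import delayed

    func, args, kwargs = delayed(sqrt)(16)
    print(func is sqrt, args, kwargs)   # True (16,) {}
    print(func(*args, **kwargs))        # 4.0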
+ return self._return_or_raise() + + # For other backends, the main thread needs to run the retrieval step. + try: + if backend.supports_timeout: + result = self.job.get(timeout=timeout) + else: + result = self.job.get() + outcome = dict(result=result, status=TASK_DONE) + except BaseException as e: + outcome = dict(result=e, status=TASK_ERROR) + self._register_outcome(outcome) + + return self._return_or_raise() + + def _return_or_raise(self): + try: + if self.status == TASK_ERROR: + raise self._result + return self._result + finally: + del self._result + + def get_status(self, timeout): + """Get the status of the task. + + This function also checks if the timeout has been reached and register + the TimeoutError outcome when it is the case. + """ + if timeout is None or self.status != TASK_PENDING: + return self.status + + # The computation are running and the status is pending. + # Check that we did not wait for this jobs more than `timeout`. + now = time.time() + if not hasattr(self, "_completion_timeout_counter"): + self._completion_timeout_counter = now + + if (now - self._completion_timeout_counter) > timeout: + outcome = dict(result=TimeoutError(), status=TASK_ERROR) + self._register_outcome(outcome) + + return self.status + + ########################################################################## + # METHODS CALLED BY CALLBACK THREADS # + ########################################################################## + def __call__(self, out): + """Function called by the callback thread after a job is completed.""" + + # If the backend doesn't support callback retrievals, the next batch of + # tasks is dispatched regardless. The result will be retrieved by the + # main thread when calling `get_result`. + if not self.parallel._backend.supports_retrieve_callback: + self._dispatch_new() + return + + # If the backend supports retrieving the result in the callback, it + # registers the task outcome (TASK_ERROR or TASK_DONE), and schedules + # the next batch if needed. + with self.parallel._lock: + # Edge case where while the task was processing, the `parallel` + # instance has been reset and a new call has been issued, but the + # worker managed to complete the task and trigger this callback + # call just before being aborted by the reset. + if self.parallel._call_id != self.parallel_call_id: + return + + # When aborting, stop as fast as possible and do not retrieve the + # result as it won't be returned by the Parallel call. + if self.parallel._aborting: + return + + # Retrieves the result of the task in the main process and dispatch + # a new batch if needed. + job_succeeded = self._retrieve_result(out) + + if not self.parallel.return_ordered: + # Append the job to the queue in the order of completion + # instead of submission. + self.parallel._jobs.append(self) + + if job_succeeded: + self._dispatch_new() + + def _dispatch_new(self): + """Schedule the next batch of tasks to be processed.""" + + # This steps ensure that auto-batching works as expected. + this_batch_duration = time.time() - self.dispatch_timestamp + self.parallel._backend.batch_completed(self.batch_size, + this_batch_duration) + + # Schedule the next batch of tasks. + with self.parallel._lock: + self.parallel.n_completed_tasks += self.batch_size + self.parallel.print_progress() + if self.parallel._original_iterator is not None: + self.parallel.dispatch_next() + + def _retrieve_result(self, out): + """Fetch and register the outcome of a task. + + Return True if the task succeeded, False otherwise. 
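get_status is where the per-task timeout bookkeeping happens; from the user's side it surfaces through Parallel's timeout argument. A hedged sketch, assuming the default loky backend is available; the 0.1 s and 1 s durations are illustrative::

    import time
    from multiprocessing import TimeoutError
    from joblib import Parallel, delayed

    try:
        Parallel(n_jobs=2, timeout=0.1)(
            delayed(time.sleep)(1) for _ in range(2))
    except TimeoutError:
        print("a task exceeded the 0.1 s timeout")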
+ This function is only called by backends that support retrieving + the task result in the callback thread. + """ + try: + result = self.parallel._backend.retrieve_result_callback(out) + outcome = dict(status=TASK_DONE, result=result) + except BaseException as e: + # Avoid keeping references to parallel in the error. + e.__traceback__ = None + outcome = dict(result=e, status=TASK_ERROR) + + self._register_outcome(outcome) + return outcome['status'] != TASK_ERROR + + ########################################################################## + # This method can be called either in the main thread # + # or in the callback thread. # + ########################################################################## + def _register_outcome(self, outcome): + """Register the outcome of a task. + + This method can be called only once, future calls will be ignored. + """ + # Covers the edge case where the main thread tries to register a + # `TimeoutError` while the callback thread tries to register a result + # at the same time. + with self.parallel._lock: + if self.status not in (TASK_PENDING, None): + return + self.status = outcome["status"] + + self._result = outcome["result"] + + # Once the result and the status are extracted, the last reference to + # the job can be deleted. + self.job = None + + # As soon as an error as been spotted, early stopping flags are sent to + # the `parallel` instance. + if self.status == TASK_ERROR: + self.parallel._exception = True + self.parallel._aborting = True + + +############################################################################### +def register_parallel_backend(name, factory, make_default=False): + """Register a new Parallel backend factory. + + The new backend can then be selected by passing its name as the backend + argument to the :class:`~Parallel` class. Moreover, the default backend can + be overwritten globally by setting make_default=True. + + The factory can be any callable that takes no argument and return an + instance of ``ParallelBackendBase``. + + Warning: this function is experimental and subject to change in a future + version of joblib. + + .. versionadded:: 0.10 + """ + BACKENDS[name] = factory + if make_default: + global DEFAULT_BACKEND + DEFAULT_BACKEND = name + + +def effective_n_jobs(n_jobs=-1): + """Determine the number of jobs that can actually run in parallel + + n_jobs is the number of workers requested by the callers. Passing n_jobs=-1 + means requesting all available workers for instance matching the number of + CPU cores on the worker host(s). + + This method should return a guesstimate of the number of workers that can + actually perform work concurrently with the currently enabled default + backend. The primary use case is to make it possible for the caller to know + in how many chunks to slice the work. + + In general working on larger data chunks is more efficient (less scheduling + overhead and better use of CPU cache prefetching heuristics) as long as all + the workers have enough work to do. + + Warning: this function is experimental and subject to change in a future + version of joblib. + + .. versionadded:: 0.10 + """ + if n_jobs == 1: + return 1 + + backend, backend_n_jobs = get_active_backend() + if n_jobs is None: + n_jobs = backend_n_jobs + return backend.effective_n_jobs(n_jobs=n_jobs) + + +############################################################################### +class Parallel(Logger): + ''' Helper class for readable parallel mapping. + + Read more in the :ref:`User Guide `. 
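register_parallel_backend lets third parties plug a new factory into BACKENDS and select it by name. A hedged sketch that simply re-registers the existing ThreadingBackend under a new, illustrative name ('my_threads'); a real backend would typically subclass ParallelBackendBase and provide its own scheduling::

    from joblib import Parallel, delayed, parallel_config, register_parallel_backend
    from joblib._parallel_backends import ThreadingBackend

    class MyThreadingBackend(ThreadingBackend):
        """Behaves exactly like 'threading'; only the registered name differs."""

    register_parallel_backend('my_threads', MyThreadingBackend)

    with parallel_config(backend='my_threads', n_jobs=2):
        print(Parallel()(delayed(str.upper)(s) for s in ('a', 'b')))
    # ['A', 'B']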
+ + Parameters + ---------- + n_jobs: int, default=None + The maximum number of concurrently running jobs, such as the number + of Python worker processes when ``backend="loky"`` or the size of + the thread-pool when ``backend="threading"``. + This argument is converted to an integer, rounded below for float. + If -1 is given, `joblib` tries to use all CPUs. The number of CPUs + ``n_cpus`` is obtained with :func:`~cpu_count`. + For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance, + using ``n_jobs=-2`` will result in all CPUs but one being used. + This argument can also go above ``n_cpus``, which will cause + oversubscription. In some cases, slight oversubscription can be + beneficial, e.g., for tasks with large I/O operations. + If 1 is given, no parallel computing code is used at all, and the + behavior amounts to a simple python `for` loop. This mode is not + compatible with ``timeout``. + None is a marker for 'unset' that will be interpreted as n_jobs=1 + unless the call is performed under a :func:`~parallel_config` + context manager that sets another value for ``n_jobs``. + If n_jobs = 0 then a ValueError is raised. + backend: str, ParallelBackendBase instance or None, default='loky' + Specify the parallelization backend implementation. + Supported backends are: + + - "loky" used by default, can induce some + communication and memory overhead when exchanging input and + output data with the worker Python processes. On some rare + systems (such as Pyiodide), the loky backend may not be + available. + - "multiprocessing" previous process-based backend based on + `multiprocessing.Pool`. Less robust than `loky`. + - "threading" is a very low-overhead backend but it suffers + from the Python Global Interpreter Lock if the called function + relies a lot on Python objects. "threading" is mostly useful + when the execution bottleneck is a compiled extension that + explicitly releases the GIL (for instance a Cython loop wrapped + in a "with nogil" block or an expensive call to a library such + as NumPy). + - finally, you can register backends by calling + :func:`~register_parallel_backend`. This will allow you to + implement a backend of your liking. + + It is not recommended to hard-code the backend name in a call to + :class:`~Parallel` in a library. Instead it is recommended to set + soft hints (prefer) or hard constraints (require) so as to make it + possible for library users to change the backend from the outside + using the :func:`~parallel_config` context manager. + return_as: str in {'list', 'generator', 'generator_unordered'}, default='list' + If 'list', calls to this instance will return a list, only when + all results have been processed and retrieved. + If 'generator', it will return a generator that yields the results + as soon as they are available, in the order the tasks have been + submitted with. + If 'generator_unordered', the generator will immediately yield + available results independently of the submission order. The output + order is not deterministic in this case because it depends on the + concurrency of the workers. + prefer: str in {'processes', 'threads'} or None, default=None + Soft hint to choose the default backend if no specific backend + was selected with the :func:`~parallel_config` context manager. + The default process-based backend is 'loky' and the default + thread-based backend is 'threading'. Ignored if the ``backend`` + parameter is specified. + require: 'sharedmem' or None, default=None + Hard constraint to select the backend. 
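The return_as parameter described above changes when results become available, not what they are. A small sketch of the 'generator' mode; pow(i, 2) is arbitrary work::

    from joblib import Parallel, delayed

    gen = Parallel(n_jobs=2, return_as='generator')(
        delayed(pow)(i, 2) for i in range(5))
    for value in gen:        # yielded in submission order, as soon as retrieved
        print(value)         # 0, 1, 4, 9, 16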
If set to 'sharedmem', + the selected backend will be single-host and thread-based even + if the user asked for a non-thread based backend with + :func:`~joblib.parallel_config`. + verbose: int, default=0 + The verbosity level: if non zero, progress messages are + printed. Above 50, the output is sent to stdout. + The frequency of the messages increases with the verbosity level. + If it more than 10, all iterations are reported. + timeout: float or None, default=None + Timeout limit for each task to complete. If any task takes longer + a TimeOutError will be raised. Only applied when n_jobs != 1 + pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}, default='2*n_jobs' + The number of batches (of tasks) to be pre-dispatched. + Default is '2*n_jobs'. When batch_size="auto" this is reasonable + default and the workers should never starve. Note that only basic + arithmetics are allowed here and no modules can be used in this + expression. + batch_size: int or 'auto', default='auto' + The number of atomic tasks to dispatch at once to each + worker. When individual evaluations are very fast, dispatching + calls to workers can be slower than sequential computation because + of the overhead. Batching fast computations together can mitigate + this. + The ``'auto'`` strategy keeps track of the time it takes for a + batch to complete, and dynamically adjusts the batch size to keep + the time on the order of half a second, using a heuristic. The + initial batch size is 1. + ``batch_size="auto"`` with ``backend="threading"`` will dispatch + batches of a single task at a time as the threading backend has + very little overhead and using larger batch size has not proved to + bring any gain in that case. + temp_folder: str or None, default=None + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + + - a folder pointed by the JOBLIB_TEMP_FOLDER environment + variable, + - /dev/shm if the folder exists and is writable: this is a + RAM disk filesystem available by default on modern Linux + distributions, + - the default system temporary folder that can be + overridden with TMP, TMPDIR or TEMP environment + variables, typically /tmp under Unix operating systems. + + Only active when ``backend="loky"`` or ``"multiprocessing"``. + max_nbytes int, str, or None, optional, default='1M' + Threshold on the size of arrays passed to the workers that + triggers automated memory mapping in temp_folder. Can be an int + in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte. + Use None to disable memmapping of large arrays. + Only active when ``backend="loky"`` or ``"multiprocessing"``. + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r' + Memmapping mode for numpy arrays passed to workers. None will + disable memmapping, other modes defined in the numpy.memmap doc: + https://numpy.org/doc/stable/reference/generated/numpy.memmap.html + Also, see 'max_nbytes' parameter documentation for more details. + + Notes + ----- + + This object uses workers to compute in parallel the application of a + function to many different arguments. The main functionality it brings + in addition to using the raw multiprocessing or concurrent.futures API + are (see examples for details): + + * More readable code, in particular since it avoids + constructing list of arguments. 
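batch_size and pre_dispatch only tune how work is grouped and queued; the results are unchanged. A sketch with explicit, arbitrary values instead of the 'auto' and '2*n_jobs' defaults::

    from joblib import Parallel, delayed

    out = Parallel(n_jobs=2, batch_size=4, pre_dispatch='2*n_jobs')(
        delayed(abs)(-i) for i in range(20))
    print(len(out))   # 20, identical to the default settings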
+ + * Easier debugging: + - informative tracebacks even when the error happens on + the client side + - using 'n_jobs=1' enables to turn off parallel computing + for debugging without changing the codepath + - early capture of pickling errors + + * An optional progress meter. + + * Interruption of multiprocesses jobs with 'Ctrl-C' + + * Flexible pickling control for the communication to and from + the worker processes. + + * Ability to use shared memory efficiently with worker + processes for large numpy-based datastructures. + + Note that the intended usage is to run one call at a time. Multiple + calls to the same Parallel object will result in a ``RuntimeError`` + + Examples + -------- + + A simple example: + + >>> from math import sqrt + >>> from joblib import Parallel, delayed + >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + + Reshaping the output when the function has several return + values: + + >>> from math import modf + >>> from joblib import Parallel, delayed + >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10)) + >>> res, i = zip(*r) + >>> res + (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5) + >>> i + (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0) + + The progress meter: the higher the value of `verbose`, the more + messages: + + >>> from time import sleep + >>> from joblib import Parallel, delayed + >>> r = Parallel(n_jobs=2, verbose=10)( + ... delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP + [Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s + [Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s + [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished + + Traceback example, note how the line of the error is indicated + as well as the values of the parameter passed to the function that + triggered the exception, even though the traceback happens in the + child process: + + >>> from heapq import nlargest + >>> from joblib import Parallel, delayed + >>> Parallel(n_jobs=2)( + ... delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) + ... # doctest: +SKIP + ----------------------------------------------------------------------- + Sub-process traceback: + ----------------------------------------------------------------------- + TypeError Mon Nov 12 11:37:46 2012 + PID: 12934 Python 2.7.3: /usr/bin/python + ........................................................................ + /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None) + 419 if n >= size: + 420 return sorted(iterable, key=key, reverse=True)[:n] + 421 + 422 # When key is none, use simpler decoration + 423 if key is None: + --> 424 it = izip(iterable, count(0,-1)) # decorate + 425 result = _nlargest(n, it) + 426 return map(itemgetter(0), result) # undecorate + 427 + 428 # General case, slowest method + TypeError: izip argument #1 must support iteration + _______________________________________________________________________ + + + Using pre_dispatch in a producer/consumer situation, where the + data is generated on the fly. Note how the producer is first + called 3 times before the parallel loop is initiated, and then + called to generate new data on the fly: + + >>> from math import sqrt + >>> from joblib import Parallel, delayed + >>> def producer(): + ... for i in range(6): + ... print('Produced %s' % i) + ... yield i + >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')( + ... 
delayed(sqrt)(i) for i in producer()) #doctest: +SKIP + Produced 0 + Produced 1 + Produced 2 + [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s + Produced 3 + [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s + Produced 4 + [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s + Produced 5 + [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s + [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s + [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished + + ''' # noqa: E501 + def __init__( + self, + n_jobs=default_parallel_config["n_jobs"], + backend=default_parallel_config['backend'], + return_as="list", + verbose=default_parallel_config["verbose"], + timeout=None, + pre_dispatch='2 * n_jobs', + batch_size='auto', + temp_folder=default_parallel_config["temp_folder"], + max_nbytes=default_parallel_config["max_nbytes"], + mmap_mode=default_parallel_config["mmap_mode"], + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + ): + # Initiate parent Logger class state + super().__init__() + + # Interpret n_jobs=None as 'unset' + if n_jobs is None: + n_jobs = default_parallel_config["n_jobs"] + + active_backend, context_config = _get_active_backend( + prefer=prefer, require=require, verbose=verbose + ) + + nesting_level = active_backend.nesting_level + + self.verbose = _get_config_param(verbose, context_config, "verbose") + self.timeout = timeout + self.pre_dispatch = pre_dispatch + + if return_as not in {"list", "generator", "generator_unordered"}: + raise ValueError( + 'Expected `return_as` parameter to be a string equal to "list"' + f',"generator" or "generator_unordered", but got {return_as} ' + "instead." + ) + self.return_as = return_as + self.return_generator = return_as != "list" + self.return_ordered = return_as != "generator_unordered" + + # Check if we are under a parallel_config or parallel_backend + # context manager and use the config from the context manager + # for arguments that are not explicitly set. + self._backend_args = { + k: _get_config_param(param, context_config, k) for param, k in [ + (max_nbytes, "max_nbytes"), + (temp_folder, "temp_folder"), + (mmap_mode, "mmap_mode"), + (prefer, "prefer"), + (require, "require"), + (verbose, "verbose"), + ] + } + + if isinstance(self._backend_args["max_nbytes"], str): + self._backend_args["max_nbytes"] = memstr_to_bytes( + self._backend_args["max_nbytes"] + ) + self._backend_args["verbose"] = max( + 0, self._backend_args["verbose"] - 50 + ) + + if DEFAULT_MP_CONTEXT is not None: + self._backend_args['context'] = DEFAULT_MP_CONTEXT + elif hasattr(mp, "get_context"): + self._backend_args['context'] = mp.get_context() + + if backend is default_parallel_config['backend'] or backend is None: + backend = active_backend + + elif isinstance(backend, ParallelBackendBase): + # Use provided backend as is, with the current nesting_level if it + # is not set yet. + if backend.nesting_level is None: + backend.nesting_level = nesting_level + + elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'): + # Make it possible to pass a custom multiprocessing context as + # backend to change the start method to forkserver or spawn or + # preload modules on the forkserver helper process. 
+ self._backend_args['context'] = backend + backend = MultiprocessingBackend(nesting_level=nesting_level) + + elif backend not in BACKENDS and backend in MAYBE_AVAILABLE_BACKENDS: + warnings.warn( + f"joblib backend '{backend}' is not available on " + f"your system, falling back to {DEFAULT_BACKEND}.", + UserWarning, + stacklevel=2) + BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND] + backend = BACKENDS[DEFAULT_BACKEND](nesting_level=nesting_level) + + else: + try: + backend_factory = BACKENDS[backend] + except KeyError as e: + raise ValueError("Invalid backend: %s, expected one of %r" + % (backend, sorted(BACKENDS.keys()))) from e + backend = backend_factory(nesting_level=nesting_level) + + n_jobs = _get_config_param(n_jobs, context_config, "n_jobs") + if n_jobs is None: + # No specific context override and no specific value request: + # default to the default of the backend. + n_jobs = backend.default_n_jobs + try: + n_jobs = int(n_jobs) + except ValueError: + raise ValueError("n_jobs could not be converted to int") + self.n_jobs = n_jobs + + if (require == 'sharedmem' and + not getattr(backend, 'supports_sharedmem', False)): + raise ValueError("Backend %s does not support shared memory" + % backend) + + if (batch_size == 'auto' or isinstance(batch_size, Integral) and + batch_size > 0): + self.batch_size = batch_size + else: + raise ValueError( + "batch_size must be 'auto' or a positive integer, got: %r" + % batch_size) + + if not isinstance(backend, SequentialBackend): + if self.return_generator and not backend.supports_return_generator: + raise ValueError( + "Backend {} does not support " + "return_as={}".format(backend, return_as) + ) + # This lock is used to coordinate the main thread of this process + # with the async callback thread of our the pool. + self._lock = threading.RLock() + self._jobs = collections.deque() + self._pending_outputs = list() + self._ready_batches = queue.Queue() + self._reducer_callback = None + + # Internal variables + self._backend = backend + self._running = False + self._managed_backend = False + self._id = uuid4().hex + self._call_ref = None + + def __enter__(self): + self._managed_backend = True + self._calling = False + self._initialize_backend() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._managed_backend = False + if self.return_generator and self._calling: + self._abort() + self._terminate_and_reset() + + def _initialize_backend(self): + """Build a process or thread pool and return the number of workers""" + try: + n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self, + **self._backend_args) + if self.timeout is not None and not self._backend.supports_timeout: + warnings.warn( + 'The backend class {!r} does not support timeout. ' + "You have set 'timeout={}' in Parallel but " + "the 'timeout' parameter will not be used.".format( + self._backend.__class__.__name__, + self.timeout)) + + except FallbackToBackend as e: + # Recursively initialize the backend in case of requested fallback. 
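__enter__/__exit__ let a single Parallel instance be used as a context manager so the worker pool is created once and reused across calls. A short usage sketch; the two ranges are arbitrary::

    from joblib import Parallel, delayed

    with Parallel(n_jobs=2) as parallel:            # pool created once
        first = parallel(delayed(abs)(-i) for i in range(3))
        second = parallel(delayed(abs)(-i) for i in range(3, 6))
    print(first, second)   # [0, 1, 2] [3, 4, 5]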
+ self._backend = e.backend + n_jobs = self._initialize_backend() + + return n_jobs + + def _effective_n_jobs(self): + if self._backend: + return self._backend.effective_n_jobs(self.n_jobs) + return 1 + + def _terminate_and_reset(self): + if hasattr(self._backend, 'stop_call') and self._calling: + self._backend.stop_call() + self._calling = False + if not self._managed_backend: + self._backend.terminate() + + def _dispatch(self, batch): + """Queue the batch for computing, with or without multiprocessing + + WARNING: this method is not thread-safe: it should be only called + indirectly via dispatch_one_batch. + + """ + # If job.get() catches an exception, it closes the queue: + if self._aborting: + return + + batch_size = len(batch) + + self.n_dispatched_tasks += batch_size + self.n_dispatched_batches += 1 + + dispatch_timestamp = time.time() + + batch_tracker = BatchCompletionCallBack( + dispatch_timestamp, batch_size, self + ) + + if self.return_ordered: + self._jobs.append(batch_tracker) + + # If return_ordered is False, the batch_tracker is not stored in the + # jobs queue at the time of submission. Instead, it will be appended to + # the queue by itself as soon as the callback is triggered to be able + # to return the results in the order of completion. + + job = self._backend.apply_async(batch, callback=batch_tracker) + batch_tracker.register_job(job) + + def dispatch_next(self): + """Dispatch more data for parallel processing + + This method is meant to be called concurrently by the multiprocessing + callback. We rely on the thread-safety of dispatch_one_batch to protect + against concurrent consumption of the unprotected iterator. + + """ + if not self.dispatch_one_batch(self._original_iterator): + self._iterating = False + self._original_iterator = None + + def dispatch_one_batch(self, iterator): + """Prefetch the tasks for the next batch and dispatch them. + + The effective size of the batch is computed here. + If there are no more jobs to dispatch, return False, else return True. + + The iterator consumption and dispatching is protected by the same + lock so calling this function should be thread safe. + + """ + + if self._aborting: + return False + + batch_size = self._get_batch_size() + + with self._lock: + # to ensure an even distribution of the workload between workers, + # we look ahead in the original iterators more than batch_size + # tasks - However, we keep consuming only one batch at each + # dispatch_one_batch call. The extra tasks are stored in a local + # queue, _ready_batches, that is looked-up prior to re-consuming + # tasks from the origal iterator. + try: + tasks = self._ready_batches.get(block=False) + except queue.Empty: + # slice the iterator n_jobs * batchsize items at a time. If the + # slice returns less than that, then the current batchsize puts + # too much weight on a subset of workers, while other may end + # up starving. So in this case, re-scale the batch size + # accordingly to distribute evenly the last items between all + # workers. + n_jobs = self._cached_effective_n_jobs + big_batch_size = batch_size * n_jobs + + try: + islice = list(itertools.islice(iterator, big_batch_size)) + except Exception as e: + # Handle the fact that the generator of task raised an + # exception. As this part of the code can be executed in + # a thread internal to the backend, register a task with + # an error that will be raised in the user's thread. 
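dispatch_one_batch looks ahead n_jobs * batch_size tasks and re-slices the tail so no worker starves. A stand-alone, simplified sketch of that slicing (no queueing, no auto batch sizing); make_batches is an illustrative name::

    import itertools

    def make_batches(iterator, batch_size, n_jobs):
        """Look ahead batch_size * n_jobs items and split them into batches."""
        big = list(itertools.islice(iterator, batch_size * n_jobs))
        if not big:
            return []
        size = max(1, len(big) // n_jobs)
        return [big[i:i + size] for i in range(0, len(big), size)]

    print(make_batches(iter(range(10)), batch_size=2, n_jobs=3))
    # [[0, 1], [2, 3], [4, 5]]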
+ if isinstance(e.__context__, queue.Empty): + # Supress the cause of the exception if it is + # queue.Empty to avoid cluttered traceback. Only do it + # if the __context__ is really empty to avoid messing + # with causes of the original error. + e.__cause__ = None + batch_tracker = BatchCompletionCallBack( + 0, batch_size, self + ) + self._jobs.append(batch_tracker) + batch_tracker._register_outcome(dict( + result=e, status=TASK_ERROR + )) + return True + + if len(islice) == 0: + return False + elif (iterator is self._original_iterator and + len(islice) < big_batch_size): + # We reached the end of the original iterator (unless + # iterator is the ``pre_dispatch``-long initial slice of + # the original iterator) -- decrease the batch size to + # account for potential variance in the batches running + # time. + final_batch_size = max(1, len(islice) // (10 * n_jobs)) + else: + final_batch_size = max(1, len(islice) // n_jobs) + + # enqueue n_jobs batches in a local queue + for i in range(0, len(islice), final_batch_size): + tasks = BatchedCalls(islice[i:i + final_batch_size], + self._backend.get_nested_backend(), + self._reducer_callback, + self._pickle_cache) + self._ready_batches.put(tasks) + + # finally, get one task. + tasks = self._ready_batches.get(block=False) + if len(tasks) == 0: + # No more tasks available in the iterator: tell caller to stop. + return False + else: + self._dispatch(tasks) + return True + + def _get_batch_size(self): + """Returns the effective batch size for dispatch""" + if self.batch_size == 'auto': + return self._backend.compute_batch_size() + else: + # Fixed batch size strategy + return self.batch_size + + def _print(self, msg): + """Display the message on stout or stderr depending on verbosity""" + # XXX: Not using the logger framework: need to + # learn to use logger better. + if not self.verbose: + return + if self.verbose < 50: + writer = sys.stderr.write + else: + writer = sys.stdout.write + writer(f"[{self}]: {msg}\n") + + def _is_completed(self): + """Check if all tasks have been completed""" + return self.n_completed_tasks == self.n_dispatched_tasks and not ( + self._iterating or self._aborting + ) + + def print_progress(self): + """Display the process of the parallel execution only a fraction + of time, controlled by self.verbose. + """ + + if not self.verbose: + return + + elapsed_time = time.time() - self._start_time + + if self._is_completed(): + # Make sure that we get a last message telling us we are done + self._print( + f"Done {self.n_completed_tasks:3d} out of " + f"{self.n_completed_tasks:3d} | elapsed: " + f"{short_format_time(elapsed_time)} finished" + ) + return + + # Original job iterator becomes None once it has been fully + # consumed: at this point we know the total number of jobs and we are + # able to display an estimation of the remaining time based on already + # completed jobs. Otherwise, we simply display the number of completed + # tasks. 
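A standalone sketch of the look-ahead strategy described in the comments above (simplified, and ignoring the end-of-iterator re-scaling): pull up to `batch_size * n_jobs` tasks at once, then cut them into roughly even batches so the final, possibly short, slice does not starve some workers. This is an illustrative model, not joblib's actual implementation.

    import itertools

    def cut_batches(iterator, n_jobs, batch_size):
        # Pull a "big batch" ahead of time, as dispatch_one_batch does.
        big = list(itertools.islice(iterator, batch_size * n_jobs))
        if not big:
            return []
        # Split it evenly between the workers; the last batch may be shorter.
        final_batch_size = max(1, len(big) // n_jobs)
        return [big[i:i + final_batch_size]
                for i in range(0, len(big), final_batch_size)]

    print(cut_batches(iter(range(10)), n_jobs=3, batch_size=4))
    # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]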
+ elif self._original_iterator is not None: + if _verbosity_filter(self.n_dispatched_batches, self.verbose): + return + self._print( + f"Done {self.n_completed_tasks:3d} tasks | elapsed: " + f"{short_format_time(elapsed_time)}" + ) + else: + index = self.n_completed_tasks + # We are finished dispatching + total_tasks = self.n_dispatched_tasks + # We always display the first loop + if not index == 0: + # Display depending on the number of remaining items + # A message as soon as we finish dispatching, cursor is 0 + cursor = (total_tasks - index + 1 - + self._pre_dispatch_amount) + frequency = (total_tasks // self.verbose) + 1 + is_last_item = (index + 1 == total_tasks) + if (is_last_item or cursor % frequency): + return + remaining_time = (elapsed_time / index) * \ + (self.n_dispatched_tasks - index * 1.0) + # only display status if remaining time is greater or equal to 0 + self._print( + f"Done {index:3d} out of {total_tasks:3d} | elapsed: " + f"{short_format_time(elapsed_time)} remaining: " + f"{short_format_time(remaining_time)}" + ) + + def _abort(self): + # Stop dispatching new jobs in the async callback thread + self._aborting = True + + # If the backend allows it, cancel or kill remaining running + # tasks without waiting for the results as we will raise + # the exception we got back to the caller instead of returning + # any result. + backend = self._backend + if (not self._aborted and hasattr(backend, 'abort_everything')): + # If the backend is managed externally we need to make sure + # to leave it in a working state to allow for future jobs + # scheduling. + ensure_ready = self._managed_backend + backend.abort_everything(ensure_ready=ensure_ready) + self._aborted = True + + def _start(self, iterator, pre_dispatch): + # Only set self._iterating to True if at least a batch + # was dispatched. In particular this covers the edge + # case of Parallel used with an exhausted iterator. If + # self._original_iterator is None, then this means either + # that pre_dispatch == "all", n_jobs == 1 or that the first batch + # was very quick and its callback already dispatched all the + # remaining jobs. + self._iterating = False + if self.dispatch_one_batch(iterator): + self._iterating = self._original_iterator is not None + + while self.dispatch_one_batch(iterator): + pass + + if pre_dispatch == "all": + # The iterable was consumed all at once by the above for loop. + # No need to wait for async callbacks to trigger to + # consumption. + self._iterating = False + + def _get_outputs(self, iterator, pre_dispatch): + """Iterator returning the tasks' output as soon as they are ready.""" + dispatch_thread_id = threading.get_ident() + detach_generator_exit = False + try: + self._start(iterator, pre_dispatch) + # first yield returns None, for internal use only. This ensures + # that we enter the try/except block and start dispatching the + # tasks. + yield + + with self._backend.retrieval_context(): + yield from self._retrieve() + + except GeneratorExit: + # The generator has been garbage collected before being fully + # consumed. This aborts the remaining tasks if possible and warn + # the user if necessary. + self._exception = True + + # In some interpreters such as PyPy, GeneratorExit can be raised in + # a different thread than the one used to start the dispatch of the + # parallel tasks. This can lead to hang when a thread attempts to + # join itself. As workaround, we detach the execution of the + # aborting code to a dedicated thread. 
We then need to make sure + # the rest of the function does not call `_terminate_and_reset` + # in finally. + if dispatch_thread_id != threading.get_ident(): + if not IS_PYPY: + warnings.warn( + "A generator produced by joblib.Parallel has been " + "gc'ed in an unexpected thread. This behavior should " + "not cause major -issues but to make sure, please " + "report this warning and your use case at " + "https://github.com/joblib/joblib/issues so it can " + "be investigated." + ) + + detach_generator_exit = True + _parallel = self + + class _GeneratorExitThread(threading.Thread): + def run(self): + _parallel._abort() + if _parallel.return_generator: + _parallel._warn_exit_early() + _parallel._terminate_and_reset() + + _GeneratorExitThread( + name="GeneratorExitThread" + ).start() + return + + # Otherwise, we are in the thread that started the dispatch: we can + # safely abort the execution and warn the user. + self._abort() + if self.return_generator: + self._warn_exit_early() + + raise + + # Note: we catch any BaseException instead of just Exception instances + # to also include KeyboardInterrupt + except BaseException: + self._exception = True + self._abort() + raise + finally: + # Store the unconsumed tasks and terminate the workers if necessary + _remaining_outputs = ([] if self._exception else self._jobs) + self._jobs = collections.deque() + self._running = False + if not detach_generator_exit: + self._terminate_and_reset() + + while len(_remaining_outputs) > 0: + batched_results = _remaining_outputs.popleft() + batched_results = batched_results.get_result(self.timeout) + for result in batched_results: + yield result + + def _wait_retrieval(self): + """Return True if we need to continue retriving some tasks.""" + + # If the input load is still being iterated over, it means that tasks + # are still on the dispatch wait list and their results will need to + # be retrieved later on. + if self._iterating: + return True + + # If some of the dispatched tasks are still being processed by the + # workers, wait for the compute to finish before starting retrieval + if self.n_completed_tasks < self.n_dispatched_tasks: + return True + + # For backends that does not support retrieving asynchronously the + # result to the main process, all results must be carefully retrieved + # in the _retrieve loop in the main thread while the backend is alive. + # For other backends, the actual retrieval is done asynchronously in + # the callback thread, and we can terminate the backend before the + # `self._jobs` result list has been emptied. The remaining results + # will be collected in the `finally` step of the generator. + if not self._backend.supports_retrieve_callback: + if len(self._jobs) > 0: + return True + + return False + + def _retrieve(self): + while self._wait_retrieval(): + + # If the callback thread of a worker has signaled that its task + # triggered an exception, or if the retrieval loop has raised an + # exception (e.g. `GeneratorExit`), exit the loop and surface the + # worker traceback. + if self._aborting: + self._raise_error_fast() + break + + # If the next job is not ready for retrieval yet, we just wait for + # async callbacks to progress. 
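A hedged sketch of the situation this handler deals with (again assuming a joblib version exposing `return_as`): the caller receives a lazy generator, consumes part of it, then drops it; the remaining tasks are aborted and a warning about unconsumed results may be emitted. `pre_dispatch` keeps the input iterator from being consumed too far ahead.

    import time
    from joblib import Parallel, delayed

    tasks = (delayed(time.sleep)(0.01) for _ in range(1000))
    gen = Parallel(n_jobs=2, pre_dispatch="2*n_jobs",
                   return_as="generator")(tasks)
    first = next(gen)   # only a few batches have been dispatched so far
    del gen             # remaining tasks are aborted; a warning may be raised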
+ if ((len(self._jobs) == 0) or + (self._jobs[0].get_status( + timeout=self.timeout) == TASK_PENDING)): + time.sleep(0.01) + continue + + # We need to be careful: the job list can be filling up as + # we empty it and Python list are not thread-safe by + # default hence the use of the lock + with self._lock: + batched_results = self._jobs.popleft() + + # Flatten the batched results to output one output at a time + batched_results = batched_results.get_result(self.timeout) + for result in batched_results: + self._nb_consumed += 1 + yield result + + def _raise_error_fast(self): + """If we are aborting, raise if a job caused an error.""" + + # Find the first job whose status is TASK_ERROR if it exists. + with self._lock: + error_job = next((job for job in self._jobs + if job.status == TASK_ERROR), None) + + # If this error job exists, immediatly raise the error by + # calling get_result. This job might not exists if abort has been + # called directly or if the generator is gc'ed. + if error_job is not None: + error_job.get_result(self.timeout) + + def _warn_exit_early(self): + """Warn the user if the generator is gc'ed before being consumned.""" + ready_outputs = self.n_completed_tasks - self._nb_consumed + is_completed = self._is_completed() + msg = "" + if ready_outputs: + msg += ( + f"{ready_outputs} tasks have been successfully executed " + " but not used." + ) + if not is_completed: + msg += " Additionally, " + + if not is_completed: + msg += ( + f"{self.n_dispatched_tasks - self.n_completed_tasks} tasks " + "which were still being processed by the workers have been " + "cancelled." + ) + + if msg: + msg += ( + " You could benefit from adjusting the input task " + "iterator to limit unnecessary computation time." + ) + + warnings.warn(msg) + + def _get_sequential_output(self, iterable): + """Separate loop for sequential output. + + This simplifies the traceback in case of errors and reduces the + overhead of calling sequential tasks with `joblib`. + """ + try: + self._iterating = True + self._original_iterator = iterable + batch_size = self._get_batch_size() + + if batch_size != 1: + it = iter(iterable) + iterable_batched = iter( + lambda: tuple(itertools.islice(it, batch_size)), () + ) + iterable = ( + task for batch in iterable_batched for task in batch + ) + + # first yield returns None, for internal use only. This ensures + # that we enter the try/except block and setup the generator. + yield None + + # Sequentially call the tasks and yield the results. + for func, args, kwargs in iterable: + self.n_dispatched_batches += 1 + self.n_dispatched_tasks += 1 + res = func(*args, **kwargs) + self.n_completed_tasks += 1 + self.print_progress() + yield res + self._nb_consumed += 1 + except BaseException: + self._exception = True + self._aborting = True + self._aborted = True + raise + finally: + self.print_progress() + self._running = False + self._iterating = False + self._original_iterator = None + + def _reset_run_tracking(self): + """Reset the counters and flags used to track the execution.""" + + # Makes sur the parallel instance was not previously running in a + # thread-safe way. + with getattr(self, '_lock', nullcontext()): + if self._running: + msg = 'This Parallel instance is already running !' + if self.return_generator is True: + msg += ( + " Before submitting new tasks, you must wait for the " + "completion of all the previous tasks, or clean all " + "references to the output generator." 
+ ) + raise RuntimeError(msg) + self._running = True + + # Counter to keep track of the task dispatched and completed. + self.n_dispatched_batches = 0 + self.n_dispatched_tasks = 0 + self.n_completed_tasks = 0 + + # Following count is incremented by one each time the user iterates + # on the output generator, it is used to prepare an informative + # warning message in case the generator is deleted before all the + # dispatched tasks have been consumed. + self._nb_consumed = 0 + + # Following flags are used to synchronize the threads in case one of + # the tasks error-out to ensure that all workers abort fast and that + # the backend terminates properly. + + # Set to True as soon as a worker signals that a task errors-out + self._exception = False + # Set to True in case of early termination following an incident + self._aborting = False + # Set to True after abortion is complete + self._aborted = False + + def __call__(self, iterable): + """Main function to dispatch parallel tasks.""" + + self._reset_run_tracking() + self._start_time = time.time() + + if not self._managed_backend: + n_jobs = self._initialize_backend() + else: + n_jobs = self._effective_n_jobs() + + if n_jobs == 1: + # If n_jobs==1, run the computation sequentially and return + # immediatly to avoid overheads. + output = self._get_sequential_output(iterable) + next(output) + return output if self.return_generator else list(output) + + # Let's create an ID that uniquely identifies the current call. If the + # call is interrupted early and that the same instance is immediately + # re-used, this id will be used to prevent workers that were + # concurrently finalizing a task from the previous call to run the + # callback. + with self._lock: + self._call_id = uuid4().hex + + # self._effective_n_jobs should be called in the Parallel.__call__ + # thread only -- store its value in an attribute for further queries. + self._cached_effective_n_jobs = n_jobs + + if isinstance(self._backend, LokyBackend): + # For the loky backend, we add a callback executed when reducing + # BatchCalls, that makes the loky executor use a temporary folder + # specific to this Parallel object when pickling temporary memmaps. + # This callback is necessary to ensure that several Parallel + # objects using the same resuable executor don't use the same + # temporary resources. + + def _batched_calls_reducer_callback(): + # Relevant implementation detail: the following lines, called + # when reducing BatchedCalls, are called in a thread-safe + # situation, meaning that the context of the temporary folder + # manager will not be changed in between the callback execution + # and the end of the BatchedCalls pickling. The reason is that + # pickling (the only place where set_current_context is used) + # is done from a single thread (the queue_feeder_thread). + self._backend._workers._temp_folder_manager.set_current_context( # noqa + self._id + ) + self._reducer_callback = _batched_calls_reducer_callback + + # self._effective_n_jobs should be called in the Parallel.__call__ + # thread only -- store its value in an attribute for further queries. + self._cached_effective_n_jobs = n_jobs + + backend_name = self._backend.__class__.__name__ + if n_jobs == 0: + raise RuntimeError("%s has no active worker." % backend_name) + + self._print( + f"Using backend {backend_name} with {n_jobs} concurrent workers." + ) + if hasattr(self._backend, 'start_call'): + self._backend.start_call() + + # Following flag prevents double calls to `backend.stop_call`. 
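A small sketch of how the abort/error flags above appear from user code: the first exception raised by a task is re-raised in the calling thread and the remaining work is cancelled.

    from joblib import Parallel, delayed

    def maybe_fail(i):
        if i == 3:
            raise ValueError(f"task {i} failed")
        return i * i

    try:
        Parallel(n_jobs=2)(delayed(maybe_fail)(i) for i in range(10))
    except ValueError as exc:
        print("surfaced in the caller:", exc)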
+ self._calling = True + + iterator = iter(iterable) + pre_dispatch = self.pre_dispatch + + if pre_dispatch == 'all': + # prevent further dispatch via multiprocessing callback thread + self._original_iterator = None + self._pre_dispatch_amount = 0 + else: + self._original_iterator = iterator + if hasattr(pre_dispatch, 'endswith'): + pre_dispatch = eval_expr( + pre_dispatch.replace("n_jobs", str(n_jobs)) + ) + self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch) + + # The main thread will consume the first pre_dispatch items and + # the remaining items will later be lazily dispatched by async + # callbacks upon task completions. + + # TODO: this iterator should be batch_size * n_jobs + iterator = itertools.islice(iterator, self._pre_dispatch_amount) + + # Use a caching dict for callables that are pickled with cloudpickle to + # improve performances. This cache is used only in the case of + # functions that are defined in the __main__ module, functions that + # are defined locally (inside another function) and lambda expressions. + self._pickle_cache = dict() + + output = self._get_outputs(iterator, pre_dispatch) + self._call_ref = weakref.ref(output) + + # The first item from the output is blank, but it makes the interpreter + # progress until it enters the Try/Except block of the generator and + # reach the first `yield` statement. This starts the aynchronous + # dispatch of the tasks to the workers. + next(output) + + return output if self.return_generator else list(output) + + def __repr__(self): + return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs) diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/pool.py b/env-llmeval/lib/python3.10/site-packages/joblib/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..72a6baa3ac5854ebac9f96a4c6df77b52bff300e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/pool.py @@ -0,0 +1,354 @@ +"""Custom implementation of multiprocessing.Pool with custom pickler. + +This module provides efficient ways of working with data stored in +shared memory with numpy.memmap arrays without inducing any memory +copy between the parent and child processes. + +This module should not be imported if multiprocessing is not +available as it implements subclasses of multiprocessing Pool +that uses a custom alternative to SimpleQueue. + +""" +# Author: Olivier Grisel +# Copyright: 2012, Olivier Grisel +# License: BSD 3 clause + +import copyreg +import sys +import warnings +from time import sleep + +try: + WindowsError +except NameError: + WindowsError = type(None) + +from pickle import Pickler + +from pickle import HIGHEST_PROTOCOL +from io import BytesIO + +from ._memmapping_reducer import get_memmapping_reducers +from ._memmapping_reducer import TemporaryResourcesManager +from ._multiprocessing_helpers import mp, assert_spawning + +# We need the class definition to derive from it, not the multiprocessing.Pool +# factory function +from multiprocessing.pool import Pool + +try: + import numpy as np +except ImportError: + np = None + + +############################################################################### +# Enable custom pickling in Pool queues + +class CustomizablePickler(Pickler): + """Pickler that accepts custom reducers. + + TODO python2_drop : can this be simplified ? + + HIGHEST_PROTOCOL is selected by default as this pickler is used + to pickle ephemeral datastructures for interprocess communication + hence no backward compatibility is required. 
+ + `reducers` is expected to be a dictionary with key/values + being `(type, callable)` pairs where `callable` is a function that + give an instance of `type` will return a tuple `(constructor, + tuple_of_objects)` to rebuild an instance out of the pickled + `tuple_of_objects` as would return a `__reduce__` method. See the + standard library documentation on pickling for more details. + + """ + + # We override the pure Python pickler as its the only way to be able to + # customize the dispatch table without side effects in Python 2.7 + # to 3.2. For Python 3.3+ leverage the new dispatch_table + # feature from https://bugs.python.org/issue14166 that makes it possible + # to use the C implementation of the Pickler which is faster. + + def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): + Pickler.__init__(self, writer, protocol=protocol) + if reducers is None: + reducers = {} + if hasattr(Pickler, 'dispatch'): + # Make the dispatch registry an instance level attribute instead of + # a reference to the class dictionary under Python 2 + self.dispatch = Pickler.dispatch.copy() + else: + # Under Python 3 initialize the dispatch table with a copy of the + # default registry + self.dispatch_table = copyreg.dispatch_table.copy() + for type, reduce_func in reducers.items(): + self.register(type, reduce_func) + + def register(self, type, reduce_func): + """Attach a reducer function to a given type in the dispatch table.""" + if hasattr(Pickler, 'dispatch'): + # Python 2 pickler dispatching is not explicitly customizable. + # Let us use a closure to workaround this limitation. + def dispatcher(self, obj): + reduced = reduce_func(obj) + self.save_reduce(obj=obj, *reduced) + self.dispatch[type] = dispatcher + else: + self.dispatch_table[type] = reduce_func + + +class CustomizablePicklingQueue(object): + """Locked Pipe implementation that uses a customizable pickler. + + This class is an alternative to the multiprocessing implementation + of SimpleQueue in order to make it possible to pass custom + pickling reducers, for instance to avoid memory copy when passing + memory mapped datastructures. + + `reducers` is expected to be a dict with key / values being + `(type, callable)` pairs where `callable` is a function that, given an + instance of `type`, will return a tuple `(constructor, tuple_of_objects)` + to rebuild an instance out of the pickled `tuple_of_objects` as would + return a `__reduce__` method. + + See the standard library documentation on pickling for more details. 
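The `(type, callable)` reducer contract described here is the standard pickle one. A self-contained sketch, using a hypothetical `Point` class and the stdlib `dispatch_table` mechanism that the modern branch above relies on:

    import copyreg
    import io
    import pickle

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def reduce_point(p):
        # Return (constructor, tuple_of_objects), like __reduce__ would.
        return Point, (p.x, p.y)

    buf = io.BytesIO()
    pickler = pickle.Pickler(buf, protocol=pickle.HIGHEST_PROTOCOL)
    pickler.dispatch_table = copyreg.dispatch_table.copy()
    pickler.dispatch_table[Point] = reduce_point
    pickler.dump(Point(1, 2))

    restored = pickle.loads(buf.getvalue())
    print(restored.x, restored.y)   # 1 2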
+ """ + + def __init__(self, context, reducers=None): + self._reducers = reducers + self._reader, self._writer = context.Pipe(duplex=False) + self._rlock = context.Lock() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = context.Lock() + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock, + self._reducers) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock, + self._reducers) = state + self._make_methods() + + def empty(self): + return not self._reader.poll() + + def _make_methods(self): + self._recv = recv = self._reader.recv + racquire, rrelease = self._rlock.acquire, self._rlock.release + + def get(): + racquire() + try: + return recv() + finally: + rrelease() + + self.get = get + + if self._reducers: + def send(obj): + buffer = BytesIO() + CustomizablePickler(buffer, self._reducers).dump(obj) + self._writer.send_bytes(buffer.getvalue()) + self._send = send + else: + self._send = send = self._writer.send + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self.put = send + else: + wlock_acquire, wlock_release = ( + self._wlock.acquire, self._wlock.release) + + def put(obj): + wlock_acquire() + try: + return send(obj) + finally: + wlock_release() + + self.put = put + + +class PicklingPool(Pool): + """Pool implementation with customizable pickling reducers. + + This is useful to control how data is shipped between processes + and makes it possible to use shared memory without useless + copies induces by the default pickling methods of the original + objects passed as arguments to dispatch. + + `forward_reducers` and `backward_reducers` are expected to be + dictionaries with key/values being `(type, callable)` pairs where + `callable` is a function that, given an instance of `type`, will return a + tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the + pickled `tuple_of_objects` as would return a `__reduce__` method. + See the standard library documentation about pickling for more details. + + """ + + def __init__(self, processes=None, forward_reducers=None, + backward_reducers=None, **kwargs): + if forward_reducers is None: + forward_reducers = dict() + if backward_reducers is None: + backward_reducers = dict() + self._forward_reducers = forward_reducers + self._backward_reducers = backward_reducers + poolargs = dict(processes=processes) + poolargs.update(kwargs) + super(PicklingPool, self).__init__(**poolargs) + + def _setup_queues(self): + context = getattr(self, '_ctx', mp) + self._inqueue = CustomizablePicklingQueue(context, + self._forward_reducers) + self._outqueue = CustomizablePicklingQueue(context, + self._backward_reducers) + self._quick_put = self._inqueue._send + self._quick_get = self._outqueue._recv + + +class MemmappingPool(PicklingPool): + """Process pool that shares large arrays to avoid memory copy. + + This drop-in replacement for `multiprocessing.pool.Pool` makes + it possible to work efficiently with shared memory in a numpy + context. + + Existing instances of numpy.memmap are preserved: the child + suprocesses will have access to the same shared memory in the + original mode except for the 'w+' mode that is automatically + transformed as 'r+' to avoid zeroing the original data upon + instantiation. 
+ + Furthermore large arrays from the parent process are automatically + dumped to a temporary folder on the filesystem such as child + processes to access their content via memmapping (file system + backed shared memory). + + Note: it is important to call the terminate method to collect + the temporary folder used by the pool. + + Parameters + ---------- + processes: int, optional + Number of worker processes running concurrently in the pool. + initializer: callable, optional + Callable executed on worker process creation. + initargs: tuple, optional + Arguments passed to the initializer callable. + temp_folder: (str, callable) optional + If str: + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, + - /dev/shm if the folder exists and is writable: this is a RAMdisk + filesystem available by default on modern Linux distributions, + - the default system temporary folder that can be overridden + with TMP, TMPDIR or TEMP environment variables, typically /tmp + under Unix operating systems. + if callable: + An callable in charge of dynamically resolving a temporary folder + for memmapping large arrays. + max_nbytes int or None, optional, 1e6 by default + Threshold on the size of arrays passed to the workers that + triggers automated memory mapping in temp_folder. + Use None to disable memmapping of large arrays. + mmap_mode: {'r+', 'r', 'w+', 'c'} + Memmapping mode for numpy arrays passed to workers. + See 'max_nbytes' parameter documentation for more details. + forward_reducers: dictionary, optional + Reducers used to pickle objects passed from master to worker + processes: see below. + backward_reducers: dictionary, optional + Reducers used to pickle return values from workers back to the + master process. + verbose: int, optional + Make it possible to monitor how the communication of numpy arrays + with the subprocess is handled (pickling or memmapping) + prewarm: bool or str, optional, "auto" by default. + If True, force a read on newly memmapped array to make sure that OS + pre-cache it in memory. This can be useful to avoid concurrent disk + access when the same data array is passed to different worker + processes. If "auto" (by default), prewarm is set to True, unless the + Linux shared memory partition /dev/shm is available and used as temp + folder. + + `forward_reducers` and `backward_reducers` are expected to be + dictionaries with key/values being `(type, callable)` pairs where + `callable` is a function that give an instance of `type` will return + a tuple `(constructor, tuple_of_objects)` to rebuild an instance out + of the pickled `tuple_of_objects` as would return a `__reduce__` + method. See the standard library documentation on pickling for more + details. + + """ + + def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6, + mmap_mode='r', forward_reducers=None, backward_reducers=None, + verbose=0, context_id=None, prewarm=False, **kwargs): + + if context_id is not None: + warnings.warn('context_id is deprecated and ignored in joblib' + ' 0.9.4 and will be removed in 0.11', + DeprecationWarning) + + manager = TemporaryResourcesManager(temp_folder) + self._temp_folder_manager = manager + + # The usage of a temp_folder_resolver over a simple temp_folder is + # superfluous for multiprocessing pools, as they don't get reused, see + # get_memmapping_executor for more details. 
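A hedged usage sketch of this pool (a guess at typical use, not an official recipe): arrays above `max_nbytes` are dumped to the temporary folder and handed to the workers as memmaps instead of being copied.

    import numpy as np
    from joblib.pool import MemmappingPool

    if __name__ == "__main__":
        data = np.random.rand(2_000_000)          # ~16 MB, above max_nbytes
        pool = MemmappingPool(processes=2, max_nbytes=1e6, mmap_mode="r")
        try:
            print(pool.map(np.sum, [data] * 4))   # workers read the memmap
        finally:
            pool.terminate()                      # also removes the temp folder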
We still use it for code + # simplicity. + forward_reducers, backward_reducers = \ + get_memmapping_reducers( + temp_folder_resolver=manager.resolve_temp_folder_name, + max_nbytes=max_nbytes, mmap_mode=mmap_mode, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers, verbose=verbose, + unlink_on_gc_collect=False, prewarm=prewarm) + + poolargs = dict( + processes=processes, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers) + poolargs.update(kwargs) + super(MemmappingPool, self).__init__(**poolargs) + + def terminate(self): + n_retries = 10 + for i in range(n_retries): + try: + super(MemmappingPool, self).terminate() + break + except OSError as e: + if isinstance(e, WindowsError): + # Workaround occasional "[Error 5] Access is denied" issue + # when trying to terminate a process under windows. + sleep(0.1) + if i + 1 == n_retries: + warnings.warn("Failed to terminate worker processes in" + " multiprocessing pool: %r" % e) + + # Clean up the temporary resources as the workers should now be off. + self._temp_folder_manager._clean_temporary_resources() + + @property + def _temp_folder(self): + # Legacy property in tests. could be removed if we refactored the + # memmapping tests. SHOULD ONLY BE USED IN TESTS! + # We cache this property because it is called late in the tests - at + # this point, all context have been unregistered, and + # resolve_temp_folder_name raises an error. + if getattr(self, '_cached_temp_folder', None) is not None: + return self._cached_temp_folder + else: + self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa + return self._cached_temp_folder diff --git a/env-llmeval/lib/python3.10/site-packages/joblib/testing.py b/env-llmeval/lib/python3.10/site-packages/joblib/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..caab7d2903c710534596f2248de4b6c0642f9526 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/joblib/testing.py @@ -0,0 +1,99 @@ +""" +Helper for testing. +""" + +import sys +import warnings +import os.path +import re +import subprocess +import threading + +import pytest +import _pytest + + +raises = pytest.raises +warns = pytest.warns +SkipTest = _pytest.runner.Skipped +skipif = pytest.mark.skipif +fixture = pytest.fixture +parametrize = pytest.mark.parametrize +timeout = pytest.mark.timeout +xfail = pytest.mark.xfail +param = pytest.param + + +def warnings_to_stdout(): + """ Redirect all warnings to stdout. + """ + showwarning_orig = warnings.showwarning + + def showwarning(msg, cat, fname, lno, file=None, line=0): + showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout) + + warnings.showwarning = showwarning + # warnings.simplefilter('always') + + +def check_subprocess_call(cmd, timeout=5, stdout_regex=None, + stderr_regex=None): + """Runs a command in a subprocess with timeout in seconds. + + A SIGTERM is sent after `timeout` and if it does not terminate, a + SIGKILL is sent after `2 * timeout`. + + Also checks returncode is zero, stdout if stdout_regex is set, and + stderr if stderr_regex is set. + """ + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def terminate_process(): # pragma: no cover + """ + Attempt to terminate a leftover process spawned during test execution: + ideally this should not be needed but can help avoid clogging the CI + workers in case of deadlocks. 
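A short usage sketch for this helper (it requires pytest, since joblib.testing imports it): run a Python one-liner, enforce a timeout, and check its stdout against a regex.

    import sys
    from joblib.testing import check_subprocess_call

    check_subprocess_call(
        [sys.executable, "-c", "print('hello from a subprocess')"],
        timeout=10,
        stdout_regex=r"hello from a subprocess",
    )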
+ """ + warnings.warn(f"Timeout running {cmd}") + proc.terminate() + + def kill_process(): # pragma: no cover + """ + Kill a leftover process spawned during test execution: ideally this + should not be needed but can help avoid clogging the CI workers in + case of deadlocks. + """ + warnings.warn(f"Timeout running {cmd}") + proc.kill() + + try: + if timeout is not None: + terminate_timer = threading.Timer(timeout, terminate_process) + terminate_timer.start() + kill_timer = threading.Timer(2 * timeout, kill_process) + kill_timer.start() + stdout, stderr = proc.communicate() + stdout, stderr = stdout.decode(), stderr.decode() + if proc.returncode != 0: + message = ( + 'Non-zero return code: {}.\nStdout:\n{}\n' + 'Stderr:\n{}').format( + proc.returncode, stdout, stderr) + raise ValueError(message) + + if (stdout_regex is not None and + not re.search(stdout_regex, stdout)): + raise ValueError( + "Unexpected stdout: {!r} does not match:\n{!r}".format( + stdout_regex, stdout)) + if (stderr_regex is not None and + not re.search(stderr_regex, stderr)): + raise ValueError( + "Unexpected stderr: {!r} does not match:\n{!r}".format( + stderr_regex, stderr)) + + finally: + if timeout is not None: + terminate_timer.cancel() + kill_timer.cancel() diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/__init__.py b/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/libcharset.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/libcharset.h new file mode 100644 index 0000000000000000000000000000000000000000..fcf22748101279e454fd2fefe01908fd8545bce2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/libcharset.h @@ -0,0 +1,45 @@ +/* Copyright (C) 2003 Free Software Foundation, Inc. + This file is part of the GNU CHARSET Library. + + The GNU CHARSET Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + The GNU CHARSET Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with the GNU CHARSET Library; see the file COPYING.LIB. If not, + see . */ + +#ifndef _LIBCHARSET_H +#define _LIBCHARSET_H + +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/* Support for relocatable packages. */ + +/* Sets the original and the current installation prefix of the package. + Relocation simply replaces a pathname starting with the original prefix + by the corresponding pathname with the current prefix instead. Both + prefixes should be directory names without trailing slash (i.e. use "" + instead of "/"). 
*/ +extern void libcharset_set_relocation_prefix (const char *orig_prefix, + const char *curr_prefix); + + +#ifdef __cplusplus +} +#endif + + +#endif /* _LIBCHARSET_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/localcharset.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/localcharset.h new file mode 100644 index 0000000000000000000000000000000000000000..34ce0adde9bb793f1c1cd5f81b5cc3d2eff08ab1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/localcharset.h @@ -0,0 +1,137 @@ +/* Determine a canonical name for the current locale's character encoding. + Copyright (C) 2000-2003, 2009-2019 Free Software Foundation, Inc. + This file is part of the GNU CHARSET Library. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this program; if not, see . */ + +#ifndef _LOCALCHARSET_H +#define _LOCALCHARSET_H + + +#ifdef __cplusplus +extern "C" { +#endif + + +/* Determine the current locale's character encoding, and canonicalize it + into one of the canonical names listed below. + The result must not be freed; it is statically allocated. The result + becomes invalid when setlocale() is used to change the global locale, or + when the value of one of the environment variables LC_ALL, LC_CTYPE, LANG + is changed; threads in multithreaded programs should not do this. + If the canonical name cannot be determined, the result is a non-canonical + name. */ +extern const char * locale_charset (void); + +/* About GNU canonical names for character encodings: + + Every canonical name must be supported by GNU libiconv. Support by GNU libc + is also desirable. + + The name is case insensitive. Usually an upper case MIME charset name is + preferred. + + The current list of these GNU canonical names is: + + name MIME? 
used by which systems + (darwin = Mac OS X, windows = native Windows) + + ASCII, ANSI_X3.4-1968 glibc solaris freebsd netbsd darwin minix cygwin + ISO-8859-1 Y glibc aix hpux irix osf solaris freebsd netbsd openbsd darwin cygwin zos + ISO-8859-2 Y glibc aix hpux irix osf solaris freebsd netbsd openbsd darwin cygwin zos + ISO-8859-3 Y glibc solaris cygwin + ISO-8859-4 Y hpux osf solaris freebsd netbsd openbsd darwin + ISO-8859-5 Y glibc aix hpux irix osf solaris freebsd netbsd openbsd darwin cygwin zos + ISO-8859-6 Y glibc aix hpux solaris cygwin + ISO-8859-7 Y glibc aix hpux irix osf solaris freebsd netbsd openbsd darwin cygwin zos + ISO-8859-8 Y glibc aix hpux osf solaris cygwin zos + ISO-8859-9 Y glibc aix hpux irix osf solaris freebsd darwin cygwin zos + ISO-8859-13 glibc hpux solaris freebsd netbsd openbsd darwin cygwin + ISO-8859-14 glibc cygwin + ISO-8859-15 glibc aix irix osf solaris freebsd netbsd openbsd darwin cygwin + KOI8-R Y glibc hpux solaris freebsd netbsd openbsd darwin + KOI8-U Y glibc freebsd netbsd openbsd darwin cygwin + KOI8-T glibc + CP437 dos + CP775 dos + CP850 aix osf dos + CP852 dos + CP855 dos + CP856 aix + CP857 dos + CP861 dos + CP862 dos + CP864 dos + CP865 dos + CP866 freebsd netbsd openbsd darwin dos + CP869 dos + CP874 windows dos + CP922 aix + CP932 aix cygwin windows dos + CP943 aix zos + CP949 osf darwin windows dos + CP950 windows dos + CP1046 aix + CP1124 aix + CP1125 dos + CP1129 aix + CP1131 freebsd darwin + CP1250 windows + CP1251 glibc hpux solaris freebsd netbsd openbsd darwin cygwin windows + CP1252 aix windows + CP1253 windows + CP1254 windows + CP1255 glibc windows + CP1256 windows + CP1257 windows + GB2312 Y glibc aix hpux irix solaris freebsd netbsd darwin cygwin zos + EUC-JP Y glibc aix hpux irix osf solaris freebsd netbsd darwin cygwin + EUC-KR Y glibc aix hpux irix osf solaris freebsd netbsd darwin cygwin zos + EUC-TW glibc aix hpux irix osf solaris netbsd + BIG5 Y glibc aix hpux osf solaris freebsd netbsd darwin cygwin zos + BIG5-HKSCS glibc hpux solaris netbsd darwin + GBK glibc aix osf solaris freebsd darwin cygwin windows dos + GB18030 glibc hpux solaris freebsd netbsd darwin + SHIFT_JIS Y hpux osf solaris freebsd netbsd darwin + JOHAB glibc solaris windows + TIS-620 glibc aix hpux osf solaris cygwin zos + VISCII Y glibc + TCVN5712-1 glibc + ARMSCII-8 glibc freebsd netbsd darwin + GEORGIAN-PS glibc cygwin + PT154 glibc netbsd cygwin + HP-ROMAN8 hpux + HP-ARABIC8 hpux + HP-GREEK8 hpux + HP-HEBREW8 hpux + HP-TURKISH8 hpux + HP-KANA8 hpux + DEC-KANJI osf + DEC-HANYU osf + UTF-8 Y glibc aix hpux osf solaris netbsd darwin cygwin zos + + Note: Names which are not marked as being a MIME name should not be used in + Internet protocols for information interchange (mail, news, etc.). + + Note: ASCII and ANSI_X3.4-1968 are synonymous canonical names. Applications + must understand both names and treat them as equivalent. 
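For comparison, a small Python sketch of the same idea as `locale_charset()` (Unix-only in part, since `nl_langinfo` is not available on every platform): determine the name of the current locale's character encoding.

    import locale

    locale.setlocale(locale.LC_CTYPE, "")        # adopt the user's locale
    print(locale.nl_langinfo(locale.CODESET))    # e.g. UTF-8 (Unix only)
    print(locale.getpreferredencoding(False))    # portable alternative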
+ */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* _LOCALCHARSET_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/zlib.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/zlib.h new file mode 100644 index 0000000000000000000000000000000000000000..8d4b932eaf6a0fbb8133b3ab49ba5ef587059fa0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/extlibs/zlib.h @@ -0,0 +1,1938 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.3.1, January 22nd, 2024 + + Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). +*/ + +#ifndef ZLIB_H +#define ZLIB_H + +#include "zconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZLIB_VERSION "1.3.1" +#define ZLIB_VERNUM 0x1310 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 3 +#define ZLIB_VER_REVISION 1 +#define ZLIB_VER_SUBREVISION 0 + +/* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. + + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output + (providing more output space) before each call. + + The compressed data format used by default by the in-memory functions is + the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped + around a deflate stream, which is itself documented in RFC 1951. + + The library also supports reading and writing files in gzip (.gz) format + with an interface similar to that of stdio using the functions that start + with "gz". The gzip format is different from the zlib format. gzip is a + gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. + + This library can optionally read and write gzip and raw deflate streams in + memory as well. + + The zlib format was designed to be compact and fast for use in memory + and on communications channels. The gzip format was designed for single- + file compression on file systems, has a larger header than zlib to maintain + directory information, and uses a different, slower check method than zlib. 
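The three containers described above (zlib, gzip, raw deflate) can be produced from Python's stdlib `zlib` bindings by varying `wbits`; a sketch using the documented stdlib constants:

    import zlib

    payload = b"the same payload, three different wrappers" * 100

    def pack(wbits):
        comp = zlib.compressobj(6, zlib.DEFLATED, wbits)
        return comp.compress(payload) + comp.flush()

    zlib_blob = pack(15)         # zlib wrapper (RFC 1950)
    gzip_blob = pack(16 + 15)    # gzip wrapper (RFC 1952), larger header
    raw_blob = pack(-15)         # raw deflate stream (RFC 1951)

    assert zlib.decompress(gzip_blob, 32 + 15) == payload   # 32: auto-detect
    assert zlib.decompress(raw_blob, -15) == payload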
+ + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in the case of corrupted input. +*/ + +typedef voidpf (*alloc_func)(voidpf opaque, uInt items, uInt size); +typedef void (*free_func)(voidpf opaque, voidpf address); + +struct internal_state; + +typedef struct z_stream_s { + z_const Bytef *next_in; /* next input byte */ + uInt avail_in; /* number of bytes available at next_in */ + uLong total_in; /* total number of input bytes read so far */ + + Bytef *next_out; /* next output byte will go here */ + uInt avail_out; /* remaining free space at next_out */ + uLong total_out; /* total number of bytes output so far */ + + z_const char *msg; /* last error message, NULL if no error */ + struct internal_state FAR *state; /* not visible by applications */ + + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree; /* used to free the internal state */ + voidpf opaque; /* private data object passed to zalloc and zfree */ + + int data_type; /* best guess about the data type: binary or text + for deflate, or the decoding state for inflate */ + uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */ + uLong reserved; /* reserved for future use */ +} z_stream; + +typedef z_stream FAR *z_streamp; + +/* + gzip header information passed to and from zlib routines. See RFC 1952 + for more details on the meanings of these fields. +*/ +typedef struct gz_header_s { + int text; /* true if compressed data believed to be text */ + uLong time; /* modification time */ + int xflags; /* extra flags (not used when writing a gzip file) */ + int os; /* operating system */ + Bytef *extra; /* pointer to extra field or Z_NULL if none */ + uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ + uInt extra_max; /* space at extra (only when reading header) */ + Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ + uInt name_max; /* space at name (only when reading header) */ + Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ + uInt comm_max; /* space at comment (only when reading header) */ + int hcrc; /* true if there was or will be a header crc */ + int done; /* true when done reading gzip header (not used + when writing a gzip file) */ +} gz_header; + +typedef gz_header FAR *gz_headerp; + +/* + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. + + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return Z_NULL if there is not enough memory for the object. + If zlib is used in a multi-threaded application, zalloc and zfree must be + thread safe. In that case, zlib is thread-safe. When zalloc and zfree are + Z_NULL on entry to the initialization function, they are set to internal + routines that use the standard library functions malloc() and free(). 
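The incremental input/output contract sketched above maps onto Python's `zlib.compressobj`, which manages the `z_stream` state internally: feed chunks as they arrive, collect output as it is produced, and finish the stream explicitly.

    import zlib

    def compress_chunks(chunks, level=6):
        comp = zlib.compressobj(level)
        for chunk in chunks:
            out = comp.compress(chunk)   # may return b"" while data buffers up
            if out:
                yield out
        yield comp.flush()               # finish the stream (Z_FINISH)

    blob = b"".join(compress_chunks([b"abc" * 1000, b"def" * 1000]))
    assert zlib.decompress(blob) == b"abc" * 1000 + b"def" * 1000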
+ + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use by the decompressor (particularly + if the decompressor wants to decompress everything in a single step). +*/ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 +#define Z_SYNC_FLUSH 2 +#define Z_FULL_FLUSH 3 +#define Z_FINISH 4 +#define Z_BLOCK 5 +#define Z_TREES 6 +/* Allowed flush values; see deflate() and inflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) +/* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_RLE 3 +#define Z_FIXED 4 +#define Z_DEFAULT_STRATEGY 0 +/* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_TEXT 1 +#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ +#define Z_UNKNOWN 2 +/* Possible values of the data_type field for deflate() */ + +#define Z_DEFLATED 8 +/* The deflate compression method (the only one supported in this version) */ + +#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ + +#define zlib_version zlibVersion() +/* for compatibility with versions < 1.0.2 */ + + + /* basic functions */ + +ZEXTERN const char * ZEXPORT zlibVersion(void); +/* The application can compare zlibVersion and ZLIB_VERSION for consistency. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. + */ + +/* +ZEXTERN int ZEXPORT deflateInit(z_streamp strm, int level); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. total_in, total_out, adler, and msg are initialized. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). 
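The speed/size trade-off described for the compression level can be observed directly from Python (exact sizes depend on the input):

    import zlib

    data = b"joblib " * 20000
    for level in (0, 1, 6, 9):
        print(level, len(zlib.compress(data, level)))
    # Level 0 stores the data uncompressed (plus a small wrapper), level 1
    # favours speed, level 9 favours size, and 6 is the usual default.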
+ + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). +*/ + + +ZEXTERN int ZEXPORT deflate(z_streamp strm, int flush); +/* + deflate compresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. deflate performs one or both of the + following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary. Some output may be provided even if + flush is zero. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. See deflatePending(), + which can be used if desired to determine whether or not there is more output + in that case. + + Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to + decide how much data to accumulate before producing output, in order to + maximize compression. + + If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + flushed to the output buffer and the output is aligned on a byte boundary, so + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. + This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed + codes block. 
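The Z_SYNC_FLUSH behaviour described above (byte alignment followed by the empty stored block 00 00 ff ff) is visible from Python's bindings:

    import zlib

    comp = zlib.compressobj()
    part = comp.compress(b"hello") + comp.flush(zlib.Z_SYNC_FLUSH)
    assert part.endswith(b"\x00\x00\xff\xff")   # the empty stored block marker

    decomp = zlib.decompressobj()
    assert decomp.decompress(part) == b"hello"  # everything so far is decodable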
+ + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. It may need to wait for the next + block to be emitted. This is for advanced applications that need to control + the emission of deflate blocks. + + If flush is set to Z_FULL_FLUSH, all output is flushed as with + Z_SYNC_FLUSH, and the compression state is reset so that decompression can + restart from this point if previous compressed data has been damaged or if + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + compression. + + If deflate returns with avail_out == 0, this function must be called again + with the same value of the flush parameter and more output space (updated + avail_out), until the flush is complete (deflate returns with non-zero + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out is greater than six when the flush marker begins, in order to avoid + repeated flush markers upon calling deflate() again when avail_out == 0. + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this + function must be called again with Z_FINISH and more output space (updated + avail_out) but no more input data, until it returns with Z_STREAM_END or an + error. After deflate has returned Z_STREAM_END, the only possible operations + on the stream are deflateReset or deflateEnd. + + Z_FINISH can be used in the first deflate call after deflateInit if all the + compression is to be done in a single step. In order to complete in one + call, avail_out must be at least the value returned by deflateBound (see + below). Then deflate is guaranteed to return Z_STREAM_END. If not enough + output space is provided, deflate will not return Z_STREAM_END, and it must + be called again as described above. + + deflate() sets strm->adler to the Adler-32 checksum of all input read + so far (that is, total_in bytes). If a gzip stream is being generated, then + strm->adler will be the CRC-32 checksum of the input read so far. (See + deflateInit2 below.) + + deflate() may update strm->data_type if it can make a good guess about + the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is + considered binary. This field is only for information purposes and does not + affect the compression algorithm in any manner. + + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was Z_NULL or the state was inadvertently written over + by the application), or Z_BUF_ERROR if no progress is possible (for example + avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and + deflate() can be called again with more input and more output space to + continue compressing. 
+*/ + + +ZEXTERN int ZEXPORT deflateEnd(z_streamp strm); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be + deallocated). +*/ + + +/* +ZEXTERN int ZEXPORT inflateInit(z_streamp strm); + + Initializes the internal stream state for decompression. The fields + next_in, avail_in, zalloc, zfree and opaque must be initialized before by + the caller. In the current version of inflate, the provided input is not + read or consumed. The allocation of a sliding window will be deferred to + the first call of inflate (if the decompression does not complete on the + first call). If zalloc and zfree are set to Z_NULL, inflateInit updates + them to use default allocation functions. total_in, total_out, adler, and + msg are initialized. + + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression. + Actual decompression will be done by inflate(). So next_in, and avail_in, + next_out, and avail_out are unused and unchanged. The current + implementation of inflateInit() does not process any header information -- + that is deferred until inflate() is called. +*/ + + +ZEXTERN int ZEXPORT inflate(z_streamp strm, int flush); +/* + inflate decompresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. inflate performs one or both of the + following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), then next_in and avail_in are updated + accordingly, and processing will resume at this point for the next call of + inflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). + + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. If the + caller of inflate() does not provide both available input and available + output space, it is possible that there will be no progress made. The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. 
Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. + + The Z_BLOCK option assists in appending to or combining deflate streams. + To assist in this, on return inflate() always sets strm->data_type to the + number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. + + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. + + inflate() should normally be called until it returns Z_STREAM_END or an + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. + + In this implementation, inflate() always flushes as much output as + possible to the output buffer, and always uses the faster approach on the + first call. So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. 
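+
+   Putting the above together, a simplified decompression loop might look as
+   follows. This is an illustration only: strm is assumed to be an inflate
+   stream already initialized with inflateInit, the sketch is the body of a
+   hypothetical function returning a zlib status code, and fill_input() and
+   consume_output() are stand-ins for whatever actually supplies and drains
+   the data.
+
+      unsigned char in[16384], out[16384];
+      int ret = Z_OK;
+      do {
+          strm.avail_in = fill_input(in, sizeof(in));      // hypothetical
+          if (strm.avail_in == 0) break;                   // input exhausted
+          strm.next_in = in;
+          do {
+              strm.next_out  = out;
+              strm.avail_out = sizeof(out);
+              ret = inflate(&strm, Z_NO_FLUSH);
+              if (ret != Z_OK && ret != Z_BUF_ERROR && ret != Z_STREAM_END)
+                  return ret;               // Z_DATA_ERROR, Z_MEM_ERROR, ...
+              consume_output(out, sizeof(out) - strm.avail_out); // hypothetical
+          } while (strm.avail_out == 0);
+      } while (ret != Z_STREAM_END);
+      inflateEnd(&strm);
+      return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;    // incomplete stream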
+ + If a preset dictionary is needed after this call (see inflateSetDictionary + below), inflate sets strm->adler to the Adler-32 checksum of the dictionary + chosen by the compressor and returns Z_NEED_DICT; otherwise it sets + strm->adler to the Adler-32 checksum of all output produced so far (that is, + total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described + below. At the end of the stream, inflate() checks that its computed Adler-32 + checksum is equal to that saved by the compressor and returns Z_STREAM_END + only if the checksum is correct. + + inflate() can decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically, if requested when + initializing with inflateInit2(). Any information contained in the gzip + header is not retained unless inflateGetHeader() is used. When processing + gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output + produced so far. The CRC-32 is checked against the gzip trailer, as is the + uncompressed length, modulo 2^32. + + inflate() returns Z_OK if some progress has been made (more input processed + or more output produced), Z_STREAM_END if the end of the compressed data has + been reached and all uncompressed output has been produced, Z_NEED_DICT if a + preset dictionary is needed at this point, Z_DATA_ERROR if the input data was + corrupted (input stream not conforming to the zlib format or incorrect check + value, in which case strm->msg points to a string with a more specific + error), Z_STREAM_ERROR if the stream structure was inconsistent (for example + next_in or next_out was Z_NULL, or the state was inadvertently written over + by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR + if no progress was possible or if there was not enough room in the output + buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + inflate() can be called again with more input and more output space to + continue decompressing. If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is to be attempted. +*/ + + +ZEXTERN int ZEXPORT inflateEnd(z_streamp strm); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state + was inconsistent. +*/ + + + /* Advanced functions */ + +/* + The following functions are needed only in some special applications. +*/ + +/* +ZEXTERN int ZEXPORT deflateInit2(z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy); + + This is another version of deflateInit with more compression options. The + fields zalloc, zfree and opaque must be initialized before by the caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. + + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if + deflateInit is used instead. + + For the current implementation of deflate(), a windowBits value of 8 (a + window size of 256 bytes) is not supported. As a result, a request for 8 + will result in 9 (a 512-byte window). 
In that case, providing 8 to + inflateInit2() will result in an error when the zlib header with 9 is + checked against the initialization of inflate(). The remedy is to not use 8 + with deflateInit2() with this initialization, or at least in that case use 9 + with inflateInit2(). + + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data + with no zlib header or trailer, and will not compute a check value. + + windowBits can also be greater than 15 for optional gzip encoding. Add + 16 to windowBits to write a simple gzip header and trailer around the + compressed data instead of a zlib wrapper. The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to the appropriate value, + if the operating system was determined at compile time. If a gzip stream is + being written, strm->adler is a CRC-32 instead of an Adler-32. + + For raw deflate or gzip encoding, a request for a 256-byte window is + rejected as invalid, since only the zlib header provides a means of + transmitting the window size to the decompressor. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. + + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match), or Z_RLE to limit match distances to one (run-length + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman + coding and less string matching; it is somewhat intermediate between + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. deflateInit2 does not perform any + compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateSetDictionary(z_streamp strm, + const Bytef *dictionary, + uInt dictLength); +/* + Initializes the compression dictionary from the given byte sequence + without producing any compressed output. When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. 
after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). + + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary. Using a + dictionary is most useful when the data to be compressed is short and can be + predicted with good accuracy; the data can then be compressed better than + with the default empty dictionary. + + Depending on the size of the compression data structures selected by + deflateInit or deflateInit2, a part of the dictionary may in effect be + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. + + Upon return of this function, strm->adler is set to the Adler-32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The Adler-32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) If a raw deflate was requested, then the + Adler-32 value is not computed and strm->adler is not set. + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent (for example if deflate has already been called for this stream + or if not at a block boundary for raw deflate). deflateSetDictionary does + not perform any compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateGetDictionary(z_streamp strm, + Bytef *dictionary, + uInt *dictLength); +/* + Returns the sliding dictionary being maintained by deflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If deflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similarly, if dictLength is Z_NULL, then it is not set. + + deflateGetDictionary() may return a length less than the window size, even + when more than the window size in input has been provided. It may return up + to 258 bytes less in that case, due to how zlib's implementation of deflate + manages the sliding window and lookahead for matches, where matches can be + up to 258 bytes long. If the application needs the last window-size bytes of + input, then that would need to be saved by the application outside of zlib. + + deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + +ZEXTERN int ZEXPORT deflateCopy(z_streamp dest, + z_streamp source); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. 
Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. + + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT deflateReset(z_streamp strm); +/* + This function is equivalent to deflateEnd followed by deflateInit, but + does not free and reallocate the internal compression state. The stream + will leave the compression level and any other attributes that may have been + set unchanged. total_in, total_out, adler, and msg are initialized. + + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT deflateParams(z_streamp strm, + int level, + int strategy); +/* + Dynamically update the compression level and compression strategy. The + interpretation of level and strategy is as in deflateInit2(). This can be + used to switch between compression and straight copy of the input data, or + to switch to a different kind of input data requiring a different strategy. + If the compression approach (which is a function of the level) or the + strategy is changed, and if there have been any deflate() calls since the + state was initialized or reset, then the input available so far is + compressed with the old level and strategy using deflate(strm, Z_BLOCK). + There are three approaches for the compression levels 0, 1..3, and 4..9 + respectively. The new level and strategy will take effect at the next call + of deflate(). + + If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does + not have enough output space to complete, then the parameter change will not + take effect. In this case, deflateParams() can be called again with the + same parameters and more output space to try again. + + In order to assure a change in the parameters on the first try, the + deflate stream should be flushed using deflate() with Z_BLOCK or other flush + request until strm.avail_out is not zero, before calling deflateParams(). + Then no more input data should be provided before the deflateParams() call. + If this is done, the old level and strategy will be applied to the data + compressed before deflateParams(), and the new level and strategy will be + applied to the data compressed after deflateParams(). + + deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream + state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if + there was not enough output space to complete the compression of the + available input data before a change in the strategy or approach. Note that + in the case of a Z_BUF_ERROR, the parameters are not changed. A return + value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be + retried with more output space. +*/ + +ZEXTERN int ZEXPORT deflateTune(z_streamp strm, + int good_length, + int max_lazy, + int nice_length, + int max_chain); +/* + Fine tune deflate's internal compression parameters. This should only be + used by someone who understands the algorithm used by zlib's deflate for + searching for the best matching string, and even then only by the most + fanatic optimizer trying to squeeze out the last compressed bit for their + specific input data. 
Read the deflate.c source code for the meaning of the + max_lazy, good_length, nice_length, and max_chain parameters. + + deflateTune() can be called after deflateInit() or deflateInit2(), and + returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. + */ + +ZEXTERN uLong ZEXPORT deflateBound(z_streamp strm, + uLong sourceLen); +/* + deflateBound() returns an upper bound on the compressed size after + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. +*/ + +ZEXTERN int ZEXPORT deflatePending(z_streamp strm, + unsigned *pending, + int *bits); +/* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. + + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + +ZEXTERN int ZEXPORT deflatePrime(z_streamp strm, + int bits, + int value); +/* + deflatePrime() inserts bits in the deflate output stream. The intent + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. + + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. +*/ + +ZEXTERN int ZEXPORT deflateSetHeader(z_streamp strm, + gz_headerp head); +/* + deflateSetHeader() provides gzip header information for when a gzip + stream is requested by deflateInit2(). deflateSetHeader() may be called + after deflateInit2() or deflateReset() and before the first call of + deflate(). The text, time, os, extra field, name, and comment information + in the provided gz_header structure are written to the gzip header (xflag is + ignored -- the extra flags are set according to the compression level). The + caller must assure that, if not Z_NULL, name and comment are terminated with + a zero byte, and that if extra is not Z_NULL, that extra_len bytes are + available there. If hcrc is true, a gzip header crc is included. Note that + the current versions of the command-line version of gzip (up through version + 1.3.x) do not support header crc's, and will report that it is a "multi-part + gzip file" and give up. 
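+
+   For illustration only (the field values below are arbitrary, memset()
+   assumes <string.h>, and strm is a deflate stream set up for gzip output
+   with deflateInit2() and windowBits = 15 + 16):
+
+      gz_header head;
+      memset(&head, 0, sizeof(head));       // zero all fields first
+      head.text = 1;                        // content is believed to be text
+      head.time = 0;                        // 0 = no time stamp recorded
+      head.os   = 255;                      // 255 = unknown OS (RFC 1952)
+      head.name = (Bytef *)"example.txt";   // hypothetical name, zero-terminated
+      deflateSetHeader(&strm, &head);       // before the first deflate() call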
+ + If deflateSetHeader is not used, the default gzip header has text false, + the time set to zero, and os set to the current operating system, with no + extra, name, or comment fields. The gzip header is returned to the default + state by deflateReset(). + + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateInit2(z_streamp strm, + int windowBits); + + This is another version of inflateInit with an extra parameter. The + fields next_in, avail_in, zalloc, zfree and opaque must be initialized + before by the caller. + + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). It should be in the range 8..15 for + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value + provided to deflateInit2() while compressing, or it must be equal to 15 if + deflateInit2() was not used. If a compressed stream with a larger window + size is given as input, inflate() will return with the error code + Z_DATA_ERROR instead of trying to allocate a larger window. + + windowBits can also be zero to request that inflate use the window size in + the zlib header of the compressed stream. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. inflate() will then process raw deflate data, + not looking for a zlib or gzip header, not generating a check value, and not + looking for any check values for comparison at the end of the stream. This + is for use with other formats that use the deflate compressed data format + such as zip. Those formats provide their own check values. If a custom + format is developed using the raw deflate format for compressed data, it is + recommended that a check value such as an Adler-32 or a CRC-32 be applied to + the uncompressed data as is done in the zlib, gzip, and zip formats. For + most applications, the zlib format should be used as is. Note that comments + above on the use in deflateInit2() applies to the magnitude of windowBits. + + windowBits can also be greater than 15 for optional gzip decoding. Add + 32 to windowBits to enable zlib and gzip decoding with automatic header + detection, or add 16 to decode only the gzip format (the zlib format will + return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a + CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see + below), inflate() will *not* automatically decode concatenated gzip members. + inflate() will return Z_STREAM_END at the end of the gzip member. The state + would need to be reset to continue decoding a subsequent gzip member. This + *must* be done if there is more data after a gzip member, in order for the + decompression to be compliant with the gzip standard (RFC 1952). + + inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit2 does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) 
The current implementation + of inflateInit2() does not process any header information -- that is + deferred until inflate() is called. +*/ + +ZEXTERN int ZEXPORT inflateSetDictionary(z_streamp strm, + const Bytef *dictionary, + uInt dictLength); +/* + Initializes the decompression dictionary from the given uncompressed byte + sequence. This function must be called immediately after a call of inflate, + if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + can be determined from the Adler-32 value returned by that call of inflate. + The compressor and decompressor must use exactly the same dictionary (see + deflateSetDictionary). For raw inflate, this function can be called at any + time to set the dictionary. If the provided dictionary is smaller than the + window and there is already data in the window, then the provided dictionary + will amend what's there. The application must insure that the dictionary + that was used for compression is provided. + + inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the + expected one (incorrect Adler-32 value). inflateSetDictionary does not + perform any decompression: this will be done by subsequent calls of + inflate(). +*/ + +ZEXTERN int ZEXPORT inflateGetDictionary(z_streamp strm, + Bytef *dictionary, + uInt *dictLength); +/* + Returns the sliding dictionary being maintained by inflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If inflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similarly, if dictLength is Z_NULL, then it is not set. + + inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + +ZEXTERN int ZEXPORT inflateSync(z_streamp strm); +/* + Skips invalid compressed data until a possible full flush point (see above + for the description of deflate with Z_FULL_FLUSH) can be found, or until all + available input is skipped. No output is provided. + + inflateSync searches for a 00 00 FF FF pattern in the compressed data. + All full flush points have this pattern, but not all occurrences of this + pattern are full flush points. + + inflateSync returns Z_OK if a possible full flush point has been found, + Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point + has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. + In the success case, the application may save the current value of total_in + which indicates where valid compressed data was found. In the error case, + the application may repeatedly call inflateSync, providing more input each + time, until success or end of the input data. +*/ + +ZEXTERN int ZEXPORT inflateCopy(z_streamp dest, + z_streamp source); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when randomly accessing a large stream. The + first pass through the stream can periodically record the inflate state, + allowing restarting inflate at those points when randomly accessing the + stream. + + inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). 
msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT inflateReset(z_streamp strm); +/* + This function is equivalent to inflateEnd followed by inflateInit, + but does not free and reallocate the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. + total_in, total_out, adler, and msg are initialized. + + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT inflateReset2(z_streamp strm, + int windowBits); +/* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. If the window size is changed, then the + memory allocated for the window is freed, and the window will be reallocated + by inflate() if needed. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. +*/ + +ZEXTERN int ZEXPORT inflatePrime(z_streamp strm, + int bits, + int value); +/* + This function inserts bits in the inflate input stream. The intent is + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. + + If bits is negative, then the input stream bit buffer is emptied. Then + inflatePrime() can be called again to put bits in the buffer. This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. + + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +ZEXTERN long ZEXPORT inflateMark(z_streamp strm); +/* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. + If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. 
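+
+   For example (a sketch only; the variable names are not part of the API,
+   and the right shift simply mirrors the "shifting the return value down
+   16 bits" wording above):
+
+      long mark  = inflateMark(&strm);
+      long upper = mark >> 16;       // -1, or bits back to the current code
+      long lower = mark & 0xffff;    // bytes remaining or emitted, as above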
+ + inflateMark returns the value noted above, or -65536 if the provided + source stream state was inconsistent. +*/ + +ZEXTERN int ZEXPORT inflateGetHeader(z_streamp strm, + gz_headerp head); +/* + inflateGetHeader() requests that gzip header information be stored in the + provided gz_header structure. inflateGetHeader() may be called after + inflateInit2() or inflateReset(), and before the first call of inflate(). + As inflate() processes the gzip stream, head->done is zero until the header + is completed, at which time head->done is set to one. If a zlib stream is + being decoded, then head->done is set to -1 to indicate that there will be + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. + + The text, time, xflags, and os fields are filled in with the gzip header + contents. hcrc is set to true if there is a header CRC. (The header CRC + was valid if done is set to one.) If extra is not Z_NULL, then extra_max + contains the maximum number of bytes to write to extra. Once done is true, + extra_len contains the actual extra field length, and extra contains the + extra field, or that field truncated if extra_max is less than extra_len. + If name is not Z_NULL, then up to name_max characters are written there, + terminated with a zero unless the length is greater than name_max. If + comment is not Z_NULL, then up to comm_max characters are written there, + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its + absence. This allows the use of deflateSetHeader() with the returned + structure to duplicate the header. However if those fields are set to + allocated memory, then the application will need to save those pointers + elsewhere so that they can be eventually freed. + + If inflateGetHeader is not used, then the header information is simply + discarded. The header is always checked for validity, including the header + CRC if present. inflateReset() will reset the process to discard the header + information. The application would need to call inflateGetHeader() again to + retrieve the header from the next gzip stream. + + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateBackInit(z_streamp strm, int windowBits, + unsigned char FAR *window); + + Initialize the internal stream state for decompression using inflateBack() + calls. The fields zalloc, zfree and opaque in strm must be initialized + before the call. If zalloc and zfree are Z_NULL, then the default library- + derived memory allocation routines are used. windowBits is the base two + logarithm of the window size, in the range 8..15. window is a caller + supplied buffer of that size. Except for special applications where it is + assured that deflate was used with small window sizes, windowBits must be 15 + and a 32K byte window must be supplied to be able to decompress general + deflate streams. + + See inflateBack() for the usage of these routines. + + inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. 
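+
+   A minimal initialization sketch (the statically allocated window shown
+   here is one possible choice; it could equally well come from malloc(), as
+   long as it is 32K bytes when windowBits is 15):
+
+      static unsigned char window[32768];   // 1 << 15 bytes for windowBits 15
+      z_stream strm;
+      int ret;
+      strm.zalloc = Z_NULL;
+      strm.zfree  = Z_NULL;
+      strm.opaque = Z_NULL;
+      ret = inflateBackInit(&strm, 15, window);    // Z_OK on success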
+*/ + +typedef unsigned (*in_func)(void FAR *, + z_const unsigned char FAR * FAR *); +typedef int (*out_func)(void FAR *, unsigned char FAR *, unsigned); + +ZEXTERN int ZEXPORT inflateBack(z_streamp strm, + in_func in, void FAR *in_desc, + out_func out, void FAR *out_desc); +/* + inflateBack() does a raw inflate with a single call using a call-back + interface for input and output. This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. + + inflateBackInit() must be called first to allocate the internal state + and to initialize the state with the user-provided window buffer. + inflateBack() may then be used multiple times to inflate a complete, raw + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. + + A raw deflate stream is one with no zlib or gzip header or trailer. + This routine would normally be used in a utility that reads zip or gzip + files and writes out uncompressed files. The utility would decode the + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. This is different from the default + behavior of inflate(), which expects a zlib header and trailer around the + deflate stream. + + inflateBack() uses two subroutines supplied by the caller that are then + called by inflateBack() for input and output. inflateBack() calls those + routines until it reads a complete deflate stream and writes out all of the + uncompressed data, or until it encounters an error. The function's + parameters and return types are defined above in the in_func and out_func + typedefs. inflateBack() will call in(in_desc, &buf) which should return the + number of bytes of provided input, and a pointer to that input in buf. If + there is no input available, in() must return zero -- buf is ignored in that + case -- and inflateBack() will return a buffer error. inflateBack() will + call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. + out() should return zero on success, or non-zero on failure. If out() + returns non-zero, inflateBack() will return with an error. Neither in() nor + out() are permitted to change the contents of the window provided to + inflateBackInit(), which is also the buffer that out() uses to write from. + The length written by out() will be at most the window size. Any non-zero + amount of input may be provided by in(). + + For convenience, inflateBack() can be provided input on the first call by + setting strm->next_in and strm->avail_in. If that input is exhausted, then + in() will be called. Therefore strm->next_in must be initialized before + calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called + immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in + must also be initialized, and then if strm->avail_in is not zero, input will + initially be taken from strm->next_in[0 .. strm->avail_in - 1]. + + The in_desc and out_desc parameters of inflateBack() is passed as the + first parameter of in() and out() respectively when they are called. 
These + descriptors can be optionally used to pass any information that the caller- + supplied in() and out() functions need to do their job. + + On return, inflateBack() will set strm->next_in and strm->avail_in to + pass back any unused input that was provided by the last in() call. The + return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR + if in() or out() returned an error, Z_DATA_ERROR if there was a format error + in the deflate stream (in which case strm->msg is set to indicate the nature + of the error), or Z_STREAM_ERROR if the stream was not properly initialized. + In the case of Z_BUF_ERROR, an input or output error can be distinguished + using strm->next_in which will be Z_NULL only if in() returned an error. If + strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning + non-zero. (in() will always be called before out(), so strm->next_in is + assured to be defined if out() returns non-zero.) Note that inflateBack() + cannot return Z_OK. +*/ + +ZEXTERN int ZEXPORT inflateBackEnd(z_streamp strm); +/* + All memory allocated by inflateBackInit() is freed. + + inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream + state was inconsistent. +*/ + +ZEXTERN uLong ZEXPORT zlibCompileFlags(void); +/* Return flags indicating compile-time options. + + Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: + 1.0: size of uInt + 3.2: size of uLong + 5.4: size of voidpf (pointer) + 7.6: size of z_off_t + + Compiler, assembler, and debug options: + 8: ZLIB_DEBUG + 9: ASMV or ASMINF -- use ASM code + 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention + 11: 0 (reserved) + + One-time table building (smaller code, but not thread-safe if true): + 12: BUILDFIXED -- build static block decoding tables when needed + 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed + 14,15: 0 (reserved) + + Library content (indicates missing functionality): + 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking + deflate code when not needed) + 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect + and decode gzip streams (to avoid linking crc code) + 18-19: 0 (reserved) + + Operation variations (changes in library functionality): + 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate + 21: FASTEST -- deflate algorithm with only one, lowest compression level + 22,23: 0 (reserved) + + The sprintf variant used by gzprintf (zero is best): + 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format + 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! + 26: 0 = returns value, 1 = void -- 1 means inferred string length returned + + Remainder: + 27-31: 0 (reserved) + */ + +#ifndef Z_SOLO + + /* utility functions */ + +/* + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. +*/ + +ZEXTERN int ZEXPORT compress(Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen); +/* + Compresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). 
Upon exit, destLen is the actual size of the + compressed data. compress() is equivalent to compress2() with a level + parameter of Z_DEFAULT_COMPRESSION. + + compress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer. +*/ + +ZEXTERN int ZEXPORT compress2(Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen, + int level); +/* + Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed data. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. +*/ + +ZEXTERN uLong ZEXPORT compressBound(uLong sourceLen); +/* + compressBound() returns an upper bound on the compressed size after + compress() or compress2() on sourceLen bytes. It would be used before a + compress() or compress2() call to allocate the destination buffer. +*/ + +ZEXTERN int ZEXPORT uncompress(Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen); +/* + Decompresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. (The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, destLen + is the actual size of the uncompressed data. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. +*/ + +ZEXTERN int ZEXPORT uncompress2(Bytef *dest, uLongf *destLen, + const Bytef *source, uLong *sourceLen); +/* + Same as uncompress, except that sourceLen is a pointer, where the + length of the source is *sourceLen. On return, *sourceLen is the number of + source bytes consumed. +*/ + + /* gzip file access functions */ + +/* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. +*/ + +typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ + +/* +ZEXTERN gzFile ZEXPORT gzopen(const char *path, const char *mode); + + Open the gzip (.gz) file at path for reading and decompressing, or + compressing and writing. The mode parameter is as in fopen ("rb" or "wb") + but can also include a compression level ("wb9") or a strategy: 'f' for + filtered data as in "wb6f", 'h' for Huffman-only compression as in "wb1h", + 'R' for run-length encoding as in "wb1R", or 'F' for fixed code compression + as in "wb9F". (See the description of deflateInit2 for more information + about the strategy parameter.) 
'T' will request transparent writing or + appending with no compression and not using the gzip format. + + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. + + gzopen can be used to read a file which is not in gzip format; in this + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. + + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. +*/ + +ZEXTERN gzFile ZEXPORT gzdopen(int fd, const char *mode); +/* + Associate a gzFile with the file descriptor fd. File descriptors are + obtained from calls like open, dup, creat, pipe or fileno (if the file has + been previously opened with fopen). The mode parameter is as in gzopen. + + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). +*/ + +ZEXTERN int ZEXPORT gzbuffer(gzFile file, unsigned size); +/* + Set the internal buffer size used by this library's functions for file to + size. The default buffer size is 8192 bytes. This function must be called + after gzopen() or gzdopen(), and before any other calls that read or write + the file. The buffer memory allocation is always deferred to the first read + or write. Three times that size in buffer space is allocated. A larger + buffer size of, for example, 64K or 128K bytes will noticeably increase the + speed of decompression (reading). + + The new buffer size also affects the maximum length for gzprintf(). 
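+
+   For example (the file name and the 128K figure are arbitrary choices for
+   this sketch, not recommendations):
+
+      gzFile file = gzopen("big.gz", "rb");     // hypothetical input file
+      if (file != NULL)
+          gzbuffer(file, 131072);               // 128K, before the first read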
+ + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. +*/ + +ZEXTERN int ZEXPORT gzsetparams(gzFile file, int level, int strategy); +/* + Dynamically update the compression level and strategy for file. See the + description of deflateInit2 for the meaning of these parameters. Previously + provided data is flushed before applying the parameter changes. + + gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not + opened for writing, Z_ERRNO if there is an error writing the flushed data, + or Z_MEM_ERROR if there is a memory allocation error. +*/ + +ZEXTERN int ZEXPORT gzread(gzFile file, voidp buf, unsigned len); +/* + Read and decompress up to len uncompressed bytes from file into buf. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. + + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). + If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). + + gzread can be used to read a gzip file that is being concurrently written. + Upon reaching the end of the input, gzread will return with the available + data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then + gzclearerr can be used to clear the end of file indicator in order to permit + gzread to be tried again. Z_OK indicates that a gzip stream was completed + on the last gzread. Z_BUF_ERROR indicates that the input file ended in the + middle of a gzip stream. Note that gzread does not return -1 in the event + of an incomplete gzip stream. This error is deferred until gzclose(), which + will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip + stream. Alternatively, gzerror can be used before gzclose to detect this + case. + + gzread returns the number of uncompressed bytes actually read, less than + len for end of file, or -1 for error. If len is too large to fit in an int, + then nothing is read, -1 is returned, and the error state is set to + Z_STREAM_ERROR. +*/ + +ZEXTERN z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems, + gzFile file); +/* + Read and decompress up to nitems items of size size from file into buf, + otherwise operating as gzread() does. This duplicates the interface of + stdio's fread(), with size_t request and return types. If the library + defines size_t, then z_size_t is identical to size_t. If not, then z_size_t + is an unsigned integer type that can contain a pointer. + + gzfread() returns the number of full items read of size size, or zero if + the end of the file was reached and a full item could not be read, or if + there was an error. gzerror() must be consulted if zero is returned in + order to determine if there was an error. If the multiplication of size and + nitems overflows, i.e. the product does not fit in a z_size_t, then nothing + is read, zero is returned, and the error state is set to Z_STREAM_ERROR. + + In the event that the end of file is reached and only a partial item is + available at the end, i.e. the remaining uncompressed data length is not a + multiple of size, then the final partial item is nevertheless read into buf + and the end-of-file flag is set. The length of the partial item read is not + provided, but could be inferred from the result of gztell(). 
This behavior + is the same as the behavior of fread() implementations in common libraries, + but it prevents the direct use of gzfread() to read a concurrently written + file, resetting and retrying on end-of-file, when size is not 1. +*/ + +ZEXTERN int ZEXPORT gzwrite(gzFile file, voidpc buf, unsigned len); +/* + Compress and write the len uncompressed bytes at buf to file. gzwrite + returns the number of uncompressed bytes written or 0 in case of error. +*/ + +ZEXTERN z_size_t ZEXPORT gzfwrite(voidpc buf, z_size_t size, + z_size_t nitems, gzFile file); +/* + Compress and write nitems items of size size from buf to file, duplicating + the interface of stdio's fwrite(), with size_t request and return types. If + the library defines size_t, then z_size_t is identical to size_t. If not, + then z_size_t is an unsigned integer type that can contain a pointer. + + gzfwrite() returns the number of full items written of size size, or zero + if there was an error. If the multiplication of size and nitems overflows, + i.e. the product does not fit in a z_size_t, then nothing is written, zero + is returned, and the error state is set to Z_STREAM_ERROR. +*/ + +ZEXTERN int ZEXPORTVA gzprintf(gzFile file, const char *format, ...); +/* + Convert, format, compress, and write the arguments (...) to file under + control of the string format, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or a negative zlib error code in case + of error. The number of uncompressed bytes written is limited to 8191, or + one less than the buffer size given to gzbuffer(). The caller should assure + that this limit is not exceeded. If it is exceeded, then gzprintf() will + return an error (0) with nothing written. In this case, there may also be a + buffer overflow with unpredictable consequences, which is possible only if + zlib was compiled with the insecure functions sprintf() or vsprintf(), + because the secure snprintf() or vsnprintf() functions were not available. + This can be determined using zlibCompileFlags(). +*/ + +ZEXTERN int ZEXPORT gzputs(gzFile file, const char *s); +/* + Compress and write the given null-terminated string s to file, excluding + the terminating null character. + + gzputs returns the number of characters written, or -1 in case of error. +*/ + +ZEXTERN char * ZEXPORT gzgets(gzFile file, char *buf, int len); +/* + Read and decompress bytes from file into buf, until len-1 characters are + read, or until a newline character is read and transferred to buf, or an + end-of-file condition is encountered. If any characters are read or if len + is one, the string is terminated with a null character. If no characters + are read due to an end-of-file or len is less than one, then the buffer is + left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. +*/ + +ZEXTERN int ZEXPORT gzputc(gzFile file, int c); +/* + Compress and write c, converted to an unsigned char, into file. gzputc + returns the value that was written, or -1 in case of error. +*/ + +ZEXTERN int ZEXPORT gzgetc(gzFile file); +/* + Read and decompress one byte from file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. 
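+
+   For illustration (byte-at-a-time reading is rarely the fastest approach,
+   but it shows the -1 convention; file is assumed to have been opened for
+   reading and process_byte() is a hypothetical consumer):
+
+      int c;
+      while ((c = gzgetc(file)) != -1)
+          process_byte(c);          // hypothetical
+      // -1 means end of file or error; gzerror() distinguishes the two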
+*/ + +ZEXTERN int ZEXPORT gzungetc(int c, gzFile file); +/* + Push c back onto the stream for file to be read as the first character on + the next read. At least one character of push-back is always allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). +*/ + +ZEXTERN int ZEXPORT gzflush(gzFile file, int flush); +/* + Flush all pending output to file. The parameter flush is as in the + deflate() function. The return value is the zlib error number (see function + gzerror below). gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. gzread() is able to read such + concatenated gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzseek(gzFile file, + z_off_t offset, int whence); + + Set the starting position to offset relative to whence for the next gzread + or gzwrite on file. The offset represents a number of bytes in the + uncompressed data stream. The whence parameter is defined as in lseek(2); + the value SEEK_END is not supported. + + If the file is opened for reading, this function is emulated but can be + extremely slow. If the file is opened for writing, only forward seeks are + supported; gzseek then compresses a sequence of zeroes up to the new + starting position. + + gzseek returns the resulting offset location as measured in bytes from + the beginning of the uncompressed stream, or -1 in case of error, in + particular if the file is opened for writing and the new starting position + would be before the current position. +*/ + +ZEXTERN int ZEXPORT gzrewind(gzFile file); +/* + Rewind file. This function is supported only for reading. + + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET). +*/ + +/* +ZEXTERN z_off_t ZEXPORT gztell(gzFile file); + + Return the starting position for the next gzread or gzwrite on file. + This position represents a number of bytes in the uncompressed data stream, + and is zero when starting, even if appending or reading a gzip stream from + the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzoffset(gzFile file); + + Return the current compressed (actual) read or write offset of file. This + offset includes the count of bytes that precede the gzip stream, for example + when appending or when using gzdopen() for reading. When reading, the + offset does not include as yet unused buffered input. This information can + be used for a progress indicator. On error, gzoffset() returns -1. +*/ + +ZEXTERN int ZEXPORT gzeof(gzFile file); +/* + Return true (1) if the end-of-file indicator for file has been set while + reading, false (0) otherwise. Note that the end-of-file indicator is set + only if the read tried to go past the end of the input, but came up short. 
+ Therefore, just like feof(), gzeof() may return false even if there is no + more data to read, in the event that the last read request was for the exact + number of bytes remaining in the input file. This will happen if the input + file size is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. +*/ + +ZEXTERN int ZEXPORT gzdirect(gzFile file); +/* + Return true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). + + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: + gzdirect() is not needed when writing. Transparent writing must be + explicitly requested, so the application already knows the answer. When + linking statically, using gzdirect() will include all of the zlib code for + gzip file reading and decompression, which may not be desired.) +*/ + +ZEXTERN int ZEXPORT gzclose(gzFile file); +/* + Flush all pending output for file, if necessary, close file and + deallocate the (de)compression state. Note that once file is closed, you + cannot call gzerror with file, since its structures have been deallocated. + gzclose must not be called more than once on the same file, just as free + must not be called more than once on the same allocation. + + gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a + file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the + last read ended in the middle of a gzip stream, or Z_OK on success. +*/ + +ZEXTERN int ZEXPORT gzclose_r(gzFile file); +ZEXTERN int ZEXPORT gzclose_w(gzFile file); +/* + Same as gzclose(), but gzclose_r() is only for use when reading, and + gzclose_w() is only for use when writing or appending. The advantage to + using these instead of gzclose() is that they avoid linking in zlib + compression or decompression code that is not used when only reading or only + writing respectively. If gzclose() is used, then both compression and + decompression code will be included the application when linking to a static + zlib library. +*/ + +ZEXTERN const char * ZEXPORT gzerror(gzFile file, int *errnum); +/* + Return the error message for the last error which occurred on file. + errnum is set to zlib error number. If an error occurred in the file system + and not in the compression library, errnum is set to Z_ERRNO and the + application may consult errno to get the exact error code. + + The application must not modify the returned string. Future calls to + this function may invalidate the previously returned string. If file is + closed, then the string previously returned by gzerror will no longer be + available. + + gzerror() should be used to distinguish errors from end-of-file for those + functions above that do not distinguish those cases in their return values. +*/ + +ZEXTERN void ZEXPORT gzclearerr(gzFile file); +/* + Clear the error and end-of-file flags for file. 
This is analogous to the + clearerr() function in stdio. This is useful for continuing to read a gzip + file that is being written concurrently. +*/ + +#endif /* !Z_SOLO */ + + /* checksum functions */ + +/* + These functions are not related to compression but are exported + anyway because they might be useful in applications using the compression + library. +*/ + +ZEXTERN uLong ZEXPORT adler32(uLong adler, const Bytef *buf, uInt len); +/* + Update a running Adler-32 checksum with the bytes buf[0..len-1] and + return the updated checksum. An Adler-32 value is in the range of a 32-bit + unsigned integer. If buf is Z_NULL, this function returns the required + initial value for the checksum. + + An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed + much faster. + + Usage example: + + uLong adler = adler32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + adler = adler32(adler, buffer, length); + } + if (adler != original_adler) error(); +*/ + +ZEXTERN uLong ZEXPORT adler32_z(uLong adler, const Bytef *buf, + z_size_t len); +/* + Same as adler32(), but with a size_t length. +*/ + +/* +ZEXTERN uLong ZEXPORT adler32_combine(uLong adler1, uLong adler2, + z_off_t len2); + + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 + and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for + each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. +*/ + +ZEXTERN uLong ZEXPORT crc32(uLong crc, const Bytef *buf, uInt len); +/* + Update a running CRC-32 with the bytes buf[0..len-1] and return the + updated CRC-32. A CRC-32 value is in the range of a 32-bit unsigned integer. + If buf is Z_NULL, this function returns the required initial value for the + crc. Pre- and post-conditioning (one's complement) is performed within this + function so it shouldn't be done by the application. + + Usage example: + + uLong crc = crc32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + crc = crc32(crc, buffer, length); + } + if (crc != original_crc) error(); +*/ + +ZEXTERN uLong ZEXPORT crc32_z(uLong crc, const Bytef *buf, + z_size_t len); +/* + Same as crc32(), but with a size_t length. +*/ + +/* +ZEXTERN uLong ZEXPORT crc32_combine(uLong crc1, uLong crc2, z_off_t len2); + + Combine two CRC-32 check values into one. For two sequences of bytes, + seq1 and seq2 with lengths len1 and len2, CRC-32 check values were + calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 + check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and + len2. len2 must be non-negative. +*/ + +/* +ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t len2); + + Return the operator corresponding to length len2, to be used with + crc32_combine_op(). len2 must be non-negative. +*/ + +ZEXTERN uLong ZEXPORT crc32_combine_op(uLong crc1, uLong crc2, uLong op); +/* + Give the same result as crc32_combine(), using op in place of len2. op is + is generated from len2 by crc32_combine_gen(). This will be faster than + crc32_combine() if the generated op is used more than once. 
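     As an illustrative sketch (not from the zlib documentation; part1/part2
   and len1/len2 stand for two application buffers and their byte lengths),
   the CRC-32 of a message split in two can be obtained without rescanning
   the data:

     uLong crc1 = crc32(crc32(0L, Z_NULL, 0), part1, len1);
     uLong crc2 = crc32(crc32(0L, Z_NULL, 0), part2, len2);
     uLong whole = crc32_combine(crc1, crc2, len2);    // CRC of part1 followed by part2

     uLong op = crc32_combine_gen(len2);               // reusable while len2 stays fixed
     uLong whole2 = crc32_combine_op(crc1, crc2, op);  // same value as whole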
+*/ + + + /* various hacks, don't look :) */ + +/* deflateInit and inflateInit are macros to allow checking the zlib version + * and the compiler's view of z_stream: + */ +ZEXTERN int ZEXPORT deflateInit_(z_streamp strm, int level, + const char *version, int stream_size); +ZEXTERN int ZEXPORT inflateInit_(z_streamp strm, + const char *version, int stream_size); +ZEXTERN int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, + int windowBits, int memLevel, + int strategy, const char *version, + int stream_size); +ZEXTERN int ZEXPORT inflateInit2_(z_streamp strm, int windowBits, + const char *version, int stream_size); +ZEXTERN int ZEXPORT inflateBackInit_(z_streamp strm, int windowBits, + unsigned char FAR *window, + const char *version, + int stream_size); +#ifdef Z_PREFIX_SET +# define z_deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +# define z_inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + ZLIB_VERSION, (int)sizeof(z_stream)) +#else +# define deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +# define inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +# define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +# define inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +# define inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + ZLIB_VERSION, (int)sizeof(z_stream)) +#endif + +#ifndef Z_SOLO + +/* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ +struct gzFile_s { + unsigned have; + unsigned char *next; + z_off64_t pos; +}; +ZEXTERN int ZEXPORT gzgetc_(gzFile file); /* backward compatibility */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +# define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#else +# define gzgetc(g) \ + ((g)->have ? 
((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#endif + +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64(const char *, const char *); + ZEXTERN z_off64_t ZEXPORT gzseek64(gzFile, z_off64_t, int); + ZEXTERN z_off64_t ZEXPORT gztell64(gzFile); + ZEXTERN z_off64_t ZEXPORT gzoffset64(gzFile); + ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off64_t); + ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off64_t); + ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off64_t); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +# ifdef Z_PREFIX_SET +# define z_gzopen z_gzopen64 +# define z_gzseek z_gzseek64 +# define z_gztell z_gztell64 +# define z_gzoffset z_gzoffset64 +# define z_adler32_combine z_adler32_combine64 +# define z_crc32_combine z_crc32_combine64 +# define z_crc32_combine_gen z_crc32_combine_gen64 +# else +# define gzopen gzopen64 +# define gzseek gzseek64 +# define gztell gztell64 +# define gzoffset gzoffset64 +# define adler32_combine adler32_combine64 +# define crc32_combine crc32_combine64 +# define crc32_combine_gen crc32_combine_gen64 +# endif +# ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64(const char *, const char *); + ZEXTERN z_off_t ZEXPORT gzseek64(gzFile, z_off_t, int); + ZEXTERN z_off_t ZEXPORT gztell64(gzFile); + ZEXTERN z_off_t ZEXPORT gzoffset64(gzFile); + ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off_t); +# endif +#else + ZEXTERN gzFile ZEXPORT gzopen(const char *, const char *); + ZEXTERN z_off_t ZEXPORT gzseek(gzFile, z_off_t, int); + ZEXTERN z_off_t ZEXPORT gztell(gzFile); + ZEXTERN z_off_t ZEXPORT gzoffset(gzFile); + ZEXTERN uLong ZEXPORT adler32_combine(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t); +#endif + +#else /* Z_SOLO */ + + ZEXTERN uLong ZEXPORT adler32_combine(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t); + +#endif /* !Z_SOLO */ + +/* undocumented functions */ +ZEXTERN const char * ZEXPORT zError(int); +ZEXTERN int ZEXPORT inflateSyncPoint(z_streamp); +ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table(void); +ZEXTERN int ZEXPORT inflateUndermine(z_streamp, int); +ZEXTERN int ZEXPORT inflateValidate(z_streamp, int); +ZEXTERN unsigned long ZEXPORT inflateCodesUsed(z_streamp); +ZEXTERN int ZEXPORT inflateResetKeep(z_streamp); +ZEXTERN int ZEXPORT deflateResetKeep(z_streamp); +#if defined(_WIN32) && !defined(Z_SOLO) +ZEXTERN gzFile ZEXPORT gzopen_w(const wchar_t *path, + const char *mode); +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +ZEXTERN int ZEXPORTVA gzvprintf(gzFile file, + const char *format, + va_list va); +# endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ZLIB_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2776d08fac824f9f1341b69918ed1b023e626a69 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/hash.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/hash.h new file mode 100644 index 0000000000000000000000000000000000000000..f4af09ee5f94fa856cb650d218c1fbe1e84d10d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/hash.h @@ -0,0 +1,232 @@ +/* + * Summary: Chained hash tables + * Description: This module implements the hash table support used in + * various places in the library. + * + * Copy: See Copyright for the status of this software. + * + * Author: Bjorn Reese + */ + +#ifndef __XML_HASH_H__ +#define __XML_HASH_H__ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The hash table. + */ +typedef struct _xmlHashTable xmlHashTable; +typedef xmlHashTable *xmlHashTablePtr; + +/* + * Recent version of gcc produce a warning when a function pointer is assigned + * to an object pointer, or vice versa. The following macro is a dirty hack + * to allow suppression of the warning. If your architecture has function + * pointers which are a different size than a void pointer, there may be some + * serious trouble within the library. + */ +/** + * XML_CAST_FPTR: + * @fptr: pointer to a function + * + * Macro to do a casting from an object pointer to a + * function pointer without encountering a warning from + * gcc + * + * #define XML_CAST_FPTR(fptr) (*(void **)(&fptr)) + * This macro violated ISO C aliasing rules (gcc4 on s390 broke) + * so it is disabled now + */ + +#define XML_CAST_FPTR(fptr) fptr + +/* + * function types: + */ +/** + * xmlHashDeallocator: + * @payload: the data in the hash + * @name: the name associated + * + * Callback to free data from a hash. + */ +typedef void (*xmlHashDeallocator)(void *payload, const xmlChar *name); +/** + * xmlHashCopier: + * @payload: the data in the hash + * @name: the name associated + * + * Callback to copy data from a hash. + * + * Returns a copy of the data or NULL in case of error. + */ +typedef void *(*xmlHashCopier)(void *payload, const xmlChar *name); +/** + * xmlHashScanner: + * @payload: the data in the hash + * @data: extra scanner data + * @name: the name associated + * + * Callback when scanning data in a hash with the simple scanner. + */ +typedef void (*xmlHashScanner)(void *payload, void *data, const xmlChar *name); +/** + * xmlHashScannerFull: + * @payload: the data in the hash + * @data: extra scanner data + * @name: the name associated + * @name2: the second name associated + * @name3: the third name associated + * + * Callback when scanning data in a hash with the full scanner. + */ +typedef void (*xmlHashScannerFull)(void *payload, void *data, + const xmlChar *name, const xmlChar *name2, + const xmlChar *name3); + +/* + * Constructor and destructor. + */ +XMLPUBFUN xmlHashTablePtr + xmlHashCreate (int size); +XMLPUBFUN xmlHashTablePtr + xmlHashCreateDict (int size, + xmlDictPtr dict); +XMLPUBFUN void + xmlHashFree (xmlHashTablePtr hash, + xmlHashDeallocator dealloc); +XMLPUBFUN void + xmlHashDefaultDeallocator(void *entry, + const xmlChar *name); + +/* + * Add a new entry to the hash table. 
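 * For instance, a typical create/add/lookup/free cycle looks roughly like
 * the following sketch (illustrative only, not from the libxml2
 * documentation; it assumes <stdio.h> and the usual libxml2 headers are
 * included):
 *
 *     xmlHashTablePtr h = xmlHashCreate(0);          // 0 picks a default table size
 *     xmlHashAddEntry(h, BAD_CAST "answer", xmlMemStrdup("42"));
 *     char *val = (char *) xmlHashLookup(h, BAD_CAST "answer");
 *     if (val != NULL)
 *         printf("answer = %s\n", val);
 *     xmlHashFree(h, xmlHashDefaultDeallocator);     // frees each payload with xmlFree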
+ */ +XMLPUBFUN int + xmlHashAddEntry (xmlHashTablePtr hash, + const xmlChar *name, + void *userdata); +XMLPUBFUN int + xmlHashUpdateEntry (xmlHashTablePtr hash, + const xmlChar *name, + void *userdata, + xmlHashDeallocator dealloc); +XMLPUBFUN int + xmlHashAddEntry2 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + void *userdata); +XMLPUBFUN int + xmlHashUpdateEntry2 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + void *userdata, + xmlHashDeallocator dealloc); +XMLPUBFUN int + xmlHashAddEntry3 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + const xmlChar *name3, + void *userdata); +XMLPUBFUN int + xmlHashUpdateEntry3 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + const xmlChar *name3, + void *userdata, + xmlHashDeallocator dealloc); + +/* + * Remove an entry from the hash table. + */ +XMLPUBFUN int + xmlHashRemoveEntry (xmlHashTablePtr hash, + const xmlChar *name, + xmlHashDeallocator dealloc); +XMLPUBFUN int + xmlHashRemoveEntry2 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + xmlHashDeallocator dealloc); +XMLPUBFUN int + xmlHashRemoveEntry3 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + const xmlChar *name3, + xmlHashDeallocator dealloc); + +/* + * Retrieve the payload. + */ +XMLPUBFUN void * + xmlHashLookup (xmlHashTablePtr hash, + const xmlChar *name); +XMLPUBFUN void * + xmlHashLookup2 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2); +XMLPUBFUN void * + xmlHashLookup3 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + const xmlChar *name3); +XMLPUBFUN void * + xmlHashQLookup (xmlHashTablePtr hash, + const xmlChar *prefix, + const xmlChar *name); +XMLPUBFUN void * + xmlHashQLookup2 (xmlHashTablePtr hash, + const xmlChar *prefix, + const xmlChar *name, + const xmlChar *prefix2, + const xmlChar *name2); +XMLPUBFUN void * + xmlHashQLookup3 (xmlHashTablePtr hash, + const xmlChar *prefix, + const xmlChar *name, + const xmlChar *prefix2, + const xmlChar *name2, + const xmlChar *prefix3, + const xmlChar *name3); + +/* + * Helpers. + */ +XMLPUBFUN xmlHashTablePtr + xmlHashCopy (xmlHashTablePtr hash, + xmlHashCopier copy); +XMLPUBFUN int + xmlHashSize (xmlHashTablePtr hash); +XMLPUBFUN void + xmlHashScan (xmlHashTablePtr hash, + xmlHashScanner scan, + void *data); +XMLPUBFUN void + xmlHashScan3 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + const xmlChar *name3, + xmlHashScanner scan, + void *data); +XMLPUBFUN void + xmlHashScanFull (xmlHashTablePtr hash, + xmlHashScannerFull scan, + void *data); +XMLPUBFUN void + xmlHashScanFull3 (xmlHashTablePtr hash, + const xmlChar *name, + const xmlChar *name2, + const xmlChar *name3, + xmlHashScannerFull scan, + void *data); +#ifdef __cplusplus +} +#endif +#endif /* ! __XML_HASH_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/xmlversion.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/xmlversion.h new file mode 100644 index 0000000000000000000000000000000000000000..a2f9fe607475fdc9a9e0fbfdefdcaa2f4a11580b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/xmlversion.h @@ -0,0 +1,511 @@ +/* + * Summary: compile-time version information + * Description: compile-time version information for the XML library + * + * Copy: See Copyright for the status of this software. 
+ * + * Author: Daniel Veillard + */ + +#ifndef __XML_VERSION_H__ +#define __XML_VERSION_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * use those to be sure nothing nasty will happen if + * your library and includes mismatch + */ +#ifndef LIBXML2_COMPILING_MSCCDEF +XMLPUBFUN void xmlCheckVersion(int version); +#endif /* LIBXML2_COMPILING_MSCCDEF */ + +/** + * LIBXML_DOTTED_VERSION: + * + * the version string like "1.2.3" + */ +#define LIBXML_DOTTED_VERSION "2.12.6" + +/** + * LIBXML_VERSION: + * + * the version number: 1.2.3 value is 10203 + */ +#define LIBXML_VERSION 21206 + +/** + * LIBXML_VERSION_STRING: + * + * the version number string, 1.2.3 value is "10203" + */ +#define LIBXML_VERSION_STRING "21206" + +/** + * LIBXML_VERSION_EXTRA: + * + * extra version information, used to show a git commit description + */ +#define LIBXML_VERSION_EXTRA "" + +/** + * LIBXML_TEST_VERSION: + * + * Macro to check that the libxml version in use is compatible with + * the version the software has been compiled against + */ +#define LIBXML_TEST_VERSION xmlCheckVersion(21206); + +#ifndef VMS +#if 0 +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO +#else +/** + * WITHOUT_TRIO: + * + * defined if the trio support should not be configured in + */ +#define WITHOUT_TRIO +#endif +#else /* VMS */ +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO 1 +#endif /* VMS */ + +/** + * LIBXML_THREAD_ENABLED: + * + * Whether the thread support is configured in + */ +#if 1 +#define LIBXML_THREAD_ENABLED +#endif + +/** + * LIBXML_THREAD_ALLOC_ENABLED: + * + * Whether the allocation hooks are per-thread + */ +#if 0 +#define LIBXML_THREAD_ALLOC_ENABLED +#endif + +/** + * LIBXML_TREE_ENABLED: + * + * Whether the DOM like tree manipulation API support is configured in + */ +#if 1 +#define LIBXML_TREE_ENABLED +#endif + +/** + * LIBXML_OUTPUT_ENABLED: + * + * Whether the serialization/saving support is configured in + */ +#if 1 +#define LIBXML_OUTPUT_ENABLED +#endif + +/** + * LIBXML_PUSH_ENABLED: + * + * Whether the push parsing interfaces are configured in + */ +#if 1 +#define LIBXML_PUSH_ENABLED +#endif + +/** + * LIBXML_READER_ENABLED: + * + * Whether the xmlReader parsing interface is configured in + */ +#if 1 +#define LIBXML_READER_ENABLED +#endif + +/** + * LIBXML_PATTERN_ENABLED: + * + * Whether the xmlPattern node selection interface is configured in + */ +#if 1 +#define LIBXML_PATTERN_ENABLED +#endif + +/** + * LIBXML_WRITER_ENABLED: + * + * Whether the xmlWriter saving interface is configured in + */ +#if 1 +#define LIBXML_WRITER_ENABLED +#endif + +/** + * LIBXML_SAX1_ENABLED: + * + * Whether the older SAX1 interface is configured in + */ +#if 1 +#define LIBXML_SAX1_ENABLED +#endif + +/** + * LIBXML_FTP_ENABLED: + * + * Whether the FTP support is configured in + */ +#if 0 +#define LIBXML_FTP_ENABLED +#endif + +/** + * LIBXML_HTTP_ENABLED: + * + * Whether the HTTP support is configured in + */ +#if 1 +#define LIBXML_HTTP_ENABLED +#endif + +/** + * LIBXML_VALID_ENABLED: + * + * Whether the DTD validation support is configured in + */ +#if 1 +#define LIBXML_VALID_ENABLED +#endif + +/** + * LIBXML_HTML_ENABLED: + * + * Whether the HTML support is configured in + */ +#if 1 +#define LIBXML_HTML_ENABLED +#endif + +/** + * LIBXML_LEGACY_ENABLED: + * + * Whether the deprecated APIs are compiled in for compatibility + */ +#if 0 +#define LIBXML_LEGACY_ENABLED +#endif + +/** + * LIBXML_C14N_ENABLED: 
+ * + * Whether the Canonicalization support is configured in + */ +#if 1 +#define LIBXML_C14N_ENABLED +#endif + +/** + * LIBXML_CATALOG_ENABLED: + * + * Whether the Catalog support is configured in + */ +#if 1 +#define LIBXML_CATALOG_ENABLED +#endif + +/** + * LIBXML_XPATH_ENABLED: + * + * Whether XPath is configured in + */ +#if 1 +#define LIBXML_XPATH_ENABLED +#endif + +/** + * LIBXML_XPTR_ENABLED: + * + * Whether XPointer is configured in + */ +#if 1 +#define LIBXML_XPTR_ENABLED +#endif + +/** + * LIBXML_XPTR_LOCS_ENABLED: + * + * Whether support for XPointer locations is configured in + */ +#if 0 +#define LIBXML_XPTR_LOCS_ENABLED +#endif + +/** + * LIBXML_XINCLUDE_ENABLED: + * + * Whether XInclude is configured in + */ +#if 1 +#define LIBXML_XINCLUDE_ENABLED +#endif + +/** + * LIBXML_ICONV_ENABLED: + * + * Whether iconv support is available + */ +#if 1 +#define LIBXML_ICONV_ENABLED +#endif + +/** + * LIBXML_ICU_ENABLED: + * + * Whether icu support is available + */ +#if 0 +#define LIBXML_ICU_ENABLED +#endif + +/** + * LIBXML_ISO8859X_ENABLED: + * + * Whether ISO-8859-* support is made available in case iconv is not + */ +#if 1 +#define LIBXML_ISO8859X_ENABLED +#endif + +/** + * LIBXML_DEBUG_ENABLED: + * + * Whether Debugging module is configured in + */ +#if 1 +#define LIBXML_DEBUG_ENABLED +#endif + +/** + * DEBUG_MEMORY_LOCATION: + * + * Whether the memory debugging is configured in + */ +#if 0 +#define DEBUG_MEMORY_LOCATION +#endif + +/** + * LIBXML_DEBUG_RUNTIME: + * + * Removed + */ +#if 0 +#define LIBXML_DEBUG_RUNTIME +#endif + +/** + * LIBXML_UNICODE_ENABLED: + * + * Whether the Unicode related interfaces are compiled in + */ +#if 1 +#define LIBXML_UNICODE_ENABLED +#endif + +/** + * LIBXML_REGEXP_ENABLED: + * + * Whether the regular expressions interfaces are compiled in + */ +#if 1 +#define LIBXML_REGEXP_ENABLED +#endif + +/** + * LIBXML_AUTOMATA_ENABLED: + * + * Whether the automata interfaces are compiled in + */ +#if 1 +#define LIBXML_AUTOMATA_ENABLED +#endif + +/** + * LIBXML_EXPR_ENABLED: + * + * Whether the formal expressions interfaces are compiled in + * + * This code is unused and disabled unconditionally for now. 
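 *
 * Like the other LIBXML_*_ENABLED macros in this header, feature macros are
 * meant to be tested by client code with the preprocessor rather than
 * assumed; a minimal sketch (illustrative only, using LIBXML_XPATH_ENABLED
 * as the example feature):
 *
 *     #include <libxml/xmlversion.h>
 *     #include <libxml/parser.h>
 *
 *     int main(void) {
 *         LIBXML_TEST_VERSION          // check headers against the linked library
 *     #ifdef LIBXML_XPATH_ENABLED
 *         // XPath-dependent code can be compiled here
 *     #endif
 *         xmlCleanupParser();
 *         return 0;
 *     }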
+ */ +#if 0 +#define LIBXML_EXPR_ENABLED +#endif + +/** + * LIBXML_SCHEMAS_ENABLED: + * + * Whether the Schemas validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMAS_ENABLED +#endif + +/** + * LIBXML_SCHEMATRON_ENABLED: + * + * Whether the Schematron validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMATRON_ENABLED +#endif + +/** + * LIBXML_MODULES_ENABLED: + * + * Whether the module interfaces are compiled in + */ +#if 1 +#define LIBXML_MODULES_ENABLED +/** + * LIBXML_MODULE_EXTENSION: + * + * the string suffix used by dynamic modules (usually shared libraries) + */ +#define LIBXML_MODULE_EXTENSION ".so" +#endif + +/** + * LIBXML_ZLIB_ENABLED: + * + * Whether the Zlib support is compiled in + */ +#if 1 +#define LIBXML_ZLIB_ENABLED +#endif + +/** + * LIBXML_LZMA_ENABLED: + * + * Whether the Lzma support is compiled in + */ +#if 0 +#define LIBXML_LZMA_ENABLED +#endif + +#ifdef __GNUC__ +/** DOC_DISABLE */ + +#ifndef ATTRIBUTE_UNUSED +# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7))) +# define ATTRIBUTE_UNUSED __attribute__((unused)) +# else +# define ATTRIBUTE_UNUSED +# endif +#endif + +#ifndef LIBXML_ATTR_ALLOC_SIZE +# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)))) +# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x))) +# else +# define LIBXML_ATTR_ALLOC_SIZE(x) +# endif +#else +# define LIBXML_ATTR_ALLOC_SIZE(x) +#endif + +#ifndef LIBXML_ATTR_FORMAT +# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3))) +# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args))) +# else +# define LIBXML_ATTR_FORMAT(fmt,args) +# endif +#else +# define LIBXML_ATTR_FORMAT(fmt,args) +#endif + +#ifndef XML_DEPRECATED +# if defined (IN_LIBXML) || (__GNUC__ * 100 + __GNUC_MINOR__ < 301) +# define XML_DEPRECATED +/* Available since at least GCC 3.1 */ +# else +# define XML_DEPRECATED __attribute__((deprecated)) +# endif +#endif + +#if defined(__clang__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406) + #if defined(__clang__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 800) + #define XML_IGNORE_FPTR_CAST_WARNINGS \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wpedantic\"") \ + _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"") + #else + #define XML_IGNORE_FPTR_CAST_WARNINGS \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wpedantic\"") + #endif + #define XML_POP_WARNINGS \ + _Pragma("GCC diagnostic pop") +#else + #define XML_IGNORE_FPTR_CAST_WARNINGS + #define XML_POP_WARNINGS +#endif + +#else /* ! 
__GNUC__ */ +#define ATTRIBUTE_UNUSED +#define LIBXML_ATTR_ALLOC_SIZE(x) +#define LIBXML_ATTR_FORMAT(fmt,args) +#ifndef XML_DEPRECATED +# if defined (IN_LIBXML) || !defined (_MSC_VER) +# define XML_DEPRECATED +/* Available since Visual Studio 2005 */ +# elif defined (_MSC_VER) && (_MSC_VER >= 1400) +# define XML_DEPRECATED __declspec(deprecated) +# endif +#endif +#if defined (_MSC_VER) && (_MSC_VER >= 1400) +# define XML_IGNORE_FPTR_CAST_WARNINGS __pragma(warning(push)) +#else +# define XML_IGNORE_FPTR_CAST_WARNINGS +#endif +#ifndef XML_POP_WARNINGS +# if defined (_MSC_VER) && (_MSC_VER >= 1400) +# define XML_POP_WARNINGS __pragma(warning(pop)) +# else +# define XML_POP_WARNINGS +# endif +#endif +#endif /* __GNUC__ */ + +#define XML_NO_ATTR + +#ifdef LIBXML_THREAD_ENABLED + #define XML_DECLARE_GLOBAL(name, type, attrs) \ + attrs XMLPUBFUN type *__##name(void); + #define XML_GLOBAL_MACRO(name) (*__##name()) +#else + #define XML_DECLARE_GLOBAL(name, type, attrs) \ + attrs XMLPUBVAR type name; +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif + + diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/xpath.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/xpath.h new file mode 100644 index 0000000000000000000000000000000000000000..6dae0780d830c4725f05e6323f64f2fc1c33ad1b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxml/xpath.h @@ -0,0 +1,575 @@ +/* + * Summary: XML Path Language implementation + * Description: API for the XML Path Language implementation + * + * XML Path Language implementation + * XPath is a language for addressing parts of an XML document, + * designed to be used by both XSLT and XPointer + * http://www.w3.org/TR/xpath + * + * Implements + * W3C Recommendation 16 November 1999 + * http://www.w3.org/TR/1999/REC-xpath-19991116 + * + * Copy: See Copyright for the status of this software. + * + * Author: Daniel Veillard + */ + +#ifndef __XML_XPATH_H__ +#define __XML_XPATH_H__ + +#include + +#ifdef LIBXML_XPATH_ENABLED + +#include +#include +#include +#endif /* LIBXML_XPATH_ENABLED */ + +#if defined(LIBXML_XPATH_ENABLED) || defined(LIBXML_SCHEMAS_ENABLED) +#ifdef __cplusplus +extern "C" { +#endif +#endif /* LIBXML_XPATH_ENABLED or LIBXML_SCHEMAS_ENABLED */ + +#ifdef LIBXML_XPATH_ENABLED + +typedef struct _xmlXPathContext xmlXPathContext; +typedef xmlXPathContext *xmlXPathContextPtr; +typedef struct _xmlXPathParserContext xmlXPathParserContext; +typedef xmlXPathParserContext *xmlXPathParserContextPtr; + +/** + * The set of XPath error codes. + */ + +typedef enum { + XPATH_EXPRESSION_OK = 0, + XPATH_NUMBER_ERROR, + XPATH_UNFINISHED_LITERAL_ERROR, + XPATH_START_LITERAL_ERROR, + XPATH_VARIABLE_REF_ERROR, + XPATH_UNDEF_VARIABLE_ERROR, + XPATH_INVALID_PREDICATE_ERROR, + XPATH_EXPR_ERROR, + XPATH_UNCLOSED_ERROR, + XPATH_UNKNOWN_FUNC_ERROR, + XPATH_INVALID_OPERAND, + XPATH_INVALID_TYPE, + XPATH_INVALID_ARITY, + XPATH_INVALID_CTXT_SIZE, + XPATH_INVALID_CTXT_POSITION, + XPATH_MEMORY_ERROR, + XPTR_SYNTAX_ERROR, + XPTR_RESOURCE_ERROR, + XPTR_SUB_RESOURCE_ERROR, + XPATH_UNDEF_PREFIX_ERROR, + XPATH_ENCODING_ERROR, + XPATH_INVALID_CHAR_ERROR, + XPATH_INVALID_CTXT, + XPATH_STACK_ERROR, + XPATH_FORBID_VARIABLE_ERROR, + XPATH_OP_LIMIT_EXCEEDED, + XPATH_RECURSION_LIMIT_EXCEEDED +} xmlXPathError; + +/* + * A node-set (an unordered collection of nodes without duplicates). 
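 *
 * A typical way such a set is consumed after an evaluation (illustrative
 * sketch, not from the libxml2 documentation; doc is an already-parsed
 * xmlDocPtr and "//item" is a placeholder expression):
 *
 *     xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
 *     xmlXPathObjectPtr res = xmlXPathEvalExpression(BAD_CAST "//item", ctx);
 *     if (res != NULL && res->nodesetval != NULL) {
 *         int i;
 *         for (i = 0; i < xmlXPathNodeSetGetLength(res->nodesetval); i++) {
 *             xmlNodePtr cur = xmlXPathNodeSetItem(res->nodesetval, i);
 *             // process cur here
 *         }
 *     }
 *     xmlXPathFreeObject(res);
 *     xmlXPathFreeContext(ctx);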
+ */ +typedef struct _xmlNodeSet xmlNodeSet; +typedef xmlNodeSet *xmlNodeSetPtr; +struct _xmlNodeSet { + int nodeNr; /* number of nodes in the set */ + int nodeMax; /* size of the array as allocated */ + xmlNodePtr *nodeTab; /* array of nodes in no particular order */ + /* @@ with_ns to check whether namespace nodes should be looked at @@ */ +}; + +/* + * An expression is evaluated to yield an object, which + * has one of the following four basic types: + * - node-set + * - boolean + * - number + * - string + * + * @@ XPointer will add more types ! + */ + +typedef enum { + XPATH_UNDEFINED = 0, + XPATH_NODESET = 1, + XPATH_BOOLEAN = 2, + XPATH_NUMBER = 3, + XPATH_STRING = 4, +#ifdef LIBXML_XPTR_LOCS_ENABLED + XPATH_POINT = 5, + XPATH_RANGE = 6, + XPATH_LOCATIONSET = 7, +#endif + XPATH_USERS = 8, + XPATH_XSLT_TREE = 9 /* An XSLT value tree, non modifiable */ +} xmlXPathObjectType; + +#ifndef LIBXML_XPTR_LOCS_ENABLED +/** DOC_DISABLE */ +#define XPATH_POINT 5 +#define XPATH_RANGE 6 +#define XPATH_LOCATIONSET 7 +/** DOC_ENABLE */ +#endif + +typedef struct _xmlXPathObject xmlXPathObject; +typedef xmlXPathObject *xmlXPathObjectPtr; +struct _xmlXPathObject { + xmlXPathObjectType type; + xmlNodeSetPtr nodesetval; + int boolval; + double floatval; + xmlChar *stringval; + void *user; + int index; + void *user2; + int index2; +}; + +/** + * xmlXPathConvertFunc: + * @obj: an XPath object + * @type: the number of the target type + * + * A conversion function is associated to a type and used to cast + * the new type to primitive values. + * + * Returns -1 in case of error, 0 otherwise + */ +typedef int (*xmlXPathConvertFunc) (xmlXPathObjectPtr obj, int type); + +/* + * Extra type: a name and a conversion function. + */ + +typedef struct _xmlXPathType xmlXPathType; +typedef xmlXPathType *xmlXPathTypePtr; +struct _xmlXPathType { + const xmlChar *name; /* the type name */ + xmlXPathConvertFunc func; /* the conversion function */ +}; + +/* + * Extra variable: a name and a value. + */ + +typedef struct _xmlXPathVariable xmlXPathVariable; +typedef xmlXPathVariable *xmlXPathVariablePtr; +struct _xmlXPathVariable { + const xmlChar *name; /* the variable name */ + xmlXPathObjectPtr value; /* the value */ +}; + +/** + * xmlXPathEvalFunc: + * @ctxt: an XPath parser context + * @nargs: the number of arguments passed to the function + * + * An XPath evaluation function, the parameters are on the XPath context stack. + */ + +typedef void (*xmlXPathEvalFunc)(xmlXPathParserContextPtr ctxt, + int nargs); + +/* + * Extra function: a name and a evaluation function. + */ + +typedef struct _xmlXPathFunct xmlXPathFunct; +typedef xmlXPathFunct *xmlXPathFuncPtr; +struct _xmlXPathFunct { + const xmlChar *name; /* the function name */ + xmlXPathEvalFunc func; /* the evaluation function */ +}; + +/** + * xmlXPathAxisFunc: + * @ctxt: the XPath interpreter context + * @cur: the previous node being explored on that axis + * + * An axis traversal function. To traverse an axis, the engine calls + * the first time with cur == NULL and repeat until the function returns + * NULL indicating the end of the axis traversal. + * + * Returns the next node in that axis or NULL if at the end of the axis. + */ + +typedef xmlXPathObjectPtr (*xmlXPathAxisFunc) (xmlXPathParserContextPtr ctxt, + xmlXPathObjectPtr cur); + +/* + * Extra axis: a name and an axis function. 
+ */ + +typedef struct _xmlXPathAxis xmlXPathAxis; +typedef xmlXPathAxis *xmlXPathAxisPtr; +struct _xmlXPathAxis { + const xmlChar *name; /* the axis name */ + xmlXPathAxisFunc func; /* the search function */ +}; + +/** + * xmlXPathFunction: + * @ctxt: the XPath interprestation context + * @nargs: the number of arguments + * + * An XPath function. + * The arguments (if any) are popped out from the context stack + * and the result is pushed on the stack. + */ + +typedef void (*xmlXPathFunction) (xmlXPathParserContextPtr ctxt, int nargs); + +/* + * Function and Variable Lookup. + */ + +/** + * xmlXPathVariableLookupFunc: + * @ctxt: an XPath context + * @name: name of the variable + * @ns_uri: the namespace name hosting this variable + * + * Prototype for callbacks used to plug variable lookup in the XPath + * engine. + * + * Returns the XPath object value or NULL if not found. + */ +typedef xmlXPathObjectPtr (*xmlXPathVariableLookupFunc) (void *ctxt, + const xmlChar *name, + const xmlChar *ns_uri); + +/** + * xmlXPathFuncLookupFunc: + * @ctxt: an XPath context + * @name: name of the function + * @ns_uri: the namespace name hosting this function + * + * Prototype for callbacks used to plug function lookup in the XPath + * engine. + * + * Returns the XPath function or NULL if not found. + */ +typedef xmlXPathFunction (*xmlXPathFuncLookupFunc) (void *ctxt, + const xmlChar *name, + const xmlChar *ns_uri); + +/** + * xmlXPathFlags: + * Flags for XPath engine compilation and runtime + */ +/** + * XML_XPATH_CHECKNS: + * + * check namespaces at compilation + */ +#define XML_XPATH_CHECKNS (1<<0) +/** + * XML_XPATH_NOVAR: + * + * forbid variables in expression + */ +#define XML_XPATH_NOVAR (1<<1) + +/** + * xmlXPathContext: + * + * Expression evaluation occurs with respect to a context. + * he context consists of: + * - a node (the context node) + * - a node list (the context node list) + * - a set of variable bindings + * - a function library + * - the set of namespace declarations in scope for the expression + * Following the switch to hash tables, this need to be trimmed up at + * the next binary incompatible release. + * The node may be modified when the context is passed to libxml2 + * for an XPath evaluation so you may need to initialize it again + * before the next call. + */ + +struct _xmlXPathContext { + xmlDocPtr doc; /* The current document */ + xmlNodePtr node; /* The current node */ + + int nb_variables_unused; /* unused (hash table) */ + int max_variables_unused; /* unused (hash table) */ + xmlHashTablePtr varHash; /* Hash table of defined variables */ + + int nb_types; /* number of defined types */ + int max_types; /* max number of types */ + xmlXPathTypePtr types; /* Array of defined types */ + + int nb_funcs_unused; /* unused (hash table) */ + int max_funcs_unused; /* unused (hash table) */ + xmlHashTablePtr funcHash; /* Hash table of defined funcs */ + + int nb_axis; /* number of defined axis */ + int max_axis; /* max number of axis */ + xmlXPathAxisPtr axis; /* Array of defined axis */ + + /* the namespace nodes of the context node */ + xmlNsPtr *namespaces; /* Array of namespaces */ + int nsNr; /* number of namespace in scope */ + void *user; /* function to free */ + + /* extra variables */ + int contextSize; /* the context size */ + int proximityPosition; /* the proximity position */ + + /* extra stuff for XPointer */ + int xptr; /* is this an XPointer context? 
*/ + xmlNodePtr here; /* for here() */ + xmlNodePtr origin; /* for origin() */ + + /* the set of namespace declarations in scope for the expression */ + xmlHashTablePtr nsHash; /* The namespaces hash table */ + xmlXPathVariableLookupFunc varLookupFunc;/* variable lookup func */ + void *varLookupData; /* variable lookup data */ + + /* Possibility to link in an extra item */ + void *extra; /* needed for XSLT */ + + /* The function name and URI when calling a function */ + const xmlChar *function; + const xmlChar *functionURI; + + /* function lookup function and data */ + xmlXPathFuncLookupFunc funcLookupFunc;/* function lookup func */ + void *funcLookupData; /* function lookup data */ + + /* temporary namespace lists kept for walking the namespace axis */ + xmlNsPtr *tmpNsList; /* Array of namespaces */ + int tmpNsNr; /* number of namespaces in scope */ + + /* error reporting mechanism */ + void *userData; /* user specific data block */ + xmlStructuredErrorFunc error; /* the callback in case of errors */ + xmlError lastError; /* the last error */ + xmlNodePtr debugNode; /* the source node XSLT */ + + /* dictionary */ + xmlDictPtr dict; /* dictionary if any */ + + int flags; /* flags to control compilation */ + + /* Cache for reusal of XPath objects */ + void *cache; + + /* Resource limits */ + unsigned long opLimit; + unsigned long opCount; + int depth; +}; + +/* + * The structure of a compiled expression form is not public. + */ + +typedef struct _xmlXPathCompExpr xmlXPathCompExpr; +typedef xmlXPathCompExpr *xmlXPathCompExprPtr; + +/** + * xmlXPathParserContext: + * + * An XPath parser context. It contains pure parsing information, + * an xmlXPathContext, and the stack of objects. + */ +struct _xmlXPathParserContext { + const xmlChar *cur; /* the current char being parsed */ + const xmlChar *base; /* the full expression */ + + int error; /* error code */ + + xmlXPathContextPtr context; /* the evaluation context */ + xmlXPathObjectPtr value; /* the current value */ + int valueNr; /* number of values stacked */ + int valueMax; /* max number of values stacked */ + xmlXPathObjectPtr *valueTab; /* stack of values */ + + xmlXPathCompExprPtr comp; /* the precompiled expression */ + int xptr; /* it this an XPointer expression */ + xmlNodePtr ancestor; /* used for walking preceding axis */ + + int valueFrame; /* always zero for compatibility */ +}; + +/************************************************************************ + * * + * Public API * + * * + ************************************************************************/ + +/** + * Objects and Nodesets handling + */ + +XMLPUBVAR double xmlXPathNAN; +XMLPUBVAR double xmlXPathPINF; +XMLPUBVAR double xmlXPathNINF; + +/* These macros may later turn into functions */ +/** + * xmlXPathNodeSetGetLength: + * @ns: a node-set + * + * Implement a functionality similar to the DOM NodeList.length. + * + * Returns the number of nodes in the node-set. + */ +#define xmlXPathNodeSetGetLength(ns) ((ns) ? (ns)->nodeNr : 0) +/** + * xmlXPathNodeSetItem: + * @ns: a node-set + * @index: index of a node in the set + * + * Implements a functionality similar to the DOM NodeList.item(). + * + * Returns the xmlNodePtr at the given @index in @ns or NULL if + * @index is out of range (0 to length-1) + */ +#define xmlXPathNodeSetItem(ns, index) \ + ((((ns) != NULL) && \ + ((index) >= 0) && ((index) < (ns)->nodeNr)) ? \ + (ns)->nodeTab[(index)] \ + : NULL) +/** + * xmlXPathNodeSetIsEmpty: + * @ns: a node-set + * + * Checks whether @ns is empty or not. 
+ * + * Returns %TRUE if @ns is an empty node-set. + */ +#define xmlXPathNodeSetIsEmpty(ns) \ + (((ns) == NULL) || ((ns)->nodeNr == 0) || ((ns)->nodeTab == NULL)) + + +XMLPUBFUN void + xmlXPathFreeObject (xmlXPathObjectPtr obj); +XMLPUBFUN xmlNodeSetPtr + xmlXPathNodeSetCreate (xmlNodePtr val); +XMLPUBFUN void + xmlXPathFreeNodeSetList (xmlXPathObjectPtr obj); +XMLPUBFUN void + xmlXPathFreeNodeSet (xmlNodeSetPtr obj); +XMLPUBFUN xmlXPathObjectPtr + xmlXPathObjectCopy (xmlXPathObjectPtr val); +XMLPUBFUN int + xmlXPathCmpNodes (xmlNodePtr node1, + xmlNodePtr node2); +/** + * Conversion functions to basic types. + */ +XMLPUBFUN int + xmlXPathCastNumberToBoolean (double val); +XMLPUBFUN int + xmlXPathCastStringToBoolean (const xmlChar * val); +XMLPUBFUN int + xmlXPathCastNodeSetToBoolean(xmlNodeSetPtr ns); +XMLPUBFUN int + xmlXPathCastToBoolean (xmlXPathObjectPtr val); + +XMLPUBFUN double + xmlXPathCastBooleanToNumber (int val); +XMLPUBFUN double + xmlXPathCastStringToNumber (const xmlChar * val); +XMLPUBFUN double + xmlXPathCastNodeToNumber (xmlNodePtr node); +XMLPUBFUN double + xmlXPathCastNodeSetToNumber (xmlNodeSetPtr ns); +XMLPUBFUN double + xmlXPathCastToNumber (xmlXPathObjectPtr val); + +XMLPUBFUN xmlChar * + xmlXPathCastBooleanToString (int val); +XMLPUBFUN xmlChar * + xmlXPathCastNumberToString (double val); +XMLPUBFUN xmlChar * + xmlXPathCastNodeToString (xmlNodePtr node); +XMLPUBFUN xmlChar * + xmlXPathCastNodeSetToString (xmlNodeSetPtr ns); +XMLPUBFUN xmlChar * + xmlXPathCastToString (xmlXPathObjectPtr val); + +XMLPUBFUN xmlXPathObjectPtr + xmlXPathConvertBoolean (xmlXPathObjectPtr val); +XMLPUBFUN xmlXPathObjectPtr + xmlXPathConvertNumber (xmlXPathObjectPtr val); +XMLPUBFUN xmlXPathObjectPtr + xmlXPathConvertString (xmlXPathObjectPtr val); + +/** + * Context handling. + */ +XMLPUBFUN xmlXPathContextPtr + xmlXPathNewContext (xmlDocPtr doc); +XMLPUBFUN void + xmlXPathFreeContext (xmlXPathContextPtr ctxt); +XMLPUBFUN int + xmlXPathContextSetCache(xmlXPathContextPtr ctxt, + int active, + int value, + int options); +/** + * Evaluation functions. + */ +XMLPUBFUN long + xmlXPathOrderDocElems (xmlDocPtr doc); +XMLPUBFUN int + xmlXPathSetContextNode (xmlNodePtr node, + xmlXPathContextPtr ctx); +XMLPUBFUN xmlXPathObjectPtr + xmlXPathNodeEval (xmlNodePtr node, + const xmlChar *str, + xmlXPathContextPtr ctx); +XMLPUBFUN xmlXPathObjectPtr + xmlXPathEval (const xmlChar *str, + xmlXPathContextPtr ctx); +XMLPUBFUN xmlXPathObjectPtr + xmlXPathEvalExpression (const xmlChar *str, + xmlXPathContextPtr ctxt); +XMLPUBFUN int + xmlXPathEvalPredicate (xmlXPathContextPtr ctxt, + xmlXPathObjectPtr res); +/** + * Separate compilation/evaluation entry points. + */ +XMLPUBFUN xmlXPathCompExprPtr + xmlXPathCompile (const xmlChar *str); +XMLPUBFUN xmlXPathCompExprPtr + xmlXPathCtxtCompile (xmlXPathContextPtr ctxt, + const xmlChar *str); +XMLPUBFUN xmlXPathObjectPtr + xmlXPathCompiledEval (xmlXPathCompExprPtr comp, + xmlXPathContextPtr ctx); +XMLPUBFUN int + xmlXPathCompiledEvalToBoolean(xmlXPathCompExprPtr comp, + xmlXPathContextPtr ctxt); +XMLPUBFUN void + xmlXPathFreeCompExpr (xmlXPathCompExprPtr comp); +#endif /* LIBXML_XPATH_ENABLED */ +#if defined(LIBXML_XPATH_ENABLED) || defined(LIBXML_SCHEMAS_ENABLED) +XML_DEPRECATED +XMLPUBFUN void + xmlXPathInit (void); +XMLPUBFUN int + xmlXPathIsNaN (double val); +XMLPUBFUN int + xmlXPathIsInf (double val); + +#ifdef __cplusplus +} +#endif + +#endif /* LIBXML_XPATH_ENABLED or LIBXML_SCHEMAS_ENABLED*/ +#endif /* ! 
__XML_XPATH_H__ */ diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdf81fa630f0bd44fa7c72ae310a6bce3d19f1fa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/preproc.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/preproc.h new file mode 100644 index 0000000000000000000000000000000000000000..2a2fc7e4305e567464874ba6cb7bcba69f85efdf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/preproc.h @@ -0,0 +1,43 @@ +/* + * Summary: precomputing stylesheets + * Description: this is the compilation phase, where most of the + * stylesheet is "compiled" into faster to use data. + * + * Copy: See Copyright for the status of this software. + * + * Author: Daniel Veillard + */ + +#ifndef __XML_XSLT_PRECOMP_H__ +#define __XML_XSLT_PRECOMP_H__ + +#include +#include "xsltexports.h" +#include "xsltInternals.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Interfaces + */ +XSLTPUBVAR const xmlChar *xsltExtMarker; + +XSLTPUBFUN xsltElemPreCompPtr XSLTCALL + xsltDocumentComp (xsltStylesheetPtr style, + xmlNodePtr inst, + xsltTransformFunction function); + +XSLTPUBFUN void XSLTCALL + xsltStylePreCompute (xsltStylesheetPtr style, + xmlNodePtr inst); +XSLTPUBFUN void XSLTCALL + xsltFreeStylePreComps (xsltStylesheetPtr style); + +#ifdef __cplusplus +} +#endif + +#endif /* __XML_XSLT_PRECOMP_H__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/security.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/security.h new file mode 100644 index 0000000000000000000000000000000000000000..bab5c8c6b213ae0dec3160c08870e5232112cea4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/security.h @@ -0,0 +1,104 @@ +/* + * Summary: interface for the libxslt security framework + * Description: the libxslt security framework allow to restrict + * the access to new resources (file or URL) from + * the stylesheet at runtime. + * + * Copy: See Copyright for the status of this software. + * + * Author: Daniel Veillard + */ + +#ifndef __XML_XSLT_SECURITY_H__ +#define __XML_XSLT_SECURITY_H__ + +#include +#include "xsltexports.h" +#include "xsltInternals.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * xsltSecurityPref: + * + * structure to indicate the preferences for security in the XSLT + * transformation. + */ +typedef struct _xsltSecurityPrefs xsltSecurityPrefs; +typedef xsltSecurityPrefs *xsltSecurityPrefsPtr; + +/** + * xsltSecurityOption: + * + * the set of option that can be configured + */ +typedef enum { + XSLT_SECPREF_READ_FILE = 1, + XSLT_SECPREF_WRITE_FILE, + XSLT_SECPREF_CREATE_DIRECTORY, + XSLT_SECPREF_READ_NETWORK, + XSLT_SECPREF_WRITE_NETWORK +} xsltSecurityOption; + +/** + * xsltSecurityCheck: + * + * User provided function to check the value of a string like a file + * path or an URL ... 
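 *
 * As an illustrative sketch (not from the libxslt documentation; the
 * 1-allow / 0-deny return convention mirrors the xsltSecurityAllow and
 * xsltSecurityForbid helpers declared below), a callback refusing every
 * file write could be wired in like this:
 *
 *     static int deny_file_writes(xsltSecurityPrefsPtr sec,
 *                                 xsltTransformContextPtr ctxt,
 *                                 const char *value) {
 *         (void) sec; (void) ctxt; (void) value;
 *         return 0;                                   // 0 = refuse the operation
 *     }
 *
 *     xsltSecurityPrefsPtr prefs = xsltNewSecurityPrefs();
 *     xsltSetSecurityPrefs(prefs, XSLT_SECPREF_WRITE_FILE, deny_file_writes);
 *     xsltSetSecurityPrefs(prefs, XSLT_SECPREF_WRITE_NETWORK, xsltSecurityForbid);
 *     xsltSetCtxtSecurityPrefs(prefs, tctxt);         // tctxt: an existing transform context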
+ */ +typedef int (*xsltSecurityCheck) (xsltSecurityPrefsPtr sec, + xsltTransformContextPtr ctxt, + const char *value); + +/* + * Module interfaces + */ +XSLTPUBFUN xsltSecurityPrefsPtr XSLTCALL + xsltNewSecurityPrefs (void); +XSLTPUBFUN void XSLTCALL + xsltFreeSecurityPrefs (xsltSecurityPrefsPtr sec); +XSLTPUBFUN int XSLTCALL + xsltSetSecurityPrefs (xsltSecurityPrefsPtr sec, + xsltSecurityOption option, + xsltSecurityCheck func); +XSLTPUBFUN xsltSecurityCheck XSLTCALL + xsltGetSecurityPrefs (xsltSecurityPrefsPtr sec, + xsltSecurityOption option); + +XSLTPUBFUN void XSLTCALL + xsltSetDefaultSecurityPrefs (xsltSecurityPrefsPtr sec); +XSLTPUBFUN xsltSecurityPrefsPtr XSLTCALL + xsltGetDefaultSecurityPrefs (void); + +XSLTPUBFUN int XSLTCALL + xsltSetCtxtSecurityPrefs (xsltSecurityPrefsPtr sec, + xsltTransformContextPtr ctxt); + +XSLTPUBFUN int XSLTCALL + xsltSecurityAllow (xsltSecurityPrefsPtr sec, + xsltTransformContextPtr ctxt, + const char *value); +XSLTPUBFUN int XSLTCALL + xsltSecurityForbid (xsltSecurityPrefsPtr sec, + xsltTransformContextPtr ctxt, + const char *value); +/* + * internal interfaces + */ +XSLTPUBFUN int XSLTCALL + xsltCheckWrite (xsltSecurityPrefsPtr sec, + xsltTransformContextPtr ctxt, + const xmlChar *URL); +XSLTPUBFUN int XSLTCALL + xsltCheckRead (xsltSecurityPrefsPtr sec, + xsltTransformContextPtr ctxt, + const xmlChar *URL); + +#ifdef __cplusplus +} +#endif + +#endif /* __XML_XSLT_SECURITY_H__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/transform.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/transform.h new file mode 100644 index 0000000000000000000000000000000000000000..5a6f79591079a8ed59a3003936279acb504378fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/transform.h @@ -0,0 +1,207 @@ +/* + * Summary: the XSLT engine transformation part. + * Description: This module implements the bulk of the actual + * transformation processing. Most of the xsl: element + * constructs are implemented in this module. + * + * Copy: See Copyright for the status of this software. + * + * Author: Daniel Veillard + */ + +#ifndef __XML_XSLT_TRANSFORM_H__ +#define __XML_XSLT_TRANSFORM_H__ + +#include +#include +#include "xsltexports.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * XInclude default processing. + */ +XSLTPUBFUN void XSLTCALL + xsltSetXIncludeDefault (int xinclude); +XSLTPUBFUN int XSLTCALL + xsltGetXIncludeDefault (void); + +/** + * Export context to users. + */ +XSLTPUBFUN xsltTransformContextPtr XSLTCALL + xsltNewTransformContext (xsltStylesheetPtr style, + xmlDocPtr doc); + +XSLTPUBFUN void XSLTCALL + xsltFreeTransformContext(xsltTransformContextPtr ctxt); + +XSLTPUBFUN xmlDocPtr XSLTCALL + xsltApplyStylesheetUser (xsltStylesheetPtr style, + xmlDocPtr doc, + const char **params, + const char *output, + FILE * profile, + xsltTransformContextPtr userCtxt); +XSLTPUBFUN void XSLTCALL + xsltProcessOneNode (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xsltStackElemPtr params); +/** + * Private Interfaces. 
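 *
 * For orientation, the usual application-level sequence built on
 * xsltApplyStylesheet (declared just below) is roughly the following
 * illustrative sketch, not from the libxslt documentation, with placeholder
 * file names, no error handling, and <stdio.h> plus the libxml2 and libxslt
 * headers assumed to be included:
 *
 *     xsltStylesheetPtr style = xsltParseStylesheetFile(BAD_CAST "sheet.xsl");
 *     xmlDocPtr doc = xmlReadFile("input.xml", NULL, 0);
 *     xmlDocPtr res = xsltApplyStylesheet(style, doc, NULL);
 *     xsltSaveResultToFile(stdout, res, style);       // declared in xsltutils.h
 *     xmlFreeDoc(res);
 *     xmlFreeDoc(doc);
 *     xsltFreeStylesheet(style);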
+ */ +XSLTPUBFUN void XSLTCALL + xsltApplyStripSpaces (xsltTransformContextPtr ctxt, + xmlNodePtr node); +XSLTPUBFUN xmlDocPtr XSLTCALL + xsltApplyStylesheet (xsltStylesheetPtr style, + xmlDocPtr doc, + const char **params); +XSLTPUBFUN xmlDocPtr XSLTCALL + xsltProfileStylesheet (xsltStylesheetPtr style, + xmlDocPtr doc, + const char **params, + FILE * output); +XSLTPUBFUN int XSLTCALL + xsltRunStylesheet (xsltStylesheetPtr style, + xmlDocPtr doc, + const char **params, + const char *output, + xmlSAXHandlerPtr SAX, + xmlOutputBufferPtr IObuf); +XSLTPUBFUN int XSLTCALL + xsltRunStylesheetUser (xsltStylesheetPtr style, + xmlDocPtr doc, + const char **params, + const char *output, + xmlSAXHandlerPtr SAX, + xmlOutputBufferPtr IObuf, + FILE * profile, + xsltTransformContextPtr userCtxt); +XSLTPUBFUN void XSLTCALL + xsltApplyOneTemplate (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr list, + xsltTemplatePtr templ, + xsltStackElemPtr params); +XSLTPUBFUN void XSLTCALL + xsltDocumentElem (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltSort (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltCopy (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltText (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltElement (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltComment (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltAttribute (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltProcessingInstruction(xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltCopyOf (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltValueOf (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltNumber (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltApplyImports (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltCallTemplate (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltApplyTemplates (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltChoose (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltIf (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltForEach (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); +XSLTPUBFUN void XSLTCALL + xsltRegisterAllElement (xsltTransformContextPtr ctxt); + +XSLTPUBFUN xmlNodePtr XSLTCALL + xsltCopyTextString (xsltTransformContextPtr ctxt, + xmlNodePtr target, + const xmlChar *string, + int noescape); + +/* 
Following 2 functions needed for libexslt/functions.c */ +XSLTPUBFUN void XSLTCALL + xsltLocalVariablePop (xsltTransformContextPtr ctxt, + int limitNr, + int level); +XSLTPUBFUN int XSLTCALL + xsltLocalVariablePush (xsltTransformContextPtr ctxt, + xsltStackElemPtr variable, + int level); +/* + * Hook for the debugger if activated. + */ +XSLTPUBFUN void XSLTCALL + xslHandleDebugger (xmlNodePtr cur, + xmlNodePtr node, + xsltTemplatePtr templ, + xsltTransformContextPtr ctxt); + +#ifdef __cplusplus +} +#endif + +#endif /* __XML_XSLT_TRANSFORM_H__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/variables.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/variables.h new file mode 100644 index 0000000000000000000000000000000000000000..e2adee0f7551c6b80e2b76ded49f0b51e405f546 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/variables.h @@ -0,0 +1,118 @@ +/* + * Summary: interface for the variable matching and lookup. + * Description: interface for the variable matching and lookup. + * + * Copy: See Copyright for the status of this software. + * + * Author: Daniel Veillard + */ + +#ifndef __XML_XSLT_VARIABLES_H__ +#define __XML_XSLT_VARIABLES_H__ + +#include +#include +#include "xsltexports.h" +#include "xsltInternals.h" +#include "functions.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** + * XSLT_REGISTER_VARIABLE_LOOKUP: + * + * Registering macro, not general purpose at all but used in different modules. + */ + +#define XSLT_REGISTER_VARIABLE_LOOKUP(ctxt) \ + xmlXPathRegisterVariableLookup((ctxt)->xpathCtxt, \ + xsltXPathVariableLookup, (void *)(ctxt)); \ + xsltRegisterAllFunctions((ctxt)->xpathCtxt); \ + xsltRegisterAllElement(ctxt); \ + (ctxt)->xpathCtxt->extra = ctxt + +/* + * Flags for memory management of RVTs + */ + +/** + * XSLT_RVT_LOCAL: + * + * RVT is destroyed after the current instructions ends. + */ +#define XSLT_RVT_LOCAL 1 + +/** + * XSLT_RVT_FUNC_RESULT: + * + * RVT is part of results returned with func:result. The RVT won't be + * destroyed after exiting a template and will be reset to XSLT_RVT_LOCAL or + * XSLT_RVT_VARIABLE in the template that receives the return value. + */ +#define XSLT_RVT_FUNC_RESULT 2 + +/** + * XSLT_RVT_GLOBAL: + * + * RVT is part of a global variable. + */ +#define XSLT_RVT_GLOBAL 3 + +/* + * Interfaces for the variable module. 
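+ *
+ * Illustrative sketch of feeding user-level parameters through these
+ * interfaces (assumes an xsltTransformContextPtr "ctxt" created
+ * elsewhere; error handling omitted).  Values given to
+ * xsltEvalUserParams() are evaluated as XPath expressions, while the
+ * "Quote" variants take them as literal strings:
+ *
+ *     const char *params[] = { "color", "'blue'", NULL };
+ *     xsltEvalUserParams(ctxt, params);
+ *     xsltQuoteOneUserParam(ctxt, BAD_CAST "title", BAD_CAST "My Report");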
+ */ + +XSLTPUBFUN int XSLTCALL + xsltEvalGlobalVariables (xsltTransformContextPtr ctxt); +XSLTPUBFUN int XSLTCALL + xsltEvalUserParams (xsltTransformContextPtr ctxt, + const char **params); +XSLTPUBFUN int XSLTCALL + xsltQuoteUserParams (xsltTransformContextPtr ctxt, + const char **params); +XSLTPUBFUN int XSLTCALL + xsltEvalOneUserParam (xsltTransformContextPtr ctxt, + const xmlChar * name, + const xmlChar * value); +XSLTPUBFUN int XSLTCALL + xsltQuoteOneUserParam (xsltTransformContextPtr ctxt, + const xmlChar * name, + const xmlChar * value); + +XSLTPUBFUN void XSLTCALL + xsltParseGlobalVariable (xsltStylesheetPtr style, + xmlNodePtr cur); +XSLTPUBFUN void XSLTCALL + xsltParseGlobalParam (xsltStylesheetPtr style, + xmlNodePtr cur); +XSLTPUBFUN void XSLTCALL + xsltParseStylesheetVariable (xsltTransformContextPtr ctxt, + xmlNodePtr cur); +XSLTPUBFUN void XSLTCALL + xsltParseStylesheetParam (xsltTransformContextPtr ctxt, + xmlNodePtr cur); +XSLTPUBFUN xsltStackElemPtr XSLTCALL + xsltParseStylesheetCallerParam (xsltTransformContextPtr ctxt, + xmlNodePtr cur); +XSLTPUBFUN int XSLTCALL + xsltAddStackElemList (xsltTransformContextPtr ctxt, + xsltStackElemPtr elems); +XSLTPUBFUN void XSLTCALL + xsltFreeGlobalVariables (xsltTransformContextPtr ctxt); +XSLTPUBFUN xmlXPathObjectPtr XSLTCALL + xsltVariableLookup (xsltTransformContextPtr ctxt, + const xmlChar *name, + const xmlChar *ns_uri); +XSLTPUBFUN xmlXPathObjectPtr XSLTCALL + xsltXPathVariableLookup (void *ctxt, + const xmlChar *name, + const xmlChar *ns_uri); +#ifdef __cplusplus +} +#endif + +#endif /* __XML_XSLT_VARIABLES_H__ */ + diff --git a/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/xsltInternals.h b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/xsltInternals.h new file mode 100644 index 0000000000000000000000000000000000000000..7de638e143dd17762113e024b831a9b70a510f71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lxml/includes/libxslt/xsltInternals.h @@ -0,0 +1,1992 @@ +/* + * Summary: internal data structures, constants and functions + * Description: Internal data structures, constants and functions used + * by the XSLT engine. + * They are not part of the API or ABI, i.e. they can change + * without prior notice, use carefully. + * + * Copy: See Copyright for the status of this software. + * + * Author: Daniel Veillard + */ + +#ifndef __XML_XSLT_INTERNALS_H__ +#define __XML_XSLT_INTERNALS_H__ + +#include +#include +#include +#include +#include +#include +#include +#include "xsltexports.h" +#include "numbersInternals.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* #define XSLT_DEBUG_PROFILE_CACHE */ + +/** + * XSLT_IS_TEXT_NODE: + * + * check if the argument is a text node + */ +#define XSLT_IS_TEXT_NODE(n) ((n != NULL) && \ + (((n)->type == XML_TEXT_NODE) || \ + ((n)->type == XML_CDATA_SECTION_NODE))) + + +/** + * XSLT_MARK_RES_TREE_FRAG: + * + * internal macro to set up tree fragments + */ +#define XSLT_MARK_RES_TREE_FRAG(n) \ + (n)->name = (char *) xmlStrdup(BAD_CAST " fake node libxslt"); + +/** + * XSLT_IS_RES_TREE_FRAG: + * + * internal macro to test tree fragments + */ +#define XSLT_IS_RES_TREE_FRAG(n) \ + ((n != NULL) && ((n)->type == XML_DOCUMENT_NODE) && \ + ((n)->name != NULL) && ((n)->name[0] == ' ')) + +/** + * XSLT_REFACTORED_KEYCOMP: + * + * Internal define to enable on-demand xsl:key computation. 
+ * That's the only mode now but the define is kept for compatibility + */ +#define XSLT_REFACTORED_KEYCOMP + +/** + * XSLT_FAST_IF: + * + * Internal define to enable usage of xmlXPathCompiledEvalToBoolean() + * for XSLT "tests"; e.g. in + */ +#define XSLT_FAST_IF + +/** + * XSLT_REFACTORED: + * + * Internal define to enable the refactored parts of Libxslt. + */ +/* #define XSLT_REFACTORED */ +/* ==================================================================== */ + +/** + * XSLT_REFACTORED_VARS: + * + * Internal define to enable the refactored variable part of libxslt + */ +#define XSLT_REFACTORED_VARS + +#ifdef XSLT_REFACTORED + +extern const xmlChar *xsltXSLTAttrMarker; + + +/* TODO: REMOVE: #define XSLT_REFACTORED_EXCLRESNS */ + +/* TODO: REMOVE: #define XSLT_REFACTORED_NSALIAS */ + +/** + * XSLT_REFACTORED_XSLT_NSCOMP + * + * Internal define to enable the pointer-comparison of + * namespaces of XSLT elements. + */ +/* #define XSLT_REFACTORED_XSLT_NSCOMP */ + +#ifdef XSLT_REFACTORED_XSLT_NSCOMP + +extern const xmlChar *xsltConstNamespaceNameXSLT; + +/** + * IS_XSLT_ELEM_FAST: + * + * quick test to detect XSLT elements + */ +#define IS_XSLT_ELEM_FAST(n) \ + (((n) != NULL) && ((n)->ns != NULL) && \ + ((n)->ns->href == xsltConstNamespaceNameXSLT)) + +/** + * IS_XSLT_ATTR_FAST: + * + * quick test to detect XSLT attributes + */ +#define IS_XSLT_ATTR_FAST(a) \ + (((a) != NULL) && ((a)->ns != NULL) && \ + ((a)->ns->href == xsltConstNamespaceNameXSLT)) + +/** + * XSLT_HAS_INTERNAL_NSMAP: + * + * check for namespace mapping + */ +#define XSLT_HAS_INTERNAL_NSMAP(s) \ + (((s) != NULL) && ((s)->principal) && \ + ((s)->principal->principalData) && \ + ((s)->principal->principalData->nsMap)) + +/** + * XSLT_GET_INTERNAL_NSMAP: + * + * get pointer to namespace map + */ +#define XSLT_GET_INTERNAL_NSMAP(s) ((s)->principal->principalData->nsMap) + +#else /* XSLT_REFACTORED_XSLT_NSCOMP */ + +/** + * IS_XSLT_ELEM_FAST: + * + * quick check whether this is an xslt element + */ +#define IS_XSLT_ELEM_FAST(n) \ + (((n) != NULL) && ((n)->ns != NULL) && \ + (xmlStrEqual((n)->ns->href, XSLT_NAMESPACE))) + +/** + * IS_XSLT_ATTR_FAST: + * + * quick check for xslt namespace attribute + */ +#define IS_XSLT_ATTR_FAST(a) \ + (((a) != NULL) && ((a)->ns != NULL) && \ + (xmlStrEqual((a)->ns->href, XSLT_NAMESPACE))) + + +#endif /* XSLT_REFACTORED_XSLT_NSCOMP */ + + +/** + * XSLT_REFACTORED_MANDATORY_VERSION: + * + * TODO: Currently disabled to surpress regression test failures, since + * the old behaviour was that a missing version attribute + * produced a only a warning and not an error, which was incerrect. + * So the regression tests need to be fixed if this is enabled. + */ +/* #define XSLT_REFACTORED_MANDATORY_VERSION */ + +/** + * xsltPointerList: + * + * Pointer-list for various purposes. + */ +typedef struct _xsltPointerList xsltPointerList; +typedef xsltPointerList *xsltPointerListPtr; +struct _xsltPointerList { + void **items; + int number; + int size; +}; + +#endif + +/** + * XSLT_REFACTORED_PARSING: + * + * Internal define to enable the refactored parts of Libxslt + * related to parsing. + */ +/* #define XSLT_REFACTORED_PARSING */ + +/** + * XSLT_MAX_SORT: + * + * Max number of specified xsl:sort on an element. + */ +#define XSLT_MAX_SORT 15 + +/** + * XSLT_PAT_NO_PRIORITY: + * + * Specific value for pattern without priority expressed. + */ +#define XSLT_PAT_NO_PRIORITY -12345789 + +/** + * xsltRuntimeExtra: + * + * Extra information added to the transformation context. 
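+ *
+ * Illustrative sketch (assumes extension code holding a transform
+ * context "ctxt"; error checks omitted): a slot index is reserved with
+ * xsltAllocateExtraCtxt() and accessed through the macros defined
+ * below.  The info/deallocate pair carries data that must be freed;
+ * plain values would go through XSLT_RUNTIME_EXTRA(ctxt, slot, ival)
+ * instead.
+ *
+ *     int slot = xsltAllocateExtraCtxt(ctxt);
+ *     XSLT_RUNTIME_EXTRA_LST(ctxt, slot) = xmlMalloc(64);
+ *     XSLT_RUNTIME_EXTRA_FREE(ctxt, slot) = xmlFree;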
+ */ +typedef struct _xsltRuntimeExtra xsltRuntimeExtra; +typedef xsltRuntimeExtra *xsltRuntimeExtraPtr; +struct _xsltRuntimeExtra { + void *info; /* pointer to the extra data */ + xmlFreeFunc deallocate; /* pointer to the deallocation routine */ + union { /* dual-purpose field */ + void *ptr; /* data not needing deallocation */ + int ival; /* integer value storage */ + } val; +}; + +/** + * XSLT_RUNTIME_EXTRA_LST: + * @ctxt: the transformation context + * @nr: the index + * + * Macro used to access extra information stored in the context + */ +#define XSLT_RUNTIME_EXTRA_LST(ctxt, nr) (ctxt)->extras[(nr)].info +/** + * XSLT_RUNTIME_EXTRA_FREE: + * @ctxt: the transformation context + * @nr: the index + * + * Macro used to free extra information stored in the context + */ +#define XSLT_RUNTIME_EXTRA_FREE(ctxt, nr) (ctxt)->extras[(nr)].deallocate +/** + * XSLT_RUNTIME_EXTRA: + * @ctxt: the transformation context + * @nr: the index + * + * Macro used to define extra information stored in the context + */ +#define XSLT_RUNTIME_EXTRA(ctxt, nr, typ) (ctxt)->extras[(nr)].val.typ + +/** + * xsltTemplate: + * + * The in-memory structure corresponding to an XSLT Template. + */ +typedef struct _xsltTemplate xsltTemplate; +typedef xsltTemplate *xsltTemplatePtr; +struct _xsltTemplate { + struct _xsltTemplate *next;/* chained list sorted by priority */ + struct _xsltStylesheet *style;/* the containing stylesheet */ + xmlChar *match; /* the matching string */ + float priority; /* as given from the stylesheet, not computed */ + const xmlChar *name; /* the local part of the name QName */ + const xmlChar *nameURI; /* the URI part of the name QName */ + const xmlChar *mode;/* the local part of the mode QName */ + const xmlChar *modeURI;/* the URI part of the mode QName */ + xmlNodePtr content; /* the template replacement value */ + xmlNodePtr elem; /* the source element */ + + /* + * TODO: @inheritedNsNr and @inheritedNs won't be used in the + * refactored code. + */ + int inheritedNsNr; /* number of inherited namespaces */ + xmlNsPtr *inheritedNs;/* inherited non-excluded namespaces */ + + /* Profiling information */ + int nbCalls; /* the number of time the template was called */ + unsigned long time; /* the time spent in this template */ + void *params; /* xsl:param instructions */ + + int templNr; /* Nb of templates in the stack */ + int templMax; /* Size of the templtes stack */ + xsltTemplatePtr *templCalledTab; /* templates called */ + int *templCountTab; /* .. and how often */ + + /* Conflict resolution */ + int position; +}; + +/** + * xsltDecimalFormat: + * + * Data structure of decimal-format. + */ +typedef struct _xsltDecimalFormat xsltDecimalFormat; +typedef xsltDecimalFormat *xsltDecimalFormatPtr; +struct _xsltDecimalFormat { + struct _xsltDecimalFormat *next; /* chained list */ + xmlChar *name; + /* Used for interpretation of pattern */ + xmlChar *digit; + xmlChar *patternSeparator; + /* May appear in result */ + xmlChar *minusSign; + xmlChar *infinity; + xmlChar *noNumber; /* Not-a-number */ + /* Used for interpretation of pattern and may appear in result */ + xmlChar *decimalPoint; + xmlChar *grouping; + xmlChar *percent; + xmlChar *permille; + xmlChar *zeroDigit; + const xmlChar *nsUri; +}; + +/** + * xsltDocument: + * + * Data structure associated to a parsed document. 
+ */ +typedef struct _xsltDocument xsltDocument; +typedef xsltDocument *xsltDocumentPtr; +struct _xsltDocument { + struct _xsltDocument *next; /* documents are kept in a chained list */ + int main; /* is this the main document */ + xmlDocPtr doc; /* the parsed document */ + void *keys; /* key tables storage */ + struct _xsltDocument *includes; /* subsidiary includes */ + int preproc; /* pre-processing already done */ + int nbKeysComputed; +}; + +/** + * xsltKeyDef: + * + * Representation of an xsl:key. + */ +typedef struct _xsltKeyDef xsltKeyDef; +typedef xsltKeyDef *xsltKeyDefPtr; +struct _xsltKeyDef { + struct _xsltKeyDef *next; + xmlNodePtr inst; + xmlChar *name; + xmlChar *nameURI; + xmlChar *match; + xmlChar *use; + xmlXPathCompExprPtr comp; + xmlXPathCompExprPtr usecomp; + xmlNsPtr *nsList; /* the namespaces in scope */ + int nsNr; /* the number of namespaces in scope */ +}; + +/** + * xsltKeyTable: + * + * Holds the computed keys for key definitions of the same QName. + * Is owned by an xsltDocument. + */ +typedef struct _xsltKeyTable xsltKeyTable; +typedef xsltKeyTable *xsltKeyTablePtr; +struct _xsltKeyTable { + struct _xsltKeyTable *next; + xmlChar *name; + xmlChar *nameURI; + xmlHashTablePtr keys; +}; + +/* + * The in-memory structure corresponding to an XSLT Stylesheet. + * NOTE: most of the content is simply linked from the doc tree + * structure, no specific allocation is made. + */ +typedef struct _xsltStylesheet xsltStylesheet; +typedef xsltStylesheet *xsltStylesheetPtr; + +typedef struct _xsltTransformContext xsltTransformContext; +typedef xsltTransformContext *xsltTransformContextPtr; + +/** + * xsltElemPreComp: + * + * The in-memory structure corresponding to element precomputed data, + * designed to be extended by extension implementors. + */ +typedef struct _xsltElemPreComp xsltElemPreComp; +typedef xsltElemPreComp *xsltElemPreCompPtr; + +/** + * xsltTransformFunction: + * @ctxt: the XSLT transformation context + * @node: the input node + * @inst: the stylesheet node + * @comp: the compiled information from the stylesheet + * + * Signature of the function associated to elements part of the + * stylesheet language like xsl:if or xsl:apply-templates. + */ +typedef void (*xsltTransformFunction) (xsltTransformContextPtr ctxt, + xmlNodePtr node, + xmlNodePtr inst, + xsltElemPreCompPtr comp); + +/** + * xsltSortFunc: + * @ctxt: a transformation context + * @sorts: the node-set to sort + * @nbsorts: the number of sorts + * + * Signature of the function to use during sorting + */ +typedef void (*xsltSortFunc) (xsltTransformContextPtr ctxt, xmlNodePtr *sorts, + int nbsorts); + +typedef enum { + XSLT_FUNC_COPY=1, + XSLT_FUNC_SORT, + XSLT_FUNC_TEXT, + XSLT_FUNC_ELEMENT, + XSLT_FUNC_ATTRIBUTE, + XSLT_FUNC_COMMENT, + XSLT_FUNC_PI, + XSLT_FUNC_COPYOF, + XSLT_FUNC_VALUEOF, + XSLT_FUNC_NUMBER, + XSLT_FUNC_APPLYIMPORTS, + XSLT_FUNC_CALLTEMPLATE, + XSLT_FUNC_APPLYTEMPLATES, + XSLT_FUNC_CHOOSE, + XSLT_FUNC_IF, + XSLT_FUNC_FOREACH, + XSLT_FUNC_DOCUMENT, + XSLT_FUNC_WITHPARAM, + XSLT_FUNC_PARAM, + XSLT_FUNC_VARIABLE, + XSLT_FUNC_WHEN, + XSLT_FUNC_EXTENSION +#ifdef XSLT_REFACTORED + , + XSLT_FUNC_OTHERWISE, + XSLT_FUNC_FALLBACK, + XSLT_FUNC_MESSAGE, + XSLT_FUNC_INCLUDE, + XSLT_FUNC_ATTRSET, + XSLT_FUNC_LITERAL_RESULT_ELEMENT, + XSLT_FUNC_UNKOWN_FORWARDS_COMPAT +#endif +} xsltStyleType; + +/** + * xsltElemPreCompDeallocator: + * @comp: the #xsltElemPreComp to free up + * + * Deallocates an #xsltElemPreComp structure. 
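+ *
+ * Illustrative sketch of a matching callback for a hypothetical
+ * extension item that embeds xsltElemPreComp as its first member; such
+ * a callback is stored in the structure's "free" field:
+ *
+ *     static void myExtFreeComp(xsltElemPreCompPtr comp) {
+ *         xmlFree(comp);
+ *     }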
+ */ +typedef void (*xsltElemPreCompDeallocator) (xsltElemPreCompPtr comp); + +/** + * xsltElemPreComp: + * + * The basic structure for compiled items of the AST of the XSLT processor. + * This structure is also intended to be extended by extension implementors. + * TODO: This is somehow not nice, since it has a "free" field, which + * derived stylesheet-structs do not have. + */ +struct _xsltElemPreComp { + xsltElemPreCompPtr next; /* next item in the global chained + list held by xsltStylesheet. */ + xsltStyleType type; /* type of the element */ + xsltTransformFunction func; /* handling function */ + xmlNodePtr inst; /* the node in the stylesheet's tree + corresponding to this item */ + + /* end of common part */ + xsltElemPreCompDeallocator free; /* the deallocator */ +}; + +/** + * xsltStylePreComp: + * + * The abstract basic structure for items of the XSLT processor. + * This includes: + * 1) compiled forms of XSLT instructions (xsl:if, xsl:attribute, etc.) + * 2) compiled forms of literal result elements + * 3) compiled forms of extension elements + */ +typedef struct _xsltStylePreComp xsltStylePreComp; +typedef xsltStylePreComp *xsltStylePreCompPtr; + +#ifdef XSLT_REFACTORED + +/* +* Some pointer-list utility functions. +*/ +XSLTPUBFUN xsltPointerListPtr XSLTCALL + xsltPointerListCreate (int initialSize); +XSLTPUBFUN void XSLTCALL + xsltPointerListFree (xsltPointerListPtr list); +XSLTPUBFUN void XSLTCALL + xsltPointerListClear (xsltPointerListPtr list); +XSLTPUBFUN int XSLTCALL + xsltPointerListAddSize (xsltPointerListPtr list, + void *item, + int initialSize); + +/************************************************************************ + * * + * Refactored structures * + * * + ************************************************************************/ + +typedef struct _xsltNsListContainer xsltNsListContainer; +typedef xsltNsListContainer *xsltNsListContainerPtr; +struct _xsltNsListContainer { + xmlNsPtr *list; + int totalNumber; + int xpathNumber; +}; + +/** + * XSLT_ITEM_COMPATIBILITY_FIELDS: + * + * Fields for API compatibility to the structure + * _xsltElemPreComp which is used for extension functions. + * Note that @next is used for storage; it does not reflect a next + * sibling in the tree. + * TODO: Evaluate if we really need such a compatibility. + */ +#define XSLT_ITEM_COMPATIBILITY_FIELDS \ + xsltElemPreCompPtr next;\ + xsltStyleType type;\ + xsltTransformFunction func;\ + xmlNodePtr inst; + +/** + * XSLT_ITEM_NAVIGATION_FIELDS: + * + * Currently empty. + * TODO: It is intended to hold navigational fields in the future. + */ +#define XSLT_ITEM_NAVIGATION_FIELDS +/* + xsltStylePreCompPtr parent;\ + xsltStylePreCompPtr children;\ + xsltStylePreCompPtr nextItem; +*/ + +/** + * XSLT_ITEM_NSINSCOPE_FIELDS: + * + * The in-scope namespaces. + */ +#define XSLT_ITEM_NSINSCOPE_FIELDS xsltNsListContainerPtr inScopeNs; + +/** + * XSLT_ITEM_COMMON_FIELDS: + * + * Common fields used for all items. + */ +#define XSLT_ITEM_COMMON_FIELDS \ + XSLT_ITEM_COMPATIBILITY_FIELDS \ + XSLT_ITEM_NAVIGATION_FIELDS \ + XSLT_ITEM_NSINSCOPE_FIELDS + +/** + * _xsltStylePreComp: + * + * The abstract basic structure for items of the XSLT processor. + * This includes: + * 1) compiled forms of XSLT instructions (e.g. xsl:if, xsl:attribute, etc.) + * 2) compiled forms of literal result elements + * 3) various properties for XSLT instructions (e.g. 
xsl:when, + * xsl:with-param) + * + * REVISIT TODO: Keep this structure equal to the fields + * defined by XSLT_ITEM_COMMON_FIELDS + */ +struct _xsltStylePreComp { + xsltElemPreCompPtr next; /* next item in the global chained + list held by xsltStylesheet */ + xsltStyleType type; /* type of the item */ + xsltTransformFunction func; /* handling function */ + xmlNodePtr inst; /* the node in the stylesheet's tree + corresponding to this item. */ + /* Currently no navigational fields. */ + xsltNsListContainerPtr inScopeNs; +}; + +/** + * xsltStyleBasicEmptyItem: + * + * Abstract structure only used as a short-cut for + * XSLT items with no extra fields. + * NOTE that it is intended that this structure looks the same as + * _xsltStylePreComp. + */ +typedef struct _xsltStyleBasicEmptyItem xsltStyleBasicEmptyItem; +typedef xsltStyleBasicEmptyItem *xsltStyleBasicEmptyItemPtr; + +struct _xsltStyleBasicEmptyItem { + XSLT_ITEM_COMMON_FIELDS +}; + +/** + * xsltStyleBasicExpressionItem: + * + * Abstract structure only used as a short-cut for + * XSLT items with just an expression. + */ +typedef struct _xsltStyleBasicExpressionItem xsltStyleBasicExpressionItem; +typedef xsltStyleBasicExpressionItem *xsltStyleBasicExpressionItemPtr; + +struct _xsltStyleBasicExpressionItem { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *select; /* TODO: Change this to "expression". */ + xmlXPathCompExprPtr comp; /* TODO: Change this to compExpr. */ +}; + +/************************************************************************ + * * + * XSLT-instructions/declarations * + * * + ************************************************************************/ + +/** + * xsltStyleItemElement: + * + * + * + * + * + */ +typedef struct _xsltStyleItemElement xsltStyleItemElement; +typedef xsltStyleItemElement *xsltStyleItemElementPtr; + +struct _xsltStyleItemElement { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *use; + int has_use; + const xmlChar *name; + int has_name; + const xmlChar *ns; + const xmlChar *nsPrefix; + int has_ns; +}; + +/** + * xsltStyleItemAttribute: + * + * + * + * + * + */ +typedef struct _xsltStyleItemAttribute xsltStyleItemAttribute; +typedef xsltStyleItemAttribute *xsltStyleItemAttributePtr; + +struct _xsltStyleItemAttribute { + XSLT_ITEM_COMMON_FIELDS + const xmlChar *name; + int has_name; + const xmlChar *ns; + const xmlChar *nsPrefix; + int has_ns; +}; + +/** + * xsltStyleItemText: + * + * + * + * + * + */ +typedef struct _xsltStyleItemText xsltStyleItemText; +typedef xsltStyleItemText *xsltStyleItemTextPtr; + +struct _xsltStyleItemText { + XSLT_ITEM_COMMON_FIELDS + int noescape; /* text */ +}; + +/** + * xsltStyleItemComment: + * + * + * + * + * + */ +typedef xsltStyleBasicEmptyItem xsltStyleItemComment; +typedef xsltStyleItemComment *xsltStyleItemCommentPtr; + +/** + * xsltStyleItemPI: + * + * + * + * + * + */ +typedef struct _xsltStyleItemPI xsltStyleItemPI; +typedef xsltStyleItemPI *xsltStyleItemPIPtr; + +struct _xsltStyleItemPI { + XSLT_ITEM_COMMON_FIELDS + const xmlChar *name; + int has_name; +}; + +/** + * xsltStyleItemApplyImports: + * + * + * + */ +typedef xsltStyleBasicEmptyItem xsltStyleItemApplyImports; +typedef xsltStyleItemApplyImports *xsltStyleItemApplyImportsPtr; + +/** + * xsltStyleItemApplyTemplates: + * + * + * + * + * + */ +typedef struct _xsltStyleItemApplyTemplates xsltStyleItemApplyTemplates; +typedef xsltStyleItemApplyTemplates *xsltStyleItemApplyTemplatesPtr; + +struct _xsltStyleItemApplyTemplates { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *mode; /* apply-templates */ + const 
xmlChar *modeURI; /* apply-templates */ + const xmlChar *select; /* sort, copy-of, value-of, apply-templates */ + xmlXPathCompExprPtr comp; /* a precompiled XPath expression */ + /* TODO: with-params */ +}; + +/** + * xsltStyleItemCallTemplate: + * + * + * + * + * + */ +typedef struct _xsltStyleItemCallTemplate xsltStyleItemCallTemplate; +typedef xsltStyleItemCallTemplate *xsltStyleItemCallTemplatePtr; + +struct _xsltStyleItemCallTemplate { + XSLT_ITEM_COMMON_FIELDS + + xsltTemplatePtr templ; /* call-template */ + const xmlChar *name; /* element, attribute, pi */ + int has_name; /* element, attribute, pi */ + const xmlChar *ns; /* element */ + int has_ns; /* element */ + /* TODO: with-params */ +}; + +/** + * xsltStyleItemCopy: + * + * + * + * + * + */ +typedef struct _xsltStyleItemCopy xsltStyleItemCopy; +typedef xsltStyleItemCopy *xsltStyleItemCopyPtr; + +struct _xsltStyleItemCopy { + XSLT_ITEM_COMMON_FIELDS + const xmlChar *use; /* copy, element */ + int has_use; /* copy, element */ +}; + +/** + * xsltStyleItemIf: + * + * + * + * + * + */ +typedef struct _xsltStyleItemIf xsltStyleItemIf; +typedef xsltStyleItemIf *xsltStyleItemIfPtr; + +struct _xsltStyleItemIf { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *test; /* if */ + xmlXPathCompExprPtr comp; /* a precompiled XPath expression */ +}; + + +/** + * xsltStyleItemCopyOf: + * + * + * + */ +typedef xsltStyleBasicExpressionItem xsltStyleItemCopyOf; +typedef xsltStyleItemCopyOf *xsltStyleItemCopyOfPtr; + +/** + * xsltStyleItemValueOf: + * + * + * + */ +typedef struct _xsltStyleItemValueOf xsltStyleItemValueOf; +typedef xsltStyleItemValueOf *xsltStyleItemValueOfPtr; + +struct _xsltStyleItemValueOf { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *select; + xmlXPathCompExprPtr comp; /* a precompiled XPath expression */ + int noescape; +}; + +/** + * xsltStyleItemNumber: + * + * + * + */ +typedef struct _xsltStyleItemNumber xsltStyleItemNumber; +typedef xsltStyleItemNumber *xsltStyleItemNumberPtr; + +struct _xsltStyleItemNumber { + XSLT_ITEM_COMMON_FIELDS + xsltNumberData numdata; /* number */ +}; + +/** + * xsltStyleItemChoose: + * + * + * + * + * + */ +typedef xsltStyleBasicEmptyItem xsltStyleItemChoose; +typedef xsltStyleItemChoose *xsltStyleItemChoosePtr; + +/** + * xsltStyleItemFallback: + * + * + * + * + * + */ +typedef xsltStyleBasicEmptyItem xsltStyleItemFallback; +typedef xsltStyleItemFallback *xsltStyleItemFallbackPtr; + +/** + * xsltStyleItemForEach: + * + * + * + * + * + */ +typedef xsltStyleBasicExpressionItem xsltStyleItemForEach; +typedef xsltStyleItemForEach *xsltStyleItemForEachPtr; + +/** + * xsltStyleItemMessage: + * + * + * + * + * + */ +typedef struct _xsltStyleItemMessage xsltStyleItemMessage; +typedef xsltStyleItemMessage *xsltStyleItemMessagePtr; + +struct _xsltStyleItemMessage { + XSLT_ITEM_COMMON_FIELDS + int terminate; +}; + +/** + * xsltStyleItemDocument: + * + * NOTE: This is not an instruction of XSLT 1.0. + */ +typedef struct _xsltStyleItemDocument xsltStyleItemDocument; +typedef xsltStyleItemDocument *xsltStyleItemDocumentPtr; + +struct _xsltStyleItemDocument { + XSLT_ITEM_COMMON_FIELDS + int ver11; /* assigned: in xsltDocumentComp; + read: nowhere; + TODO: Check if we need. 
*/ + const xmlChar *filename; /* document URL */ + int has_filename; +}; + +/************************************************************************ + * * + * Non-instructions (actually properties of instructions/declarations) * + * * + ************************************************************************/ + +/** + * xsltStyleBasicItemVariable: + * + * Basic struct for xsl:variable, xsl:param and xsl:with-param. + * It's currently important to have equal fields, since + * xsltParseStylesheetCallerParam() is used with xsl:with-param from + * the xslt side and with xsl:param from the exslt side (in + * exsltFuncFunctionFunction()). + * + * FUTURE NOTE: In XSLT 2.0 xsl:param, xsl:variable and xsl:with-param + * have additional different fields. + */ +typedef struct _xsltStyleBasicItemVariable xsltStyleBasicItemVariable; +typedef xsltStyleBasicItemVariable *xsltStyleBasicItemVariablePtr; + +struct _xsltStyleBasicItemVariable { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *select; + xmlXPathCompExprPtr comp; + + const xmlChar *name; + int has_name; + const xmlChar *ns; + int has_ns; +}; + +/** + * xsltStyleItemVariable: + * + * + * + * + * + */ +typedef xsltStyleBasicItemVariable xsltStyleItemVariable; +typedef xsltStyleItemVariable *xsltStyleItemVariablePtr; + +/** + * xsltStyleItemParam: + * + * + * + * + * + */ +typedef struct _xsltStyleItemParam xsltStyleItemParam; +typedef xsltStyleItemParam *xsltStyleItemParamPtr; + +struct _xsltStyleItemParam { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *select; + xmlXPathCompExprPtr comp; + + const xmlChar *name; + int has_name; + const xmlChar *ns; + int has_ns; +}; + +/** + * xsltStyleItemWithParam: + * + * + * + * + */ +typedef xsltStyleBasicItemVariable xsltStyleItemWithParam; +typedef xsltStyleItemWithParam *xsltStyleItemWithParamPtr; + +/** + * xsltStyleItemSort: + * + * Reflects the XSLT xsl:sort item. 
+ * Allowed parents: xsl:apply-templates, xsl:for-each + * + */ +typedef struct _xsltStyleItemSort xsltStyleItemSort; +typedef xsltStyleItemSort *xsltStyleItemSortPtr; + +struct _xsltStyleItemSort { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *stype; /* sort */ + int has_stype; /* sort */ + int number; /* sort */ + const xmlChar *order; /* sort */ + int has_order; /* sort */ + int descending; /* sort */ + const xmlChar *lang; /* sort */ + int has_lang; /* sort */ + const xmlChar *case_order; /* sort */ + int lower_first; /* sort */ + + const xmlChar *use; + int has_use; + + const xmlChar *select; /* sort, copy-of, value-of, apply-templates */ + + xmlXPathCompExprPtr comp; /* a precompiled XPath expression */ +}; + + +/** + * xsltStyleItemWhen: + * + * + * + * + * Allowed parent: xsl:choose + */ +typedef struct _xsltStyleItemWhen xsltStyleItemWhen; +typedef xsltStyleItemWhen *xsltStyleItemWhenPtr; + +struct _xsltStyleItemWhen { + XSLT_ITEM_COMMON_FIELDS + + const xmlChar *test; + xmlXPathCompExprPtr comp; +}; + +/** + * xsltStyleItemOtherwise: + * + * Allowed parent: xsl:choose + * + * + * + */ +typedef struct _xsltStyleItemOtherwise xsltStyleItemOtherwise; +typedef xsltStyleItemOtherwise *xsltStyleItemOtherwisePtr; + +struct _xsltStyleItemOtherwise { + XSLT_ITEM_COMMON_FIELDS +}; + +typedef struct _xsltStyleItemInclude xsltStyleItemInclude; +typedef xsltStyleItemInclude *xsltStyleItemIncludePtr; + +struct _xsltStyleItemInclude { + XSLT_ITEM_COMMON_FIELDS + xsltDocumentPtr include; +}; + +/************************************************************************ + * * + * XSLT elements in forwards-compatible mode * + * * + ************************************************************************/ + +typedef struct _xsltStyleItemUknown xsltStyleItemUknown; +typedef xsltStyleItemUknown *xsltStyleItemUknownPtr; +struct _xsltStyleItemUknown { + XSLT_ITEM_COMMON_FIELDS +}; + + +/************************************************************************ + * * + * Extension elements * + * * + ************************************************************************/ + +/* + * xsltStyleItemExtElement: + * + * Reflects extension elements. + * + * NOTE: Due to the fact that the structure xsltElemPreComp is most + * probably already heavily in use out there by users, so we cannot + * easily change it, we'll create an intermediate structure which will + * hold an xsltElemPreCompPtr. + * BIG NOTE: The only problem I see here is that the user processes the + * content of the stylesheet tree, possibly he'll lookup the node->psvi + * fields in order to find subsequent extension functions. + * In this case, the user's code will break, since the node->psvi + * field will hold now the xsltStyleItemExtElementPtr and not + * the xsltElemPreCompPtr. + * However the place where the structure is anchored in the node-tree, + * namely node->psvi, has beed already once been moved from node->_private + * to node->psvi, so we have a precedent here, which, I think, should allow + * us to change such semantics without headaches. 
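+ *
+ * Illustrative sketch under that layout (assumes "inst" is an
+ * extension-element node in the compiled stylesheet tree):
+ *
+ *     xsltStyleItemExtElementPtr wrap = (xsltStyleItemExtElementPtr) inst->psvi;
+ *     xsltElemPreCompPtr comp = (wrap != NULL) ? wrap->item : NULL;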
+ */ +typedef struct _xsltStyleItemExtElement xsltStyleItemExtElement; +typedef xsltStyleItemExtElement *xsltStyleItemExtElementPtr; +struct _xsltStyleItemExtElement { + XSLT_ITEM_COMMON_FIELDS + xsltElemPreCompPtr item; +}; + +/************************************************************************ + * * + * Literal result elements * + * * + ************************************************************************/ + +typedef struct _xsltEffectiveNs xsltEffectiveNs; +typedef xsltEffectiveNs *xsltEffectiveNsPtr; +struct _xsltEffectiveNs { + xsltEffectiveNsPtr nextInStore; /* storage next */ + xsltEffectiveNsPtr next; /* next item in the list */ + const xmlChar *prefix; + const xmlChar *nsName; + /* + * Indicates if eclared on the literal result element; dunno if really + * needed. + */ + int holdByElem; +}; + +/* + * Info for literal result elements. + * This will be set on the elem->psvi field and will be + * shared by literal result elements, which have the same + * excluded result namespaces; i.e., this *won't* be created uniquely + * for every literal result element. + */ +typedef struct _xsltStyleItemLRElementInfo xsltStyleItemLRElementInfo; +typedef xsltStyleItemLRElementInfo *xsltStyleItemLRElementInfoPtr; +struct _xsltStyleItemLRElementInfo { + XSLT_ITEM_COMMON_FIELDS + /* + * @effectiveNs is the set of effective ns-nodes + * on the literal result element, which will be added to the result + * element if not already existing in the result tree. + * This means that excluded namespaces (via exclude-result-prefixes, + * extension-element-prefixes and the XSLT namespace) not added + * to the set. + * Namespace-aliasing was applied on the @effectiveNs. + */ + xsltEffectiveNsPtr effectiveNs; + +}; + +#ifdef XSLT_REFACTORED + +typedef struct _xsltNsAlias xsltNsAlias; +typedef xsltNsAlias *xsltNsAliasPtr; +struct _xsltNsAlias { + xsltNsAliasPtr next; /* next in the list */ + xmlNsPtr literalNs; + xmlNsPtr targetNs; + xmlDocPtr docOfTargetNs; +}; +#endif + +#ifdef XSLT_REFACTORED_XSLT_NSCOMP + +typedef struct _xsltNsMap xsltNsMap; +typedef xsltNsMap *xsltNsMapPtr; +struct _xsltNsMap { + xsltNsMapPtr next; /* next in the list */ + xmlDocPtr doc; + xmlNodePtr elem; /* the element holding the ns-decl */ + xmlNsPtr ns; /* the xmlNs structure holding the XML namespace name */ + const xmlChar *origNsName; /* the original XML namespace name */ + const xmlChar *newNsName; /* the mapped XML namespace name */ +}; +#endif + +/************************************************************************ + * * + * Compile-time structures for *internal* use only * + * * + ************************************************************************/ + +typedef struct _xsltPrincipalStylesheetData xsltPrincipalStylesheetData; +typedef xsltPrincipalStylesheetData *xsltPrincipalStylesheetDataPtr; + +typedef struct _xsltNsList xsltNsList; +typedef xsltNsList *xsltNsListPtr; +struct _xsltNsList { + xsltNsListPtr next; /* next in the list */ + xmlNsPtr ns; +}; + +/* +* xsltVarInfo: +* +* Used at compilation time for parameters and variables. +*/ +typedef struct _xsltVarInfo xsltVarInfo; +typedef xsltVarInfo *xsltVarInfoPtr; +struct _xsltVarInfo { + xsltVarInfoPtr next; /* next in the list */ + xsltVarInfoPtr prev; + int depth; /* the depth in the tree */ + const xmlChar *name; + const xmlChar *nsName; +}; + +/** + * xsltCompilerNodeInfo: + * + * Per-node information during compile-time. 
+ */ +typedef struct _xsltCompilerNodeInfo xsltCompilerNodeInfo; +typedef xsltCompilerNodeInfo *xsltCompilerNodeInfoPtr; +struct _xsltCompilerNodeInfo { + xsltCompilerNodeInfoPtr next; + xsltCompilerNodeInfoPtr prev; + xmlNodePtr node; + int depth; + xsltTemplatePtr templ; /* The owning template */ + int category; /* XSLT element, LR-element or + extension element */ + xsltStyleType type; + xsltElemPreCompPtr item; /* The compiled information */ + /* The current in-scope namespaces */ + xsltNsListContainerPtr inScopeNs; + /* The current excluded result namespaces */ + xsltPointerListPtr exclResultNs; + /* The current extension instruction namespaces */ + xsltPointerListPtr extElemNs; + + /* The current info for literal result elements. */ + xsltStyleItemLRElementInfoPtr litResElemInfo; + /* + * Set to 1 if in-scope namespaces changed, + * or excluded result namespaces changed, + * or extension element namespaces changed. + * This will trigger creation of new infos + * for literal result elements. + */ + int nsChanged; + int preserveWhitespace; + int stripWhitespace; + int isRoot; /* whether this is the stylesheet's root node */ + int forwardsCompat; /* whether forwards-compatible mode is enabled */ + /* whether the content of an extension element was processed */ + int extContentHandled; + /* the type of the current child */ + xsltStyleType curChildType; +}; + +/** + * XSLT_CCTXT: + * + * get pointer to compiler context + */ +#define XSLT_CCTXT(style) ((xsltCompilerCtxtPtr) style->compCtxt) + +typedef enum { + XSLT_ERROR_SEVERITY_ERROR = 0, + XSLT_ERROR_SEVERITY_WARNING +} xsltErrorSeverityType; + +typedef struct _xsltCompilerCtxt xsltCompilerCtxt; +typedef xsltCompilerCtxt *xsltCompilerCtxtPtr; +struct _xsltCompilerCtxt { + void *errorCtxt; /* user specific error context */ + /* + * used for error/warning reports; e.g. XSLT_ERROR_SEVERITY_WARNING */ + xsltErrorSeverityType errSeverity; + int warnings; /* TODO: number of warnings found at + compilation */ + int errors; /* TODO: number of errors found at + compilation */ + xmlDictPtr dict; + xsltStylesheetPtr style; + int simplified; /* whether this is a simplified stylesheet */ + /* TODO: structured/unstructured error contexts. */ + int depth; /* Current depth of processing */ + + xsltCompilerNodeInfoPtr inode; + xsltCompilerNodeInfoPtr inodeList; + xsltCompilerNodeInfoPtr inodeLast; + xsltPointerListPtr tmpList; /* Used for various purposes */ + /* + * The XSLT version as specified by the stylesheet's root element. + */ + int isInclude; + int hasForwardsCompat; /* whether forwards-compatible mode was used + in a parsing episode */ + int maxNodeInfos; /* TEMP TODO: just for the interest */ + int maxLREs; /* TEMP TODO: just for the interest */ + /* + * In order to keep the old behaviour, applying strict rules of + * the spec can be turned off. This has effect only on special + * mechanisms like whitespace-stripping in the stylesheet. + */ + int strict; + xsltPrincipalStylesheetDataPtr psData; + xsltStyleItemUknownPtr unknownItem; + int hasNsAliases; /* Indicator if there was an xsl:namespace-alias. */ + xsltNsAliasPtr nsAliases; + xsltVarInfoPtr ivars; /* Storage of local in-scope variables/params. */ + xsltVarInfoPtr ivar; /* topmost local variable/param. */ +}; + +#else /* XSLT_REFACTORED */ +/* +* The old structures before refactoring. +*/ + +/** + * _xsltStylePreComp: + * + * The in-memory structure corresponding to XSLT stylesheet constructs + * precomputed data. 
+ */ +struct _xsltStylePreComp { + xsltElemPreCompPtr next; /* chained list */ + xsltStyleType type; /* type of the element */ + xsltTransformFunction func; /* handling function */ + xmlNodePtr inst; /* the instruction */ + + /* + * Pre computed values. + */ + + const xmlChar *stype; /* sort */ + int has_stype; /* sort */ + int number; /* sort */ + const xmlChar *order; /* sort */ + int has_order; /* sort */ + int descending; /* sort */ + const xmlChar *lang; /* sort */ + int has_lang; /* sort */ + const xmlChar *case_order; /* sort */ + int lower_first; /* sort */ + + const xmlChar *use; /* copy, element */ + int has_use; /* copy, element */ + + int noescape; /* text */ + + const xmlChar *name; /* element, attribute, pi */ + int has_name; /* element, attribute, pi */ + const xmlChar *ns; /* element */ + int has_ns; /* element */ + + const xmlChar *mode; /* apply-templates */ + const xmlChar *modeURI; /* apply-templates */ + + const xmlChar *test; /* if */ + + xsltTemplatePtr templ; /* call-template */ + + const xmlChar *select; /* sort, copy-of, value-of, apply-templates */ + + int ver11; /* document */ + const xmlChar *filename; /* document URL */ + int has_filename; /* document */ + + xsltNumberData numdata; /* number */ + + xmlXPathCompExprPtr comp; /* a precompiled XPath expression */ + xmlNsPtr *nsList; /* the namespaces in scope */ + int nsNr; /* the number of namespaces in scope */ +}; + +#endif /* XSLT_REFACTORED */ + + +/* + * The in-memory structure corresponding to an XSLT Variable + * or Param. + */ +typedef struct _xsltStackElem xsltStackElem; +typedef xsltStackElem *xsltStackElemPtr; +struct _xsltStackElem { + struct _xsltStackElem *next;/* chained list */ + xsltStylePreCompPtr comp; /* the compiled form */ + int computed; /* was the evaluation done */ + const xmlChar *name; /* the local part of the name QName */ + const xmlChar *nameURI; /* the URI part of the name QName */ + const xmlChar *select; /* the eval string */ + xmlNodePtr tree; /* the sequence constructor if no eval + string or the location */ + xmlXPathObjectPtr value; /* The value if computed */ + xmlDocPtr fragment; /* The Result Tree Fragments (needed for XSLT 1.0) + which are bound to the variable's lifetime. */ + int level; /* the depth in the tree; + -1 if persistent (e.g. a given xsl:with-param) */ + xsltTransformContextPtr context; /* The transformation context; needed to cache + the variables */ + int flags; +}; + +#ifdef XSLT_REFACTORED + +struct _xsltPrincipalStylesheetData { + /* + * Namespace dictionary for ns-prefixes and ns-names: + * TODO: Shared between stylesheets, and XPath mechanisms. + * Not used yet. + */ + xmlDictPtr namespaceDict; + /* + * Global list of in-scope namespaces. + */ + xsltPointerListPtr inScopeNamespaces; + /* + * Global list of information for [xsl:]excluded-result-prefixes. + */ + xsltPointerListPtr exclResultNamespaces; + /* + * Global list of information for [xsl:]extension-element-prefixes. + */ + xsltPointerListPtr extElemNamespaces; + xsltEffectiveNsPtr effectiveNs; +#ifdef XSLT_REFACTORED_XSLT_NSCOMP + /* + * Namespace name map to get rid of string comparison of namespace names. + */ + xsltNsMapPtr nsMap; +#endif +}; + + +#endif +/* + * Note that we added a @compCtxt field to anchor an stylesheet compilation + * context, since, due to historical reasons, various compile-time function + * take only the stylesheet as argument and not a compilation context. + */ +struct _xsltStylesheet { + /* + * The stylesheet import relation is kept as a tree. 
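+ *
+ * Illustrative sketch counting the stylesheets reachable through that
+ * tree ("s" is the compiled top-level stylesheet; siblings are chained
+ * via next, imported stylesheets hang off imports):
+ *
+ *     static int countImports(xsltStylesheetPtr s) {
+ *         xsltStylesheetPtr cur;
+ *         int n = 0;
+ *         for (cur = s; cur != NULL; cur = cur->next)
+ *             n += 1 + countImports(cur->imports);
+ *         return n;
+ *     }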
+ */ + struct _xsltStylesheet *parent; + struct _xsltStylesheet *next; + struct _xsltStylesheet *imports; + + xsltDocumentPtr docList; /* the include document list */ + + /* + * General data on the style sheet document. + */ + xmlDocPtr doc; /* the parsed XML stylesheet */ + xmlHashTablePtr stripSpaces;/* the hash table of the strip-space and + preserve space elements */ + int stripAll; /* strip-space * (1) preserve-space * (-1) */ + xmlHashTablePtr cdataSection;/* the hash table of the cdata-section */ + + /* + * Global variable or parameters. + */ + xsltStackElemPtr variables; /* linked list of param and variables */ + + /* + * Template descriptions. + */ + xsltTemplatePtr templates; /* the ordered list of templates */ + xmlHashTablePtr templatesHash; /* hash table or wherever compiled + templates information is stored */ + struct _xsltCompMatch *rootMatch; /* template based on / */ + struct _xsltCompMatch *keyMatch; /* template based on key() */ + struct _xsltCompMatch *elemMatch; /* template based on * */ + struct _xsltCompMatch *attrMatch; /* template based on @* */ + struct _xsltCompMatch *parentMatch; /* template based on .. */ + struct _xsltCompMatch *textMatch; /* template based on text() */ + struct _xsltCompMatch *piMatch; /* template based on + processing-instruction() */ + struct _xsltCompMatch *commentMatch; /* template based on comment() */ + + /* + * Namespace aliases. + * NOTE: Not used in the refactored code. + */ + xmlHashTablePtr nsAliases; /* the namespace alias hash tables */ + + /* + * Attribute sets. + */ + xmlHashTablePtr attributeSets;/* the attribute sets hash tables */ + + /* + * Namespaces. + * TODO: Eliminate this. + */ + xmlHashTablePtr nsHash; /* the set of namespaces in use: + ATTENTION: This is used for + execution of XPath expressions; unfortunately + it restricts the stylesheet to have distinct + prefixes. + TODO: We need to get rid of this. + */ + void *nsDefs; /* ATTENTION TODO: This is currently used to store + xsltExtDefPtr (in extensions.c) and + *not* xmlNsPtr. + */ + + /* + * Key definitions. + */ + void *keys; /* key definitions */ + + /* + * Output related stuff. + */ + xmlChar *method; /* the output method */ + xmlChar *methodURI; /* associated namespace if any */ + xmlChar *version; /* version string */ + xmlChar *encoding; /* encoding string */ + int omitXmlDeclaration; /* omit-xml-declaration = "yes" | "no" */ + + /* + * Number formatting. + */ + xsltDecimalFormatPtr decimalFormat; + int standalone; /* standalone = "yes" | "no" */ + xmlChar *doctypePublic; /* doctype-public string */ + xmlChar *doctypeSystem; /* doctype-system string */ + int indent; /* should output being indented */ + xmlChar *mediaType; /* media-type string */ + + /* + * Precomputed blocks. + */ + xsltElemPreCompPtr preComps;/* list of precomputed blocks */ + int warnings; /* number of warnings found at compilation */ + int errors; /* number of errors found at compilation */ + + xmlChar *exclPrefix; /* last excluded prefixes */ + xmlChar **exclPrefixTab; /* array of excluded prefixes */ + int exclPrefixNr; /* number of excluded prefixes in scope */ + int exclPrefixMax; /* size of the array */ + + void *_private; /* user defined data */ + + /* + * Extensions. + */ + xmlHashTablePtr extInfos; /* the extension data */ + int extrasNr; /* the number of extras required */ + + /* + * For keeping track of nested includes + */ + xsltDocumentPtr includes; /* points to last nested include */ + + /* + * dictionary: shared between stylesheet, context and documents. 
+ */ + xmlDictPtr dict; + /* + * precompiled attribute value templates. + */ + void *attVTs; + /* + * if namespace-alias has an alias for the default stylesheet prefix + * NOTE: Not used in the refactored code. + */ + const xmlChar *defaultAlias; + /* + * bypass pre-processing (already done) (used in imports) + */ + int nopreproc; + /* + * all document text strings were internalized + */ + int internalized; + /* + * Literal Result Element as Stylesheet c.f. section 2.3 + */ + int literal_result; + /* + * The principal stylesheet + */ + xsltStylesheetPtr principal; +#ifdef XSLT_REFACTORED + /* + * Compilation context used during compile-time. + */ + xsltCompilerCtxtPtr compCtxt; /* TODO: Change this to (void *). */ + + xsltPrincipalStylesheetDataPtr principalData; +#endif + /* + * Forwards-compatible processing + */ + int forwards_compatible; + + xmlHashTablePtr namedTemplates; /* hash table of named templates */ + + xmlXPathContextPtr xpathCtxt; +}; + +typedef struct _xsltTransformCache xsltTransformCache; +typedef xsltTransformCache *xsltTransformCachePtr; +struct _xsltTransformCache { + xmlDocPtr RVT; + int nbRVT; + xsltStackElemPtr stackItems; + int nbStackItems; +#ifdef XSLT_DEBUG_PROFILE_CACHE + int dbgCachedRVTs; + int dbgReusedRVTs; + int dbgCachedVars; + int dbgReusedVars; +#endif +}; + +/* + * The in-memory structure corresponding to an XSLT Transformation. + */ +typedef enum { + XSLT_OUTPUT_XML = 0, + XSLT_OUTPUT_HTML, + XSLT_OUTPUT_TEXT +} xsltOutputType; + +typedef void * +(*xsltNewLocaleFunc)(const xmlChar *lang, int lowerFirst); +typedef void +(*xsltFreeLocaleFunc)(void *locale); +typedef xmlChar * +(*xsltGenSortKeyFunc)(void *locale, const xmlChar *lang); + +typedef enum { + XSLT_STATE_OK = 0, + XSLT_STATE_ERROR, + XSLT_STATE_STOPPED +} xsltTransformState; + +struct _xsltTransformContext { + xsltStylesheetPtr style; /* the stylesheet used */ + xsltOutputType type; /* the type of output */ + + xsltTemplatePtr templ; /* the current template */ + int templNr; /* Nb of templates in the stack */ + int templMax; /* Size of the templtes stack */ + xsltTemplatePtr *templTab; /* the template stack */ + + xsltStackElemPtr vars; /* the current variable list */ + int varsNr; /* Nb of variable list in the stack */ + int varsMax; /* Size of the variable list stack */ + xsltStackElemPtr *varsTab; /* the variable list stack */ + int varsBase; /* the var base for current templ */ + + /* + * Extensions + */ + xmlHashTablePtr extFunctions; /* the extension functions */ + xmlHashTablePtr extElements; /* the extension elements */ + xmlHashTablePtr extInfos; /* the extension data */ + + const xmlChar *mode; /* the current mode */ + const xmlChar *modeURI; /* the current mode URI */ + + xsltDocumentPtr docList; /* the document list */ + + xsltDocumentPtr document; /* the current source document; can be NULL if an RTF */ + xmlNodePtr node; /* the current node being processed */ + xmlNodeSetPtr nodeList; /* the current node list */ + /* xmlNodePtr current; the node */ + + xmlDocPtr output; /* the resulting document */ + xmlNodePtr insert; /* the insertion node */ + + xmlXPathContextPtr xpathCtxt; /* the XPath context */ + xsltTransformState state; /* the current state */ + + /* + * Global variables + */ + xmlHashTablePtr globalVars; /* the global variables and params */ + + xmlNodePtr inst; /* the instruction in the stylesheet */ + + int xinclude; /* should XInclude be processed */ + + const char * outputFile; /* the output URI if known */ + + int profile; /* is this run profiled */ + long prof; /* 
the current profiled value */ + int profNr; /* Nb of templates in the stack */ + int profMax; /* Size of the templtaes stack */ + long *profTab; /* the profile template stack */ + + void *_private; /* user defined data */ + + int extrasNr; /* the number of extras used */ + int extrasMax; /* the number of extras allocated */ + xsltRuntimeExtraPtr extras; /* extra per runtime information */ + + xsltDocumentPtr styleList; /* the stylesheet docs list */ + void * sec; /* the security preferences if any */ + + xmlGenericErrorFunc error; /* a specific error handler */ + void * errctx; /* context for the error handler */ + + xsltSortFunc sortfunc; /* a ctxt specific sort routine */ + + /* + * handling of temporary Result Value Tree + * (XSLT 1.0 term: "Result Tree Fragment") + */ + xmlDocPtr tmpRVT; /* list of RVT without persistance */ + xmlDocPtr persistRVT; /* list of persistant RVTs */ + int ctxtflags; /* context processing flags */ + + /* + * Speed optimization when coalescing text nodes + */ + const xmlChar *lasttext; /* last text node content */ + int lasttsize; /* last text node size */ + int lasttuse; /* last text node use */ + /* + * Per Context Debugging + */ + int debugStatus; /* the context level debug status */ + unsigned long* traceCode; /* pointer to the variable holding the mask */ + + int parserOptions; /* parser options xmlParserOption */ + + /* + * dictionary: shared between stylesheet, context and documents. + */ + xmlDictPtr dict; + xmlDocPtr tmpDoc; /* Obsolete; not used in the library. */ + /* + * all document text strings are internalized + */ + int internalized; + int nbKeys; + int hasTemplKeyPatterns; + xsltTemplatePtr currentTemplateRule; /* the Current Template Rule */ + xmlNodePtr initialContextNode; + xmlDocPtr initialContextDoc; + xsltTransformCachePtr cache; + void *contextVariable; /* the current variable item */ + xmlDocPtr localRVT; /* list of local tree fragments; will be freed when + the instruction which created the fragment + exits */ + xmlDocPtr localRVTBase; /* Obsolete */ + int keyInitLevel; /* Needed to catch recursive keys issues */ + int depth; /* Needed to catch recursions */ + int maxTemplateDepth; + int maxTemplateVars; + unsigned long opLimit; + unsigned long opCount; + int sourceDocDirty; + unsigned long currentId; /* For generate-id() */ + + xsltNewLocaleFunc newLocale; + xsltFreeLocaleFunc freeLocale; + xsltGenSortKeyFunc genSortKey; +}; + +/** + * CHECK_STOPPED: + * + * Macro to check if the XSLT processing should be stopped. + * Will return from the function. + */ +#define CHECK_STOPPED if (ctxt->state == XSLT_STATE_STOPPED) return; + +/** + * CHECK_STOPPEDE: + * + * Macro to check if the XSLT processing should be stopped. + * Will goto the error: label. + */ +#define CHECK_STOPPEDE if (ctxt->state == XSLT_STATE_STOPPED) goto error; + +/** + * CHECK_STOPPED0: + * + * Macro to check if the XSLT processing should be stopped. + * Will return from the function with a 0 value. + */ +#define CHECK_STOPPED0 if (ctxt->state == XSLT_STATE_STOPPED) return(0); + +/* + * The macro XML_CAST_FPTR is a hack to avoid a gcc warning about + * possible incompatibilities between function pointers and object + * pointers. It is defined in libxml/hash.h within recent versions + * of libxml2, but is put here for compatibility. 
+ */ +#ifndef XML_CAST_FPTR +/** + * XML_CAST_FPTR: + * @fptr: pointer to a function + * + * Macro to do a casting from an object pointer to a + * function pointer without encountering a warning from + * gcc + * + * #define XML_CAST_FPTR(fptr) (*(void **)(&fptr)) + * This macro violated ISO C aliasing rules (gcc4 on s390 broke) + * so it is disabled now + */ + +#define XML_CAST_FPTR(fptr) fptr +#endif +/* + * Functions associated to the internal types +xsltDecimalFormatPtr xsltDecimalFormatGetByName(xsltStylesheetPtr sheet, + xmlChar *name); + */ +XSLTPUBFUN xsltStylesheetPtr XSLTCALL + xsltNewStylesheet (void); +XSLTPUBFUN xsltStylesheetPtr XSLTCALL + xsltParseStylesheetFile (const xmlChar* filename); +XSLTPUBFUN void XSLTCALL + xsltFreeStylesheet (xsltStylesheetPtr style); +XSLTPUBFUN int XSLTCALL + xsltIsBlank (xmlChar *str); +XSLTPUBFUN void XSLTCALL + xsltFreeStackElemList (xsltStackElemPtr elem); +XSLTPUBFUN xsltDecimalFormatPtr XSLTCALL + xsltDecimalFormatGetByName(xsltStylesheetPtr style, + xmlChar *name); +XSLTPUBFUN xsltDecimalFormatPtr XSLTCALL + xsltDecimalFormatGetByQName(xsltStylesheetPtr style, + const xmlChar *nsUri, + const xmlChar *name); + +XSLTPUBFUN xsltStylesheetPtr XSLTCALL + xsltParseStylesheetProcess(xsltStylesheetPtr ret, + xmlDocPtr doc); +XSLTPUBFUN void XSLTCALL + xsltParseStylesheetOutput(xsltStylesheetPtr style, + xmlNodePtr cur); +XSLTPUBFUN xsltStylesheetPtr XSLTCALL + xsltParseStylesheetDoc (xmlDocPtr doc); +XSLTPUBFUN xsltStylesheetPtr XSLTCALL + xsltParseStylesheetImportedDoc(xmlDocPtr doc, + xsltStylesheetPtr style); +XSLTPUBFUN int XSLTCALL + xsltParseStylesheetUser(xsltStylesheetPtr style, + xmlDocPtr doc); +XSLTPUBFUN xsltStylesheetPtr XSLTCALL + xsltLoadStylesheetPI (xmlDocPtr doc); +XSLTPUBFUN void XSLTCALL + xsltNumberFormat (xsltTransformContextPtr ctxt, + xsltNumberDataPtr data, + xmlNodePtr node); +XSLTPUBFUN xmlXPathError XSLTCALL + xsltFormatNumberConversion(xsltDecimalFormatPtr self, + xmlChar *format, + double number, + xmlChar **result); + +XSLTPUBFUN void XSLTCALL + xsltParseTemplateContent(xsltStylesheetPtr style, + xmlNodePtr templ); +XSLTPUBFUN int XSLTCALL + xsltAllocateExtra (xsltStylesheetPtr style); +XSLTPUBFUN int XSLTCALL + xsltAllocateExtraCtxt (xsltTransformContextPtr ctxt); +/* + * Extra functions for Result Value Trees + */ +XSLTPUBFUN xmlDocPtr XSLTCALL + xsltCreateRVT (xsltTransformContextPtr ctxt); +XSLTPUBFUN int XSLTCALL + xsltRegisterTmpRVT (xsltTransformContextPtr ctxt, + xmlDocPtr RVT); +XSLTPUBFUN int XSLTCALL + xsltRegisterLocalRVT (xsltTransformContextPtr ctxt, + xmlDocPtr RVT); +XSLTPUBFUN int XSLTCALL + xsltRegisterPersistRVT (xsltTransformContextPtr ctxt, + xmlDocPtr RVT); +XSLTPUBFUN int XSLTCALL + xsltExtensionInstructionResultRegister( + xsltTransformContextPtr ctxt, + xmlXPathObjectPtr obj); +XSLTPUBFUN int XSLTCALL + xsltExtensionInstructionResultFinalize( + xsltTransformContextPtr ctxt); +XSLTPUBFUN int XSLTCALL + xsltFlagRVTs( + xsltTransformContextPtr ctxt, + xmlXPathObjectPtr obj, + int val); +XSLTPUBFUN void XSLTCALL + xsltFreeRVTs (xsltTransformContextPtr ctxt); +XSLTPUBFUN void XSLTCALL + xsltReleaseRVT (xsltTransformContextPtr ctxt, + xmlDocPtr RVT); +/* + * Extra functions for Attribute Value Templates + */ +XSLTPUBFUN void XSLTCALL + xsltCompileAttr (xsltStylesheetPtr style, + xmlAttrPtr attr); +XSLTPUBFUN xmlChar * XSLTCALL + xsltEvalAVT (xsltTransformContextPtr ctxt, + void *avt, + xmlNodePtr node); +XSLTPUBFUN void XSLTCALL + xsltFreeAVTList (void *avt); + +/* + * Extra function for successful 
xsltCleanupGlobals / xsltInit sequence. + */ + +XSLTPUBFUN void XSLTCALL + xsltUninit (void); + +/************************************************************************ + * * + * Compile-time functions for *internal* use only * + * * + ************************************************************************/ + +#ifdef XSLT_REFACTORED +XSLTPUBFUN void XSLTCALL + xsltParseSequenceConstructor( + xsltCompilerCtxtPtr cctxt, + xmlNodePtr start); +XSLTPUBFUN int XSLTCALL + xsltParseAnyXSLTElem (xsltCompilerCtxtPtr cctxt, + xmlNodePtr elem); +#ifdef XSLT_REFACTORED_XSLT_NSCOMP +XSLTPUBFUN int XSLTCALL + xsltRestoreDocumentNamespaces( + xsltNsMapPtr ns, + xmlDocPtr doc); +#endif +#endif /* XSLT_REFACTORED */ + +/************************************************************************ + * * + * Transformation-time functions for *internal* use only * + * * + ************************************************************************/ +XSLTPUBFUN int XSLTCALL + xsltInitCtxtKey (xsltTransformContextPtr ctxt, + xsltDocumentPtr doc, + xsltKeyDefPtr keyd); +XSLTPUBFUN int XSLTCALL + xsltInitAllDocKeys (xsltTransformContextPtr ctxt); +#ifdef __cplusplus +} +#endif + +#endif /* __XML_XSLT_H__ */ +