diff --git a/.gitattributes b/.gitattributes
index 9751e0a976b8e47604251f2017e83f1ff049ff6f..842ec444dfb9c819ac92288398382e96174b7523 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -66,3 +66,9 @@ venv/lib/python3.10/site-packages/scipy/misc/face.dat filter=lfs diff=lfs merge=
venv/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/zstandard/backend_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/zstandard/_cffi.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58a8d086ff616b2ef75ab0d788d990e749f96e8d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py
@@ -0,0 +1,18 @@
+from . import cloudpickle
+from .cloudpickle import * # noqa
+
+__doc__ = cloudpickle.__doc__
+
+__version__ = "3.0.0"
+
+__all__ = [ # noqa
+ "__version__",
+ "Pickler",
+ "CloudPickler",
+ "dumps",
+ "loads",
+ "dump",
+ "load",
+ "register_pickle_by_value",
+ "unregister_pickle_by_value",
+]
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17fc7e4f6b36a98e460effda2bfb86b40f601912
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18c2f1a15d3959549005429902d0939f47737a0d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6013017ae49a6cdabc58a1f9c8407b9a1157a5ea
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb43a9676bbb11bdecf187e7f6cde51f793ff3fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py
@@ -0,0 +1,1487 @@
+"""Pickler class to extend the standard pickle.Pickler functionality
+
+The main objective is to make it natural to perform distributed computing on
+clusters (such as PySpark, Dask, Ray...) with interactively defined code
+(functions, classes, ...) written in notebooks or console.
+
+In particular this pickler adds the following features:
+- serialize interactively-defined or locally-defined functions, classes,
+ enums, typevars, lambdas and nested functions to compiled byte code;
+- deal with some other non-serializable objects in an ad-hoc manner where
+ applicable.
+
+This pickler is therefore meant to be used for the communication between short
+lived Python processes running the same version of Python and libraries. In
+particular, it is not meant to be used for long term storage of Python objects.
+
+It does not include an unpickler, as standard Python unpickling suffices.
+
+This module was extracted from the `cloud` package, developed by PiCloud, Inc.
+
+Copyright (c) 2012-now, CloudPickle developers and contributors.
+Copyright (c) 2012, Regents of the University of California.
+Copyright (c) 2009 PiCloud, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the University of California, Berkeley nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import _collections_abc
+from collections import ChainMap, OrderedDict
+import abc
+import builtins
+import copyreg
+import dataclasses
+import dis
+from enum import Enum
+import io
+import itertools
+import logging
+import opcode
+import pickle
+from pickle import _getattribute
+import platform
+import struct
+import sys
+import threading
+import types
+import typing
+import uuid
+import warnings
+import weakref
+
+# The following import is required to be imported in the cloudpickle
+# namespace to be able to load pickle files generated with older versions of
+# cloudpickle. See: tests/test_backward_compat.py
+from types import CellType # noqa: F401
+
+
+# cloudpickle is meant for inter-process communication: we expect all
+# communicating processes to run the same Python version, hence we favor
+# communication speed over compatibility:
+DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+# Names of modules whose resources should be treated as dynamic.
+_PICKLE_BY_VALUE_MODULES = set()
+
+# Track the provenance of reconstructed dynamic classes to make it possible to
+# reconstruct instances from the matching singleton class definition when
+# appropriate and preserve the usual "isinstance" semantics of Python objects.
+_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
+_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
+_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
+
+PYPY = platform.python_implementation() == "PyPy"
+
+builtin_code_type = None
+if PYPY:
+ # builtin-code objects only exist in pypy
+ builtin_code_type = type(float.__new__.__code__)
+
+_extract_code_globals_cache = weakref.WeakKeyDictionary()
+
+
+def _get_or_create_tracker_id(class_def):
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
+ class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
+ if class_tracker_id is None:
+ class_tracker_id = uuid.uuid4().hex
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+ _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
+ return class_tracker_id
+
+
+def _lookup_class_or_track(class_tracker_id, class_def):
+ if class_tracker_id is not None:
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
+ class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
+ class_tracker_id, class_def
+ )
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+ return class_def
+
+
+def register_pickle_by_value(module):
+ """Register a module to make it functions and classes picklable by value.
+
+ By default, functions and classes that are attributes of an importable
+    module are to be pickled by reference, that is, by relying on re-importing
+ the attribute from the module at load time.
+
+ If `register_pickle_by_value(module)` is called, all its functions and
+ classes are subsequently to be pickled by value, meaning that they can
+ be loaded in Python processes where the module is not importable.
+
+ This is especially useful when developing a module in a distributed
+ execution environment: restarting the client Python process with the new
+    source code is enough; there is no need to re-install the new version
+ of the module on all the worker nodes nor to restart the workers.
+
+ Note: this feature is considered experimental. See the cloudpickle
+ README.md file for more details and limitations.
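+
+    A minimal usage sketch follows; ``mymodule`` and ``mymodule.some_function``
+    are hypothetical placeholders for a locally importable module and one of
+    its attributes, and the example is illustrative only:
+
+    >>> import cloudpickle, mymodule                          # doctest: +SKIP
+    >>> cloudpickle.register_pickle_by_value(mymodule)        # doctest: +SKIP
+    >>> payload = cloudpickle.dumps(mymodule.some_function)   # doctest: +SKIP
+    >>> # `payload` can now be loaded in a process where `mymodule` is not
+    >>> # installed.
+    >>> cloudpickle.unregister_pickle_by_value(mymodule)      # doctest: +SKIP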
+ """
+ if not isinstance(module, types.ModuleType):
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
+ # In the future, cloudpickle may need a way to access any module registered
+ # for pickling by value in order to introspect relative imports inside
+ # functions pickled by value. (see
+ # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
+ # This access can be ensured by checking that module is present in
+ # sys.modules at registering time and assuming that it will still be in
+ # there when accessed during pickling. Another alternative would be to
+ # store a weakref to the module. Even though cloudpickle does not implement
+ # this introspection yet, in order to avoid a possible breaking change
+ # later, we still enforce the presence of module inside sys.modules.
+ if module.__name__ not in sys.modules:
+ raise ValueError(
+ f"{module} was not imported correctly, have you used an "
+ "`import` statement to access it?"
+ )
+ _PICKLE_BY_VALUE_MODULES.add(module.__name__)
+
+
+def unregister_pickle_by_value(module):
+ """Unregister that the input module should be pickled by value."""
+ if not isinstance(module, types.ModuleType):
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
+ if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
+ raise ValueError(f"{module} is not registered for pickle by value")
+ else:
+ _PICKLE_BY_VALUE_MODULES.remove(module.__name__)
+
+
+def list_registry_pickle_by_value():
+ return _PICKLE_BY_VALUE_MODULES.copy()
+
+
+def _is_registered_pickle_by_value(module):
+ module_name = module.__name__
+ if module_name in _PICKLE_BY_VALUE_MODULES:
+ return True
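+    # Also walk up the package hierarchy: registering a parent package
+    # (e.g. "pkg") covers its submodules (e.g. "pkg.sub.mod") as well.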
+ while True:
+ parent_name = module_name.rsplit(".", 1)[0]
+ if parent_name == module_name:
+ break
+ if parent_name in _PICKLE_BY_VALUE_MODULES:
+ return True
+ module_name = parent_name
+ return False
+
+
+def _whichmodule(obj, name):
+ """Find the module an object belongs to.
+
+ This function differs from ``pickle.whichmodule`` in two ways:
+ - it does not mangle the cases where obj's module is __main__ and obj was
+ not found in any module.
+ - Errors arising during module introspection are ignored, as those errors
+ are considered unwanted side effects.
+ """
+ module_name = getattr(obj, "__module__", None)
+
+ if module_name is not None:
+ return module_name
+ # Protect the iteration by using a copy of sys.modules against dynamic
+ # modules that trigger imports of other modules upon calls to getattr or
+ # other threads importing at the same time.
+ for module_name, module in sys.modules.copy().items():
+ # Some modules such as coverage can inject non-module objects inside
+ # sys.modules
+ if (
+ module_name == "__main__"
+ or module is None
+ or not isinstance(module, types.ModuleType)
+ ):
+ continue
+ try:
+ if _getattribute(module, name)[0] is obj:
+ return module_name
+ except Exception:
+ pass
+ return None
+
+
+def _should_pickle_by_reference(obj, name=None):
+ """Test whether an function or a class should be pickled by reference
+
+    Pickling by reference means that the object (typically a function or a
+ class) is an attribute of a module that is assumed to be importable in the
+ target Python environment. Loading will therefore rely on importing the
+ module and then calling `getattr` on it to access the function or class.
+
+ Pickling by reference is the only option to pickle functions and classes
+ in the standard library. In cloudpickle the alternative option is to
+    pickle by value (for instance for interactively or locally defined
+    functions and classes, or for attributes of modules that have been
+    explicitly registered to be pickled by value).
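+
+    A small illustration, consistent with the rules above (``os.path.join``
+    stands in for any attribute of an importable module, and the lambda for
+    any dynamically defined function):
+
+    >>> import os
+    >>> _should_pickle_by_reference(os.path.join)
+    True
+    >>> _should_pickle_by_reference(lambda x: x)
+    False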
+ """
+ if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
+ module_and_name = _lookup_module_and_qualname(obj, name=name)
+ if module_and_name is None:
+ return False
+ module, name = module_and_name
+ return not _is_registered_pickle_by_value(module)
+
+ elif isinstance(obj, types.ModuleType):
+ # We assume that sys.modules is primarily used as a cache mechanism for
+        # the Python import machinery. Checking if a module has been added
+        # to sys.modules is therefore a cheap and simple heuristic to tell us
+ # whether we can assume that a given module could be imported by name
+ # in another Python process.
+ if _is_registered_pickle_by_value(obj):
+ return False
+ return obj.__name__ in sys.modules
+ else:
+ raise TypeError(
+ "cannot check importability of {} instances".format(type(obj).__name__)
+ )
+
+
+def _lookup_module_and_qualname(obj, name=None):
+ if name is None:
+ name = getattr(obj, "__qualname__", None)
+ if name is None: # pragma: no cover
+ # This used to be needed for Python 2.7 support but is probably not
+ # needed anymore. However we keep the __name__ introspection in case
+ # users of cloudpickle rely on this old behavior for unknown reasons.
+ name = getattr(obj, "__name__", None)
+
+ module_name = _whichmodule(obj, name)
+
+ if module_name is None:
+ # In this case, obj.__module__ is None AND obj was not found in any
+ # imported module. obj is thus treated as dynamic.
+ return None
+
+ if module_name == "__main__":
+ return None
+
+ # Note: if module_name is in sys.modules, the corresponding module is
+ # assumed importable at unpickling time. See #357
+ module = sys.modules.get(module_name, None)
+ if module is None:
+ # The main reason why obj's module would not be imported is that this
+ # module has been dynamically created, using for example
+ # types.ModuleType. The other possibility is that module was removed
+ # from sys.modules after obj was created/imported. But this case is not
+ # supported, as the standard pickle does not support it either.
+ return None
+
+ try:
+ obj2, parent = _getattribute(module, name)
+ except AttributeError:
+ # obj was not found inside the module it points to
+ return None
+ if obj2 is not obj:
+ return None
+ return module, name
+
+
+def _extract_code_globals(co):
+ """Find all globals names read or written to by codeblock co."""
+ out_names = _extract_code_globals_cache.get(co)
+ if out_names is None:
+ # We use a dict with None values instead of a set to get a
+ # deterministic order and avoid introducing non-deterministic pickle
+        # bytes as a result.
+ out_names = {name: None for name in _walk_global_ops(co)}
+
+ # Declaring a function inside another one using the "def ..." syntax
+        # generates a constant code object corresponding to that of the
+        # nested function. As the nested function may itself need global
+        # variables, we need to introspect its code, extract its globals (by
+        # looking for code objects in its co_consts attribute) and add the
+        # result to out_names.
+ if co.co_consts:
+ for const in co.co_consts:
+ if isinstance(const, types.CodeType):
+ out_names.update(_extract_code_globals(const))
+
+ _extract_code_globals_cache[co] = out_names
+
+ return out_names
+
+
+def _find_imported_submodules(code, top_level_dependencies):
+ """Find currently imported submodules used by a function.
+
+ Submodules used by a function need to be detected and referenced for the
+ function to work correctly at depickling time. Because submodules can be
+    referenced as attributes of their parent package (``package.submodule``), we
+    need a special introspection technique that does not rely on GLOBAL-related
+    opcodes to find references to them in a code object.
+
+ Example:
+ ```
+ import concurrent.futures
+ import cloudpickle
+ def func():
+ x = concurrent.futures.ThreadPoolExecutor
+ if __name__ == '__main__':
+ cloudpickle.dumps(func)
+ ```
+ The globals extracted by cloudpickle in the function's state include the
+ concurrent package, but not its submodule (here, concurrent.futures), which
+    is the module used by func. _find_imported_submodules will detect the
+    usage of concurrent.futures. Saving this module alongside func will ensure
+    that calling func once depickled does not fail due to concurrent.futures
+    not being imported.
+ """
+
+ subimports = []
+ # check if any known dependency is an imported package
+ for x in top_level_dependencies:
+ if (
+ isinstance(x, types.ModuleType)
+ and hasattr(x, "__package__")
+ and x.__package__
+ ):
+ # check if the package has any currently loaded sub-imports
+ prefix = x.__name__ + "."
+ # A concurrent thread could mutate sys.modules,
+ # make sure we iterate over a copy to avoid exceptions
+ for name in list(sys.modules):
+ # Older versions of pytest will add a "None" module to
+ # sys.modules.
+ if name is not None and name.startswith(prefix):
+ # check whether the function can address the sub-module
+ tokens = set(name[len(prefix) :].split("."))
+ if not tokens - set(code.co_names):
+ subimports.append(sys.modules[name])
+ return subimports
+
+
+# relevant opcodes
+STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
+DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
+LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
+GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
+HAVE_ARGUMENT = dis.HAVE_ARGUMENT
+EXTENDED_ARG = dis.EXTENDED_ARG
+
+
+_BUILTIN_TYPE_NAMES = {}
+for k, v in types.__dict__.items():
+ if type(v) is type:
+ _BUILTIN_TYPE_NAMES[v] = k
+
+
+def _builtin_type(name):
+ if name == "ClassType": # pragma: no cover
+ # Backward compat to load pickle files generated with cloudpickle
+ # < 1.3 even if loading pickle files from older versions is not
+ # officially supported.
+ return type
+ return getattr(types, name)
+
+
+def _walk_global_ops(code):
+ """Yield referenced name for global-referencing instructions in code."""
+ for instr in dis.get_instructions(code):
+ op = instr.opcode
+ if op in GLOBAL_OPS:
+ yield instr.argval
+
+
+def _extract_class_dict(cls):
+ """Retrieve a copy of the dict of a class without the inherited method."""
+ clsdict = dict(cls.__dict__) # copy dict proxy to a dict
+ if len(cls.__bases__) == 1:
+ inherited_dict = cls.__bases__[0].__dict__
+ else:
+ inherited_dict = {}
+ for base in reversed(cls.__bases__):
+ inherited_dict.update(base.__dict__)
+ to_remove = []
+ for name, value in clsdict.items():
+ try:
+ base_value = inherited_dict[name]
+ if value is base_value:
+ to_remove.append(name)
+ except KeyError:
+ pass
+ for name in to_remove:
+ clsdict.pop(name)
+ return clsdict
+
+
+def is_tornado_coroutine(func):
+ """Return whether `func` is a Tornado coroutine function.
+
+ Running coroutines are not supported.
+ """
+ warnings.warn(
+ "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
+ "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
+ "directly instead.",
+ category=DeprecationWarning,
+ )
+ if "tornado.gen" not in sys.modules:
+ return False
+ gen = sys.modules["tornado.gen"]
+ if not hasattr(gen, "is_coroutine_function"):
+ # Tornado version is too old
+ return False
+ return gen.is_coroutine_function(func)
+
+
+def subimport(name):
+    # We cannot simply do `return __import__(name)`: indeed, if ``name`` is
+ # the name of a submodule, __import__ will return the top-level root module
+ # of this submodule. For instance, __import__('os.path') returns the `os`
+ # module.
+ __import__(name)
+ return sys.modules[name]
+
+
+def dynamic_subimport(name, vars):
+ mod = types.ModuleType(name)
+ mod.__dict__.update(vars)
+ mod.__dict__["__builtins__"] = builtins.__dict__
+ return mod
+
+
+def _get_cell_contents(cell):
+ try:
+ return cell.cell_contents
+ except ValueError:
+ # Handle empty cells explicitly with a sentinel value.
+ return _empty_cell_value
+
+
+def instance(cls):
+ """Create a new instance of a class.
+
+ Parameters
+ ----------
+ cls : type
+ The class to create an instance of.
+
+ Returns
+ -------
+ instance : cls
+ A new instance of ``cls``.
+ """
+ return cls()
+
+
+@instance
+class _empty_cell_value:
+ """Sentinel for empty closures."""
+
+ @classmethod
+ def __reduce__(cls):
+ return cls.__name__
+
+
+def _make_function(code, globals, name, argdefs, closure):
+ # Setting __builtins__ in globals is needed for nogil CPython.
+ globals["__builtins__"] = __builtins__
+ return types.FunctionType(code, globals, name, argdefs, closure)
+
+
+def _make_empty_cell():
+ if False:
+ # trick the compiler into creating an empty cell in our lambda
+ cell = None
+ raise AssertionError("this route should not be executed")
+
+ return (lambda: cell).__closure__[0]
+
+
+def _make_cell(value=_empty_cell_value):
+ cell = _make_empty_cell()
+ if value is not _empty_cell_value:
+ cell.cell_contents = value
+ return cell
+
+
+def _make_skeleton_class(
+ type_constructor, name, bases, type_kwargs, class_tracker_id, extra
+):
+ """Build dynamic class with an empty __dict__ to be filled once memoized
+
+    If class_tracker_id is not None, try to look up an existing class definition
+ matching that id. If none is found, track a newly reconstructed class
+ definition under that id so that other instances stemming from the same
+ class id will also reuse this class definition.
+
+ The "extra" variable is meant to be a dict (or None) that can be used for
+    forward compatibility should the need arise.
+ """
+ skeleton_class = types.new_class(
+ name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
+ )
+ return _lookup_class_or_track(class_tracker_id, skeleton_class)
+
+
+def _make_skeleton_enum(
+ bases, name, qualname, members, module, class_tracker_id, extra
+):
+ """Build dynamic enum with an empty __dict__ to be filled once memoized
+
+ The creation of the enum class is inspired by the code of
+ EnumMeta._create_.
+
+    If class_tracker_id is not None, try to look up an existing enum definition
+ matching that id. If none is found, track a newly reconstructed enum
+ definition under that id so that other instances stemming from the same
+ class id will also reuse this enum definition.
+
+ The "extra" variable is meant to be a dict (or None) that can be used for
+    forward compatibility should the need arise.
+ """
+ # enums always inherit from their base Enum class at the last position in
+ # the list of base classes:
+ enum_base = bases[-1]
+ metacls = enum_base.__class__
+ classdict = metacls.__prepare__(name, bases)
+
+ for member_name, member_value in members.items():
+ classdict[member_name] = member_value
+ enum_class = metacls.__new__(metacls, name, bases, classdict)
+ enum_class.__module__ = module
+ enum_class.__qualname__ = qualname
+
+ return _lookup_class_or_track(class_tracker_id, enum_class)
+
+
+def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
+ tv = typing.TypeVar(
+ name,
+ *constraints,
+ bound=bound,
+ covariant=covariant,
+ contravariant=contravariant,
+ )
+ return _lookup_class_or_track(class_tracker_id, tv)
+
+
+def _decompose_typevar(obj):
+ return (
+ obj.__name__,
+ obj.__bound__,
+ obj.__constraints__,
+ obj.__covariant__,
+ obj.__contravariant__,
+ _get_or_create_tracker_id(obj),
+ )
+
+
+def _typevar_reduce(obj):
+    # TypeVar instances require the module information, which is why we
+    # are not using _should_pickle_by_reference directly
+ module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
+
+ if module_and_name is None:
+ return (_make_typevar, _decompose_typevar(obj))
+ elif _is_registered_pickle_by_value(module_and_name[0]):
+ return (_make_typevar, _decompose_typevar(obj))
+
+ return (getattr, module_and_name)
+
+
+def _get_bases(typ):
+ if "__orig_bases__" in getattr(typ, "__dict__", {}):
+ # For generic types (see PEP 560)
+ # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
+        # correct. Subclasses of a fully-parameterized generic class do not
+ # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
+ # will return True because it's defined in the base class.
+ bases_attr = "__orig_bases__"
+ else:
+ # For regular class objects
+ bases_attr = "__bases__"
+ return getattr(typ, bases_attr)
+
+
+def _make_dict_keys(obj, is_ordered=False):
+ if is_ordered:
+ return OrderedDict.fromkeys(obj).keys()
+ else:
+ return dict.fromkeys(obj).keys()
+
+
+def _make_dict_values(obj, is_ordered=False):
+ if is_ordered:
+ return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
+ else:
+ return {i: _ for i, _ in enumerate(obj)}.values()
+
+
+def _make_dict_items(obj, is_ordered=False):
+ if is_ordered:
+ return OrderedDict(obj).items()
+ else:
+ return obj.items()
+
+
+# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
+# -------------------------------------------------
+
+
+def _class_getnewargs(obj):
+ type_kwargs = {}
+ if "__module__" in obj.__dict__:
+ type_kwargs["__module__"] = obj.__module__
+
+ __dict__ = obj.__dict__.get("__dict__", None)
+ if isinstance(__dict__, property):
+ type_kwargs["__dict__"] = __dict__
+
+ return (
+ type(obj),
+ obj.__name__,
+ _get_bases(obj),
+ type_kwargs,
+ _get_or_create_tracker_id(obj),
+ None,
+ )
+
+
+def _enum_getnewargs(obj):
+ members = {e.name: e.value for e in obj}
+ return (
+ obj.__bases__,
+ obj.__name__,
+ obj.__qualname__,
+ members,
+ obj.__module__,
+ _get_or_create_tracker_id(obj),
+ None,
+ )
+
+
+# COLLECTION OF OBJECTS RECONSTRUCTORS
+# ------------------------------------
+def _file_reconstructor(retval):
+ return retval
+
+
+# COLLECTION OF OBJECTS STATE GETTERS
+# -----------------------------------
+
+
+def _function_getstate(func):
+ # - Put func's dynamic attributes (stored in func.__dict__) in state. These
+ # attributes will be restored at unpickling time using
+ # f.__dict__.update(state)
+ # - Put func's members into slotstate. Such attributes will be restored at
+ # unpickling time by iterating over slotstate and calling setattr(func,
+ # slotname, slotvalue)
+ slotstate = {
+ "__name__": func.__name__,
+ "__qualname__": func.__qualname__,
+ "__annotations__": func.__annotations__,
+ "__kwdefaults__": func.__kwdefaults__,
+ "__defaults__": func.__defaults__,
+ "__module__": func.__module__,
+ "__doc__": func.__doc__,
+ "__closure__": func.__closure__,
+ }
+
+ f_globals_ref = _extract_code_globals(func.__code__)
+ f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}
+
+ if func.__closure__ is not None:
+ closure_values = list(map(_get_cell_contents, func.__closure__))
+ else:
+ closure_values = ()
+
+ # Extract currently-imported submodules used by func. Storing these modules
+    # in a _cloudpickle_submodules attribute of the object's state will
+ # trigger the side effect of importing these modules at unpickling time
+ # (which is necessary for func to work correctly once depickled)
+ slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
+ func.__code__, itertools.chain(f_globals.values(), closure_values)
+ )
+ slotstate["__globals__"] = f_globals
+
+ state = func.__dict__
+ return state, slotstate
+
+
+def _class_getstate(obj):
+ clsdict = _extract_class_dict(obj)
+ clsdict.pop("__weakref__", None)
+
+ if issubclass(type(obj), abc.ABCMeta):
+ # If obj is an instance of an ABCMeta subclass, don't pickle the
+ # cache/negative caches populated during isinstance/issubclass
+ # checks, but pickle the list of registered subclasses of obj.
+ clsdict.pop("_abc_cache", None)
+ clsdict.pop("_abc_negative_cache", None)
+ clsdict.pop("_abc_negative_cache_version", None)
+ registry = clsdict.pop("_abc_registry", None)
+ if registry is None:
+ # The abc caches and registered subclasses of a
+ # class are bundled into the single _abc_impl attribute
+ clsdict.pop("_abc_impl", None)
+ (registry, _, _, _) = abc._get_dump(obj)
+
+ clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
+ else:
+ # In the above if clause, registry is a set of weakrefs -- in
+ # this case, registry is a WeakSet
+ clsdict["_abc_impl"] = [type_ for type_ in registry]
+
+ if "__slots__" in clsdict:
+ # pickle string length optimization: member descriptors of obj are
+ # created automatically from obj's __slots__ attribute, no need to
+ # save them in obj's state
+ if isinstance(obj.__slots__, str):
+ clsdict.pop(obj.__slots__)
+ else:
+ for k in obj.__slots__:
+ clsdict.pop(k, None)
+
+ clsdict.pop("__dict__", None) # unpicklable property object
+
+ return (clsdict, {})
+
+
+def _enum_getstate(obj):
+ clsdict, slotstate = _class_getstate(obj)
+
+ members = {e.name: e.value for e in obj}
+ # Cleanup the clsdict that will be passed to _make_skeleton_enum:
+ # Those attributes are already handled by the metaclass.
+ for attrname in [
+ "_generate_next_value_",
+ "_member_names_",
+ "_member_map_",
+ "_member_type_",
+ "_value2member_map_",
+ ]:
+ clsdict.pop(attrname, None)
+ for member in members:
+ clsdict.pop(member)
+ # Special handling of Enum subclasses
+ return clsdict, slotstate
+
+
+# COLLECTIONS OF OBJECTS REDUCERS
+# -------------------------------
+# A reducer is a function taking a single argument (obj), and that returns a
+# tuple with all the necessary data to re-construct obj. Apart from a few
+# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
+# correctly pickle an object.
+# While many built-in objects (Exception objects, instances of the "object"
+# class, etc.) are shipped with their own built-in reducer (invoked using
+# obj.__reduce__), some do not. The following methods were created to "fill
+# these holes".
+
+
+def _code_reduce(obj):
+ """code object reducer."""
+ # If you are not sure about the order of arguments, take a look at help
+ # of the specific type from types, for example:
+ # >>> from types import CodeType
+ # >>> help(CodeType)
+ if hasattr(obj, "co_exceptiontable"):
+ # Python 3.11 and later: there are some new attributes
+ # related to the enhanced exceptions.
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ obj.co_filename,
+ obj.co_name,
+ obj.co_qualname,
+ obj.co_firstlineno,
+ obj.co_linetable,
+ obj.co_exceptiontable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_linetable"):
+ # Python 3.10 and later: obj.co_lnotab is deprecated and constructor
+ # expects obj.co_linetable instead.
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ obj.co_filename,
+ obj.co_name,
+ obj.co_firstlineno,
+ obj.co_linetable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_nmeta"): # pragma: no cover
+ # "nogil" Python: modified attributes from 3.9
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_framesize,
+ obj.co_ndefaultargs,
+ obj.co_nmeta,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_varnames,
+ obj.co_filename,
+ obj.co_name,
+ obj.co_firstlineno,
+ obj.co_lnotab,
+ obj.co_exc_handlers,
+ obj.co_jump_table,
+ obj.co_freevars,
+ obj.co_cellvars,
+ obj.co_free2reg,
+ obj.co_cell2reg,
+ )
+ else:
+ # Backward compat for 3.8 and 3.9
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ obj.co_filename,
+ obj.co_name,
+ obj.co_firstlineno,
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ return types.CodeType, args
+
+
+def _cell_reduce(obj):
+ """Cell (containing values of a function's free variables) reducer."""
+ try:
+ obj.cell_contents
+ except ValueError: # cell is empty
+ return _make_empty_cell, ()
+ else:
+ return _make_cell, (obj.cell_contents,)
+
+
+def _classmethod_reduce(obj):
+ orig_func = obj.__func__
+ return type(obj), (orig_func,)
+
+
+def _file_reduce(obj):
+ """Save a file."""
+ import io
+
+ if not hasattr(obj, "name") or not hasattr(obj, "mode"):
+ raise pickle.PicklingError(
+ "Cannot pickle files that do not map to an actual file"
+ )
+ if obj is sys.stdout:
+ return getattr, (sys, "stdout")
+ if obj is sys.stderr:
+ return getattr, (sys, "stderr")
+ if obj is sys.stdin:
+ raise pickle.PicklingError("Cannot pickle standard input")
+ if obj.closed:
+ raise pickle.PicklingError("Cannot pickle closed files")
+ if hasattr(obj, "isatty") and obj.isatty():
+ raise pickle.PicklingError("Cannot pickle files that map to tty objects")
+ if "r" not in obj.mode and "+" not in obj.mode:
+ raise pickle.PicklingError(
+ "Cannot pickle files that are not opened for reading: %s" % obj.mode
+ )
+
+ name = obj.name
+
+ retval = io.StringIO()
+
+ try:
+ # Read the whole file
+ curloc = obj.tell()
+ obj.seek(0)
+ contents = obj.read()
+ obj.seek(curloc)
+ except OSError as e:
+ raise pickle.PicklingError(
+ "Cannot pickle file %s as it cannot be read" % name
+ ) from e
+ retval.write(contents)
+ retval.seek(curloc)
+
+ retval.name = name
+ return _file_reconstructor, (retval,)
+
+
+def _getset_descriptor_reduce(obj):
+ return getattr, (obj.__objclass__, obj.__name__)
+
+
+def _mappingproxy_reduce(obj):
+ return types.MappingProxyType, (dict(obj),)
+
+
+def _memoryview_reduce(obj):
+ return bytes, (obj.tobytes(),)
+
+
+def _module_reduce(obj):
+ if _should_pickle_by_reference(obj):
+ return subimport, (obj.__name__,)
+ else:
+ # Some external libraries can populate the "__builtins__" entry of a
+ # module's `__dict__` with unpicklable objects (see #316). For that
+ # reason, we do not attempt to pickle the "__builtins__" entry, and
+ # restore a default value for it at unpickling time.
+ state = obj.__dict__.copy()
+ state.pop("__builtins__", None)
+ return dynamic_subimport, (obj.__name__, state)
+
+
+def _method_reduce(obj):
+ return (types.MethodType, (obj.__func__, obj.__self__))
+
+
+def _logger_reduce(obj):
+ return logging.getLogger, (obj.name,)
+
+
+def _root_logger_reduce(obj):
+ return logging.getLogger, ()
+
+
+def _property_reduce(obj):
+ return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
+
+
+def _weakset_reduce(obj):
+ return weakref.WeakSet, (list(obj),)
+
+
+def _dynamic_class_reduce(obj):
+ """Save a class that can't be referenced as a module attribute.
+
+ This method is used to serialize classes that are defined inside
+ functions, or that otherwise can't be serialized as attribute lookups
+ from importable modules.
+ """
+ if Enum is not None and issubclass(obj, Enum):
+ return (
+ _make_skeleton_enum,
+ _enum_getnewargs(obj),
+ _enum_getstate(obj),
+ None,
+ None,
+ _class_setstate,
+ )
+ else:
+ return (
+ _make_skeleton_class,
+ _class_getnewargs(obj),
+ _class_getstate(obj),
+ None,
+ None,
+ _class_setstate,
+ )
+
+
+def _class_reduce(obj):
+ """Select the reducer depending on the dynamic nature of the class obj."""
+ if obj is type(None): # noqa
+ return type, (None,)
+ elif obj is type(Ellipsis):
+ return type, (Ellipsis,)
+ elif obj is type(NotImplemented):
+ return type, (NotImplemented,)
+ elif obj in _BUILTIN_TYPE_NAMES:
+ return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
+ elif not _should_pickle_by_reference(obj):
+ return _dynamic_class_reduce(obj)
+ return NotImplemented
+
+
+def _dict_keys_reduce(obj):
+ # Safer not to ship the full dict as sending the rest might
+ # be unintended and could potentially cause leaking of
+ # sensitive information
+ return _make_dict_keys, (list(obj),)
+
+
+def _dict_values_reduce(obj):
+ # Safer not to ship the full dict as sending the rest might
+ # be unintended and could potentially cause leaking of
+ # sensitive information
+ return _make_dict_values, (list(obj),)
+
+
+def _dict_items_reduce(obj):
+ return _make_dict_items, (dict(obj),)
+
+
+def _odict_keys_reduce(obj):
+ # Safer not to ship the full dict as sending the rest might
+ # be unintended and could potentially cause leaking of
+ # sensitive information
+ return _make_dict_keys, (list(obj), True)
+
+
+def _odict_values_reduce(obj):
+ # Safer not to ship the full dict as sending the rest might
+ # be unintended and could potentially cause leaking of
+ # sensitive information
+ return _make_dict_values, (list(obj), True)
+
+
+def _odict_items_reduce(obj):
+ return _make_dict_items, (dict(obj), True)
+
+
+def _dataclass_field_base_reduce(obj):
+ return _get_dataclass_field_type_sentinel, (obj.name,)
+
+
+# COLLECTIONS OF OBJECTS STATE SETTERS
+# ------------------------------------
+# state setters are called at unpickling time, once the object is created and
+# it has to be updated to how it was at pickling time.
+
+
+def _function_setstate(obj, state):
+ """Update the state of a dynamic function.
+
+ As __closure__ and __globals__ are readonly attributes of a function, we
+    cannot rely on the native setstate routine of pickle.load_build, which calls
+    setattr on items of the slotstate. Instead, we have to modify them in place.
+ """
+ state, slotstate = state
+ obj.__dict__.update(state)
+
+ obj_globals = slotstate.pop("__globals__")
+ obj_closure = slotstate.pop("__closure__")
+    # _cloudpickle_submodules is a list of submodules that must be loaded for
+ # the pickled function to work correctly at unpickling time. Now that these
+ # submodules are depickled (hence imported), they can be removed from the
+ # object's state (the object state only served as a reference holder to
+ # these submodules)
+ slotstate.pop("_cloudpickle_submodules")
+
+ obj.__globals__.update(obj_globals)
+ obj.__globals__["__builtins__"] = __builtins__
+
+ if obj_closure is not None:
+ for i, cell in enumerate(obj_closure):
+ try:
+ value = cell.cell_contents
+ except ValueError: # cell is empty
+ continue
+ obj.__closure__[i].cell_contents = value
+
+ for k, v in slotstate.items():
+ setattr(obj, k, v)
+
+
+def _class_setstate(obj, state):
+ state, slotstate = state
+ registry = None
+ for attrname, attr in state.items():
+ if attrname == "_abc_impl":
+ registry = attr
+ else:
+ setattr(obj, attrname, attr)
+ if registry is not None:
+ for subclass in registry:
+ obj.register(subclass)
+
+ return obj
+
+
+# COLLECTION OF DATACLASS UTILITIES
+# ---------------------------------
+# There are some internal sentinel values whose identity must be preserved when
+# unpickling dataclass fields. Each sentinel value has a unique name that we can
+# use to retrieve its identity at unpickling time.
+
+
+_DATACLASSE_FIELD_TYPE_SENTINELS = {
+ dataclasses._FIELD.name: dataclasses._FIELD,
+ dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
+ dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
+}
+
+
+def _get_dataclass_field_type_sentinel(name):
+ return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
+
+
+class Pickler(pickle.Pickler):
+ # set of reducers defined and used by cloudpickle (private)
+ _dispatch_table = {}
+ _dispatch_table[classmethod] = _classmethod_reduce
+ _dispatch_table[io.TextIOWrapper] = _file_reduce
+ _dispatch_table[logging.Logger] = _logger_reduce
+ _dispatch_table[logging.RootLogger] = _root_logger_reduce
+ _dispatch_table[memoryview] = _memoryview_reduce
+ _dispatch_table[property] = _property_reduce
+ _dispatch_table[staticmethod] = _classmethod_reduce
+ _dispatch_table[CellType] = _cell_reduce
+ _dispatch_table[types.CodeType] = _code_reduce
+ _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
+ _dispatch_table[types.ModuleType] = _module_reduce
+ _dispatch_table[types.MethodType] = _method_reduce
+ _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
+ _dispatch_table[weakref.WeakSet] = _weakset_reduce
+ _dispatch_table[typing.TypeVar] = _typevar_reduce
+ _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
+ _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
+ _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
+ _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
+ _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
+ _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
+ _dispatch_table[abc.abstractmethod] = _classmethod_reduce
+ _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
+ _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
+ _dispatch_table[abc.abstractproperty] = _property_reduce
+ _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce
+
+ dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
+
+ # function reducers are defined as instance methods of cloudpickle.Pickler
+ # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
+ def _dynamic_function_reduce(self, func):
+ """Reduce a function that is not pickleable via attribute lookup."""
+ newargs = self._function_getnewargs(func)
+ state = _function_getstate(func)
+ return (_make_function, newargs, state, None, None, _function_setstate)
+
+ def _function_reduce(self, obj):
+ """Reducer for function objects.
+
+ If obj is a top-level attribute of a file-backed module, this reducer
+ returns NotImplemented, making the cloudpickle.Pickler fall back to
+ traditional pickle.Pickler routines to save obj. Otherwise, it reduces
+ obj using a custom cloudpickle reducer designed specifically to handle
+ dynamic functions.
+ """
+ if _should_pickle_by_reference(obj):
+ return NotImplemented
+ else:
+ return self._dynamic_function_reduce(obj)
+
+ def _function_getnewargs(self, func):
+ code = func.__code__
+
+ # base_globals represents the future global namespace of func at
+ # unpickling time. Looking it up and storing it in
+        # cloudpickle.Pickler.globals_ref allows functions sharing the same
+        # globals at pickling time to also share them once unpickled, on one
+        # condition: since globals_ref is an attribute of a cloudpickle.Pickler
+        # instance, and a new cloudpickle.Pickler is created each time
+ # cloudpickle.dump or cloudpickle.dumps is called, functions also need
+ # to be saved within the same invocation of
+ # cloudpickle.dump/cloudpickle.dumps (for example:
+ # cloudpickle.dumps([f1, f2])). There is no such limitation when using
+ # cloudpickle.Pickler.dump, as long as the multiple invocations are
+ # bound to the same cloudpickle.Pickler instance.
+ base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
+
+ if base_globals == {}:
+ # Add module attributes used to resolve relative imports
+ # instructions inside func.
+ for k in ["__package__", "__name__", "__path__", "__file__"]:
+ if k in func.__globals__:
+ base_globals[k] = func.__globals__[k]
+
+ # Do not bind the free variables before the function is created to
+ # avoid infinite recursion.
+ if func.__closure__ is None:
+ closure = None
+ else:
+ closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))
+
+ return code, base_globals, None, None, closure
+
+ def dump(self, obj):
+ try:
+ return super().dump(obj)
+ except RuntimeError as e:
+ if len(e.args) > 0 and "recursion" in e.args[0]:
+ msg = "Could not pickle object as excessively deep recursion required."
+ raise pickle.PicklingError(msg) from e
+ else:
+ raise
+
+ def __init__(self, file, protocol=None, buffer_callback=None):
+ if protocol is None:
+ protocol = DEFAULT_PROTOCOL
+ super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
+        # map functions' __globals__ attribute ids, to ensure that functions
+ # sharing the same global namespace at pickling time also share
+ # their global namespace at unpickling time.
+ self.globals_ref = {}
+ self.proto = int(protocol)
+
+ if not PYPY:
+ # pickle.Pickler is the C implementation of the CPython pickler and
+        # therefore we rely on the reducer_override method to customize the pickler
+ # behavior.
+
+ # `cloudpickle.Pickler.dispatch` is only left for backward
+ # compatibility - note that when using protocol 5,
+ # `cloudpickle.Pickler.dispatch` is not an extension of
+ # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
+ # subclasses the C-implemented `pickle.Pickler`, which does not expose
+ # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
+ # used `cloudpickle.Pickler.dispatch` as a class-level attribute
+ # storing all reducers implemented by cloudpickle, but the attribute
+        # name was not a great choice because it would collide with a
+ # similarly named attribute in the pure-Python `pickle._Pickler`
+ # implementation in the standard library.
+ dispatch = dispatch_table
+
+ # Implementation of the reducer_override callback, in order to
+ # efficiently serialize dynamic functions and classes by subclassing
+ # the C-implemented `pickle.Pickler`.
+ # TODO: decorrelate reducer_override (which is tied to CPython's
+        # implementation - would it make sense to backport it to pypy?) and
+        # pickle's protocol 5, which is implementation agnostic. Currently, the
+        # availability of both notions coincides in CPython's pickle, but that
+        # may no longer be the case once pypy implements protocol 5.
+
+ def reducer_override(self, obj):
+ """Type-agnostic reducing callback for function and classes.
+
+ For performance reasons, subclasses of the C `pickle.Pickler` class
+ cannot register custom reducers for functions and classes in the
+            dispatch_table attribute. Reducers for such types must instead be
+            implemented via the special `reducer_override` method.
+
+ Note that this method will be called for any object except a few
+ builtin-types (int, lists, dicts etc.), which differs from reducers
+ in the Pickler's dispatch_table, each of them being invoked for
+ objects of a specific type only.
+
+ This property comes in handy for classes: although most classes are
+ instances of the ``type`` metaclass, some of them can be instances
+ of other custom metaclasses (such as enum.EnumMeta for example). In
+ particular, the metaclass will likely not be known in advance, and
+ thus cannot be special-cased using an entry in the dispatch_table.
+ reducer_override, among other things, allows us to register a
+ reducer that will be called for any class, independently of its
+ type.
+
+ Notes:
+
+            * reducer_override has priority over dispatch_table-registered
+ reducers.
+ * reducer_override can be used to fix other limitations of
+ cloudpickle for other types that suffered from type-specific
+ reducers, such as Exceptions. See
+ https://github.com/cloudpipe/cloudpickle/issues/248
+ """
+ t = type(obj)
+ try:
+ is_anyclass = issubclass(t, type)
+ except TypeError: # t is not a class (old Boost; see SF #502085)
+ is_anyclass = False
+
+ if is_anyclass:
+ return _class_reduce(obj)
+ elif isinstance(obj, types.FunctionType):
+ return self._function_reduce(obj)
+ else:
+ # fallback to save_global, including the Pickler's
+ # dispatch_table
+ return NotImplemented
+
+ else:
+ # When reducer_override is not available, hack the pure-Python
+ # Pickler's types.FunctionType and type savers. Note: the type saver
+ # must override Pickler.save_global, because pickle.py contains a
+ # hard-coded call to save_global when pickling meta-classes.
+ dispatch = pickle.Pickler.dispatch.copy()
+
+ def _save_reduce_pickle5(
+ self,
+ func,
+ args,
+ state=None,
+ listitems=None,
+ dictitems=None,
+ state_setter=None,
+ obj=None,
+ ):
+ save = self.save
+ write = self.write
+ self.save_reduce(
+ func,
+ args,
+ state=None,
+ listitems=listitems,
+ dictitems=dictitems,
+ obj=obj,
+ )
+ # backport of the Python 3.8 state_setter pickle operations
+ save(state_setter)
+ save(obj) # simple BINGET opcode as obj is already memoized.
+ save(state)
+ write(pickle.TUPLE2)
+ # Trigger a state_setter(obj, state) function call.
+ write(pickle.REDUCE)
+ # The purpose of state_setter is to carry-out an
+ # inplace modification of obj. We do not care about what the
+ # method might return, so its output is eventually removed from
+ # the stack.
+ write(pickle.POP)
+
+ def save_global(self, obj, name=None, pack=struct.pack):
+ """Main dispatch method.
+
+ The name of this method is somewhat misleading: all types get
+ dispatched here.
+ """
+ if obj is type(None): # noqa
+ return self.save_reduce(type, (None,), obj=obj)
+ elif obj is type(Ellipsis):
+ return self.save_reduce(type, (Ellipsis,), obj=obj)
+ elif obj is type(NotImplemented):
+ return self.save_reduce(type, (NotImplemented,), obj=obj)
+ elif obj in _BUILTIN_TYPE_NAMES:
+ return self.save_reduce(
+ _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
+ )
+
+ if name is not None:
+ super().save_global(obj, name=name)
+ elif not _should_pickle_by_reference(obj, name=name):
+ self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
+ else:
+ super().save_global(obj, name=name)
+
+ dispatch[type] = save_global
+
+ def save_function(self, obj, name=None):
+ """Registered with the dispatch to handle all function types.
+
+ Determines what kind of function obj is (e.g. lambda, defined at
+ interactive prompt, etc) and handles the pickling appropriately.
+ """
+ if _should_pickle_by_reference(obj, name=name):
+ return super().save_global(obj, name=name)
+ elif PYPY and isinstance(obj.__code__, builtin_code_type):
+ return self.save_pypy_builtin_func(obj)
+ else:
+ return self._save_reduce_pickle5(
+ *self._dynamic_function_reduce(obj), obj=obj
+ )
+
+ def save_pypy_builtin_func(self, obj):
+ """Save pypy equivalent of builtin functions.
+
+ PyPy does not have the concept of builtin-functions. Instead,
+ builtin-functions are simple function instances, but with a
+ builtin-code attribute.
+ Most of the time, builtin functions should be pickled by attribute.
+ But PyPy has flaky support for __qualname__, so some builtin
+ functions such as float.__new__ will be classified as dynamic. For
+ this reason only, we created this special routine. Because
+ builtin-functions are not expected to have closure or globals,
+            there is no additional hack (compared to the one already implemented
+            in pickle) to protect ourselves from reference cycles. A simple
+            (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
+            also that PyPy improved its support for __qualname__ in v3.6, so
+            this routine should be removed when cloudpickle supports only PyPy
+ 3.6 and later.
+ """
+ rv = (
+ types.FunctionType,
+ (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
+ obj.__dict__,
+ )
+ self.save_reduce(*rv, obj=obj)
+
+ dispatch[types.FunctionType] = save_function
+
+
+# Shorthands similar to pickle.dump/pickle.dumps
+
+
+def dump(obj, file, protocol=None, buffer_callback=None):
+ """Serialize obj as bytes streamed into file
+
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
+ speed between processes running the same Python version.
+
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
+ compatibility with older versions of Python (although this is not always
+ guaranteed to work because cloudpickle relies on some internal
+ implementation details that can change from one Python version to the
+ next).
+ """
+ Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj)
+
+
+def dumps(obj, protocol=None, buffer_callback=None):
+ """Serialize obj as a string of bytes allocated in memory
+
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
+ speed between processes running the same Python version.
+
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
+ compatibility with older versions of Python (although this is not always
+ guaranteed to work because cloudpickle relies on some internal
+ implementation details that can change from one Python version to the
+ next).
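+
+    A minimal round-trip sketch (the lambda stands in for any interactively
+    defined function; ``loads`` is the ``pickle.loads`` alias exposed below):
+
+    >>> payload = dumps(lambda x: x + 1)
+    >>> loads(payload)(41)
+    42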
+ """
+ with io.BytesIO() as file:
+ cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
+ cp.dump(obj)
+ return file.getvalue()
+
+
+# Include pickle's unpickling functions in this namespace for convenience.
+load, loads = pickle.load, pickle.loads
+
+# Backward compat alias.
+CloudPickler = Pickler
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..52d6732e44ebcc0053b24969943f7c3b742268bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
@@ -0,0 +1,13 @@
+"""Compatibility module.
+
+It can be necessary to load files generated by previous versions of cloudpickle
+that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
+namespace.
+
+See: tests/test_backward_compat.py
+"""
+from . import cloudpickle
+
+
+def __getattr__(name):
+ return getattr(cloudpickle, name)
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64d70ee1a4565377685dd72dd5a633ae386aef1b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf473643bcd7bf10256bbd0d48b4189c6d940fcd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0fbc1e61d52f65121116fb966ed313c6f5130a0a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f62bbe6699b943244ef5e164d0d8df5435da421
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f3929b6358d806ae75b4e6954dee044c36e787e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..789323dd1c530b1665cf5d55226b34805b2333e7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3240c45110d33f0ad4abc2c906a61861515b6159
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dab0f63de7624ed915eb2754165081a5189ab3f3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91964a6fc6e311c5e2cd69b11a417bb56cc4f084
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68558dbe575a5253ec87ffdf182fa643cd6fe2f3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd22ee728d4a746462108faca487380bdce05efd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..677e6735fc011de05638ab7a3a47c5e0f10b18d2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ab1b390df862c41ef76b289a8ec5dbb0f50c25f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d3191a1c16b35585d164b3f6ae28146f3617197
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__init__.py b/venv/lib/python3.10/site-packages/joblib/test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd497ec16db6398d66b505daa9c104a11bbc1c99
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c9df77ebacb9165a4001ccda33c09f0264129a9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb1ed39afc5790da2a4336fee178fa4d29af3087
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d45e0ce22b71bee47e383e3edc498d3c5cb9b34
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67b8a681c5c66cd02c8a27b4210f615303be319a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2a93a809a5b18771d19081198594890c51e1ae6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd0d5901af46af8e5fe59a3439abf2665d9ada3d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb28968f5816be6303a5d3ba10b57a4f24f302b7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eef3f8972eaf1661630bc0eb960671a50a390580
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4d40c96c2cac974496c270d0870b2c174b70f36
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2cf12a260466064502d578ffece1facbbdf9f422
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf414dea44baf2924584cf7ba32904ce64325abb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a91280272ba85c3946c821b684e2c09b3d19fe0a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af0fb2a79c847cfdd722ff5e7aeccd3e49cc9838
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f6f40ea7c4d0618180e791e7d06b6fa19b1d683
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2015e968ab274cc359779fdb0185786996c35f7e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d37a19a68fabc6aea56010071cd28a91e5726098
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1249ebad3c7c4d55a261848e73262618a35ce9dc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9ab291239736ab226ae593277836e4b10c78032
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8dba8b360e87eb807df43ee8ab855c475a2a744f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7508fd825b3e3d77b296388c7c94c597dd4c125
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69d1ee2350444a6aa8a282b0eef4a852f36d8a52
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54702d4d2e2b33c0e05574fc11a7dd3e27705948
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..acedae6dda97ecc6f8038ec669eb2efc3f9fa7e5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bbbef2b9385777706153cd9299efbcdb6c405a1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/common.py b/venv/lib/python3.10/site-packages/joblib/test/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0ca0c6abd913bc37091ebae4bd6a0b64084d20f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/common.py
@@ -0,0 +1,84 @@
+"""
+Small utilities for testing.
+"""
+import os
+import gc
+import sys
+
+from joblib._multiprocessing_helpers import mp
+from joblib.testing import SkipTest, skipif
+
+try:
+ import lz4
+except ImportError:
+ lz4 = None
+
+IS_PYPY = hasattr(sys, "pypy_version_info")
+
+# A decorator to run tests only when numpy is available
+try:
+ import numpy as np
+
+ def with_numpy(func):
+ """A decorator to skip tests requiring numpy."""
+ return func
+
+except ImportError:
+ def with_numpy(func):
+ """A decorator to skip tests requiring numpy."""
+ def my_func():
+ raise SkipTest('Test requires numpy')
+ return my_func
+ np = None
+
+# TODO: Turn this back on after refactoring yield based tests in test_hashing
+# with_numpy = skipif(not np, reason='Test requires numpy.')
+
+# We use the memory_profiler library for memory consumption checks
+try:
+ from memory_profiler import memory_usage
+
+ def with_memory_profiler(func):
+ """A decorator to skip tests requiring memory_profiler."""
+ return func
+
+ def memory_used(func, *args, **kwargs):
+ """Compute memory usage when executing func."""
+ gc.collect()
+ mem_use = memory_usage((func, args, kwargs), interval=.001)
+ return max(mem_use) - min(mem_use)
+
+except ImportError:
+ def with_memory_profiler(func):
+ """A decorator to skip tests requiring memory_profiler."""
+ def dummy_func():
+ raise SkipTest('Test requires memory_profiler.')
+ return dummy_func
+
+ memory_usage = memory_used = None
+
+
+def force_gc_pypy():
+ # The gc in pypy can be delayed. Force it to test the behavior when it
+ # will eventually be collected.
+ if IS_PYPY:
+ # Run gc.collect() twice to make sure the weakref is collected, as
+ # mentioned in the pypy doc:
+ # https://doc.pypy.org/en/latest/config/objspace.usemodules._weakref.html
+ import gc
+ gc.collect()
+ gc.collect()
+
+
+with_multiprocessing = skipif(
+ mp is None, reason='Needs multiprocessing to run.')
+
+
+with_dev_shm = skipif(
+ not os.path.exists('/dev/shm'),
+ reason='This test requires a large /dev/shm shared memory fs.')
+
+with_lz4 = skipif(lz4 is None, reason='Needs lz4 compression to run')
+
+without_lz4 = skipif(
+ lz4 is not None, reason='Needs lz4 not being installed to run')
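As a hypothetical usage sketch (the test name below is made up), these markers stack like ordinary pytest decorators, so a test is skipped unless every optional dependency it needs is importable:

    from joblib.test.common import with_numpy, with_multiprocessing, with_lz4

    @with_numpy
    @with_multiprocessing
    @with_lz4
    def test_needs_numpy_mp_and_lz4():
        # Runs only when numpy, multiprocessing and lz4 are all available.
        pass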
diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.lzma b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.lzma
new file mode 100644
index 0000000000000000000000000000000000000000..4280387fc5e542d3b022be43ab8ab8c05ea2e953
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.lzma differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip
new file mode 100644
index 0000000000000000000000000000000000000000..02b16b8ebaa36c61da9c0c4cba6ceae88049cb43
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma
new file mode 100644
index 0000000000000000000000000000000000000000..add975b005d9c6fdab5c24c369edb0c7c4cf397c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma
new file mode 100644
index 0000000000000000000000000000000000000000..6664c1772e85fa930dc330550f2cb237ac042328
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z
new file mode 100644
index 0000000000000000000000000000000000000000..669602e0a880d308cbea22eed2fda74cea225a9f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z differ
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_backports.py b/venv/lib/python3.10/site-packages/joblib/test/test_backports.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f5a3bd42353bf5a3d77243b9c607a3ec2b8cb1d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_backports.py
@@ -0,0 +1,35 @@
+import mmap
+
+from joblib.backports import make_memmap, concurrency_safe_rename
+from joblib.test.common import with_numpy
+from joblib.testing import parametrize
+from joblib import Parallel, delayed
+
+
+@with_numpy
+def test_memmap(tmpdir):
+ fname = tmpdir.join('test.mmap').strpath
+ size = 5 * mmap.ALLOCATIONGRANULARITY
+ offset = mmap.ALLOCATIONGRANULARITY + 1
+ memmap_obj = make_memmap(fname, shape=size, mode='w+', offset=offset)
+ assert memmap_obj.offset == offset
+
+
+@parametrize('dst_content', [None, 'dst content'])
+@parametrize('backend', [None, 'threading'])
+def test_concurrency_safe_rename(tmpdir, dst_content, backend):
+ src_paths = [tmpdir.join('src_%d' % i) for i in range(4)]
+ for src_path in src_paths:
+ src_path.write('src content')
+ dst_path = tmpdir.join('dst')
+ if dst_content is not None:
+ dst_path.write(dst_content)
+
+ Parallel(n_jobs=4, backend=backend)(
+ delayed(concurrency_safe_rename)(src_path.strpath, dst_path.strpath)
+ for src_path in src_paths
+ )
+ assert dst_path.exists()
+ assert dst_path.read() == 'src content'
+ for src_path in src_paths:
+ assert not src_path.exists()
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py b/venv/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..57bf92f940b2a68f10863ea5c42be444c8e0d2b0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_cloudpickle_wrapper.py
@@ -0,0 +1,27 @@
+"""
+Test that our implementation of wrap_non_picklable_objects properly
+mimics the loky implementation.
+"""
+
+from .._cloudpickle_wrapper import wrap_non_picklable_objects
+from .._cloudpickle_wrapper import _my_wrap_non_picklable_objects
+
+
+def a_function(x):
+ return x
+
+
+class AClass(object):
+
+ def __call__(self, x):
+ return x
+
+
+def test_wrap_non_picklable_objects():
+ # Mostly a smoke test: check that we can use a callable in the same way
+ # with both our implementation of wrap_non_picklable_objects and the
+ # upstream one
+ for obj in (a_function, AClass()):
+ wrapped_obj = wrap_non_picklable_objects(obj)
+ my_wrapped_obj = _my_wrap_non_picklable_objects(obj)
+ assert wrapped_obj(1) == my_wrapped_obj(1)
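A hedged sketch of what the wrapper is for (joblib re-exports wrap_non_picklable_objects at its top level; the function below is illustrative): callables that process-based backends cannot pickle by reference can be wrapped so they are serialized by value instead.

    from joblib import Parallel, delayed, wrap_non_picklable_objects

    @wrap_non_picklable_objects
    def double(x):
        # Wrapped so worker processes receive it by value rather than by import path.
        return 2 * x

    assert Parallel(n_jobs=2)(delayed(double)(i) for i in range(3)) == [0, 2, 4]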
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_config.py b/venv/lib/python3.10/site-packages/joblib/test/test_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..56eeed31b0db103360771c52cae641d00edb1c3a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_config.py
@@ -0,0 +1,151 @@
+import os
+
+from joblib.parallel import parallel_config
+from joblib.parallel import parallel_backend
+from joblib.parallel import Parallel, delayed
+
+from joblib.parallel import BACKENDS
+from joblib.parallel import DEFAULT_BACKEND
+from joblib.parallel import EXTERNAL_BACKENDS
+
+from joblib._parallel_backends import LokyBackend
+from joblib._parallel_backends import ThreadingBackend
+from joblib._parallel_backends import MultiprocessingBackend
+
+from joblib.testing import parametrize, raises
+from joblib.test.common import np, with_numpy
+from joblib.test.common import with_multiprocessing
+from joblib.test.test_parallel import check_memmap
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_global_parallel_backend(context):
+ default = Parallel()._backend
+
+ pb = context('threading')
+ try:
+ assert isinstance(Parallel()._backend, ThreadingBackend)
+ finally:
+ pb.unregister()
+ assert type(Parallel()._backend) is type(default)
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_external_backends(context):
+ def register_foo():
+ BACKENDS['foo'] = ThreadingBackend
+
+ EXTERNAL_BACKENDS['foo'] = register_foo
+ try:
+ with context('foo'):
+ assert isinstance(Parallel()._backend, ThreadingBackend)
+ finally:
+ del EXTERNAL_BACKENDS['foo']
+
+
+@with_numpy
+@with_multiprocessing
+def test_parallel_config_no_backend(tmpdir):
+ # Check that parallel_config allows changing the config
+ # even if no backend is set.
+ with parallel_config(n_jobs=2, max_nbytes=1, temp_folder=tmpdir):
+ with Parallel(prefer="processes") as p:
+ assert isinstance(p._backend, LokyBackend)
+ assert p.n_jobs == 2
+
+ # Checks that memmapping is enabled
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+ assert len(os.listdir(tmpdir)) > 0
+
+
+@with_numpy
+@with_multiprocessing
+def test_parallel_config_params_explicit_set(tmpdir):
+ with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir):
+ with Parallel(n_jobs=2, prefer="processes", max_nbytes='1M') as p:
+ assert isinstance(p._backend, LokyBackend)
+ assert p.n_jobs == 2
+
+ # Checks that memmapping is disabled
+ with raises(TypeError, match="Expected np.memmap instance"):
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+
+
+@parametrize("param", ["prefer", "require"])
+def test_parallel_config_bad_params(param):
+ # Check that an error is raised when setting a wrong backend
+ # hint or constraint
+ with raises(ValueError, match=f"{param}=wrong is not a valid"):
+ with parallel_config(**{param: "wrong"}):
+ Parallel()
+
+
+def test_parallel_config_constructor_params():
+ # Check that an error is raised when backend is None
+ # but backend constructor params are given
+ with raises(ValueError, match="only supported when backend is not None"):
+ with parallel_config(inner_max_num_threads=1):
+ pass
+
+ with raises(ValueError, match="only supported when backend is not None"):
+ with parallel_config(backend_param=1):
+ pass
+
+
+def test_parallel_config_nested():
+ # Check that nested configuration retrieves the info from the
+ # parent config and does not reset it.
+
+ with parallel_config(n_jobs=2):
+ p = Parallel()
+ assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND])
+ assert p.n_jobs == 2
+
+ with parallel_config(backend='threading'):
+ with parallel_config(n_jobs=2):
+ p = Parallel()
+ assert isinstance(p._backend, ThreadingBackend)
+ assert p.n_jobs == 2
+
+ with parallel_config(verbose=100):
+ with parallel_config(n_jobs=2):
+ p = Parallel()
+ assert p.verbose == 100
+ assert p.n_jobs == 2
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('backend', ['multiprocessing', 'threading',
+ MultiprocessingBackend(), ThreadingBackend()])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_context_error(context, backend):
+
+ with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"):
+ context(backend, inner_max_num_threads=1)
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_parallel_n_jobs_none(context):
+ # Check that n_jobs=None is interpreted as "unset" in Parallel
+ # non regression test for #1473
+ with context(backend="threading", n_jobs=2):
+ with Parallel(n_jobs=None) as p:
+ assert p.n_jobs == 2
+
+ with context(backend="threading"):
+ default_n_jobs = Parallel().n_jobs
+ with Parallel(n_jobs=None) as p:
+ assert p.n_jobs == default_n_jobs
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_parallel_config_n_jobs_none(context):
+ # Check that n_jobs=None is interpreted as "explicitly set" in
+ # parallel_(config/backend)
+ # non regression test for #1473
+ with context(backend="threading", n_jobs=2):
+ with context(backend="threading", n_jobs=None):
+ # n_jobs=None resets n_jobs to backend's default
+ with Parallel() as p:
+ assert p.n_jobs == 1
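A condensed sketch of the two n_jobs=None behaviours exercised above (threading backend, as in the tests): None given to Parallel means "inherit from the active config", while None given to parallel_config resets n_jobs to the backend default.

    from joblib import Parallel, parallel_config

    with parallel_config(backend="threading", n_jobs=2):
        with Parallel(n_jobs=None) as p:
            assert p.n_jobs == 2      # inherited from the surrounding config

    with parallel_config(backend="threading", n_jobs=2):
        with parallel_config(backend="threading", n_jobs=None):
            with Parallel() as p:
                assert p.n_jobs == 1  # reset to the threading backend default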
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_dask.py b/venv/lib/python3.10/site-packages/joblib/test/test_dask.py
new file mode 100644
index 0000000000000000000000000000000000000000..aebe65525fc55f4593e6c83b5bfd4ffc76d945a7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_dask.py
@@ -0,0 +1,499 @@
+from __future__ import print_function, division, absolute_import
+import os
+import warnings
+
+import pytest
+from random import random
+from uuid import uuid4
+from time import sleep
+
+from .. import Parallel, delayed, parallel_config
+from ..parallel import ThreadingBackend, AutoBatchingMixin
+from .._dask import DaskDistributedBackend
+
+distributed = pytest.importorskip('distributed')
+dask = pytest.importorskip('dask')
+
+# These imports need to be after the pytest.importorskip hence the noqa: E402
+from distributed import Client, LocalCluster, get_client # noqa: E402
+from distributed.metrics import time # noqa: E402
+# Note: pytest requires to manually import all fixtures used in the test
+# and their dependencies.
+from distributed.utils_test import cluster, inc, cleanup # noqa: E402, F401
+
+
+def noop(*args, **kwargs):
+ pass
+
+
+def slow_raise_value_error(condition, duration=0.05):
+ sleep(duration)
+ if condition:
+ raise ValueError("condition evaluated to True")
+
+
+def count_events(event_name, client):
+ worker_events = client.run(lambda dask_worker: dask_worker.log)
+ event_counts = {}
+ for w, events in worker_events.items():
+ event_counts[w] = len([event for event in list(events)
+ if event[1] == event_name])
+ return event_counts
+
+
+def test_simple(loop):
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client: # noqa: F841
+ with parallel_config(backend='dask'):
+ seq = Parallel()(delayed(inc)(i) for i in range(10))
+ assert seq == [inc(i) for i in range(10)]
+
+ with pytest.raises(ValueError):
+ Parallel()(delayed(slow_raise_value_error)(i == 3)
+ for i in range(10))
+
+ seq = Parallel()(delayed(inc)(i) for i in range(10))
+ assert seq == [inc(i) for i in range(10)]
+
+
+def test_dask_backend_uses_autobatching(loop):
+ assert (DaskDistributedBackend.compute_batch_size
+ is AutoBatchingMixin.compute_batch_size)
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client: # noqa: F841
+ with parallel_config(backend='dask'):
+ with Parallel() as parallel:
+ # The backend should be initialized with a default
+ # batch size of 1:
+ backend = parallel._backend
+ assert isinstance(backend, DaskDistributedBackend)
+ assert backend.parallel is parallel
+ assert backend._effective_batch_size == 1
+
+ # Launch many short tasks that should trigger
+ # auto-batching:
+ parallel(
+ delayed(lambda: None)()
+ for _ in range(int(1e4))
+ )
+ assert backend._effective_batch_size > 10
+
+
+def random2():
+ return random()
+
+
+def test_dont_assume_function_purity(loop):
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client: # noqa: F841
+ with parallel_config(backend='dask'):
+ x, y = Parallel()(delayed(random2)() for i in range(2))
+ assert x != y
+
+
+@pytest.mark.parametrize("mixed", [True, False])
+def test_dask_funcname(loop, mixed):
+ from joblib._dask import Batch
+ if not mixed:
+ tasks = [delayed(inc)(i) for i in range(4)]
+ batch_repr = 'batch_of_inc_4_calls'
+ else:
+ tasks = [
+ delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4)
+ ]
+ batch_repr = 'mixed_batch_of_inc_4_calls'
+
+ assert repr(Batch(tasks)) == batch_repr
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client:
+ with parallel_config(backend='dask'):
+ _ = Parallel(batch_size=2, pre_dispatch='all')(tasks)
+
+ def f(dask_scheduler):
+ return list(dask_scheduler.transition_log)
+ batch_repr = batch_repr.replace('4', '2')
+ log = client.run_on_scheduler(f)
+ assert all('batch_of_inc' in tup[0] for tup in log)
+
+
+def test_no_undesired_distributed_cache_hit():
+ # Dask has a pickle cache for callables that are called many times. Because
+ # the dask backends used to wrap both the functions and the arguments
+ # under instances of the Batch callable class, this caching mechanism could
+ # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055
+ # The joblib-dask backend has been refactored to avoid bundling the
+ # arguments as an attribute of the Batch instance to avoid this problem.
+ # This test serves as a non-regression check.
+
+ # Use a large number of input arguments to give the AutoBatchingMixin
+ # enough tasks to kick in.
+ lists = [[] for _ in range(100)]
+ np = pytest.importorskip('numpy')
+ X = np.arange(int(1e6))
+
+ def isolated_operation(list_, data=None):
+ if data is not None:
+ np.testing.assert_array_equal(data, X)
+ list_.append(uuid4().hex)
+ return list_
+
+ cluster = LocalCluster(n_workers=1, threads_per_worker=2)
+ client = Client(cluster)
+ try:
+ with parallel_config(backend='dask'):
+ # dispatches joblib.parallel.BatchedCalls
+ res = Parallel()(
+ delayed(isolated_operation)(list_) for list_ in lists
+ )
+
+ # The original arguments should not have been mutated as the mutation
+ # happens in the dask worker process.
+ assert lists == [[] for _ in range(100)]
+
+ # Here we did not pass any large numpy array as argument to
+ # isolated_operation so no scattering event should happen under the
+ # hood.
+ counts = count_events('receive-from-scatter', client)
+ assert sum(counts.values()) == 0
+ assert all([len(r) == 1 for r in res])
+
+ with parallel_config(backend='dask'):
+ # Append a large array which will be scattered by dask, and
+ # dispatch joblib._dask.Batch
+ res = Parallel()(
+ delayed(isolated_operation)(list_, data=X) for list_ in lists
+ )
+
+ # This time, auto-scattering should have kicked in.
+ counts = count_events('receive-from-scatter', client)
+ assert sum(counts.values()) > 0
+ assert all([len(r) == 1 for r in res])
+ finally:
+ client.close(timeout=30)
+ cluster.close(timeout=30)
+
+
+class CountSerialized(object):
+ def __init__(self, x):
+ self.x = x
+ self.count = 0
+
+ def __add__(self, other):
+ return self.x + getattr(other, 'x', other)
+
+ __radd__ = __add__
+
+ def __reduce__(self):
+ self.count += 1
+ return (CountSerialized, (self.x,))
+
+
+def add5(a, b, c, d=0, e=0):
+ return a + b + c + d + e
+
+
+def test_manual_scatter(loop):
+ x = CountSerialized(1)
+ y = CountSerialized(2)
+ z = CountSerialized(3)
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client: # noqa: F841
+ with parallel_config(backend='dask', scatter=[x, y]):
+ f = delayed(add5)
+ tasks = [f(x, y, z, d=4, e=5),
+ f(x, z, y, d=5, e=4),
+ f(y, x, z, d=x, e=5),
+ f(z, z, x, d=z, e=y)]
+ expected = [func(*args, **kwargs)
+ for func, args, kwargs in tasks]
+ results = Parallel()(tasks)
+
+ # Scatter must take a list/tuple
+ with pytest.raises(TypeError):
+ with parallel_config(backend='dask', loop=loop, scatter=1):
+ pass
+
+ assert results == expected
+
+ # Scattered variables only serialized once
+ assert x.count == 1
+ assert y.count == 1
+ # Depending on the version of distributed, the unscattered z variable
+ # is either pickled 4 or 6 times, possibly because of the memoization
+ # of objects that appear several times in the arguments of a delayed
+ # task.
+ assert z.count in (4, 6)
+
+
+# When the same IOLoop is used for multiple clients in a row, use
+# loop_in_thread instead of loop to prevent the Client from closing it. See
+# dask/distributed #4112
+def test_auto_scatter(loop_in_thread):
+ np = pytest.importorskip('numpy')
+ data1 = np.ones(int(1e4), dtype=np.uint8)
+ data2 = np.ones(int(1e4), dtype=np.uint8)
+ data_to_process = ([data1] * 3) + ([data2] * 3)
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop_in_thread) as client:
+ with parallel_config(backend='dask'):
+ # Passing the same data as arg and kwarg triggers a single
+ # scatter operation whose result is reused.
+ Parallel()(delayed(noop)(data, data, i, opt=data)
+ for i, data in enumerate(data_to_process))
+ # By default, large arrays are automatically scattered with
+ # broadcast=1 which means that one worker must directly receive
+ # the data from the scatter operation once.
+ counts = count_events('receive-from-scatter', client)
+ assert counts[a['address']] + counts[b['address']] == 2
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop_in_thread) as client:
+ with parallel_config(backend='dask'):
+ Parallel()(delayed(noop)(data1[:3], i) for i in range(5))
+ # Small arrays are passed within the task definition without going
+ # through a scatter operation.
+ counts = count_events('receive-from-scatter', client)
+ assert counts[a['address']] == 0
+ assert counts[b['address']] == 0
+
+
+@pytest.mark.parametrize("retry_no", list(range(2)))
+def test_nested_scatter(loop, retry_no):
+
+ np = pytest.importorskip('numpy')
+
+ NUM_INNER_TASKS = 10
+ NUM_OUTER_TASKS = 10
+
+ def my_sum(x, i, j):
+ return np.sum(x)
+
+ def outer_function_joblib(array, i):
+ client = get_client() # noqa
+ with parallel_config(backend="dask"):
+ results = Parallel()(
+ delayed(my_sum)(array[j:], i, j) for j in range(
+ NUM_INNER_TASKS)
+ )
+ return sum(results)
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as _:
+ with parallel_config(backend="dask"):
+ my_array = np.ones(10000)
+ _ = Parallel()(
+ delayed(outer_function_joblib)(
+ my_array[i:], i) for i in range(NUM_OUTER_TASKS)
+ )
+
+
+def test_nested_backend_context_manager(loop_in_thread):
+ def get_nested_pids():
+ pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
+ pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
+ return pids
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop_in_thread) as client:
+ with parallel_config(backend='dask'):
+ pid_groups = Parallel(n_jobs=2)(
+ delayed(get_nested_pids)()
+ for _ in range(10)
+ )
+ for pid_group in pid_groups:
+ assert len(set(pid_group)) <= 2
+
+ # No deadlocks
+ with Client(s['address'], loop=loop_in_thread) as client: # noqa: F841
+ with parallel_config(backend='dask'):
+ pid_groups = Parallel(n_jobs=2)(
+ delayed(get_nested_pids)()
+ for _ in range(10)
+ )
+ for pid_group in pid_groups:
+ assert len(set(pid_group)) <= 2
+
+
+def test_nested_backend_context_manager_implicit_n_jobs(loop):
+ # Check that Parallel with no explicit n_jobs value automatically selects
+ # all the dask workers, including in nested calls.
+
+ def _backend_type(p):
+ return p._backend.__class__.__name__
+
+ def get_nested_implicit_n_jobs():
+ with Parallel() as p:
+ return _backend_type(p), p.n_jobs
+
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client: # noqa: F841
+ with parallel_config(backend='dask'):
+ with Parallel() as p:
+ assert _backend_type(p) == "DaskDistributedBackend"
+ assert p.n_jobs == -1
+ all_nested_n_jobs = p(
+ delayed(get_nested_implicit_n_jobs)()
+ for _ in range(2)
+ )
+ for backend_type, nested_n_jobs in all_nested_n_jobs:
+ assert backend_type == "DaskDistributedBackend"
+ assert nested_n_jobs == -1
+
+
+def test_errors(loop):
+ with pytest.raises(ValueError) as info:
+ with parallel_config(backend='dask'):
+ pass
+
+ assert "create a dask client" in str(info.value).lower()
+
+
+def test_correct_nested_backend(loop):
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client: # noqa: F841
+ # No requirement: the nested backend should be the dask backend itself
+ with parallel_config(backend='dask'):
+ result = Parallel(n_jobs=2)(
+ delayed(outer)(nested_require=None) for _ in range(1))
+ assert isinstance(result[0][0][0], DaskDistributedBackend)
+
+ # Require threads, should be threading
+ with parallel_config(backend='dask'):
+ result = Parallel(n_jobs=2)(
+ delayed(outer)(nested_require='sharedmem')
+ for _ in range(1))
+ assert isinstance(result[0][0][0], ThreadingBackend)
+
+
+def outer(nested_require):
+ return Parallel(n_jobs=2, prefer='threads')(
+ delayed(middle)(nested_require) for _ in range(1)
+ )
+
+
+def middle(require):
+ return Parallel(n_jobs=2, require=require)(
+ delayed(inner)() for _ in range(1)
+ )
+
+
+def inner():
+ return Parallel()._backend
+
+
+def test_secede_with_no_processes(loop):
+ # https://github.com/dask/distributed/issues/1775
+ with Client(loop=loop, processes=False, set_as_default=True):
+ with parallel_config(backend='dask'):
+ Parallel(n_jobs=4)(delayed(id)(i) for i in range(2))
+
+
+def _worker_address(_):
+ from distributed import get_worker
+ return get_worker().address
+
+
+def test_dask_backend_keywords(loop):
+ with cluster() as (s, [a, b]):
+ with Client(s['address'], loop=loop) as client: # noqa: F841
+ with parallel_config(backend='dask', workers=a['address']):
+ seq = Parallel()(
+ delayed(_worker_address)(i) for i in range(10))
+ assert seq == [a['address']] * 10
+
+ with parallel_config(backend='dask', workers=b['address']):
+ seq = Parallel()(
+ delayed(_worker_address)(i) for i in range(10))
+ assert seq == [b['address']] * 10
+
+
+def test_scheduler_tasks_cleanup(loop):
+ with Client(processes=False, loop=loop) as client:
+ with parallel_config(backend='dask'):
+ Parallel()(delayed(inc)(i) for i in range(10))
+
+ start = time()
+ while client.cluster.scheduler.tasks:
+ sleep(0.01)
+ assert time() < start + 5
+
+ assert not client.futures
+
+
+@pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"])
+@pytest.mark.skipif(
+ distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0',
+ reason="distributed bug - https://github.com/dask/distributed/pull/2841")
+def test_wait_for_workers(cluster_strategy):
+ cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
+ client = Client(cluster)
+ if cluster_strategy == "adaptive":
+ cluster.adapt(minimum=0, maximum=2)
+ elif cluster_strategy == "late_scaling":
+ # Tell the cluster to start workers but this is a non-blocking call
+ # and new workers might take time to connect. In this case the Parallel
+ # call should wait for at least one worker to come up before starting
+ # to schedule work.
+ cluster.scale(2)
+ try:
+ with parallel_config(backend='dask'):
+ # The following should wait a bit for at least one worker to
+ # become available.
+ Parallel()(delayed(inc)(i) for i in range(10))
+ finally:
+ client.close()
+ cluster.close()
+
+
+def test_wait_for_workers_timeout():
+ # Start a cluster with 0 workers:
+ cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
+ client = Client(cluster)
+ try:
+ with parallel_config(backend='dask', wait_for_workers_timeout=0.1):
+ # Short timeout: DaskDistributedBackend
+ msg = "DaskDistributedBackend has no worker after 0.1 seconds."
+ with pytest.raises(TimeoutError, match=msg):
+ Parallel()(delayed(inc)(i) for i in range(10))
+
+ with parallel_config(backend='dask', wait_for_workers_timeout=0):
+ # No timeout: fallback to generic joblib failure:
+ msg = "DaskDistributedBackend has no active worker"
+ with pytest.raises(RuntimeError, match=msg):
+ Parallel()(delayed(inc)(i) for i in range(10))
+ finally:
+ client.close()
+ cluster.close()
+
+
+@pytest.mark.parametrize("backend", ["loky", "multiprocessing"])
+def test_joblib_warning_inside_dask_daemonic_worker(backend):
+ cluster = LocalCluster(n_workers=2)
+ client = Client(cluster)
+ try:
+
+ def func_using_joblib_parallel():
+ # Somehow, trying to check the warning type here (e.g. with
+ # pytest.warns(UserWarning)) makes the test hang. Work-around:
+ # return the warning record to the client and the warning check is
+ # done client-side.
+ with warnings.catch_warnings(record=True) as record:
+ Parallel(n_jobs=2, backend=backend)(
+ delayed(inc)(i) for i in range(10))
+
+ return record
+
+ fut = client.submit(func_using_joblib_parallel)
+ record = fut.result()
+
+ assert len(record) == 1
+ warning = record[0].message
+ assert isinstance(warning, UserWarning)
+ assert "distributed.worker.daemon" in str(warning)
+ finally:
+ client.close(timeout=30)
+ cluster.close(timeout=30)
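For orientation, a minimal sketch of the wiring these tests rely on (requires the optional distributed package; the worker count is illustrative): registering a Client is what makes backend='dask' usable at all, as test_errors checks above.

    from distributed import Client, LocalCluster
    from joblib import Parallel, delayed, parallel_config

    cluster = LocalCluster(n_workers=2, threads_per_worker=1)
    client = Client(cluster)  # without an active client, backend='dask' raises
    try:
        with parallel_config(backend="dask"):
            results = Parallel()(delayed(sum)([i, i]) for i in range(5))
        assert results == [0, 2, 4, 6, 8]
    finally:
        client.close()
        cluster.close()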
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_disk.py b/venv/lib/python3.10/site-packages/joblib/test/test_disk.py
new file mode 100644
index 0000000000000000000000000000000000000000..b825a8b3a5c18a3114f34ed9d7c90cce62799085
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_disk.py
@@ -0,0 +1,71 @@
+"""
+Unit tests for the disk utilities.
+"""
+
+# Authors: Gael Varoquaux
+# Lars Buitinck
+# Copyright (c) 2010 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+from __future__ import with_statement
+import array
+import os
+
+from joblib.disk import disk_used, memstr_to_bytes, mkdirp, rm_subdirs
+from joblib.testing import parametrize, raises
+
+###############################################################################
+
+
+def test_disk_used(tmpdir):
+ cachedir = tmpdir.strpath
+ # Now write a file that is 1M big in this directory, and check the
+ # size. The reason we use such a big file is that it makes us robust
+ # to errors due to block allocation.
+ a = array.array('i')
+ sizeof_i = a.itemsize
+ target_size = 1024
+ n = int(target_size * 1024 / sizeof_i)
+ a = array.array('i', n * (1,))
+ with open(os.path.join(cachedir, 'test'), 'wb') as output:
+ a.tofile(output)
+ assert disk_used(cachedir) >= target_size
+ assert disk_used(cachedir) < target_size + 12
+
+
+@parametrize('text,value',
+ [('80G', 80 * 1024 ** 3),
+ ('1.4M', int(1.4 * 1024 ** 2)),
+ ('120M', 120 * 1024 ** 2),
+ ('53K', 53 * 1024)])
+def test_memstr_to_bytes(text, value):
+ assert memstr_to_bytes(text) == value
+
+
+@parametrize('text,exception,regex',
+ [('fooG', ValueError, r'Invalid literal for size.*fooG.*'),
+ ('1.4N', ValueError, r'Invalid literal for size.*1.4N.*')])
+def test_memstr_to_bytes_exception(text, exception, regex):
+ with raises(exception) as excinfo:
+ memstr_to_bytes(text)
+ assert excinfo.match(regex)
+
+
+def test_mkdirp(tmpdir):
+ mkdirp(os.path.join(tmpdir.strpath, 'ham'))
+ mkdirp(os.path.join(tmpdir.strpath, 'ham'))
+ mkdirp(os.path.join(tmpdir.strpath, 'spam', 'spam'))
+
+ # Not all OSErrors are ignored
+ with raises(OSError):
+ mkdirp('')
+
+
+def test_rm_subdirs(tmpdir):
+ sub_path = os.path.join(tmpdir.strpath, "am", "stram")
+ full_path = os.path.join(sub_path, "gram")
+ mkdirp(os.path.join(full_path))
+
+ rm_subdirs(sub_path)
+ assert os.path.exists(sub_path)
+ assert not os.path.exists(full_path)
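The conversion parametrized above is plain binary-prefix arithmetic; a small worked check mirroring two of the table rows:

    from joblib.disk import memstr_to_bytes

    assert memstr_to_bytes('53K') == 53 * 1024               # 54272 bytes
    assert memstr_to_bytes('1.4M') == int(1.4 * 1024 ** 2)   # 1468006 bytes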
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_func_inspect.py b/venv/lib/python3.10/site-packages/joblib/test/test_func_inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..dba237d48578e5d6386e67e80f3e6d31761108d6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_func_inspect.py
@@ -0,0 +1,310 @@
+"""
+Test the func_inspect module.
+"""
+
+# Author: Gael Varoquaux
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import functools
+
+from joblib.func_inspect import filter_args, get_func_name, get_func_code
+from joblib.func_inspect import _clean_win_chars, format_signature
+from joblib.memory import Memory
+from joblib.test.common import with_numpy
+from joblib.testing import fixture, parametrize, raises
+
+
+###############################################################################
+# Module-level functions and fixture, for tests
+def f(x, y=0):
+ pass
+
+
+def g(x):
+ pass
+
+
+def h(x, y=0, *args, **kwargs):
+ pass
+
+
+def i(x=1):
+ pass
+
+
+def j(x, y, **kwargs):
+ pass
+
+
+def k(*args, **kwargs):
+ pass
+
+
+def m1(x, *, y):
+ pass
+
+
+def m2(x, *, y, z=3):
+ pass
+
+
+@fixture(scope='module')
+def cached_func(tmpdir_factory):
+ # Create a Memory object to test decorated functions.
+ # We should be careful not to call the decorated functions, so that
+ # cache directories are not created in the temp dir.
+ cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect")
+ mem = Memory(cachedir.strpath)
+
+ @mem.cache
+ def cached_func_inner(x):
+ return x
+
+ return cached_func_inner
+
+
+class Klass(object):
+
+ def f(self, x):
+ return x
+
+
+###############################################################################
+# Tests
+
+@parametrize('func,args,filtered_args',
+ [(f, [[], (1, )], {'x': 1, 'y': 0}),
+ (f, [['x'], (1, )], {'y': 0}),
+ (f, [['y'], (0, )], {'x': 0}),
+ (f, [['y'], (0, ), {'y': 1}], {'x': 0}),
+ (f, [['x', 'y'], (0, )], {}),
+ (f, [[], (0,), {'y': 1}], {'x': 0, 'y': 1}),
+ (f, [['y'], (), {'x': 2, 'y': 1}], {'x': 2}),
+ (g, [[], (), {'x': 1}], {'x': 1}),
+ (i, [[], (2, )], {'x': 2})])
+def test_filter_args(func, args, filtered_args):
+ assert filter_args(func, *args) == filtered_args
+
+
+def test_filter_args_method():
+ obj = Klass()
+ assert filter_args(obj.f, [], (1, )) == {'x': 1, 'self': obj}
+
+
+@parametrize('func,args,filtered_args',
+ [(h, [[], (1, )],
+ {'x': 1, 'y': 0, '*': [], '**': {}}),
+ (h, [[], (1, 2, 3, 4)],
+ {'x': 1, 'y': 2, '*': [3, 4], '**': {}}),
+ (h, [[], (1, 25), {'ee': 2}],
+ {'x': 1, 'y': 25, '*': [], '**': {'ee': 2}}),
+ (h, [['*'], (1, 2, 25), {'ee': 2}],
+ {'x': 1, 'y': 2, '**': {'ee': 2}})])
+def test_filter_varargs(func, args, filtered_args):
+ assert filter_args(func, *args) == filtered_args
+
+
+test_filter_kwargs_extra_params = [
+ (m1, [[], (1,), {'y': 2}], {'x': 1, 'y': 2}),
+ (m2, [[], (1,), {'y': 2}], {'x': 1, 'y': 2, 'z': 3})
+]
+
+
+@parametrize('func,args,filtered_args',
+ [(k, [[], (1, 2), {'ee': 2}],
+ {'*': [1, 2], '**': {'ee': 2}}),
+ (k, [[], (3, 4)],
+ {'*': [3, 4], '**': {}})] +
+ test_filter_kwargs_extra_params)
+def test_filter_kwargs(func, args, filtered_args):
+ assert filter_args(func, *args) == filtered_args
+
+
+def test_filter_args_2():
+ assert (filter_args(j, [], (1, 2), {'ee': 2}) ==
+ {'x': 1, 'y': 2, '**': {'ee': 2}})
+
+ ff = functools.partial(f, 1)
+ # filter_args has to special-case partial
+ assert filter_args(ff, [], (1, )) == {'*': [1], '**': {}}
+ assert filter_args(ff, ['y'], (1, )) == {'*': [1], '**': {}}
+
+
+@parametrize('func,funcname', [(f, 'f'), (g, 'g'),
+ (cached_func, 'cached_func')])
+def test_func_name(func, funcname):
+ # Check that we are not confused by decoration
+ # here testcase 'cached_func' is the function itself
+ assert get_func_name(func)[1] == funcname
+
+
+def test_func_name_on_inner_func(cached_func):
+ # Check that we are not confused by decoration
+ # here testcase 'cached_func' is the 'cached_func_inner' function
+ # returned by 'cached_func' fixture
+ assert get_func_name(cached_func)[1] == 'cached_func_inner'
+
+
+def test_func_name_collision_on_inner_func():
+ # Check that two functions defining and caching an inner function
+ # with the same name do not cause a (module, name) collision
+ def f():
+ def inner_func():
+ return # pragma: no cover
+ return get_func_name(inner_func)
+
+ def g():
+ def inner_func():
+ return # pragma: no cover
+ return get_func_name(inner_func)
+
+ module, name = f()
+ other_module, other_name = g()
+
+ assert name == other_name
+ assert module != other_module
+
+
+def test_func_inspect_errors():
+ # Check that func_inspect is robust and will work on weird objects
+ assert get_func_name('a'.lower)[-1] == 'lower'
+ assert get_func_code('a'.lower)[1:] == (None, -1)
+ ff = lambda x: x # noqa: E731
+ assert get_func_name(ff, win_characters=False)[-1] == ''
+ assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py')
+ # Simulate a function defined in __main__
+ ff.__module__ = '__main__'
+ assert get_func_name(ff, win_characters=False)[-1] == ''
+ assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py')
+
+
+def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
+ pass
+
+
+def func_with_signature(a: int, b: int) -> None:
+ pass
+
+
+def test_filter_args_edge_cases():
+ assert (
+ filter_args(func_with_kwonly_args, [], (1, 2),
+ {'kw1': 3, 'kw2': 4}) ==
+ {'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4})
+
+ # Keyword-only parameters cannot be passed positionally: filter_args
+ # raises an explicit error when 'kw1' is supplied through *args
+ with raises(ValueError) as excinfo:
+ filter_args(func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2})
+ excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
+ "parameter")
+
+ assert (
+ filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2),
+ {'kw1': 3, 'kw2': 4}) ==
+ {'a': 1, 'kw1': 3})
+
+ assert (filter_args(func_with_signature, ['b'], (1, 2)) == {'a': 1})
+
+
+def test_bound_methods():
+ """ Make sure that calling the same method on two different instances
+ of the same class does resolv to different signatures.
+ """
+ a = Klass()
+ b = Klass()
+ assert filter_args(a.f, [], (1, )) != filter_args(b.f, [], (1, ))
+
+
+@parametrize('exception,regex,func,args',
+ [(ValueError, 'ignore_lst must be a list of parameters to ignore',
+ f, ['bar', (None, )]),
+ (ValueError, r'Ignore list: argument \'(.*)\' is not defined',
+ g, [['bar'], (None, )]),
+ (ValueError, 'Wrong number of arguments',
+ h, [[]])])
+def test_filter_args_error_msg(exception, regex, func, args):
+ """ Make sure that filter_args returns decent error messages, for the
+ sake of the user.
+ """
+ with raises(exception) as excinfo:
+ filter_args(func, *args)
+ excinfo.match(regex)
+
+
+def test_filter_args_no_kwargs_mutation():
+ """None-regression test against 0.12.0 changes.
+
+ https://github.com/joblib/joblib/pull/75
+
+ Make sure filter_args doesn't mutate the kwargs dict that gets passed in.
+ """
+ kwargs = {'x': 0}
+ filter_args(g, [], [], kwargs)
+ assert kwargs == {'x': 0}
+
+
+def test_clean_win_chars():
+ string = r'C:\foo\bar\main.py'
+ mangled_string = _clean_win_chars(string)
+ for char in ('\\', ':', '<', '>', '!'):
+ assert char not in mangled_string
+
+
+@parametrize('func,args,kwargs,sgn_expected',
+ [(g, [list(range(5))], {}, 'g([0, 1, 2, 3, 4])'),
+ (k, [1, 2, (3, 4)], {'y': True}, 'k(1, 2, (3, 4), y=True)')])
+def test_format_signature(func, args, kwargs, sgn_expected):
+ # Test signature formatting.
+ path, sgn_result = format_signature(func, *args, **kwargs)
+ assert sgn_result == sgn_expected
+
+
+def test_format_signature_long_arguments():
+ shortening_threshold = 1500
+ # shortening gets it down to 700 characters, but the signature also
+ # contains the function name and a few additional characters such as
+ # the dots of the ellipsis
+ shortening_target = 700 + 10
+
+ arg = 'a' * shortening_threshold
+ _, signature = format_signature(h, arg)
+ assert len(signature) < shortening_target
+
+ nb_args = 5
+ args = [arg for _ in range(nb_args)]
+ _, signature = format_signature(h, *args)
+ assert len(signature) < shortening_target * nb_args
+
+ kwargs = {str(i): arg for i, arg in enumerate(args)}
+ _, signature = format_signature(h, **kwargs)
+ assert len(signature) < shortening_target * nb_args
+
+ _, signature = format_signature(h, *args, **kwargs)
+ assert len(signature) < shortening_target * 2 * nb_args
+
+
+@with_numpy
+def test_format_signature_numpy():
+ """ Test the format signature formatting with numpy.
+ """
+
+
+def test_special_source_encoding():
+ from joblib.test.test_func_inspect_special_encoding import big5_f
+ func_code, source_file, first_line = get_func_code(big5_f)
+ assert first_line == 5
+ assert "def big5_f():" in func_code
+ assert "test_func_inspect_special_encoding" in source_file
+
+
+def _get_code():
+ from joblib.test.test_func_inspect_special_encoding import big5_f
+ return get_func_code(big5_f)[0]
+
+
+def test_func_code_consistency():
+ from joblib.parallel import Parallel, delayed
+ codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5))
+ assert len(set(codes)) == 1
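A compact illustration of the filter_args contract these tests exercise (the ignore list drops arguments from the returned dict, and defaults are filled in); the function f mirrors the module-level fixture defined above:

    from joblib.func_inspect import filter_args

    def f(x, y=0):
        pass

    assert filter_args(f, [], (1,)) == {'x': 1, 'y': 0}   # default y filled in
    assert filter_args(f, ['y'], (1,)) == {'x': 1}        # 'y' ignored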
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py b/venv/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c41a59a6900ced36050bf357359c1164a11fdbe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py
@@ -0,0 +1,9 @@
+# -*- coding: big5 -*-
+
+
+# Some Traditional Chinese characters: ¤@¨Ç¤¤¤å¦r²Å
+def big5_f():
+ """¥Î©ó´ú¸Õªº¨ç¼Æ
+ """
+ # µùÄÀ
+ return 0
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_hashing.py b/venv/lib/python3.10/site-packages/joblib/test/test_hashing.py
new file mode 100644
index 0000000000000000000000000000000000000000..85593d297f6e2387c58cad8d5bceba4d21b1c0aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_hashing.py
@@ -0,0 +1,495 @@
+"""
+Test the hashing module.
+"""
+
+# Author: Gael Varoquaux
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import time
+import hashlib
+import sys
+import gc
+import io
+import collections
+import itertools
+import pickle
+import random
+from concurrent.futures import ProcessPoolExecutor
+from decimal import Decimal
+
+from joblib.hashing import hash
+from joblib.func_inspect import filter_args
+from joblib.memory import Memory
+from joblib.testing import raises, skipif, fixture, parametrize
+from joblib.test.common import np, with_numpy
+
+
+def unicode(s):
+ return s
+
+
+###############################################################################
+# Helper functions for the tests
+def time_func(func, *args):
+ """ Time function func on *args.
+ """
+ times = list()
+ for _ in range(3):
+ t1 = time.time()
+ func(*args)
+ times.append(time.time() - t1)
+ return min(times)
+
+
+def relative_time(func1, func2, *args):
+ """ Return the relative time between func1 and func2 applied on
+ *args.
+ """
+ time_func1 = time_func(func1, *args)
+ time_func2 = time_func(func2, *args)
+ relative_diff = 0.5 * (abs(time_func1 - time_func2)
+ / (time_func1 + time_func2))
+ return relative_diff
+
+
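+# Worked example for the symmetric relative difference used above (an
+# editorial sketch, not part of the upstream test suite): timings of 20 ms and
+# 30 ms give 0.5 * |20 - 30| / (20 + 30) = 0.1, and a threshold of 0.3 (used in
+# the performance test below) tolerates at most a 4x ratio between the two
+# timings, since 0.5 * (4 - 1) / (4 + 1) = 0.3.
+_RELATIVE_DIFF_EXAMPLE = 0.5 * abs(20 - 30) / (20 + 30)  # == 0.1
+
+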
+class Klass(object):
+
+ def f(self, x):
+ return x
+
+
+class KlassWithCachedMethod(object):
+
+ def __init__(self, cachedir):
+ mem = Memory(location=cachedir)
+ self.f = mem.cache(self.f)
+
+ def f(self, x):
+ return x
+
+
+###############################################################################
+# Tests
+
+input_list = [1, 2, 1., 2., 1 + 1j, 2. + 1j,
+ 'a', 'b',
+ (1,), (1, 1,), [1, ], [1, 1, ],
+ {1: 1}, {1: 2}, {2: 1},
+ None,
+ gc.collect,
+ [1, ].append,
+ # Next 2 sets have unorderable elements in python 3.
+ set(('a', 1)),
+ set(('a', 1, ('a', 1))),
+              # Next 2 dicts have unorderable types of keys in python 3.
+ {'a': 1, 1: 2},
+ {'a': 1, 1: 2, 'd': {'a': 1}}]
+
+
+@parametrize('obj1', input_list)
+@parametrize('obj2', input_list)
+def test_trivial_hash(obj1, obj2):
+ """Smoke test hash on various types."""
+ # Check that 2 objects have the same hash only if they are the same.
+ are_hashes_equal = hash(obj1) == hash(obj2)
+ are_objs_identical = obj1 is obj2
+ assert are_hashes_equal == are_objs_identical
+
+
+def test_hash_methods():
+ # Check that hashing instance methods works
+ a = io.StringIO(unicode('a'))
+ assert hash(a.flush) == hash(a.flush)
+ a1 = collections.deque(range(10))
+ a2 = collections.deque(range(9))
+ assert hash(a1.extend) != hash(a2.extend)
+
+
+@fixture(scope='function')
+@with_numpy
+def three_np_arrays():
+ rnd = np.random.RandomState(0)
+ arr1 = rnd.random_sample((10, 10))
+ arr2 = arr1.copy()
+ arr3 = arr2.copy()
+ arr3[0] += 1
+ return arr1, arr2, arr3
+
+
+def test_hash_numpy_arrays(three_np_arrays):
+ arr1, arr2, arr3 = three_np_arrays
+
+ for obj1, obj2 in itertools.product(three_np_arrays, repeat=2):
+ are_hashes_equal = hash(obj1) == hash(obj2)
+ are_arrays_equal = np.all(obj1 == obj2)
+ assert are_hashes_equal == are_arrays_equal
+
+ assert hash(arr1) != hash(arr1.T)
+
+
+def test_hash_numpy_dict_of_arrays(three_np_arrays):
+ arr1, arr2, arr3 = three_np_arrays
+
+ d1 = {1: arr1, 2: arr2}
+ d2 = {1: arr2, 2: arr1}
+ d3 = {1: arr2, 2: arr3}
+
+ assert hash(d1) == hash(d2)
+ assert hash(d1) != hash(d3)
+
+
+@with_numpy
+@parametrize('dtype', ['datetime64[s]', 'timedelta64[D]'])
+def test_numpy_datetime_array(dtype):
+ # memoryview is not supported for some dtypes e.g. datetime64
+ # see https://github.com/joblib/joblib/issues/188 for more details
+ a_hash = hash(np.arange(10))
+ array = np.arange(0, 10, dtype=dtype)
+ assert hash(array) != a_hash
+
+
+@with_numpy
+def test_hash_numpy_noncontiguous():
+ a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
+ order='F')[:, :1, :]
+ b = np.ascontiguousarray(a)
+ assert hash(a) != hash(b)
+
+ c = np.asfortranarray(a)
+ assert hash(a) != hash(c)
+
+
+@with_numpy
+@parametrize('coerce_mmap', [True, False])
+def test_hash_memmap(tmpdir, coerce_mmap):
+ """Check that memmap and arrays hash identically if coerce_mmap is True."""
+ filename = tmpdir.join('memmap_temp').strpath
+ try:
+ m = np.memmap(filename, shape=(10, 10), mode='w+')
+ a = np.asarray(m)
+ are_hashes_equal = (hash(a, coerce_mmap=coerce_mmap) ==
+ hash(m, coerce_mmap=coerce_mmap))
+ assert are_hashes_equal == coerce_mmap
+ finally:
+ if 'm' in locals():
+ del m
+ # Force a garbage-collection cycle, to be certain that the
+            # object is deleted, and we don't run into a problem under
+ # Windows with a file handle still open.
+ gc.collect()
+
+
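+# Editorial sketch (hedged; `_same_cache_entry` is illustrative and not a
+# joblib API): when the same data may arrive either as an in-memory array or
+# as a np.memmap view of a file, hashing both with coerce_mmap=True yields a
+# single digest, which is the property the test above relies on.
+def _same_cache_entry(arr_like_1, arr_like_2):
+    return (hash(arr_like_1, coerce_mmap=True) ==
+            hash(arr_like_2, coerce_mmap=True))
+
+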
+@with_numpy
+@skipif(sys.platform == 'win32', reason='This test is not stable under windows'
+ ' for some reason')
+def test_hash_numpy_performance():
+ """ Check the performance of hashing numpy arrays:
+
+ In [22]: a = np.random.random(1000000)
+
+ In [23]: %timeit hashlib.md5(a).hexdigest()
+ 100 loops, best of 3: 20.7 ms per loop
+
+ In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest()
+ 1 loops, best of 3: 73.1 ms per loop
+
+ In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest()
+ 10 loops, best of 3: 53.9 ms per loop
+
+ In [26]: %timeit hash(a)
+ 100 loops, best of 3: 20.8 ms per loop
+ """
+ rnd = np.random.RandomState(0)
+ a = rnd.random_sample(1000000)
+
+ def md5_hash(x):
+ return hashlib.md5(memoryview(x)).hexdigest()
+
+ relative_diff = relative_time(md5_hash, hash, a)
+ assert relative_diff < 0.3
+
+    # Check that hashing a tuple of 3 arrays takes approximately
+    # 3 times as long as hashing one array
+ time_hashlib = 3 * time_func(md5_hash, a)
+ time_hash = time_func(hash, (a, a, a))
+ relative_diff = 0.5 * (abs(time_hash - time_hashlib)
+ / (time_hash + time_hashlib))
+ assert relative_diff < 0.3
+
+
+def test_bound_methods_hash():
+ """ Make sure that calling the same method on two different instances
+    of the same class resolves to the same hash.
+ """
+ a = Klass()
+ b = Klass()
+ assert (hash(filter_args(a.f, [], (1, ))) ==
+ hash(filter_args(b.f, [], (1, ))))
+
+
+def test_bound_cached_methods_hash(tmpdir):
+ """ Make sure that calling the same _cached_ method on two different
+    instances of the same class resolves to the same hash.
+ """
+ a = KlassWithCachedMethod(tmpdir.strpath)
+ b = KlassWithCachedMethod(tmpdir.strpath)
+ assert (hash(filter_args(a.f.func, [], (1, ))) ==
+ hash(filter_args(b.f.func, [], (1, ))))
+
+
+@with_numpy
+def test_hash_object_dtype():
+ """ Make sure that ndarrays with dtype `object' hash correctly."""
+
+ a = np.array([np.arange(i) for i in range(6)], dtype=object)
+ b = np.array([np.arange(i) for i in range(6)], dtype=object)
+
+ assert hash(a) == hash(b)
+
+
+@with_numpy
+def test_numpy_scalar():
+    # Numpy scalars are built from compiled functions and lead to strange
+    # pickling paths being explored, which can give hash collisions
+ a = np.float64(2.0)
+ b = np.float64(3.0)
+ assert hash(a) != hash(b)
+
+
+def test_dict_hash(tmpdir):
+ # Check that dictionaries hash consistently, even though the ordering
+ # of the keys is not guaranteed
+ k = KlassWithCachedMethod(tmpdir.strpath)
+
+ d = {'#s12069__c_maps.nii.gz': [33],
+ '#s12158__c_maps.nii.gz': [33],
+ '#s12258__c_maps.nii.gz': [33],
+ '#s12277__c_maps.nii.gz': [33],
+ '#s12300__c_maps.nii.gz': [33],
+ '#s12401__c_maps.nii.gz': [33],
+ '#s12430__c_maps.nii.gz': [33],
+ '#s13817__c_maps.nii.gz': [33],
+ '#s13903__c_maps.nii.gz': [33],
+ '#s13916__c_maps.nii.gz': [33],
+ '#s13981__c_maps.nii.gz': [33],
+ '#s13982__c_maps.nii.gz': [33],
+ '#s13983__c_maps.nii.gz': [33]}
+
+ a = k.f(d)
+ b = k.f(a)
+
+ assert hash(a) == hash(b)
+
+
+def test_set_hash(tmpdir):
+ # Check that sets hash consistently, even though their ordering
+ # is not guaranteed
+ k = KlassWithCachedMethod(tmpdir.strpath)
+
+ s = set(['#s12069__c_maps.nii.gz',
+ '#s12158__c_maps.nii.gz',
+ '#s12258__c_maps.nii.gz',
+ '#s12277__c_maps.nii.gz',
+ '#s12300__c_maps.nii.gz',
+ '#s12401__c_maps.nii.gz',
+ '#s12430__c_maps.nii.gz',
+ '#s13817__c_maps.nii.gz',
+ '#s13903__c_maps.nii.gz',
+ '#s13916__c_maps.nii.gz',
+ '#s13981__c_maps.nii.gz',
+ '#s13982__c_maps.nii.gz',
+ '#s13983__c_maps.nii.gz'])
+
+ a = k.f(s)
+ b = k.f(a)
+
+ assert hash(a) == hash(b)
+
+
+def test_set_decimal_hash():
+ # Check that sets containing decimals hash consistently, even though
+ # ordering is not guaranteed
+ assert (hash(set([Decimal(0), Decimal('NaN')])) ==
+ hash(set([Decimal('NaN'), Decimal(0)])))
+
+
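+# Editorial sketch restating the property exercised by the dict/set tests
+# above with plain literals (hedged; `_order_independent_digest_demo` is
+# illustrative only): joblib's hasher sorts dict items and set elements
+# internally, so insertion order does not change the digest.
+def _order_independent_digest_demo():
+    assert hash({'a': 1, 'b': 2}) == hash({'b': 2, 'a': 1})
+    assert hash({'x', 'y', 'z'}) == hash({'z', 'y', 'x'})
+
+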
+def test_string():
+    # Test that we obtain the same hash for objects owning several strings,
+    # whatever the history of these strings (which are immutable in Python)
+ string = 'foo'
+ a = {string: 'bar'}
+ b = {string: 'bar'}
+ c = pickle.loads(pickle.dumps(b))
+ assert hash([a, b]) == hash([a, c])
+
+
+@with_numpy
+def test_numpy_dtype_pickling():
+ # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080,
+ # #1082, and explanatory comments inside
+ # ``joblib.hashing.NumpyHasher.save``.
+
+ # In this test, we make sure that the pickling of numpy dtypes is robust to
+ # object identity and object copy.
+
+ dt1 = np.dtype('f4')
+ dt2 = np.dtype('f4')
+
+    # simple dtype objects are interned
+ assert dt1 is dt2
+ assert hash(dt1) == hash(dt2)
+
+ dt1_roundtripped = pickle.loads(pickle.dumps(dt1))
+ assert dt1 is not dt1_roundtripped
+ assert hash(dt1) == hash(dt1_roundtripped)
+
+ assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped])
+ assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped])
+
+ complex_dt1 = np.dtype(
+ [('name', np.str_, 16), ('grades', np.float64, (2,))]
+ )
+ complex_dt2 = np.dtype(
+ [('name', np.str_, 16), ('grades', np.float64, (2,))]
+ )
+
+    # complex dtype objects are not interned
+ assert hash(complex_dt1) == hash(complex_dt2)
+
+ complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1))
+ assert complex_dt1_roundtripped is not complex_dt1
+ assert hash(complex_dt1) == hash(complex_dt1_roundtripped)
+
+ assert hash([complex_dt1, complex_dt1]) == hash(
+ [complex_dt1_roundtripped, complex_dt1_roundtripped]
+ )
+ assert hash([complex_dt1, complex_dt1]) == hash(
+ [complex_dt1_roundtripped, complex_dt1]
+ )
+
+
+@parametrize('to_hash,expected',
+ [('This is a string to hash',
+ '71b3f47df22cb19431d85d92d0b230b2'),
+ (u"C'est l\xe9t\xe9",
+ '2d8d189e9b2b0b2e384d93c868c0e576'),
+ ((123456, 54321, -98765),
+ 'e205227dd82250871fa25aa0ec690aa3'),
+ ([random.Random(42).random() for _ in range(5)],
+ 'a11ffad81f9682a7d901e6edc3d16c84'),
+ ({'abcde': 123, 'sadfas': [-9999, 2, 3]},
+ 'aeda150553d4bb5c69f0e69d51b0e2ef')])
+def test_hashes_stay_the_same(to_hash, expected):
+ # We want to make sure that hashes don't change with joblib
+ # version. For end users, that would mean that they have to
+ # regenerate their cache from scratch, which potentially means
+ # lengthy recomputations.
+ # Expected results have been generated with joblib 0.9.2
+ assert hash(to_hash) == expected
+
+
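+# Editorial sketch (hedged; `_make_cache_key` is illustrative, not a joblib
+# API): because the digests above are stable for structurally equal inputs,
+# joblib.hashing.hash can be used directly as a cache key for arbitrary
+# picklable call arguments.
+def _make_cache_key(*args, **kwargs):
+    # hash() returns a hex digest string, like the expected values
+    # parametrized above.
+    return hash((args, kwargs))
+
+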
+@with_numpy
+def test_hashes_are_different_between_c_and_fortran_contiguous_arrays():
+ # We want to be sure that the c-contiguous and f-contiguous versions of the
+ # same array produce 2 different hashes.
+ rng = np.random.RandomState(0)
+ arr_c = rng.random_sample((10, 10))
+ arr_f = np.asfortranarray(arr_c)
+ assert hash(arr_c) != hash(arr_f)
+
+
+@with_numpy
+def test_0d_array():
+ hash(np.array(0))
+
+
+@with_numpy
+def test_0d_and_1d_array_hashing_is_different():
+ assert hash(np.array(0)) != hash(np.array([0]))
+
+
+@with_numpy
+def test_hashes_stay_the_same_with_numpy_objects():
+ # Note: joblib used to test numpy objects hashing by comparing the produced
+ # hash of an object with some hard-coded target value to guarantee that
+ # hashing remains the same across joblib versions. However, since numpy
+ # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation
+ # details of numpy to hash np.dtype objects, which makes the stability of
+ # hash values across different environments hard to guarantee and to test.
+ # As a result, hashing stability across joblib versions becomes best-effort
+ # only, and we only test the consistency within a single environment by
+ # making sure:
+ # - the hash of two copies of the same objects is the same
+ # - hashing some object in two different python processes produces the same
+ # value. This should be viewed as a proxy for testing hash consistency
+ # through time between Python sessions (provided no change in the
+ # environment was done between sessions).
+
+ def create_objects_to_hash():
+ rng = np.random.RandomState(42)
+ # Being explicit about dtypes in order to avoid
+ # architecture-related differences. Also using 'f4' rather than
+ # 'f8' for float arrays because 'f8' arrays generated by
+ # rng.random.randn don't seem to be bit-identical on 32bit and
+ # 64bit machines.
+ to_hash_list = [
+ rng.randint(-1000, high=1000, size=50).astype('
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+import re
+
+from joblib.logger import PrintTime
+
+
+def test_print_time(tmpdir, capsys):
+ # A simple smoke test for PrintTime.
+ logfile = tmpdir.join('test.log').strpath
+ print_time = PrintTime(logfile=logfile)
+ print_time('Foo')
+ # Create a second time, to smoke test log rotation.
+ print_time = PrintTime(logfile=logfile)
+ print_time('Foo')
+ # And a third time
+ print_time = PrintTime(logfile=logfile)
+ print_time('Foo')
+
+ out_printed_text, err_printed_text = capsys.readouterr()
+ # Use regexps to be robust to time variations
+ match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0..min\nFoo: " + \
+ r".\..s, 0..min\n"
+ if not re.match(match, err_printed_text):
+        raise AssertionError('Expected %s, got %s' %
+ (match, err_printed_text))
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_memmapping.py b/venv/lib/python3.10/site-packages/joblib/test/test_memmapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..42a297a9e445d38c0daa03bc7d78e4c4f1fd4571
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_memmapping.py
@@ -0,0 +1,1191 @@
+import os
+import mmap
+import sys
+import platform
+import gc
+import pickle
+import itertools
+from time import sleep
+import subprocess
+import threading
+import faulthandler
+
+import pytest
+
+from joblib.test.common import with_numpy, np
+from joblib.test.common import with_multiprocessing
+from joblib.test.common import with_dev_shm
+from joblib.testing import raises, parametrize, skipif
+from joblib.backports import make_memmap
+from joblib.parallel import Parallel, delayed
+
+from joblib.pool import MemmappingPool
+from joblib.executor import _TestingMemmappingExecutor as TestExecutor
+from joblib._memmapping_reducer import has_shareable_memory
+from joblib._memmapping_reducer import ArrayMemmapForwardReducer
+from joblib._memmapping_reducer import _strided_from_memmap
+from joblib._memmapping_reducer import _get_temp_dir
+from joblib._memmapping_reducer import _WeakArrayKeyMap
+from joblib._memmapping_reducer import _get_backing_memmap
+import joblib._memmapping_reducer as jmr
+
+
+def setup_module():
+ faulthandler.dump_traceback_later(timeout=300, exit=True)
+
+
+def teardown_module():
+ faulthandler.cancel_dump_traceback_later()
+
+
+def check_memmap_and_send_back(array):
+ assert _get_backing_memmap(array) is not None
+ return array
+
+
+def check_array(args):
+ """Dummy helper function to be executed in subprocesses
+
+ Check that the provided array has the expected values in the provided
+ range.
+
+ """
+ data, position, expected = args
+ np.testing.assert_array_equal(data[position], expected)
+
+
+def inplace_double(args):
+ """Dummy helper function to be executed in subprocesses
+
+
+    Check that the input array has the right values in the provided range
+    and perform an in-place modification to double the values in that range.
+
+ """
+ data, position, expected = args
+ assert data[position] == expected
+ data[position] *= 2
+ np.testing.assert_array_equal(data[position], 2 * expected)
+
+
+@with_numpy
+@with_multiprocessing
+def test_memmap_based_array_reducing(tmpdir):
+ """Check that it is possible to reduce a memmap backed array"""
+ assert_array_equal = np.testing.assert_array_equal
+ filename = tmpdir.join('test.mmap').strpath
+
+    # Create a file larger than what will be used by the memmap views below
+ buffer = np.memmap(filename, dtype=np.float64, shape=500, mode='w+')
+
+    # Fill the original buffer with negative markers to detect over- or
+    # underflow in case of test failures
+ buffer[:] = - 1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype)
+ buffer.flush()
+
+    # Memmap a 3D Fortran-ordered array on an offset subsection of the
+    # previous buffer
+ a = np.memmap(filename, dtype=np.float64, shape=(3, 5, 4),
+ mode='r+', order='F', offset=4)
+ a[:] = np.arange(60).reshape(a.shape)
+
+ # Build various views that share the buffer with the original memmap
+
+    # b is a sliced memmap view on a memmap instance
+ b = a[1:-1, 2:-1, 2:4]
+
+ # c and d are array views
+ c = np.asarray(b)
+ d = c.T
+
+ # Array reducer with auto dumping disabled
+ reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True)
+
+ def reconstruct_array_or_memmap(x):
+ cons, args = reducer(x)
+ return cons(*args)
+
+ # Reconstruct original memmap
+ a_reconstructed = reconstruct_array_or_memmap(a)
+ assert has_shareable_memory(a_reconstructed)
+ assert isinstance(a_reconstructed, np.memmap)
+ assert_array_equal(a_reconstructed, a)
+
+ # Reconstruct strided memmap view
+ b_reconstructed = reconstruct_array_or_memmap(b)
+ assert has_shareable_memory(b_reconstructed)
+ assert_array_equal(b_reconstructed, b)
+
+ # Reconstruct arrays views on memmap base
+ c_reconstructed = reconstruct_array_or_memmap(c)
+ assert not isinstance(c_reconstructed, np.memmap)
+ assert has_shareable_memory(c_reconstructed)
+ assert_array_equal(c_reconstructed, c)
+
+ d_reconstructed = reconstruct_array_or_memmap(d)
+ assert not isinstance(d_reconstructed, np.memmap)
+ assert has_shareable_memory(d_reconstructed)
+ assert_array_equal(d_reconstructed, d)
+
+ # Test graceful degradation on fake memmap instances with in-memory
+ # buffers
+ a3 = a * 3
+ assert not has_shareable_memory(a3)
+ a3_reconstructed = reconstruct_array_or_memmap(a3)
+ assert not has_shareable_memory(a3_reconstructed)
+ assert not isinstance(a3_reconstructed, np.memmap)
+ assert_array_equal(a3_reconstructed, a * 3)
+
+ # Test graceful degradation on arrays derived from fake memmap instances
+ b3 = np.asarray(a3)
+ assert not has_shareable_memory(b3)
+
+ b3_reconstructed = reconstruct_array_or_memmap(b3)
+ assert isinstance(b3_reconstructed, np.ndarray)
+ assert not has_shareable_memory(b3_reconstructed)
+ assert_array_equal(b3_reconstructed, b3)
+
+
+@with_multiprocessing
+@skipif(sys.platform != "win32",
+ reason="PermissionError only easily triggerable on Windows")
+def test_resource_tracker_retries_when_permissionerror(tmpdir):
+ # Test resource_tracker retry mechanism when unlinking memmaps. See more
+ # thorough information in the ``unlink_file`` documentation of joblib.
+ filename = tmpdir.join('test.mmap').strpath
+ cmd = """if 1:
+ import os
+ import numpy as np
+ import time
+ from joblib.externals.loky.backend import resource_tracker
+ resource_tracker.VERBOSE = 1
+
+ # Start the resource tracker
+ resource_tracker.ensure_running()
+ time.sleep(1)
+
+ # Create a file containing numpy data
+ memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+')
+ memmap[:] = np.arange(10).astype(np.int8).data
+ memmap.flush()
+ assert os.path.exists(r"{filename}")
+ del memmap
+
+ # Create a np.memmap backed by this file
+ memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+')
+ resource_tracker.register(r"{filename}", "file")
+
+    # Ask the resource_tracker to delete the file backing the np.memmap; this
+    # should raise a PermissionError that the resource_tracker will log.
+ resource_tracker.maybe_unlink(r"{filename}", "file")
+
+ # Wait for the resource_tracker to process the maybe_unlink before cleaning
+ # up the memmap
+ time.sleep(2)
+ """.format(filename=filename)
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ p.wait()
+ out, err = p.communicate()
+ assert p.returncode == 0
+ assert out == b''
+ msg = 'tried to unlink {}, got PermissionError'.format(filename)
+ assert msg in err.decode()
+
+
+@with_numpy
+@with_multiprocessing
+def test_high_dimension_memmap_array_reducing(tmpdir):
+ assert_array_equal = np.testing.assert_array_equal
+
+ filename = tmpdir.join('test.mmap').strpath
+
+ # Create a high dimensional memmap
+ a = np.memmap(filename, dtype=np.float64, shape=(100, 15, 15, 3),
+ mode='w+')
+ a[:] = np.arange(100 * 15 * 15 * 3).reshape(a.shape)
+
+ # Create some slices/indices at various dimensions
+ b = a[0:10]
+ c = a[:, 5:10]
+ d = a[:, :, :, 0]
+ e = a[1:3:4]
+
+ # Array reducer with auto dumping disabled
+ reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True)
+
+ def reconstruct_array_or_memmap(x):
+ cons, args = reducer(x)
+ return cons(*args)
+
+ a_reconstructed = reconstruct_array_or_memmap(a)
+ assert has_shareable_memory(a_reconstructed)
+ assert isinstance(a_reconstructed, np.memmap)
+ assert_array_equal(a_reconstructed, a)
+
+ b_reconstructed = reconstruct_array_or_memmap(b)
+ assert has_shareable_memory(b_reconstructed)
+ assert_array_equal(b_reconstructed, b)
+
+ c_reconstructed = reconstruct_array_or_memmap(c)
+ assert has_shareable_memory(c_reconstructed)
+ assert_array_equal(c_reconstructed, c)
+
+ d_reconstructed = reconstruct_array_or_memmap(d)
+ assert has_shareable_memory(d_reconstructed)
+ assert_array_equal(d_reconstructed, d)
+
+ e_reconstructed = reconstruct_array_or_memmap(e)
+ assert has_shareable_memory(e_reconstructed)
+ assert_array_equal(e_reconstructed, e)
+
+
+@with_numpy
+def test__strided_from_memmap(tmpdir):
+ fname = tmpdir.join('test.mmap').strpath
+ size = 5 * mmap.ALLOCATIONGRANULARITY
+ offset = mmap.ALLOCATIONGRANULARITY + 1
+ # This line creates the mmap file that is reused later
+ memmap_obj = np.memmap(fname, mode='w+', shape=size + offset)
+ # filename, dtype, mode, offset, order, shape, strides, total_buffer_len
+ memmap_obj = _strided_from_memmap(fname, dtype='uint8', mode='r',
+ offset=offset, order='C', shape=size,
+ strides=None, total_buffer_len=None,
+ unlink_on_gc_collect=False)
+ assert isinstance(memmap_obj, np.memmap)
+ assert memmap_obj.offset == offset
+ memmap_backed_obj = _strided_from_memmap(
+ fname, dtype='uint8', mode='r', offset=offset, order='C',
+ shape=(size // 2,), strides=(2,), total_buffer_len=size,
+ unlink_on_gc_collect=False
+ )
+ assert _get_backing_memmap(memmap_backed_obj).offset == offset
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_pool_with_memmap(factory, tmpdir):
+ """Check that subprocess can access and update shared memory memmap"""
+ assert_array_equal = np.testing.assert_array_equal
+
+ # Fork the subprocess before allocating the objects to be passed
+ pool_temp_folder = tmpdir.mkdir('pool').strpath
+ p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder)
+ try:
+ filename = tmpdir.join('test.mmap').strpath
+ a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
+ a.fill(1.0)
+
+ p.map(inplace_double, [(a, (i, j), 1.0)
+ for i in range(a.shape[0])
+ for j in range(a.shape[1])])
+
+ assert_array_equal(a, 2 * np.ones(a.shape))
+
+ # Open a copy-on-write view on the previous data
+ b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c')
+
+ p.map(inplace_double, [(b, (i, j), 2.0)
+ for i in range(b.shape[0])
+ for j in range(b.shape[1])])
+
+ # Passing memmap instances to the pool should not trigger the creation
+ # of new files on the FS
+ assert os.listdir(pool_temp_folder) == []
+
+ # the original data is untouched
+ assert_array_equal(a, 2 * np.ones(a.shape))
+ assert_array_equal(b, 2 * np.ones(b.shape))
+
+ # readonly maps can be read but not updated
+ c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r',
+ offset=5 * 4)
+
+ with raises(AssertionError):
+ p.map(check_array, [(c, i, 3.0) for i in range(c.shape[0])])
+
+ # depending on the version of numpy one can either get a RuntimeError
+ # or a ValueError
+ with raises((RuntimeError, ValueError)):
+ p.map(inplace_double, [(c, i, 2.0) for i in range(c.shape[0])])
+ finally:
+ # Clean all filehandlers held by the pool
+ p.terminate()
+ del p
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_pool_with_memmap_array_view(factory, tmpdir):
+ """Check that subprocess can access and update shared memory array"""
+ assert_array_equal = np.testing.assert_array_equal
+
+ # Fork the subprocess before allocating the objects to be passed
+ pool_temp_folder = tmpdir.mkdir('pool').strpath
+ p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder)
+ try:
+
+ filename = tmpdir.join('test.mmap').strpath
+ a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
+ a.fill(1.0)
+
+ # Create an ndarray view on the memmap instance
+ a_view = np.asarray(a)
+ assert not isinstance(a_view, np.memmap)
+ assert has_shareable_memory(a_view)
+
+ p.map(inplace_double, [(a_view, (i, j), 1.0)
+ for i in range(a.shape[0])
+ for j in range(a.shape[1])])
+
+ # Both a and the a_view have been updated
+ assert_array_equal(a, 2 * np.ones(a.shape))
+ assert_array_equal(a_view, 2 * np.ones(a.shape))
+
+ # Passing memmap array view to the pool should not trigger the
+ # creation of new files on the FS
+ assert os.listdir(pool_temp_folder) == []
+
+ finally:
+ p.terminate()
+ del p
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_permission_error_windows_reference_cycle(backend):
+ # Non regression test for:
+ # https://github.com/joblib/joblib/issues/806
+ #
+ # The issue happens when trying to delete a memory mapped file that has
+ # not yet been closed by one of the worker processes.
+ cmd = """if 1:
+ import numpy as np
+ from joblib import Parallel, delayed
+
+
+ data = np.random.rand(int(2e6)).reshape((int(1e6), 2))
+
+ # Build a complex cyclic reference that is likely to delay garbage
+ # collection of the memmapped array in the worker processes.
+ first_list = current_list = [data]
+ for i in range(10):
+ current_list = [current_list]
+ first_list.append(current_list)
+
+ if __name__ == "__main__":
+ results = Parallel(n_jobs=2, backend="{b}")(
+ delayed(len)(current_list) for i in range(10))
+ assert results == [1] * 10
+ """.format(b=backend)
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ p.wait()
+ out, err = p.communicate()
+ assert p.returncode == 0, out.decode() + "\n\n" + err.decode()
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_permission_error_windows_memmap_sent_to_parent(backend):
+ # Second non-regression test for:
+ # https://github.com/joblib/joblib/issues/806
+    # Previously, child processes would not convert temporary memmaps to numpy
+    # arrays when sending the data back to the parent process. This would lead
+    # to permission errors on Windows when deleting joblib's temporary folder,
+    # as the memmapped file handles would still be open in the parent process.
+ cmd = '''if 1:
+ import os
+ import time
+
+ import numpy as np
+
+ from joblib import Parallel, delayed
+ from testutils import return_slice_of_data
+
+ data = np.ones(int(2e6))
+
+ if __name__ == '__main__':
+ # warm-up call to launch the workers and start the resource_tracker
+ _ = Parallel(n_jobs=2, verbose=5, backend='{b}')(
+ delayed(id)(i) for i in range(20))
+
+ time.sleep(0.5)
+
+ slice_of_data = Parallel(n_jobs=2, verbose=5, backend='{b}')(
+ delayed(return_slice_of_data)(data, 0, 20) for _ in range(10))
+ '''.format(b=backend)
+
+ for _ in range(3):
+ env = os.environ.copy()
+ env['PYTHONPATH'] = os.path.dirname(__file__)
+ p = subprocess.Popen([sys.executable, '-c', cmd],
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE, env=env)
+ p.wait()
+ out, err = p.communicate()
+ assert p.returncode == 0, err
+ assert out == b''
+ if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]:
+ # In early versions of Python 3.8, a reference leak
+ # https://github.com/cloudpipe/cloudpickle/issues/327, holds
+ # references to pickled objects, generating race condition during
+ # cleanup finalizers of joblib and noisy resource_tracker outputs.
+ assert b'resource_tracker' not in err
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_parallel_isolated_temp_folders(backend):
+    # Test that consecutive Parallel calls use isolated subfolders, even
+    # for the loky backend, which reuses its executor instance across calls.
+ array = np.arange(int(1e2))
+ [filename_1] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
+ delayed(getattr)(array, 'filename') for _ in range(1)
+ )
+ [filename_2] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
+ delayed(getattr)(array, 'filename') for _ in range(1)
+ )
+ assert os.path.dirname(filename_2) != os.path.dirname(filename_1)
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_managed_backend_reuse_temp_folder(backend):
+ # Test that calls to a managed parallel object reuse the same memmaps.
+ array = np.arange(int(1e2))
+ with Parallel(n_jobs=2, backend=backend, max_nbytes=10) as p:
+ [filename_1] = p(
+ delayed(getattr)(array, 'filename') for _ in range(1)
+ )
+ [filename_2] = p(
+ delayed(getattr)(array, 'filename') for _ in range(1)
+ )
+ assert os.path.dirname(filename_2) == os.path.dirname(filename_1)
+
+
+@with_numpy
+@with_multiprocessing
+def test_memmapping_temp_folder_thread_safety():
+ # Concurrent calls to Parallel with the loky backend will use the same
+ # executor, and thus the same reducers. Make sure that those reducers use
+ # different temporary folders depending on which Parallel objects called
+ # them, which is necessary to limit potential race conditions during the
+ # garbage collection of temporary memmaps.
+ array = np.arange(int(1e2))
+
+ temp_dirs_thread_1 = set()
+ temp_dirs_thread_2 = set()
+
+ def concurrent_get_filename(array, temp_dirs):
+ with Parallel(backend='loky', n_jobs=2, max_nbytes=10) as p:
+ for i in range(10):
+ [filename] = p(
+ delayed(getattr)(array, 'filename') for _ in range(1)
+ )
+ temp_dirs.add(os.path.dirname(filename))
+
+ t1 = threading.Thread(
+ target=concurrent_get_filename, args=(array, temp_dirs_thread_1)
+ )
+ t2 = threading.Thread(
+ target=concurrent_get_filename, args=(array, temp_dirs_thread_2)
+ )
+
+ t1.start()
+ t2.start()
+
+ t1.join()
+ t2.join()
+
+ assert len(temp_dirs_thread_1) == 1
+ assert len(temp_dirs_thread_2) == 1
+
+ assert temp_dirs_thread_1 != temp_dirs_thread_2
+
+
+@with_numpy
+@with_multiprocessing
+def test_multithreaded_parallel_termination_resource_tracker_silent():
+    # Test that concurrent termination attempts on the same executor do not
+    # emit any spurious error from the resource_tracker. We test various
+    # situations in which 0, 1, or both Parallel calls send a task that will
+    # make the worker (and thus the whole Parallel call) error out.
+ cmd = '''if 1:
+ import os
+ import numpy as np
+ from joblib import Parallel, delayed
+ from joblib.externals.loky.backend import resource_tracker
+ from concurrent.futures import ThreadPoolExecutor, wait
+
+ resource_tracker.VERBOSE = 0
+
+ array = np.arange(int(1e2))
+
+ temp_dirs_thread_1 = set()
+ temp_dirs_thread_2 = set()
+
+
+ def raise_error(array):
+ raise ValueError
+
+
+ def parallel_get_filename(array, temp_dirs):
+ with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
+ for i in range(10):
+ [filename] = p(
+ delayed(getattr)(array, "filename") for _ in range(1)
+ )
+ temp_dirs.add(os.path.dirname(filename))
+
+
+ def parallel_raise(array, temp_dirs):
+ with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
+ for i in range(10):
+ [filename] = p(
+ delayed(raise_error)(array) for _ in range(1)
+ )
+ temp_dirs.add(os.path.dirname(filename))
+
+
+ executor = ThreadPoolExecutor(max_workers=2)
+
+ # both function calls will use the same loky executor, but with a
+ # different Parallel object.
+ future_1 = executor.submit({f1}, array, temp_dirs_thread_1)
+ future_2 = executor.submit({f2}, array, temp_dirs_thread_2)
+
+ # Wait for both threads to terminate their backend
+ wait([future_1, future_2])
+
+ future_1.result()
+ future_2.result()
+ '''
+ functions_and_returncodes = [
+ ("parallel_get_filename", "parallel_get_filename", 0),
+ ("parallel_get_filename", "parallel_raise", 1),
+ ("parallel_raise", "parallel_raise", 1)
+ ]
+
+ for f1, f2, returncode in functions_and_returncodes:
+ p = subprocess.Popen([sys.executable, '-c', cmd.format(f1=f1, f2=f2)],
+ stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+ p.wait()
+ out, err = p.communicate()
+ assert p.returncode == returncode, out.decode()
+ assert b"resource_tracker" not in err, err.decode()
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_many_parallel_calls_on_same_object(backend):
+    # After #966 got merged, consecutive Parallel objects were sharing the
+    # same temp folder, which could lead to race conditions during the
+    # management of temporary resources by the resource_tracker. This is a
+    # non-regression test that makes sure that consecutive Parallel operations
+    # on the same object do not error out.
+ cmd = '''if 1:
+ import os
+ import time
+
+ import numpy as np
+
+ from joblib import Parallel, delayed
+ from testutils import return_slice_of_data
+
+ data = np.ones(100)
+
+ if __name__ == '__main__':
+ for i in range(5):
+ slice_of_data = Parallel(
+ n_jobs=2, max_nbytes=1, backend='{b}')(
+ delayed(return_slice_of_data)(data, 0, 20)
+ for _ in range(10)
+ )
+ '''.format(b=backend)
+ env = os.environ.copy()
+ env['PYTHONPATH'] = os.path.dirname(__file__)
+ p = subprocess.Popen(
+ [sys.executable, '-c', cmd],
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ env=env,
+ )
+ p.wait()
+ out, err = p.communicate()
+ assert p.returncode == 0, err
+ assert out == b''
+ if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]:
+ # In early versions of Python 3.8, a reference leak
+ # https://github.com/cloudpipe/cloudpickle/issues/327, holds
+ # references to pickled objects, generating race condition during
+ # cleanup finalizers of joblib and noisy resource_tracker outputs.
+ assert b'resource_tracker' not in err
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_memmap_returned_as_regular_array(backend):
+ data = np.ones(int(1e3))
+ # Check that child processes send temporary memmaps back as numpy arrays.
+ [result] = Parallel(n_jobs=2, backend=backend, max_nbytes=100)(
+ delayed(check_memmap_and_send_back)(data) for _ in range(1))
+ assert _get_backing_memmap(result) is None
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_resource_tracker_silent_when_reference_cycles(backend):
+    # There are a variety of reasons that can make joblib with the loky
+    # backend output noisy warnings when a reference cycle prevents a memmap
+    # from being garbage collected. In particular, joblib's main-process
+    # finalizer deletes the temporary folder if that was not done before,
+    # which can interact badly with the resource_tracker. We don't risk
+    # leaking any resources, but this will likely make joblib output a lot of
+    # confusing low-level messages.
+    #
+    # This test makes sure that the resource_tracker stays silent when such a
+    # reference cycle is collected concurrently on non-Windows platforms.
+ #
+ # Note that the script in ``cmd`` is the exact same script as in
+ # test_permission_error_windows_reference_cycle.
+ if backend == "loky" and sys.platform.startswith('win'):
+ # XXX: on Windows, reference cycles can delay timely garbage collection
+ # and make it impossible to properly delete the temporary folder in the
+ # main process because of permission errors.
+ pytest.xfail(
+ "The temporary folder cannot be deleted on Windows in the "
+ "presence of a reference cycle"
+ )
+
+ cmd = """if 1:
+ import numpy as np
+ from joblib import Parallel, delayed
+
+
+ data = np.random.rand(int(2e6)).reshape((int(1e6), 2))
+
+ # Build a complex cyclic reference that is likely to delay garbage
+ # collection of the memmapped array in the worker processes.
+ first_list = current_list = [data]
+ for i in range(10):
+ current_list = [current_list]
+ first_list.append(current_list)
+
+ if __name__ == "__main__":
+ results = Parallel(n_jobs=2, backend="{b}")(
+ delayed(len)(current_list) for i in range(10))
+ assert results == [1] * 10
+ """.format(b=backend)
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ p.wait()
+ out, err = p.communicate()
+ out = out.decode()
+ err = err.decode()
+ assert p.returncode == 0, out + "\n\n" + err
+ assert "resource_tracker" not in err, err
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_memmapping_pool_for_large_arrays(factory, tmpdir):
+ """Check that large arrays are not copied in memory"""
+
+ # Check that the tempfolder is empty
+ assert os.listdir(tmpdir.strpath) == []
+
+    # Build array reducers that automatically dump large array content
+    # to filesystem-backed memmap instances to avoid memory explosion
+ p = factory(3, max_nbytes=40, temp_folder=tmpdir.strpath, verbose=2)
+ try:
+ # The temporary folder for the pool is not provisioned in advance
+ assert os.listdir(tmpdir.strpath) == []
+ assert not os.path.exists(p._temp_folder)
+
+ small = np.ones(5, dtype=np.float32)
+ assert small.nbytes == 20
+ p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])])
+
+ # Memory has been copied, the pool filesystem folder is unused
+ assert os.listdir(tmpdir.strpath) == []
+
+        # Try with an array larger than the memmap threshold of 40 bytes
+ large = np.ones(100, dtype=np.float64)
+ assert large.nbytes == 800
+ p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])
+
+        # The data has been dumped in a temp folder for subprocesses to share
+        # it without per-child memory copies
+ assert os.path.isdir(p._temp_folder)
+ dumped_filenames = os.listdir(p._temp_folder)
+ assert len(dumped_filenames) == 1
+
+ # Check that memory mapping is not triggered for arrays with
+ # dtype='object'
+ objects = np.array(['abc'] * 100, dtype='object')
+ results = p.map(has_shareable_memory, [objects])
+ assert not results[0]
+
+ finally:
+ # check FS garbage upon pool termination
+ p.terminate()
+ for i in range(10):
+ sleep(.1)
+ if not os.path.exists(p._temp_folder):
+ break
+ else: # pragma: no cover
+ raise AssertionError(
+ 'temporary folder {} was not deleted'.format(p._temp_folder)
+ )
+ del p
+
+
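+# Editorial sketch of the same threshold at the Parallel API level (hedged;
+# the helper names below are illustrative only): arguments whose nbytes exceed
+# max_nbytes are dumped to a temporary memmap before being dispatched to the
+# workers, while smaller arguments are pickled as usual.
+def _square_sum(a):
+    return float((a ** 2).sum())
+
+
+def _parallel_memmap_threshold_sketch():
+    big = np.ones(10000)  # 80000 bytes, above the 1 kB threshold below
+    return Parallel(n_jobs=2, max_nbytes=1024)(
+        delayed(_square_sum)(big) for _ in range(2))
+
+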
+@with_numpy
+@with_multiprocessing
+@parametrize(
+ "backend",
+ [
+ pytest.param(
+ "multiprocessing",
+ marks=pytest.mark.xfail(
+ reason='https://github.com/joblib/joblib/issues/1086'
+ ),
+ ),
+ "loky",
+ ]
+)
+def test_child_raises_parent_exits_cleanly(backend):
+ # When a task executed by a child process raises an error, the parent
+ # process's backend is notified, and calls abort_everything.
+ # In loky, abort_everything itself calls shutdown(kill_workers=True) which
+ # sends SIGKILL to the worker, preventing it from running the finalizers
+ # supposed to signal the resource_tracker when the worker is done using
+    # objects relying on a shared resource (e.g. np.memmaps). Because this
+    # behavior is prone to:
+ # - cause a resource leak
+ # - make the resource tracker emit noisy resource warnings
+ # we explicitly test that, when the said situation occurs:
+ # - no resources are actually leaked
+ # - the temporary resources are deleted as soon as possible (typically, at
+ # the end of the failing Parallel call)
+ # - the resource_tracker does not emit any warnings.
+ cmd = """if 1:
+ import os
+ from pathlib import Path
+ from time import sleep
+
+ import numpy as np
+ from joblib import Parallel, delayed
+ from testutils import print_filename_and_raise
+
+ data = np.random.rand(1000)
+
+ def get_temp_folder(parallel_obj, backend):
+ if "{b}" == "loky":
+ return Path(parallel_obj._backend._workers._temp_folder)
+ else:
+ return Path(parallel_obj._backend._pool._temp_folder)
+
+
+ if __name__ == "__main__":
+ try:
+ with Parallel(n_jobs=2, backend="{b}", max_nbytes=100) as p:
+ temp_folder = get_temp_folder(p, "{b}")
+ p(delayed(print_filename_and_raise)(data)
+ for i in range(1))
+ except ValueError as e:
+ # the temporary folder should be deleted by the end of this
+ # call but apparently on some file systems, this takes
+ # some time to be visible.
+ #
+ # We attempt to write into the temporary folder to test for
+ # its existence and we wait for a maximum of 10 seconds.
+ for i in range(100):
+ try:
+ with open(temp_folder / "some_file.txt", "w") as f:
+ f.write("some content")
+ except FileNotFoundError:
+ # temp_folder has been deleted, all is fine
+ break
+
+ # ... else, wait a bit and try again
+ sleep(.1)
+ else:
+ raise AssertionError(
+ str(temp_folder) + " was not deleted"
+ ) from e
+ """.format(b=backend)
+ env = os.environ.copy()
+ env['PYTHONPATH'] = os.path.dirname(__file__)
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE, env=env)
+ p.wait()
+ out, err = p.communicate()
+ out, err = out.decode(), err.decode()
+ filename = out.split('\n')[0]
+ assert p.returncode == 0, err or out
+ assert err == '' # no resource_tracker warnings.
+ assert not os.path.exists(filename)
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_memmapping_pool_for_large_arrays_disabled(factory, tmpdir):
+ """Check that large arrays memmapping can be disabled"""
+ # Set max_nbytes to None to disable the auto memmapping feature
+ p = factory(3, max_nbytes=None, temp_folder=tmpdir.strpath)
+ try:
+
+ # Check that the tempfolder is empty
+ assert os.listdir(tmpdir.strpath) == []
+
+        # Try with an array larger than the memmap threshold of 40 bytes
+ large = np.ones(100, dtype=np.float64)
+ assert large.nbytes == 800
+ p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])
+
+ # Check that the tempfolder is still empty
+ assert os.listdir(tmpdir.strpath) == []
+
+ finally:
+ # Cleanup open file descriptors
+ p.terminate()
+ del p
+
+
+@with_numpy
+@with_multiprocessing
+@with_dev_shm
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_memmapping_on_large_enough_dev_shm(factory):
+ """Check that memmapping uses /dev/shm when possible"""
+ orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
+ try:
+ # Make joblib believe that it can use /dev/shm even when running on a
+ # CI container where the size of the /dev/shm is not very large (that
+ # is at least 32 MB instead of 2 GB by default).
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(32e6)
+ p = factory(3, max_nbytes=10)
+ try:
+ # Check that the pool has correctly detected the presence of the
+ # shared memory filesystem.
+ pool_temp_folder = p._temp_folder
+ folder_prefix = '/dev/shm/joblib_memmapping_folder_'
+ assert pool_temp_folder.startswith(folder_prefix)
+ assert os.path.exists(pool_temp_folder)
+
+ # Try with a file larger than the memmap threshold of 10 bytes
+ a = np.ones(100, dtype=np.float64)
+ assert a.nbytes == 800
+ p.map(id, [a] * 10)
+            # a should have been memmapped to the pool temp folder: the joblib
+            # pickling procedure generates one .pkl file:
+ assert len(os.listdir(pool_temp_folder)) == 1
+
+ # create a new array with content that is different from 'a' so
+ # that it is mapped to a different file in the temporary folder of
+ # the pool.
+ b = np.ones(100, dtype=np.float64) * 2
+ assert b.nbytes == 800
+ p.map(id, [b] * 10)
+ # A copy of both a and b are now stored in the shared memory folder
+ assert len(os.listdir(pool_temp_folder)) == 2
+ finally:
+ # Cleanup open file descriptors
+ p.terminate()
+ del p
+
+ for i in range(100):
+ # The temp folder is cleaned up upon pool termination
+ if not os.path.exists(pool_temp_folder):
+ break
+ sleep(.1)
+ else: # pragma: no cover
+ raise AssertionError('temporary folder of pool was not deleted')
+ finally:
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size
+
+
+@with_numpy
+@with_multiprocessing
+@with_dev_shm
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_memmapping_on_too_small_dev_shm(factory):
+ orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
+ try:
+ # Make joblib believe that it cannot use /dev/shm unless there is
+ # 42 exabytes of available shared memory in /dev/shm
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(42e18)
+
+ p = factory(3, max_nbytes=10)
+ try:
+            # Check that the pool does not use /dev/shm when it is considered
+            # too small to host the shared data.
+ pool_temp_folder = p._temp_folder
+ assert not pool_temp_folder.startswith('/dev/shm')
+ finally:
+ # Cleanup open file descriptors
+ p.terminate()
+ del p
+
+ # The temp folder is cleaned up upon pool termination
+ assert not os.path.exists(pool_temp_folder)
+ finally:
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_memmapping_pool_for_large_arrays_in_return(factory, tmpdir):
+ """Check that large arrays are not copied in memory in return"""
+ assert_array_equal = np.testing.assert_array_equal
+
+    # Build array reducers that automatically dump large array content,
+    # but check that the returned data structures are regular arrays to avoid
+    # passing a memmap array pointing to a pool-controlled temp folder that
+    # might be confusing to the user
+
+ # The MemmappingPool user can always return numpy.memmap object explicitly
+ # to avoid memory copy
+ p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath)
+ try:
+ res = p.apply_async(np.ones, args=(1000,))
+ large = res.get()
+ assert not has_shareable_memory(large)
+ assert_array_equal(large, np.ones(1000))
+ finally:
+ p.terminate()
+ del p
+
+
+def _worker_multiply(a, n_times):
+ """Multiplication function to be executed by subprocess"""
+ assert has_shareable_memory(a)
+ return a * n_times
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
+ ids=["multiprocessing", "loky"])
+def test_workaround_against_bad_memmap_with_copied_buffers(factory, tmpdir):
+ """Check that memmaps with a bad buffer are returned as regular arrays
+
+ Unary operations and ufuncs on memmap instances return a new memmap
+ instance with an in-memory buffer (probably a numpy bug).
+ """
+ assert_array_equal = np.testing.assert_array_equal
+
+ p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath)
+ try:
+        # Send a complex, large-ish view on an array that will be converted to
+ # a memmap in the worker process
+ a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
+ order='F')[:, :1, :]
+
+        # Call a non-inplace multiply operation on the memmap in the worker
+        # and send the result back to the parent.
+ b = p.apply_async(_worker_multiply, args=(a, 3)).get()
+ assert not has_shareable_memory(b)
+ assert_array_equal(b, 3 * a)
+ finally:
+ p.terminate()
+ del p
+
+
+def identity(arg):
+ return arg
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize(
+ "factory,retry_no",
+ list(itertools.product(
+ [MemmappingPool, TestExecutor.get_memmapping_executor], range(3))),
+ ids=['{}, {}'.format(x, y) for x, y in itertools.product(
+ ["multiprocessing", "loky"], map(str, range(3)))])
+def test_pool_memmap_with_big_offset(factory, retry_no, tmpdir):
+ # Test that numpy memmap offset is set correctly if greater than
+ # mmap.ALLOCATIONGRANULARITY, see
+ # https://github.com/joblib/joblib/issues/451 and
+ # https://github.com/numpy/numpy/pull/8443 for more details.
+ fname = tmpdir.join('test.mmap').strpath
+ size = 5 * mmap.ALLOCATIONGRANULARITY
+ offset = mmap.ALLOCATIONGRANULARITY + 1
+ obj = make_memmap(fname, mode='w+', shape=size, dtype='uint8',
+ offset=offset)
+
+ p = factory(2, temp_folder=tmpdir.strpath)
+ result = p.apply_async(identity, args=(obj,)).get()
+ assert isinstance(result, np.memmap)
+ assert result.offset == offset
+ np.testing.assert_array_equal(obj, result)
+ p.terminate()
+
+
+def test_pool_get_temp_dir(tmpdir):
+ pool_folder_name = 'test.tmpdir'
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, tmpdir.strpath)
+ assert shared_mem is False
+ assert pool_folder == tmpdir.join('test.tmpdir').strpath
+
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None)
+ if sys.platform.startswith('win'):
+ assert shared_mem is False
+ assert pool_folder.endswith(pool_folder_name)
+
+
+def test_pool_get_temp_dir_no_statvfs(tmpdir, monkeypatch):
+ """Check that _get_temp_dir works when os.statvfs is not defined
+
+ Regression test for #902
+ """
+ pool_folder_name = 'test.tmpdir'
+ import joblib._memmapping_reducer
+ if hasattr(joblib._memmapping_reducer.os, 'statvfs'):
+ # We are on Unix, since Windows doesn't have this function
+ monkeypatch.delattr(joblib._memmapping_reducer.os, 'statvfs')
+
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None)
+ if sys.platform.startswith('win'):
+ assert shared_mem is False
+ assert pool_folder.endswith(pool_folder_name)
+
+
+@with_numpy
+@skipif(sys.platform == 'win32', reason='This test fails with a '
+ 'PermissionError on Windows')
+@parametrize("mmap_mode", ["r+", "w+"])
+def test_numpy_arrays_use_different_memory(mmap_mode):
+ def func(arr, value):
+ arr[:] = value
+ return arr
+
+ arrays = [np.zeros((10, 10), dtype='float64') for i in range(10)]
+
+ results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)(
+ delayed(func)(arr, i) for i, arr in enumerate(arrays))
+
+ for i, arr in enumerate(results):
+ np.testing.assert_array_equal(arr, i)
+
+
+@with_numpy
+def test_weak_array_key_map():
+
+ def assert_empty_after_gc_collect(container, retries=100):
+ for i in range(retries):
+ if len(container) == 0:
+ return
+ gc.collect()
+ sleep(.1)
+ assert len(container) == 0
+
+ a = np.ones(42)
+ m = _WeakArrayKeyMap()
+ m.set(a, 'a')
+ assert m.get(a) == 'a'
+
+ b = a
+ assert m.get(b) == 'a'
+ m.set(b, 'b')
+ assert m.get(a) == 'b'
+
+ del a
+ gc.collect()
+ assert len(m._data) == 1
+ assert m.get(b) == 'b'
+
+ del b
+ assert_empty_after_gc_collect(m._data)
+
+ c = np.ones(42)
+ m.set(c, 'c')
+ assert len(m._data) == 1
+ assert m.get(c) == 'c'
+
+ with raises(KeyError):
+ m.get(np.ones(42))
+
+ del c
+ assert_empty_after_gc_collect(m._data)
+
+ # Check that creating and dropping numpy arrays with potentially the same
+ # object id will not cause the map to get confused.
+ def get_set_get_collect(m, i):
+ a = np.ones(42)
+ with raises(KeyError):
+ m.get(a)
+ m.set(a, i)
+ assert m.get(a) == i
+ return id(a)
+
+ unique_ids = set([get_set_get_collect(m, i) for i in range(1000)])
+ if platform.python_implementation() == 'CPython':
+ # On CPython (at least) the same id is often reused many times for the
+ # temporary arrays created under the local scope of the
+ # get_set_get_collect function without causing any spurious lookups /
+ # insertions in the map. Apparently on Python nogil, the id is not
+ # reused as often.
+ max_len_unique_ids = 400 if getattr(sys.flags, 'nogil', False) else 100
+ assert len(unique_ids) < max_len_unique_ids
+
+
+def test_weak_array_key_map_no_pickling():
+ m = _WeakArrayKeyMap()
+ with raises(pickle.PicklingError):
+ pickle.dumps(m)
+
+
+@with_numpy
+@with_multiprocessing
+def test_direct_mmap(tmpdir):
+ testfile = str(tmpdir.join('arr.dat'))
+ a = np.arange(10, dtype='uint8')
+ a.tofile(testfile)
+
+ def _read_array():
+ with open(testfile) as fd:
+ mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ, offset=0)
+ return np.ndarray((10,), dtype=np.uint8, buffer=mm, offset=0)
+
+ def func(x):
+ return x**2
+
+ arr = _read_array()
+
+ # this is expected to work and gives the reference
+ ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a])
+
+    # now test that it works with the mmap array
+ results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr])
+ np.testing.assert_array_equal(results, ref)
+
+ # also test with a mmap array read in the subprocess
+ def worker():
+ return _read_array()
+
+ results = Parallel(n_jobs=2)(delayed(worker)() for _ in range(1))
+ np.testing.assert_array_equal(results[0], arr)
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_memory.py b/venv/lib/python3.10/site-packages/joblib/test/test_memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..120987b666bb84268457bdd4b843a7bae070832f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_memory.py
@@ -0,0 +1,1493 @@
+"""
+Test the memory module.
+"""
+
+# Author: Gael Varoquaux
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import functools
+import gc
+import logging
+import shutil
+import os
+import os.path
+import pathlib
+import pickle
+import sys
+import time
+import datetime
+import textwrap
+
+import pytest
+
+from joblib.memory import Memory
+from joblib.memory import expires_after
+from joblib.memory import MemorizedFunc, NotMemorizedFunc
+from joblib.memory import MemorizedResult, NotMemorizedResult
+from joblib.memory import _FUNCTION_HASHES
+from joblib.memory import register_store_backend, _STORE_BACKENDS
+from joblib.memory import _build_func_identifier, _store_backend_factory
+from joblib.memory import JobLibCollisionWarning
+from joblib.parallel import Parallel, delayed
+from joblib._store_backends import StoreBackendBase, FileSystemStoreBackend
+from joblib.test.common import with_numpy, np
+from joblib.test.common import with_multiprocessing
+from joblib.testing import parametrize, raises, warns
+from joblib.hashing import hash
+
+
+###############################################################################
+# Module-level variables for the tests
+def f(x, y=1):
+ """ A module-level function for testing purposes.
+ """
+ return x ** 2 + y
+
+
+###############################################################################
+# Helper function for the tests
+def check_identity_lazy(func, accumulator, location):
+ """ Given a function and an accumulator (a list that grows every
+ time the function is called), check that the function can be
+ decorated by memory to be a lazy identity.
+ """
+ # Call each function with several arguments, and check that it is
+ # evaluated only once per argument.
+ memory = Memory(location=location, verbose=0)
+ func = memory.cache(func)
+ for i in range(3):
+ for _ in range(2):
+ assert func(i) == i
+ assert len(accumulator) == i + 1
+
+
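+# Editorial sketch of the behaviour the helper above verifies (hedged;
+# `_memory_cache_sketch` is illustrative only): the second call with the same
+# argument is served from the on-disk store, so the wrapped function body runs
+# exactly once.
+def _memory_cache_sketch(location):
+    calls = []
+
+    def expensive(x):
+        calls.append(x)
+        return x * 2
+
+    cached = Memory(location=location, verbose=0).cache(expensive)
+    assert cached(3) == 6
+    assert cached(3) == 6  # cache hit, `expensive` is not called again
+    assert len(calls) == 1
+
+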
+def corrupt_single_cache_item(memory):
+ single_cache_item, = memory.store_backend.get_items()
+ output_filename = os.path.join(single_cache_item.path, 'output.pkl')
+ with open(output_filename, 'w') as f:
+ f.write('garbage')
+
+
+def monkeypatch_cached_func_warn(func, monkeypatch_fixture):
+ # Need monkeypatch because pytest does not
+ # capture stdlib logging output (see
+ # https://github.com/pytest-dev/pytest/issues/2079)
+
+ recorded = []
+
+ def append_to_record(item):
+ recorded.append(item)
+ monkeypatch_fixture.setattr(func, 'warn', append_to_record)
+ return recorded
+
+
+###############################################################################
+# Tests
+def test_memory_integration(tmpdir):
+ """ Simple test of memory lazy evaluation.
+ """
+ accumulator = list()
+
+    # Rmk: this function has the same name as a module-level function,
+    # thus it serves as a test to check that both are identified
+    # as different.
+ def f(arg):
+ accumulator.append(1)
+ return arg
+
+ check_identity_lazy(f, accumulator, tmpdir.strpath)
+
+ # Now test clearing
+ for compress in (False, True):
+ for mmap_mode in ('r', None):
+ memory = Memory(location=tmpdir.strpath, verbose=10,
+ mmap_mode=mmap_mode, compress=compress)
+ # First clear the cache directory, to check that our code can
+ # handle that
+ # NOTE: this line would raise an exception, as the database file is
+ # still open; we ignore the error since we want to test what
+ # happens if the directory disappears
+ shutil.rmtree(tmpdir.strpath, ignore_errors=True)
+ g = memory.cache(f)
+ g(1)
+ g.clear(warn=False)
+ current_accumulator = len(accumulator)
+ out = g(1)
+
+ assert len(accumulator) == current_accumulator + 1
+ # Also, check that Memory.eval works similarly
+ assert memory.eval(f, 1) == out
+ assert len(accumulator) == current_accumulator + 1
+
+ # Now do a smoke test with a function defined in __main__, as the name
+ # mangling rules are more complex
+ f.__module__ = '__main__'
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ memory.cache(f)(1)
+
+
+@parametrize("call_before_reducing", [True, False])
+def test_parallel_call_cached_function_defined_in_jupyter(
+ tmpdir, call_before_reducing
+):
+ # Calling an interactively defined memory.cache()'d function inside a
+ # Parallel call used to clear the existing cache related to that
+ # function (https://github.com/joblib/joblib/issues/1035)
+
+ # This test checks that this is no longer the case.
+
+ # TODO: test that the cache related to the function cache persists across
+ # ipython sessions (provided that no code change were made to the
+ # function's source)?
+
+ # The first part of the test makes the necessary low-level calls to emulate
+ # the definition of a function in a jupyter notebook cell. Joblib has
+ # some custom code to treat functions defined specifically in jupyter
+ # notebooks/ipython sessions -- we want to test this code, which requires
+ # the emulation to be rigorous.
+ for session_no in [0, 1]:
+ ipython_cell_source = '''
+ def f(x):
+ return x
+ '''
+
+ ipython_cell_id = '<ipython-input-{}-000000000000>'.format(session_no)
+
+ exec(
+ compile(
+ textwrap.dedent(ipython_cell_source),
+ filename=ipython_cell_id,
+ mode='exec'
+ )
+ )
+ # f is now accessible in the locals mapping - but for some unknown
+ # reason, f = locals()['f'] throws a KeyError at runtime, we need to
+ # bind locals()['f'] to a different name in the local namespace
+ aliased_f = locals()['f']
+ aliased_f.__module__ = "__main__"
+
+ # Preliminary sanity checks, and tests checking that joblib properly
+ # identified f as an interactive function defined in a jupyter notebook
+ assert aliased_f(1) == 1
+ assert aliased_f.__code__.co_filename == ipython_cell_id
+
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ cached_f = memory.cache(aliased_f)
+
+ assert len(os.listdir(tmpdir / 'joblib')) == 1
+ f_cache_relative_directory = os.listdir(tmpdir / 'joblib')[0]
+ assert 'ipython-input' in f_cache_relative_directory
+
+ f_cache_directory = tmpdir / 'joblib' / f_cache_relative_directory
+
+ if session_no == 0:
+ # The cache should be empty as cached_f has not been called yet.
+ assert os.listdir(f_cache_directory) == ['f']
+ assert os.listdir(f_cache_directory / 'f') == []
+
+ if call_before_reducing:
+ cached_f(3)
+ # Two entries were just created: func_code.py, and a folder
+ # containing the information (input hash/output) of
+ # cached_f(3)
+ assert len(os.listdir(f_cache_directory / 'f')) == 2
+
+ # Now, testing #1035: when calling a cached function, joblib
+ # used to dynamically inspect the underlying function to
+ # extract its source code (to verify it matches the source code
+ # of the function as last inspected by joblib) -- however,
+ # source code introspection fails for dynamic functions sent to
+ # child processes - which would eventually make joblib clear
+ # the cache associated to f
+ res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2])
+ else:
+ # Submit the function to the joblib child processes, although
+ # the function has never been called in the parent yet. This
+ # triggers a specific code branch inside
+ # MemorizedFunc.__reduce__.
+ res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2])
+ assert len(os.listdir(f_cache_directory / 'f')) == 3
+
+ cached_f(3)
+
+ # Making sure f's cache does not get cleared after the parallel
+ # calls, and contains ALL cached functions calls (f(1), f(2), f(3))
+ # and 'func_code.py'
+ assert len(os.listdir(f_cache_directory / 'f')) == 4
+ else:
+ # For the second session, there should be an already existing cache
+ assert len(os.listdir(f_cache_directory / 'f')) == 4
+
+ cached_f(3)
+
+ # The previous cache should not be invalidated after calling the
+ # function in a new session
+ assert len(os.listdir(f_cache_directory / 'f')) == 4
+
+
+def test_no_memory():
+ """ Test memory with location=None: no memoize """
+ accumulator = list()
+
+ def ff(arg):
+ accumulator.append(1)
+ return arg
+
+ memory = Memory(location=None, verbose=0)
+ gg = memory.cache(ff)
+ for _ in range(4):
+ current_accumulator = len(accumulator)
+ gg(1)
+ assert len(accumulator) == current_accumulator + 1
+
+
+def test_memory_kwarg(tmpdir):
+ " Test memory with a function with keyword arguments."
+ accumulator = list()
+
+ def g(arg1=None, arg2=1):
+ accumulator.append(1)
+ return arg1
+
+ check_identity_lazy(g, accumulator, tmpdir.strpath)
+
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ g = memory.cache(g)
+ # Smoke test with an explicit keyword argument:
+ assert g(arg1=30, arg2=2) == 30
+
+
+def test_memory_lambda(tmpdir):
+ " Test memory with a function with a lambda."
+ accumulator = list()
+
+ def helper(x):
+ """ A helper function to define l as a lambda.
+ """
+ accumulator.append(1)
+ return x
+
+ check_identity_lazy(lambda x: helper(x), accumulator, tmpdir.strpath)
+
+
+def test_memory_name_collision(tmpdir):
+ " Check that name collisions with functions will raise warnings"
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+
+ @memory.cache
+ def name_collision(x):
+ """ A first function called name_collision
+ """
+ return x
+
+ a = name_collision
+
+ @memory.cache
+ def name_collision(x):
+ """ A second function called name_collision
+ """
+ return x
+
+ b = name_collision
+
+ with warns(JobLibCollisionWarning) as warninfo:
+ a(1)
+ b(1)
+
+ assert len(warninfo) == 1
+ assert "collision" in str(warninfo[0].message)
+
+
+def test_memory_warning_lambda_collisions(tmpdir):
+ # Check that multiple use of lambda will raise collisions
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ a = memory.cache(lambda x: x)
+ b = memory.cache(lambda x: x + 1)
+
+ with warns(JobLibCollisionWarning) as warninfo:
+ assert a(0) == 0
+ assert b(1) == 2
+ assert a(1) == 1
+
+ # In recent Python versions, we can retrieve the code of lambdas,
+ # thus nothing is raised
+ assert len(warninfo) == 4
+
+
+def test_memory_warning_collision_detection(tmpdir):
+ # Check that collisions impossible to detect will raise appropriate
+ # warnings.
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ a1 = eval('lambda x: x')
+ a1 = memory.cache(a1)
+ b1 = eval('lambda x: x+1')
+ b1 = memory.cache(b1)
+
+ with warns(JobLibCollisionWarning) as warninfo:
+ a1(1)
+ b1(1)
+ a1(0)
+
+ assert len(warninfo) == 2
+ assert "cannot detect" in str(warninfo[0].message).lower()
+
+
+def test_memory_partial(tmpdir):
+ " Test memory with functools.partial."
+ accumulator = list()
+
+ def func(x, y):
+ """ A helper function to define l as a lambda.
+ """
+ accumulator.append(1)
+ return y
+
+ import functools
+ function = functools.partial(func, 1)
+
+ check_identity_lazy(function, accumulator, tmpdir.strpath)
+
+
+def test_memory_eval(tmpdir):
+ " Smoke test memory with a function with a function defined in an eval."
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+
+ m = eval('lambda x: x')
+ mm = memory.cache(m)
+
+ assert mm(1) == 1
+
+
+def count_and_append(x=[]):
+ """ A function with a side effect in its arguments.
+
+ Return the length of its argument and append one element.
+ """
+ len_x = len(x)
+ x.append(None)
+ return len_x
+
+
+def test_argument_change(tmpdir):
+ """ Check that if a function has a side effect in its arguments, it
+ should use the hash of changing arguments.
+ """
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ func = memory.cache(count_and_append)
+ # call the function for the first time: it should cache the result with
+ # argument x=[]
+ assert func() == 0
+ # the second time the argument is x=[None], which is not cached
+ # yet, so the function should be called a second time
+ assert func() == 1
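+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# The test above relies on the fact that the cache key is derived from a hash
+# of the call arguments (via joblib.hashing.hash, imported as ``hash`` in this
+# module), so mutating the default list changes the key between calls.
+def _example_arguments_are_hashed():
+    assert hash([]) == hash([])          # hashing is deterministic
+    assert hash([]) != hash([None])      # a mutated argument gets a new key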
+
+
+@with_numpy
+@parametrize('mmap_mode', [None, 'r'])
+def test_memory_numpy(tmpdir, mmap_mode):
+ " Test memory with a function with numpy arrays."
+ accumulator = list()
+
+ def n(arg=None):
+ accumulator.append(1)
+ return arg
+
+ memory = Memory(location=tmpdir.strpath, mmap_mode=mmap_mode,
+ verbose=0)
+ cached_n = memory.cache(n)
+
+ rnd = np.random.RandomState(0)
+ for i in range(3):
+ a = rnd.random_sample((10, 10))
+ for _ in range(3):
+ assert np.all(cached_n(a) == a)
+ assert len(accumulator) == i + 1
+
+
+@with_numpy
+def test_memory_numpy_check_mmap_mode(tmpdir, monkeypatch):
+ """Check that mmap_mode is respected even at the first call"""
+
+ memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0)
+
+ @memory.cache()
+ def twice(a):
+ return a * 2
+
+ a = np.ones(3)
+
+ b = twice(a)
+ c = twice(a)
+
+ assert isinstance(c, np.memmap)
+ assert c.mode == 'r'
+
+ assert isinstance(b, np.memmap)
+ assert b.mode == 'r'
+
+ # Corrupt the file. Deleting the b and c mmaps
+ # is necessary to be able to edit the file
+ del b
+ del c
+ gc.collect()
+ corrupt_single_cache_item(memory)
+
+ # Make sure that corrupting the file causes recomputation and that
+ # a warning is issued.
+ recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
+ d = twice(a)
+ assert len(recorded_warnings) == 1
+ exception_msg = 'Exception while loading results'
+ assert exception_msg in recorded_warnings[0]
+ # Asserts that the recomputation returns a mmap
+ assert isinstance(d, np.memmap)
+ assert d.mode == 'r'
+
+
+def test_memory_exception(tmpdir):
+ """ Smoketest the exception handling of Memory.
+ """
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+
+ class MyException(Exception):
+ pass
+
+ @memory.cache
+ def h(exc=0):
+ if exc:
+ raise MyException
+
+ # Call once, to initialise the cache
+ h()
+
+ for _ in range(3):
+ # Call 3 times, to be sure that the Exception is always raised
+ with raises(MyException):
+ h(1)
+
+
+def test_memory_ignore(tmpdir):
+ " Test the ignore feature of memory "
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ accumulator = list()
+
+ @memory.cache(ignore=['y'])
+ def z(x, y=1):
+ accumulator.append(1)
+
+ assert z.ignore == ['y']
+
+ z(0, y=1)
+ assert len(accumulator) == 1
+ z(0, y=1)
+ assert len(accumulator) == 1
+ z(0, y=2)
+ assert len(accumulator) == 1
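+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# Typical use of ``ignore`` outside the tests: parameters that do not change
+# the result, such as a verbosity flag, are left out of the cache key, so
+# toggling them does not trigger a recomputation. The helper name and its
+# ``location`` argument are illustrative only.
+def _example_ignore_usage(location):
+    memory = Memory(location=location, verbose=0)
+
+    @memory.cache(ignore=['verbose'])
+    def double(x, verbose=False):
+        if verbose:
+            print('doubling', x)
+        return 2 * x
+
+    assert double(3) == 6                 # computed and cached
+    assert double(3, verbose=True) == 6   # served from cache, 'verbose' ignored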
+
+
+def test_memory_ignore_decorated(tmpdir):
+ " Test the ignore feature of memory on a decorated function "
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ accumulator = list()
+
+ def decorate(f):
+ @functools.wraps(f)
+ def wrapped(*args, **kwargs):
+ return f(*args, **kwargs)
+ return wrapped
+
+ @memory.cache(ignore=['y'])
+ @decorate
+ def z(x, y=1):
+ accumulator.append(1)
+
+ assert z.ignore == ['y']
+
+ z(0, y=1)
+ assert len(accumulator) == 1
+ z(0, y=1)
+ assert len(accumulator) == 1
+ z(0, y=2)
+ assert len(accumulator) == 1
+
+
+def test_memory_args_as_kwargs(tmpdir):
+ """Non-regression test against 0.12.0 changes.
+
+ https://github.com/joblib/joblib/pull/751
+ """
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+
+ @memory.cache
+ def plus_one(a):
+ return a + 1
+
+ # It's possible to call a positional arg as a kwarg.
+ assert plus_one(1) == 2
+ assert plus_one(a=1) == 2
+
+ # However, a positional argument that joblib hadn't seen
+ # before would cause a failure if it was passed as a kwarg.
+ assert plus_one(a=2) == 3
+
+
+@parametrize('ignore, verbose, mmap_mode', [(['x'], 100, 'r'),
+ ([], 10, None)])
+def test_partial_decoration(tmpdir, ignore, verbose, mmap_mode):
+ "Check cache may be called with kwargs before decorating"
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+
+ @memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode)
+ def z(x):
+ pass
+
+ assert z.ignore == ignore
+ assert z._verbose == verbose
+ assert z.mmap_mode == mmap_mode
+
+
+def test_func_dir(tmpdir):
+ # Test the creation of the memory cache directory for the function.
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ path = __name__.split('.')
+ path.append('f')
+ path = tmpdir.join('joblib', *path).strpath
+
+ g = memory.cache(f)
+ # Test that the function directory is created on demand
+ func_id = _build_func_identifier(f)
+ location = os.path.join(g.store_backend.location, func_id)
+ assert location == path
+ assert os.path.exists(path)
+ assert memory.location == os.path.dirname(g.store_backend.location)
+
+ # Test that the code is stored.
+ # For the following test to be robust to previous execution, we clear
+ # the in-memory store
+ _FUNCTION_HASHES.clear()
+ assert not g._check_previous_func_code()
+ assert os.path.exists(os.path.join(path, 'func_code.py'))
+ assert g._check_previous_func_code()
+
+ # Test the robustness to failure of loading previous results.
+ args_id = g._get_args_id(1)
+ output_dir = os.path.join(g.store_backend.location, g.func_id, args_id)
+ a = g(1)
+ assert os.path.exists(output_dir)
+ os.remove(os.path.join(output_dir, 'output.pkl'))
+ assert a == g(1)
+
+
+def test_persistence(tmpdir):
+ # Test the memorized functions can be pickled and restored.
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ g = memory.cache(f)
+ output = g(1)
+
+ h = pickle.loads(pickle.dumps(g))
+
+ args_id = h._get_args_id(1)
+ output_dir = os.path.join(h.store_backend.location, h.func_id, args_id)
+ assert os.path.exists(output_dir)
+ assert output == h.store_backend.load_item([h.func_id, args_id])
+ memory2 = pickle.loads(pickle.dumps(memory))
+ assert memory.store_backend.location == memory2.store_backend.location
+
+ # Smoke test that pickling a memory with location=None works
+ memory = Memory(location=None, verbose=0)
+ pickle.loads(pickle.dumps(memory))
+ g = memory.cache(f)
+ gp = pickle.loads(pickle.dumps(g))
+ gp(1)
+
+
+def test_check_call_in_cache(tmpdir):
+ for func in (MemorizedFunc(f, tmpdir.strpath),
+ Memory(location=tmpdir.strpath, verbose=0).cache(f)):
+ result = func.check_call_in_cache(2)
+ assert not result
+ assert isinstance(result, bool)
+ assert func(2) == 5
+ result = func.check_call_in_cache(2)
+ assert result
+ assert isinstance(result, bool)
+ func.clear()
+
+
+def test_call_and_shelve(tmpdir):
+ # Test MemorizedFunc outputting a reference to cache.
+
+ for func, Result in zip((MemorizedFunc(f, tmpdir.strpath),
+ NotMemorizedFunc(f),
+ Memory(location=tmpdir.strpath,
+ verbose=0).cache(f),
+ Memory(location=None).cache(f),
+ ),
+ (MemorizedResult, NotMemorizedResult,
+ MemorizedResult, NotMemorizedResult)):
+ assert func(2) == 5
+ result = func.call_and_shelve(2)
+ assert isinstance(result, Result)
+ assert result.get() == 5
+
+ result.clear()
+ with raises(KeyError):
+ result.get()
+ result.clear() # Do nothing if there is no cache.
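+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# ``call_and_shelve`` is typically used to keep only a lightweight reference
+# to a (possibly large) cached result and to deserialize it lazily with
+# ``.get()`` when it is actually needed.
+def _example_call_and_shelve_usage(location):
+    memory = Memory(location=location, verbose=0)
+    cached_f = memory.cache(f)
+    reference = cached_f.call_and_shelve(2)   # computes (or finds) f(2)
+    assert reference.get() == 5               # deserialized only on .get()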
+
+
+def test_call_and_shelve_argument_hash(tmpdir):
+ # Verify that a warning is raised when accessing arguments_hash
+ # attribute from MemorizedResult
+ func = Memory(location=tmpdir.strpath, verbose=0).cache(f)
+ result = func.call_and_shelve(2)
+ assert isinstance(result, MemorizedResult)
+ with warns(DeprecationWarning) as w:
+ assert result.argument_hash == result.args_id
+ assert len(w) == 1
+ assert "The 'argument_hash' attribute has been deprecated" \
+ in str(w[-1].message)
+
+
+def test_call_and_shelve_lazily_load_stored_result(tmpdir):
+ """Check call_and_shelve only load stored data if needed."""
+ test_access_time_file = tmpdir.join('test_access')
+ test_access_time_file.write('test_access')
+ test_access_time = os.stat(test_access_time_file.strpath).st_atime
+ # check that the file system access time resolution is finer than the
+ # test wait timings.
+ time.sleep(0.5)
+ assert test_access_time_file.read() == 'test_access'
+
+ if test_access_time == os.stat(test_access_time_file.strpath).st_atime:
+ # Skip this test when access time cannot be retrieved with enough
+ # precision from the file system (e.g. NTFS on windows).
+ pytest.skip("filesystem does not support fine-grained access time "
+ "attribute")
+
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ func = memory.cache(f)
+ args_id = func._get_args_id(2)
+ result_path = os.path.join(memory.store_backend.location,
+ func.func_id, args_id, 'output.pkl')
+ assert func(2) == 5
+ first_access_time = os.stat(result_path).st_atime
+ time.sleep(1)
+
+ # Should not access the stored data
+ result = func.call_and_shelve(2)
+ assert isinstance(result, MemorizedResult)
+ assert os.stat(result_path).st_atime == first_access_time
+ time.sleep(1)
+
+ # Read the stored data => last access time is greater than first_access
+ assert result.get() == 5
+ assert os.stat(result_path).st_atime > first_access_time
+
+
+def test_memorized_pickling(tmpdir):
+ for func in (MemorizedFunc(f, tmpdir.strpath), NotMemorizedFunc(f)):
+ filename = tmpdir.join('pickling_test.dat').strpath
+ result = func.call_and_shelve(2)
+ with open(filename, 'wb') as fp:
+ pickle.dump(result, fp)
+ with open(filename, 'rb') as fp:
+ result2 = pickle.load(fp)
+ assert result2.get() == result.get()
+ os.remove(filename)
+
+
+def test_memorized_repr(tmpdir):
+ func = MemorizedFunc(f, tmpdir.strpath)
+ result = func.call_and_shelve(2)
+
+ func2 = MemorizedFunc(f, tmpdir.strpath)
+ result2 = func2.call_and_shelve(2)
+ assert result.get() == result2.get()
+ assert repr(func) == repr(func2)
+
+ # Smoke test with NotMemorizedFunc
+ func = NotMemorizedFunc(f)
+ repr(func)
+ repr(func.call_and_shelve(2))
+
+ # Smoke test for message output (increase code coverage)
+ func = MemorizedFunc(f, tmpdir.strpath, verbose=11, timestamp=time.time())
+ result = func.call_and_shelve(11)
+ result.get()
+
+ func = MemorizedFunc(f, tmpdir.strpath, verbose=11)
+ result = func.call_and_shelve(11)
+ result.get()
+
+ func = MemorizedFunc(f, tmpdir.strpath, verbose=5, timestamp=time.time())
+ result = func.call_and_shelve(11)
+ result.get()
+
+ func = MemorizedFunc(f, tmpdir.strpath, verbose=5)
+ result = func.call_and_shelve(11)
+ result.get()
+
+
+def test_memory_file_modification(capsys, tmpdir, monkeypatch):
+ # Test that modifying a Python file after loading it does not lead to
+ # recomputation
+ dir_name = tmpdir.mkdir('tmp_import').strpath
+ filename = os.path.join(dir_name, 'tmp_joblib_.py')
+ content = 'def f(x):\n print(x)\n return x\n'
+ with open(filename, 'w') as module_file:
+ module_file.write(content)
+
+ # Load the module:
+ monkeypatch.syspath_prepend(dir_name)
+ import tmp_joblib_ as tmp
+
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ f = memory.cache(tmp.f)
+ # First call f a few times
+ f(1)
+ f(2)
+ f(1)
+
+ # Now modify the module where f is stored without modifying f
+ with open(filename, 'w') as module_file:
+ module_file.write('\n\n' + content)
+
+ # And call f a couple more times
+ f(1)
+ f(1)
+
+ # Flush the .pyc files
+ shutil.rmtree(dir_name)
+ os.mkdir(dir_name)
+ # Now modify the module where f is stored, modifying f
+ content = 'def f(x):\n print("x=%s" % x)\n return x\n'
+ with open(filename, 'w') as module_file:
+ module_file.write(content)
+
+ # And call f more times prior to reloading: the cache should not be
+ # invalidated at this point as the active function definition has not
+ # changed in memory yet.
+ f(1)
+ f(1)
+
+ # Now reload
+ sys.stdout.write('Reloading\n')
+ sys.modules.pop('tmp_joblib_')
+ import tmp_joblib_ as tmp
+ f = memory.cache(tmp.f)
+
+ # And call f more times
+ f(1)
+ f(1)
+
+ out, err = capsys.readouterr()
+ assert out == '1\n2\nReloading\nx=1\n'
+
+
+def _function_to_cache(a, b):
+ # Just a placeholder function to be mutated by tests
+ pass
+
+
+def _sum(a, b):
+ return a + b
+
+
+def _product(a, b):
+ return a * b
+
+
+def test_memory_in_memory_function_code_change(tmpdir):
+ _function_to_cache.__code__ = _sum.__code__
+
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ f = memory.cache(_function_to_cache)
+
+ assert f(1, 2) == 3
+ assert f(1, 2) == 3
+
+ with warns(JobLibCollisionWarning):
+ # Check that inline function modification triggers a cache invalidation
+ _function_to_cache.__code__ = _product.__code__
+ assert f(1, 2) == 2
+ assert f(1, 2) == 2
+
+
+def test_clear_memory_with_none_location():
+ memory = Memory(location=None)
+ memory.clear()
+
+
+def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
+ return a, b, kw1, kw2
+
+
+def func_with_signature(a: int, b: float) -> float:
+ return a + b
+
+
+def test_memory_func_with_kwonly_args(tmpdir):
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ func_cached = memory.cache(func_with_kwonly_args)
+
+ assert func_cached(1, 2, kw1=3) == (1, 2, 3, 'kw2')
+
+ # Making sure that providing a keyword-only argument by
+ # position raises an exception
+ with raises(ValueError) as excinfo:
+ func_cached(1, 2, 3, kw2=4)
+ excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
+ "parameter")
+
+ # After a valid call has populated the cache, passing the
+ # keyword-only parameter by position should still raise ValueError
+ func_cached(1, 2, kw1=3, kw2=4)
+
+ with raises(ValueError) as excinfo:
+ func_cached(1, 2, 3, kw2=4)
+ excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
+ "parameter")
+
+ # Test 'ignore' parameter
+ func_cached = memory.cache(func_with_kwonly_args, ignore=['kw2'])
+ assert func_cached(1, 2, kw1=3, kw2=4) == (1, 2, 3, 4)
+ assert func_cached(1, 2, kw1=3, kw2='ignored') == (1, 2, 3, 4)
+
+
+def test_memory_func_with_signature(tmpdir):
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ func_cached = memory.cache(func_with_signature)
+
+ assert func_cached(1, 2.) == 3.
+
+
+def _setup_toy_cache(tmpdir, num_inputs=10):
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+
+ @memory.cache()
+ def get_1000_bytes(arg):
+ return 'a' * 1000
+
+ inputs = list(range(num_inputs))
+ for arg in inputs:
+ get_1000_bytes(arg)
+
+ func_id = _build_func_identifier(get_1000_bytes)
+ hash_dirnames = [get_1000_bytes._get_args_id(arg)
+ for arg in inputs]
+
+ full_hashdirs = [os.path.join(get_1000_bytes.store_backend.location,
+ func_id, dirname)
+ for dirname in hash_dirnames]
+ return memory, full_hashdirs, get_1000_bytes
+
+
+def test__get_items(tmpdir):
+ memory, expected_hash_dirs, _ = _setup_toy_cache(tmpdir)
+ items = memory.store_backend.get_items()
+ hash_dirs = [ci.path for ci in items]
+ assert set(hash_dirs) == set(expected_hash_dirs)
+
+ def get_files_size(directory):
+ full_paths = [os.path.join(directory, fn)
+ for fn in os.listdir(directory)]
+ return sum(os.path.getsize(fp) for fp in full_paths)
+
+ expected_hash_cache_sizes = [get_files_size(hash_dir)
+ for hash_dir in hash_dirs]
+ hash_cache_sizes = [ci.size for ci in items]
+ assert hash_cache_sizes == expected_hash_cache_sizes
+
+ output_filenames = [os.path.join(hash_dir, 'output.pkl')
+ for hash_dir in hash_dirs]
+
+ expected_last_accesses = [
+ datetime.datetime.fromtimestamp(os.path.getatime(fn))
+ for fn in output_filenames]
+ last_accesses = [ci.last_access for ci in items]
+ assert last_accesses == expected_last_accesses
+
+
+def test__get_items_to_delete(tmpdir):
+ # test empty cache
+ memory, _, _ = _setup_toy_cache(tmpdir, num_inputs=0)
+ items_to_delete = memory.store_backend._get_items_to_delete('1K')
+ assert items_to_delete == []
+
+ memory, expected_hash_cachedirs, _ = _setup_toy_cache(tmpdir)
+ items = memory.store_backend.get_items()
+ # bytes_limit set to keep only one cache item (each hash cache
+ # folder is about 1000 bytes + metadata)
+ items_to_delete = memory.store_backend._get_items_to_delete('2K')
+ nb_hashes = len(expected_hash_cachedirs)
+ assert set.issubset(set(items_to_delete), set(items))
+ assert len(items_to_delete) == nb_hashes - 1
+
+ # Sanity check bytes_limit=2048 is the same as bytes_limit='2K'
+ items_to_delete_2048b = memory.store_backend._get_items_to_delete(2048)
+ assert sorted(items_to_delete) == sorted(items_to_delete_2048b)
+
+ # bytes_limit greater than the size of the cache
+ items_to_delete_empty = memory.store_backend._get_items_to_delete('1M')
+ assert items_to_delete_empty == []
+
+ # All the cache items need to be deleted
+ bytes_limit_too_small = 500
+ items_to_delete_500b = memory.store_backend._get_items_to_delete(
+ bytes_limit_too_small
+ )
+ assert set(items_to_delete_500b) == set(items)
+
+ # Test LRU property: surviving cache items should all have a more
+ # recent last_access than the ones that have been deleted
+ items_to_delete_6000b = memory.store_backend._get_items_to_delete(6000)
+ surviving_items = set(items).difference(items_to_delete_6000b)
+
+ assert (max(ci.last_access for ci in items_to_delete_6000b) <=
+ min(ci.last_access for ci in surviving_items))
+
+
+def test_memory_reduce_size_bytes_limit(tmpdir):
+ memory, _, _ = _setup_toy_cache(tmpdir)
+ ref_cache_items = memory.store_backend.get_items()
+
+ # By default memory.bytes_limit is None and reduce_size is a noop
+ memory.reduce_size()
+ cache_items = memory.store_backend.get_items()
+ assert sorted(ref_cache_items) == sorted(cache_items)
+
+ # No cache items deleted if bytes_limit greater than the size of
+ # the cache
+ memory.reduce_size(bytes_limit='1M')
+ cache_items = memory.store_backend.get_items()
+ assert sorted(ref_cache_items) == sorted(cache_items)
+
+ # bytes_limit is set so that only two cache items are kept
+ memory.reduce_size(bytes_limit='3K')
+ cache_items = memory.store_backend.get_items()
+ assert set.issubset(set(cache_items), set(ref_cache_items))
+ assert len(cache_items) == 2
+
+ # bytes_limit set so that no cache item is kept
+ bytes_limit_too_small = 500
+ memory.reduce_size(bytes_limit=bytes_limit_too_small)
+ cache_items = memory.store_backend.get_items()
+ assert cache_items == []
+
+
+def test_memory_reduce_size_items_limit(tmpdir):
+ memory, _, _ = _setup_toy_cache(tmpdir)
+ ref_cache_items = memory.store_backend.get_items()
+
+ # By default reduce_size is a noop
+ memory.reduce_size()
+ cache_items = memory.store_backend.get_items()
+ assert sorted(ref_cache_items) == sorted(cache_items)
+
+ # No cache items deleted if items_limit greater than the size of
+ # the cache
+ memory.reduce_size(items_limit=10)
+ cache_items = memory.store_backend.get_items()
+ assert sorted(ref_cache_items) == sorted(cache_items)
+
+ # items_limit is set so that only two cache items are kept
+ memory.reduce_size(items_limit=2)
+ cache_items = memory.store_backend.get_items()
+ assert set.issubset(set(cache_items), set(ref_cache_items))
+ assert len(cache_items) == 2
+
+ # item_limit set so that no cache item is kept
+ memory.reduce_size(items_limit=0)
+ cache_items = memory.store_backend.get_items()
+ assert cache_items == []
+
+
+def test_memory_reduce_size_age_limit(tmpdir):
+ import time
+ import datetime
+ memory, _, put_cache = _setup_toy_cache(tmpdir)
+ ref_cache_items = memory.store_backend.get_items()
+
+ # By default reduce_size is a noop
+ memory.reduce_size()
+ cache_items = memory.store_backend.get_items()
+ assert sorted(ref_cache_items) == sorted(cache_items)
+
+ # No cache items deleted if age_limit is large.
+ memory.reduce_size(age_limit=datetime.timedelta(days=1))
+ cache_items = memory.store_backend.get_items()
+ assert sorted(ref_cache_items) == sorted(cache_items)
+
+ # age_limit is set so that only two cache items are kept
+ time.sleep(1)
+ put_cache(-1)
+ put_cache(-2)
+ memory.reduce_size(age_limit=datetime.timedelta(seconds=1))
+ cache_items = memory.store_backend.get_items()
+ assert not set.issubset(set(cache_items), set(ref_cache_items))
+ assert len(cache_items) == 2
+
+ # age_limit set so that no cache item is kept
+ memory.reduce_size(age_limit=datetime.timedelta(seconds=0))
+ cache_items = memory.store_backend.get_items()
+ assert cache_items == []
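+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# ``reduce_size`` accepts the three limits exercised above; any cache item
+# violating one of them is deleted, least recently accessed items first.
+# The helper name and its ``location`` argument are illustrative only.
+def _example_reduce_size_usage(location):
+    memory = Memory(location=location, verbose=0)
+
+    @memory.cache
+    def payload(i):
+        return 'x' * 1000
+
+    for i in range(10):
+        payload(i)
+    memory.reduce_size(bytes_limit='5K', items_limit=4,
+                       age_limit=datetime.timedelta(hours=1))
+    assert len(memory.store_backend.get_items()) <= 4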
+
+
+def test_memory_clear(tmpdir):
+ memory, _, g = _setup_toy_cache(tmpdir)
+ memory.clear()
+
+ assert os.listdir(memory.store_backend.location) == []
+
+ # Check that the cache for functions hash is also reset.
+ assert not g._check_previous_func_code(stacklevel=4)
+
+
+def fast_func_with_complex_output():
+ complex_obj = ['a' * 1000] * 1000
+ return complex_obj
+
+
+def fast_func_with_conditional_complex_output(complex_output=True):
+ complex_obj = {str(i): i for i in range(int(1e5))}
+ return complex_obj if complex_output else 'simple output'
+
+
+@with_multiprocessing
+def test_cached_function_race_condition_when_persisting_output(tmpdir, capfd):
+ # Test race condition where multiple processes are writing into
+ # the same output.pkl. See
+ # https://github.com/joblib/joblib/issues/490 for more details.
+ memory = Memory(location=tmpdir.strpath)
+ func_cached = memory.cache(fast_func_with_complex_output)
+
+ Parallel(n_jobs=2)(delayed(func_cached)() for i in range(3))
+
+ stdout, stderr = capfd.readouterr()
+
+ # Checking both stdout and stderr (ongoing PR #434 may change
+ # logging destination) to make sure there is no exception while
+ # loading the results
+ exception_msg = 'Exception while loading results'
+ assert exception_msg not in stdout
+ assert exception_msg not in stderr
+
+
+@with_multiprocessing
+def test_cached_function_race_condition_when_persisting_output_2(tmpdir,
+ capfd):
+ # Test race condition in first attempt at solving
+ # https://github.com/joblib/joblib/issues/490. The race condition
+ # was due to the delay between seeing the cache directory created
+ # (interpreted as the result being cached) and the output.pkl being
+ # pickled.
+ memory = Memory(location=tmpdir.strpath)
+ func_cached = memory.cache(fast_func_with_conditional_complex_output)
+
+ Parallel(n_jobs=2)(delayed(func_cached)(True if i % 2 == 0 else False)
+ for i in range(3))
+
+ stdout, stderr = capfd.readouterr()
+
+ # Checking both stdout and stderr (ongoing PR #434 may change
+ # logging destination) to make sure there is no exception while
+ # loading the results
+ exception_msg = 'Exception while loading results'
+ assert exception_msg not in stdout
+ assert exception_msg not in stderr
+
+
+def test_memory_recomputes_after_an_error_while_loading_results(
+ tmpdir, monkeypatch):
+ memory = Memory(location=tmpdir.strpath)
+
+ def func(arg):
+ # This makes sure that the timestamps returned by two calls of
+ # func are different. This is needed on Windows where
+ # time.time resolution may not be accurate enough
+ time.sleep(0.01)
+ return arg, time.time()
+
+ cached_func = memory.cache(func)
+ input_arg = 'arg'
+ arg, timestamp = cached_func(input_arg)
+
+ # Make sure the function is correctly cached
+ assert arg == input_arg
+
+ # Corrupting output.pkl to make sure that an error happens when
+ # loading the cached result
+ corrupt_single_cache_item(memory)
+
+ # Make sure that corrupting the file causes recomputation and that
+ # a warning is issued.
+ recorded_warnings = monkeypatch_cached_func_warn(cached_func, monkeypatch)
+ recomputed_arg, recomputed_timestamp = cached_func(arg)
+ assert len(recorded_warnings) == 1
+ exception_msg = 'Exception while loading results'
+ assert exception_msg in recorded_warnings[0]
+ assert recomputed_arg == arg
+ assert recomputed_timestamp > timestamp
+
+ # Corrupting output.pkl to make sure that an error happens when
+ # loading the cached result
+ corrupt_single_cache_item(memory)
+ reference = cached_func.call_and_shelve(arg)
+ try:
+ reference.get()
+ raise AssertionError(
+ "It normally not possible to load a corrupted"
+ " MemorizedResult"
+ )
+ except KeyError as e:
+ message = "is corrupted"
+ assert message in str(e.args)
+
+
+class IncompleteStoreBackend(StoreBackendBase):
+ """This backend cannot be instantiated and should raise a TypeError."""
+ pass
+
+
+class DummyStoreBackend(StoreBackendBase):
+ """A dummy store backend that does nothing."""
+
+ def _open_item(self, *args, **kwargs):
+ """Open an item on store."""
+ "Does nothing"
+
+ def _item_exists(self, location):
+ """Check if an item location exists."""
+ "Does nothing"
+
+ def _move_item(self, src, dst):
+ """Move an item from src to dst in store."""
+ "Does nothing"
+
+ def create_location(self, location):
+ """Create location on store."""
+ "Does nothing"
+
+ def exists(self, obj):
+ """Check if an object exists in the store"""
+ return False
+
+ def clear_location(self, obj):
+ """Clear object on store"""
+ "Does nothing"
+
+ def get_items(self):
+ """Returns the whole list of items available in cache."""
+ return []
+
+ def configure(self, location, *args, **kwargs):
+ """Configure the store"""
+ "Does nothing"
+
+
+@parametrize("invalid_prefix", [None, dict(), list()])
+def test_register_invalid_store_backends_key(invalid_prefix):
+ # verify the right exceptions are raised when passing a wrong backend key.
+ with raises(ValueError) as excinfo:
+ register_store_backend(invalid_prefix, None)
+ excinfo.match(r'Store backend name should be a string*')
+
+
+def test_register_invalid_store_backends_object():
+ # verify the right exceptions are raised when passing a wrong backend
+ # object.
+ with raises(ValueError) as excinfo:
+ register_store_backend("fs", None)
+ excinfo.match(r'Store backend should inherit StoreBackendBase*')
+
+
+def test_memory_default_store_backend():
+ # test that an unknown backend name raises a TypeError instead of
+ # silently falling back to the default FileSystemStoreBackend
+ with raises(TypeError) as excinfo:
+ Memory(location='/tmp/joblib', backend='unknown')
+ excinfo.match(r"Unknown location*")
+
+
+def test_warning_on_unknown_location_type():
+ class NonSupportedLocationClass:
+ pass
+ unsupported_location = NonSupportedLocationClass()
+
+ with warns(UserWarning) as warninfo:
+ _store_backend_factory("local", location=unsupported_location)
+
+ expected_message = ("Instantiating a backend using a "
+ "NonSupportedLocationClass as a location is not "
+ "supported by joblib")
+ assert expected_message in str(warninfo[0].message)
+
+
+def test_instanciate_incomplete_store_backend():
+ # Verify that registering an external incomplete store backend raises an
+ # exception when one tries to instantiate it.
+ backend_name = "isb"
+ register_store_backend(backend_name, IncompleteStoreBackend)
+ assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items()
+ with raises(TypeError) as excinfo:
+ _store_backend_factory(backend_name, "fake_location")
+ excinfo.match(r"Can't instantiate abstract class IncompleteStoreBackend "
+ "(without an implementation for|with) abstract methods*")
+
+
+def test_dummy_store_backend():
+ # Verify that registering an external store backend works.
+
+ backend_name = "dsb"
+ register_store_backend(backend_name, DummyStoreBackend)
+ assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items()
+
+ backend_obj = _store_backend_factory(backend_name, "dummy_location")
+ assert isinstance(backend_obj, DummyStoreBackend)
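+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# Once registered, a custom backend is selected by name through the
+# ``backend`` argument of Memory, which instantiates and configures it.
+def _example_custom_backend_usage(location):
+    register_store_backend("dsb", DummyStoreBackend)
+    memory = Memory(location=location, backend="dsb", verbose=0)
+    assert isinstance(memory.store_backend, DummyStoreBackend)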
+
+
+def test_instanciate_store_backend_with_pathlib_path():
+ # Instantiate a FileSystemStoreBackend using a pathlib.Path object
+ path = pathlib.Path("some_folder")
+ backend_obj = _store_backend_factory("local", path)
+ assert backend_obj.location == "some_folder"
+
+
+def test_filesystem_store_backend_repr(tmpdir):
+ # Verify string representation of a filesystem store backend.
+
+ repr_pattern = 'FileSystemStoreBackend(location="{location}")'
+ backend = FileSystemStoreBackend()
+ assert backend.location is None
+
+ repr(backend) # Should not raise an exception
+
+ assert str(backend) == repr_pattern.format(location=None)
+
+ # backend location is passed explicitly via the configure method (called
+ # by the internal _store_backend_factory function)
+ backend.configure(tmpdir.strpath)
+
+ assert str(backend) == repr_pattern.format(location=tmpdir.strpath)
+
+ repr(backend) # Should not raise an exception
+
+
+def test_memory_objects_repr(tmpdir):
+ # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory.
+
+ def my_func(a, b):
+ return a + b
+
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ memorized_func = memory.cache(my_func)
+
+ memorized_func_repr = 'MemorizedFunc(func={func}, location={location})'
+
+ assert str(memorized_func) == memorized_func_repr.format(
+ func=my_func,
+ location=memory.store_backend.location)
+
+ memorized_result = memorized_func.call_and_shelve(42, 42)
+
+ memorized_result_repr = ('MemorizedResult(location="{location}", '
+ 'func="{func}", args_id="{args_id}")')
+
+ assert str(memorized_result) == memorized_result_repr.format(
+ location=memory.store_backend.location,
+ func=memorized_result.func_id,
+ args_id=memorized_result.args_id)
+
+ assert str(memory) == 'Memory(location={location})'.format(
+ location=memory.store_backend.location)
+
+
+def test_memorized_result_pickle(tmpdir):
+ # Verify a MemorizedResult object can be pickled/unpickled. Non-regression
+ # test introduced following issue
+ # https://github.com/joblib/joblib/issues/747
+
+ memory = Memory(location=tmpdir.strpath)
+
+ @memory.cache
+ def g(x):
+ return x**2
+
+ memorized_result = g.call_and_shelve(4)
+ memorized_result_pickle = pickle.dumps(memorized_result)
+ memorized_result_loads = pickle.loads(memorized_result_pickle)
+
+ assert memorized_result.store_backend.location == \
+ memorized_result_loads.store_backend.location
+ assert memorized_result.func == memorized_result_loads.func
+ assert memorized_result.args_id == memorized_result_loads.args_id
+ assert str(memorized_result) == str(memorized_result_loads)
+
+
+def compare(left, right, ignored_attrs=None):
+ if ignored_attrs is None:
+ ignored_attrs = []
+
+ left_vars = vars(left)
+ right_vars = vars(right)
+ assert set(left_vars.keys()) == set(right_vars.keys())
+ for attr in left_vars.keys():
+ if attr in ignored_attrs:
+ continue
+ assert left_vars[attr] == right_vars[attr]
+
+
+@pytest.mark.parametrize('memory_kwargs',
+ [{'compress': 3, 'verbose': 2},
+ {'mmap_mode': 'r', 'verbose': 5,
+ 'backend_options': {'parameter': 'unused'}}])
+def test_memory_pickle_dump_load(tmpdir, memory_kwargs):
+ memory = Memory(location=tmpdir.strpath, **memory_kwargs)
+
+ memory_reloaded = pickle.loads(pickle.dumps(memory))
+
+ # Compare Memory instance before and after pickle roundtrip
+ compare(memory.store_backend, memory_reloaded.store_backend)
+ compare(memory, memory_reloaded,
+ ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id']))
+ assert hash(memory) == hash(memory_reloaded)
+
+ func_cached = memory.cache(f)
+
+ func_cached_reloaded = pickle.loads(pickle.dumps(func_cached))
+
+ # Compare MemorizedFunc instance before/after pickle roundtrip
+ compare(func_cached.store_backend, func_cached_reloaded.store_backend)
+ compare(func_cached, func_cached_reloaded,
+ ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id']))
+ assert hash(func_cached) == hash(func_cached_reloaded)
+
+ # Compare MemorizedResult instance before/after pickle roundtrip
+ memorized_result = func_cached.call_and_shelve(1)
+ memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result))
+
+ compare(memorized_result.store_backend,
+ memorized_result_reloaded.store_backend)
+ compare(memorized_result, memorized_result_reloaded,
+ ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id']))
+ assert hash(memorized_result) == hash(memorized_result_reloaded)
+
+
+def test_info_log(tmpdir, caplog):
+ caplog.set_level(logging.INFO)
+ x = 3
+
+ memory = Memory(location=tmpdir.strpath, verbose=20)
+
+ @memory.cache
+ def f(x):
+ return x ** 2
+
+ _ = f(x)
+ assert "Querying" in caplog.text
+ caplog.clear()
+
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+
+ @memory.cache
+ def f(x):
+ return x ** 2
+
+ _ = f(x)
+ assert "Querying" not in caplog.text
+ caplog.clear()
+
+
+def test_deprecated_bytes_limit(tmpdir):
+ from joblib import __version__
+ if __version__ >= "1.5":
+ raise DeprecationWarning(
+ "Bytes limit is deprecated and should be removed by 1.4"
+ )
+ with pytest.warns(DeprecationWarning, match="bytes_limit"):
+ _ = Memory(location=tmpdir.strpath, bytes_limit='1K')
+
+
+class TestCacheValidationCallback:
+ "Tests on parameter `cache_validation_callback`"
+
+ @pytest.fixture()
+ def memory(self, tmp_path):
+ mem = Memory(location=tmp_path)
+ yield mem
+ mem.clear()
+
+ def foo(self, x, d, delay=None):
+ d["run"] = True
+ if delay is not None:
+ time.sleep(delay)
+ return x * 2
+
+ def test_invalid_cache_validation_callback(self, memory):
+ "Test invalid values for `cache_validation_callback"
+ match = "cache_validation_callback needs to be callable. Got True."
+ with pytest.raises(ValueError, match=match):
+ memory.cache(cache_validation_callback=True)
+
+ @pytest.mark.parametrize("consider_cache_valid", [True, False])
+ def test_constant_cache_validation_callback(
+ self, memory, consider_cache_valid
+ ):
+ "Test expiry of old results"
+ f = memory.cache(
+ self.foo, cache_validation_callback=lambda _: consider_cache_valid,
+ ignore=["d"]
+ )
+
+ d1, d2 = {"run": False}, {"run": False}
+ assert f(2, d1) == 4
+ assert f(2, d2) == 4
+
+ assert d1["run"]
+ assert d2["run"] != consider_cache_valid
+
+ def test_memory_only_cache_long_run(self, memory):
+ "Test cache validity based on run duration."
+
+ def cache_validation_callback(metadata):
+ duration = metadata['duration']
+ if duration > 0.1:
+ return True
+
+ f = memory.cache(
+ self.foo, cache_validation_callback=cache_validation_callback,
+ ignore=["d"]
+ )
+
+ # Short runs are not cached
+ d1, d2 = {"run": False}, {"run": False}
+ assert f(2, d1, delay=0) == 4
+ assert f(2, d2, delay=0) == 4
+ assert d1["run"]
+ assert d2["run"]
+
+ # Longer runs are cached
+ d1, d2 = {"run": False}, {"run": False}
+ assert f(2, d1, delay=0.2) == 4
+ assert f(2, d2, delay=0.2) == 4
+ assert d1["run"]
+ assert not d2["run"]
+
+ def test_memory_expires_after(self, memory):
+ "Test expiry of old cached results"
+
+ f = memory.cache(
+ self.foo, cache_validation_callback=expires_after(seconds=.3),
+ ignore=["d"]
+ )
+
+ d1, d2, d3 = {"run": False}, {"run": False}, {"run": False}
+ assert f(2, d1) == 4
+ assert f(2, d2) == 4
+ time.sleep(.5)
+ assert f(2, d3) == 4
+
+ assert d1["run"]
+ assert not d2["run"]
+ assert d3["run"]
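+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# ``expires_after`` builds a cache_validation_callback from timedelta-style
+# keyword arguments, so cached entries older than the given age are
+# recomputed on the next call. The helper name and ``location`` argument are
+# illustrative only.
+def _example_expires_after_usage(location):
+    memory = Memory(location=location, verbose=0)
+
+    @memory.cache(cache_validation_callback=expires_after(hours=1))
+    def fetch(key):
+        return key.upper()
+
+    assert fetch('abc') == 'ABC'   # valid in the cache for up to one hour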
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_memory_async.py b/venv/lib/python3.10/site-packages/joblib/test/test_memory_async.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecad0c926193a7ed84f669cface0023363f231ff
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_memory_async.py
@@ -0,0 +1,149 @@
+import asyncio
+import gc
+import shutil
+
+import pytest
+
+from joblib.memory import (AsyncMemorizedFunc, AsyncNotMemorizedFunc,
+ MemorizedResult, Memory, NotMemorizedResult)
+from joblib.test.common import np, with_numpy
+from joblib.testing import raises
+
+from .test_memory import (corrupt_single_cache_item,
+ monkeypatch_cached_func_warn)
+
+
+async def check_identity_lazy_async(func, accumulator, location):
+ """ Similar to check_identity_lazy_async for coroutine functions"""
+ memory = Memory(location=location, verbose=0)
+ func = memory.cache(func)
+ for i in range(3):
+ for _ in range(2):
+ value = await func(i)
+ assert value == i
+ assert len(accumulator) == i + 1
+
+
+@pytest.mark.asyncio
+async def test_memory_integration_async(tmpdir):
+ accumulator = list()
+
+ async def f(n):
+ await asyncio.sleep(0.1)
+ accumulator.append(1)
+ return n
+
+ await check_identity_lazy_async(f, accumulator, tmpdir.strpath)
+
+ # Now test clearing
+ for compress in (False, True):
+ for mmap_mode in ('r', None):
+ memory = Memory(location=tmpdir.strpath, verbose=10,
+ mmap_mode=mmap_mode, compress=compress)
+ # First clear the cache directory, to check that our code can
+ # handle that
+ # NOTE: this line would raise an exception, as the database
+ # file is still open; we ignore the error since we want to
+ # test what happens if the directory disappears
+ shutil.rmtree(tmpdir.strpath, ignore_errors=True)
+ g = memory.cache(f)
+ await g(1)
+ g.clear(warn=False)
+ current_accumulator = len(accumulator)
+ out = await g(1)
+
+ assert len(accumulator) == current_accumulator + 1
+ # Also, check that Memory.eval works similarly
+ evaled = await memory.eval(f, 1)
+ assert evaled == out
+ assert len(accumulator) == current_accumulator + 1
+
+ # Now do a smoke test with a function defined in __main__, as the name
+ # mangling rules are more complex
+ f.__module__ = '__main__'
+ memory = Memory(location=tmpdir.strpath, verbose=0)
+ await memory.cache(f)(1)
+
+
+@pytest.mark.asyncio
+async def test_no_memory_async():
+ accumulator = list()
+
+ async def ff(x):
+ await asyncio.sleep(0.1)
+ accumulator.append(1)
+ return x
+
+ memory = Memory(location=None, verbose=0)
+ gg = memory.cache(ff)
+ for _ in range(4):
+ current_accumulator = len(accumulator)
+ await gg(1)
+ assert len(accumulator) == current_accumulator + 1
+
+
+@with_numpy
+@pytest.mark.asyncio
+async def test_memory_numpy_check_mmap_mode_async(tmpdir, monkeypatch):
+ """Check that mmap_mode is respected even at the first call"""
+
+ memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0)
+
+ @memory.cache()
+ async def twice(a):
+ return a * 2
+
+ a = np.ones(3)
+ b = await twice(a)
+ c = await twice(a)
+
+ assert isinstance(c, np.memmap)
+ assert c.mode == 'r'
+
+ assert isinstance(b, np.memmap)
+ assert b.mode == 'r'
+
+ # Corrupt the file. Deleting the b and c mmaps
+ # is necessary to be able to edit the file
+ del b
+ del c
+ gc.collect()
+ corrupt_single_cache_item(memory)
+
+ # Make sure that corrupting the file causes recomputation and that
+ # a warning is issued.
+ recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
+ d = await twice(a)
+ assert len(recorded_warnings) == 1
+ exception_msg = 'Exception while loading results'
+ assert exception_msg in recorded_warnings[0]
+ # Asserts that the recomputation returns a mmap
+ assert isinstance(d, np.memmap)
+ assert d.mode == 'r'
+
+
+@pytest.mark.asyncio
+async def test_call_and_shelve_async(tmpdir):
+ async def f(x, y=1):
+ await asyncio.sleep(0.1)
+ return x ** 2 + y
+
+ # Test MemorizedFunc outputting a reference to cache.
+ for func, Result in zip((AsyncMemorizedFunc(f, tmpdir.strpath),
+ AsyncNotMemorizedFunc(f),
+ Memory(location=tmpdir.strpath,
+ verbose=0).cache(f),
+ Memory(location=None).cache(f),
+ ),
+ (MemorizedResult, NotMemorizedResult,
+ MemorizedResult, NotMemorizedResult,
+ )):
+ for _ in range(2):
+ result = await func.call_and_shelve(2)
+ assert isinstance(result, Result)
+ assert result.get() == 5
+
+ result.clear()
+ with raises(KeyError):
+ result.get()
+ result.clear() # Do nothing if there is no cache.
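+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# Decorating a coroutine function with Memory.cache returns an async wrapper
+# (AsyncMemorizedFunc), so the cached call itself must be awaited. The helper
+# name and ``location`` argument are illustrative only.
+async def _example_async_cache_usage(location):
+    memory = Memory(location=location, verbose=0)
+
+    @memory.cache
+    async def slow_square(x):
+        await asyncio.sleep(0.01)
+        return x * x
+
+    assert await slow_square(3) == 9   # computed and cached
+    assert await slow_square(3) == 9   # served from the cache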
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py b/venv/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..251925ced5208b4aaf09d9aab305eb44c7102818
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py
@@ -0,0 +1,32 @@
+"""
+Pyodide and other single-threaded Python builds will be missing the
+_multiprocessing module. Test that joblib still works in this environment.
+"""
+
+import os
+import subprocess
+import sys
+
+
+def test_missing_multiprocessing(tmp_path):
+ """
+ Test that import joblib works even if _multiprocessing is missing.
+
+ pytest has already imported everything from joblib. The most reasonable way
+ to test importing joblib with a modified environment is to invoke a separate
+ Python process. This also ensures that we don't break other tests by
+ importing a bad `_multiprocessing` module.
+ """
+ (tmp_path / "_multiprocessing.py").write_text(
+ 'raise ImportError("No _multiprocessing module!")'
+ )
+ env = dict(os.environ)
+ # For the subprocess, use the current sys.path with our custom version of
+ # _multiprocessing inserted.
+ env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path)
+ subprocess.check_call(
+ [sys.executable, "-c",
+ "import joblib, math; "
+ "joblib.Parallel(n_jobs=1)("
+ "joblib.delayed(math.sqrt)(i**2) for i in range(10))"
+ ], env=env)
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_module.py b/venv/lib/python3.10/site-packages/joblib/test/test_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2257a4142d79996f3d299cb820927ae48a05810
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_module.py
@@ -0,0 +1,53 @@
+import sys
+import joblib
+from joblib.testing import check_subprocess_call
+from joblib.test.common import with_multiprocessing
+
+
+def test_version():
+ assert hasattr(joblib, '__version__'), (
+ "There are no __version__ argument on the joblib module")
+
+
+@with_multiprocessing
+def test_no_start_method_side_effect_on_import():
+ # check that importing joblib does not implicitly set the global
+ # start_method for multiprocessing.
+ code = """if True:
+ import joblib
+ import multiprocessing as mp
+ # The following line would raise RuntimeError if the
+ # start_method is already set.
+ mp.set_start_method("loky")
+ """
+ check_subprocess_call([sys.executable, '-c', code])
+
+
+@with_multiprocessing
+def test_no_semaphore_tracker_on_import():
+ # check that importing joblib does not implicitly spawn a resource tracker
+ # or a semaphore tracker
+ code = """if True:
+ import joblib
+ from multiprocessing import semaphore_tracker
+ # The assertion below would fail if importing joblib had
+ # implicitly spawned the semaphore tracker.
+ msg = "multiprocessing.semaphore_tracker has been spawned on import"
+ assert semaphore_tracker._semaphore_tracker._fd is None, msg"""
+ if sys.version_info >= (3, 8):
+ # semaphore_tracker was renamed in Python 3.8:
+ code = code.replace("semaphore_tracker", "resource_tracker")
+ check_subprocess_call([sys.executable, '-c', code])
+
+
+@with_multiprocessing
+def test_no_resource_tracker_on_import():
+ code = """if True:
+ import joblib
+ from joblib.externals.loky.backend import resource_tracker
+ # The assertion below would fail if importing joblib had
+ # implicitly spawned the loky resource tracker.
+ msg = "loky.resource_tracker has been spawned on import"
+ assert resource_tracker._resource_tracker._fd is None, msg
+ """
+ check_subprocess_call([sys.executable, '-c', code])
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py b/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fee585c79ad219d3a9f8cdc6a55655b50099c09
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py
@@ -0,0 +1,1159 @@
+"""Test the numpy pickler as a replacement of the standard pickler."""
+
+import copy
+import os
+import random
+import re
+import io
+import sys
+import warnings
+import gzip
+import zlib
+import bz2
+import pickle
+import socket
+from contextlib import closing
+import mmap
+from pathlib import Path
+
+try:
+ import lzma
+except ImportError:
+ lzma = None
+
+import pytest
+
+from joblib.test.common import np, with_numpy, with_lz4, without_lz4
+from joblib.test.common import with_memory_profiler, memory_used
+from joblib.testing import parametrize, raises, warns
+
+# numpy_pickle is not a drop-in replacement of pickle, as it takes
+# filenames instead of open files as arguments.
+from joblib import numpy_pickle, register_compressor
+from joblib.test import data
+
+from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE
+from joblib.numpy_pickle_utils import _detect_compressor
+from joblib.numpy_pickle_utils import _is_numpy_array_byte_order_mismatch
+from joblib.numpy_pickle_utils import _ensure_native_byte_order
+from joblib.compressor import (_COMPRESSORS, _LZ4_PREFIX, CompressorWrapper,
+ LZ4_NOT_INSTALLED_ERROR, BinaryZlibFile)
+
+
+###############################################################################
+# Define a list of standard types.
+# Borrowed from dill, initial author: Micheal McKerns:
+# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py
+
+typelist = []
+
+# testing types
+_none = None
+typelist.append(_none)
+_type = type
+typelist.append(_type)
+_bool = bool(1)
+typelist.append(_bool)
+_int = int(1)
+typelist.append(_int)
+_float = float(1)
+typelist.append(_float)
+_complex = complex(1)
+typelist.append(_complex)
+_string = str(1)
+typelist.append(_string)
+_tuple = ()
+typelist.append(_tuple)
+_list = []
+typelist.append(_list)
+_dict = {}
+typelist.append(_dict)
+_builtin = len
+typelist.append(_builtin)
+
+
+def _function(x):
+ yield x
+
+
+class _class:
+ def _method(self):
+ pass
+
+
+class _newclass(object):
+ def _method(self):
+ pass
+
+
+typelist.append(_function)
+typelist.append(_class)
+typelist.append(_newclass) #
+_instance = _class()
+typelist.append(_instance)
+_object = _newclass()
+typelist.append(_object) #
+
+
+###############################################################################
+# Tests
+
+@parametrize('compress', [0, 1])
+@parametrize('member', typelist)
+def test_standard_types(tmpdir, compress, member):
+ # Test pickling and saving with standard types.
+ filename = tmpdir.join('test.pkl').strpath
+ numpy_pickle.dump(member, filename, compress=compress)
+ _member = numpy_pickle.load(filename)
+ # We compare the pickled instance to the reloaded one only if it
+ # can be compared to a copied one
+ if member == copy.deepcopy(member):
+ assert member == _member
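+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# Basic round trip used throughout this module: ``numpy_pickle.dump`` writes
+# to a filename and returns the list of files it created, and
+# ``numpy_pickle.load`` reads the object back. The ``filename`` argument of
+# this helper is arbitrary.
+def _example_dump_load_roundtrip(filename):
+    created = numpy_pickle.dump({'answer': 42}, filename)
+    assert created == [filename]
+    assert numpy_pickle.load(filename) == {'answer': 42}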
+
+
+def test_value_error():
+ # Test inverting the input arguments to dump
+ with raises(ValueError):
+ numpy_pickle.dump('foo', dict())
+
+
+@parametrize('wrong_compress', [-1, 10, dict()])
+def test_compress_level_error(wrong_compress):
+ # Verify that passing an invalid compress argument raises an error.
+ exception_msg = ('Non valid compress level given: '
+ '"{0}"'.format(wrong_compress))
+ with raises(ValueError) as excinfo:
+ numpy_pickle.dump('dummy', 'foo', compress=wrong_compress)
+ excinfo.match(exception_msg)
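+
+
+# --- Illustrative sketch, not part of the upstream joblib test suite --------
+# Besides booleans and integer levels, ``compress`` also accepts a
+# (method, level) tuple such as ('zlib', 3). The ``filename`` argument of
+# this helper is arbitrary.
+def _example_compress_tuple(filename):
+    obj = list(range(10))
+    numpy_pickle.dump(obj, filename, compress=('zlib', 3))
+    assert numpy_pickle.load(filename) == obj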
+
+
+@with_numpy
+@parametrize('compress', [False, True, 0, 3, 'zlib'])
+def test_numpy_persistence(tmpdir, compress):
+ filename = tmpdir.join('test.pkl').strpath
+ rnd = np.random.RandomState(0)
+ a = rnd.random_sample((10, 2))
+ # We use 'a.T' to have a non C-contiguous array.
+ for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])):
+ filenames = numpy_pickle.dump(obj, filename, compress=compress)
+
+ # All is cached in one file
+ assert len(filenames) == 1
+ # Check that only one file was created
+ assert filenames[0] == filename
+ # Check that this file does exist
+ assert os.path.exists(filenames[0])
+
+ # Unpickle the object
+ obj_ = numpy_pickle.load(filename)
+ # Check that the items are indeed arrays
+ for item in obj_:
+ assert isinstance(item, np.ndarray)
+ # And finally, check that all the values are equal.
+ np.testing.assert_array_equal(np.array(obj), np.array(obj_))
+
+ # Now test with an array subclass
+ obj = np.memmap(filename + 'mmap', mode='w+', shape=4, dtype=np.float64)
+ filenames = numpy_pickle.dump(obj, filename, compress=compress)
+ # All is cached in one file
+ assert len(filenames) == 1
+
+ obj_ = numpy_pickle.load(filename)
+ if (type(obj) is not np.memmap and
+ hasattr(obj, '__array_prepare__')):
+ # We don't reconstruct memmaps
+ assert isinstance(obj_, type(obj))
+
+ np.testing.assert_array_equal(obj_, obj)
+
+ # Test with an object containing multiple numpy arrays
+ obj = ComplexTestObject()
+ filenames = numpy_pickle.dump(obj, filename, compress=compress)
+ # All is cached in one file
+ assert len(filenames) == 1
+
+ obj_loaded = numpy_pickle.load(filename)
+ assert isinstance(obj_loaded, type(obj))
+ np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)
+ np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)
+ np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)
+
+
+@with_numpy
+def test_numpy_persistence_bufferred_array_compression(tmpdir):
+ big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8)
+ filename = tmpdir.join('test.pkl').strpath
+ numpy_pickle.dump(big_array, filename, compress=True)
+ arr_reloaded = numpy_pickle.load(filename)
+
+ np.testing.assert_array_equal(big_array, arr_reloaded)
+
+
+@with_numpy
+def test_memmap_persistence(tmpdir):
+ rnd = np.random.RandomState(0)
+ a = rnd.random_sample(10)
+ filename = tmpdir.join('test1.pkl').strpath
+ numpy_pickle.dump(a, filename)
+ b = numpy_pickle.load(filename, mmap_mode='r')
+
+ assert isinstance(b, np.memmap)
+
+ # Test with an object containing multiple numpy arrays
+ filename = tmpdir.join('test2.pkl').strpath
+ obj = ComplexTestObject()
+ numpy_pickle.dump(obj, filename)
+ obj_loaded = numpy_pickle.load(filename, mmap_mode='r')
+ assert isinstance(obj_loaded, type(obj))
+ assert isinstance(obj_loaded.array_float, np.memmap)
+ assert not obj_loaded.array_float.flags.writeable
+ assert isinstance(obj_loaded.array_int, np.memmap)
+ assert not obj_loaded.array_int.flags.writeable
+ # Memory map not allowed for numpy object arrays
+ assert not isinstance(obj_loaded.array_obj, np.memmap)
+ np.testing.assert_array_equal(obj_loaded.array_float,
+ obj.array_float)
+ np.testing.assert_array_equal(obj_loaded.array_int,
+ obj.array_int)
+ np.testing.assert_array_equal(obj_loaded.array_obj,
+ obj.array_obj)
+
+ # Test we can write in memmapped arrays
+ obj_loaded = numpy_pickle.load(filename, mmap_mode='r+')
+ assert obj_loaded.array_float.flags.writeable
+ obj_loaded.array_float[0:10] = 10.0
+ assert obj_loaded.array_int.flags.writeable
+ obj_loaded.array_int[0:10] = 10
+
+ obj_reloaded = numpy_pickle.load(filename, mmap_mode='r')
+ np.testing.assert_array_equal(obj_reloaded.array_float,
+ obj_loaded.array_float)
+ np.testing.assert_array_equal(obj_reloaded.array_int,
+ obj_loaded.array_int)
+
+ # Test w+ mode is caught and the mode has switched to r+
+ numpy_pickle.load(filename, mmap_mode='w+')
+ assert obj_loaded.array_int.flags.writeable
+ assert obj_loaded.array_int.mode == 'r+'
+ assert obj_loaded.array_float.flags.writeable
+ assert obj_loaded.array_float.mode == 'r+'
+
+
+@with_numpy
+def test_memmap_persistence_mixed_dtypes(tmpdir):
+ # loading datastructures that have sub-arrays with dtype=object
+ # should not prevent memmapping on fixed size dtype sub-arrays.
+ rnd = np.random.RandomState(0)
+ a = rnd.random_sample(10)
+ b = np.array([1, 'b'], dtype=object)
+ construct = (a, b)
+ filename = tmpdir.join('test.pkl').strpath
+ numpy_pickle.dump(construct, filename)
+ a_clone, b_clone = numpy_pickle.load(filename, mmap_mode='r')
+
+ # the floating point array has been memory mapped
+ assert isinstance(a_clone, np.memmap)
+
+ # the object-dtype array has been loaded in memory
+ assert not isinstance(b_clone, np.memmap)
+
+
+@with_numpy
+def test_masked_array_persistence(tmpdir):
+ # The special-case pickler fails because saving masked_array
+ # is not implemented, but it just delegates to the standard pickler.
+ rnd = np.random.RandomState(0)
+ a = rnd.random_sample(10)
+ a = np.ma.masked_greater(a, 0.5)
+ filename = tmpdir.join('test.pkl').strpath
+ numpy_pickle.dump(a, filename)
+ b = numpy_pickle.load(filename, mmap_mode='r')
+ assert isinstance(b, np.ma.masked_array)
+
+
+@with_numpy
+def test_compress_mmap_mode_warning(tmpdir):
+ # Test the warning in case of compress + mmap_mode
+ rnd = np.random.RandomState(0)
+ a = rnd.random_sample(10)
+ this_filename = tmpdir.join('test.pkl').strpath
+ numpy_pickle.dump(a, this_filename, compress=1)
+ with warns(UserWarning) as warninfo:
+ numpy_pickle.load(this_filename, mmap_mode='r+')
+ debug_msg = "\n".join([str(w) for w in warninfo])
+ warninfo = [w.message for w in warninfo]
+ assert len(warninfo) == 1, debug_msg
+ assert (
+ str(warninfo[0]) ==
+ 'mmap_mode "r+" is not compatible with compressed '
+ f'file {this_filename}. "r+" flag will be ignored.'
+ )
+
+
+@with_numpy
+@parametrize('cache_size', [None, 0, 10])
+def test_cache_size_warning(tmpdir, cache_size):
+ # Check deprecation warning raised when cache size is not None
+ filename = tmpdir.join('test.pkl').strpath
+ rnd = np.random.RandomState(0)
+ a = rnd.random_sample((10, 2))
+
+ warnings.simplefilter("always")
+ with warnings.catch_warnings(record=True) as warninfo:
+ numpy_pickle.dump(a, filename, cache_size=cache_size)
+ expected_nb_warnings = 1 if cache_size is not None else 0
+ assert len(warninfo) == expected_nb_warnings
+ for w in warninfo:
+ assert w.category == DeprecationWarning
+ assert (str(w.message) ==
+ "Please do not set 'cache_size' in joblib.dump, this "
+ "parameter has no effect and will be removed. You "
+ "used 'cache_size={0}'".format(cache_size))
+
+
+@with_numpy
+@with_memory_profiler
+@parametrize('compress', [True, False])
+def test_memory_usage(tmpdir, compress):
+ # Verify memory stays within expected bounds.
+ filename = tmpdir.join('test.pkl').strpath
+ small_array = np.ones((10, 10))
+ big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8)
+
+ for obj in (small_array, big_array):
+ size = obj.nbytes / 1e6
+ obj_filename = filename + str(np.random.randint(0, 1000))
+ mem_used = memory_used(numpy_pickle.dump,
+ obj, obj_filename, compress=compress)
+
+ # The memory used to dump the object shouldn't exceed the buffer
+ # size used to write array chunks (16MB).
+ write_buf_size = _IO_BUFFER_SIZE + 16 * 1024 ** 2 / 1e6
+ assert mem_used <= write_buf_size
+
+ mem_used = memory_used(numpy_pickle.load, obj_filename)
+ # memory used should be less than array size + buffer size used to
+ # read the array chunk by chunk.
+ read_buf_size = 32 + _IO_BUFFER_SIZE # MiB
+ assert mem_used < size + read_buf_size
+
+
+@with_numpy
+def test_compressed_pickle_dump_and_load(tmpdir):
+ expected_list = [np.arange(5, dtype=np.dtype('<i8')),
+ np.arange(5, dtype=np.dtype('<f8')),
+ np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
+ np.arange(256, dtype=np.uint8).tobytes(),
+ u"C'est l'\xe9t\xe9 !"]
+
+ fname = tmpdir.join('temp.pkl.gz').strpath
+
+ dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1)
+ assert len(dumped_filenames) == 1
+ result_list = numpy_pickle.load(fname)
+ for result, expected in zip(result_list, expected_list):
+ if isinstance(expected, np.ndarray):
+ expected = _ensure_native_byte_order(expected)
+ assert result.dtype == expected.dtype
+ np.testing.assert_equal(result, expected)
+ else:
+ assert result == expected
+
+
+def _check_pickle(filename, expected_list, mmap_mode=None):
+ """Helper function to test joblib pickle content.
+
+ Note: currently only pickles containing an iterable are supported
+ by this function.
+ """
+ version_match = re.match(r'.+py(\d)(\d).+', filename)
+ py_version_used_for_writing = int(version_match.group(1))
+
+ py_version_to_default_pickle_protocol = {2: 2, 3: 3}
+ pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4)
+ pickle_writing_protocol = py_version_to_default_pickle_protocol.get(
+ py_version_used_for_writing, 4)
+ if pickle_reading_protocol >= pickle_writing_protocol:
+ try:
+ with warnings.catch_warnings(record=True) as warninfo:
+ warnings.simplefilter('always')
+ warnings.filterwarnings(
+ 'ignore', module='numpy',
+ message='The compiler package is deprecated')
+ result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode)
+ filename_base = os.path.basename(filename)
+ expected_nb_deprecation_warnings = 1 if (
+ "_0.9" in filename_base or "_0.8.4" in filename_base) else 0
+
+ expected_nb_user_warnings = 3 if (
+ re.search("_0.1.+.pkl$", filename_base) and
+ mmap_mode is not None) else 0
+ expected_nb_warnings = \
+ expected_nb_deprecation_warnings + expected_nb_user_warnings
+ assert len(warninfo) == expected_nb_warnings
+
+ deprecation_warnings = [
+ w for w in warninfo if issubclass(
+ w.category, DeprecationWarning)]
+ user_warnings = [
+ w for w in warninfo if issubclass(
+ w.category, UserWarning)]
+ for w in deprecation_warnings:
+ assert (str(w.message) ==
+ "The file '{0}' has been generated with a joblib "
+ "version less than 0.10. Please regenerate this "
+ "pickle file.".format(filename))
+
+ for w in user_warnings:
+ escaped_filename = re.escape(filename)
+ assert re.search(
+ f"memmapped.+{escaped_filename}.+segmentation fault",
+ str(w.message))
+
+ for result, expected in zip(result_list, expected_list):
+ if isinstance(expected, np.ndarray):
+ expected = _ensure_native_byte_order(expected)
+ assert result.dtype == expected.dtype
+ np.testing.assert_equal(result, expected)
+ else:
+ assert result == expected
+ except Exception as exc:
+ # When trying to read with python 3 a pickle generated
+ # with python 2 we expect a user-friendly error
+ if py_version_used_for_writing == 2:
+ assert isinstance(exc, ValueError)
+ message = ('You may be trying to read with '
+ 'python 3 a joblib pickle generated with python 2.')
+ assert message in str(exc)
+ elif filename.endswith('.lz4') and with_lz4.args[0]:
+ assert isinstance(exc, ValueError)
+ assert LZ4_NOT_INSTALLED_ERROR in str(exc)
+ else:
+ raise
+ else:
+ # Pickle protocol used for writing is too high. We expect an
+ # "unsupported pickle protocol" error message
+ try:
+ numpy_pickle.load(filename)
+ raise AssertionError('Numpy pickle loading should '
+ 'have raised a ValueError exception')
+ except ValueError as e:
+ message = 'unsupported pickle protocol: {0}'.format(
+ pickle_writing_protocol)
+ assert message in str(e.args)
+
+
+@with_numpy
+def test_joblib_pickle_across_python_versions():
+ # We need to be specific about dtypes in particular endianness
+ # because the pickles can be generated on one architecture and
+ # the tests run on another one. See
+ # https://github.com/joblib/joblib/issues/279.
+ be_arrays = [np.array([(1, 2.0), (3, 4.0)],
+ dtype=[('', '>i8'), ('', '>f8')]),
+ np.arange(3, dtype=np.dtype('>i8')),
+ np.arange(3, dtype=np.dtype('>f8'))]
+
+ # Verify the byteorder mismatch is correctly detected.
+ for array in be_arrays:
+ if sys.byteorder == 'big':
+ assert not _is_numpy_array_byte_order_mismatch(array)
+ else:
+ assert _is_numpy_array_byte_order_mismatch(array)
+ converted = _ensure_native_byte_order(array)
+ if converted.dtype.fields:
+ for f in converted.dtype.fields.values():
+ assert f[0].byteorder == '='
+ else:
+ assert converted.dtype.byteorder == "="
+
+ # List of numpy arrays with little endian byteorder.
+ le_arrays = [np.array([(1, 2.0), (3, 4.0)],
+ dtype=[('', '<i8'), ('', '<f8')]),
+ np.arange(3, dtype=np.dtype('<i8')),
+ np.arange(3, dtype=np.dtype('<f8'))]
+
+
+def test_register_compressor(tmpdir):
+ # Check that registering a compressor works.
+ compressor_name = 'test-name'
+ compressor_prefix = 'test-prefix'
+
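+ # A minimal file object type: io.BufferedIOBase provides the file object
+ # interface that register_compressor checks for.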
+ class BinaryCompressorTestFile(io.BufferedIOBase):
+ pass
+
+ class BinaryCompressorTestWrapper(CompressorWrapper):
+
+ def __init__(self):
+ CompressorWrapper.__init__(self, obj=BinaryCompressorTestFile,
+ prefix=compressor_prefix)
+
+ register_compressor(compressor_name, BinaryCompressorTestWrapper())
+
+ assert (_COMPRESSORS[compressor_name].fileobj_factory ==
+ BinaryCompressorTestFile)
+ assert _COMPRESSORS[compressor_name].prefix == compressor_prefix
+
+ # Remove this dummy compressor file from extra compressors because other
+ # tests might fail because of this
+ _COMPRESSORS.pop(compressor_name)
+
+
+@parametrize('invalid_name', [1, (), {}])
+def test_register_compressor_invalid_name(invalid_name):
+ # Test that registering an invalid compressor name is not allowed.
+ with raises(ValueError) as excinfo:
+ register_compressor(invalid_name, None)
+ excinfo.match("Compressor name should be a string")
+
+
+def test_register_compressor_invalid_fileobj():
+ # Test that registering an invalid file object is not allowed.
+
+ class InvalidFileObject():
+ pass
+
+ class InvalidFileObjectWrapper(CompressorWrapper):
+ def __init__(self):
+ CompressorWrapper.__init__(self, obj=InvalidFileObject,
+ prefix=b'prefix')
+
+ with raises(ValueError) as excinfo:
+ register_compressor('invalid', InvalidFileObjectWrapper())
+
+ excinfo.match("Compressor 'fileobj_factory' attribute should implement "
+ "the file object interface")
+
+
+class AnotherZlibCompressorWrapper(CompressorWrapper):
+
+ def __init__(self):
+ CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b'prefix')
+
+
+class StandardLibGzipCompressorWrapper(CompressorWrapper):
+
+ def __init__(self):
+ CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b'prefix')
+
+
+def test_register_compressor_already_registered():
+ # Test registration of existing compressor files.
+ compressor_name = 'test-name'
+
+ # register a test compressor
+ register_compressor(compressor_name, AnotherZlibCompressorWrapper())
+
+ with raises(ValueError) as excinfo:
+ register_compressor(compressor_name,
+ StandardLibGzipCompressorWrapper())
+ excinfo.match("Compressor '{}' already registered."
+ .format(compressor_name))
+
+ register_compressor(compressor_name, StandardLibGzipCompressorWrapper(),
+ force=True)
+
+ assert compressor_name in _COMPRESSORS
+ assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile
+
+ # Remove this dummy compressor file from extra compressors because other
+ # tests might fail because of this
+ _COMPRESSORS.pop(compressor_name)
+
+
+@with_lz4
+def test_lz4_compression(tmpdir):
+ # Check that lz4 can be used when dependency is available.
+ import lz4.frame
+ compressor = 'lz4'
+ assert compressor in _COMPRESSORS
+ assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile
+
+ fname = tmpdir.join('test.pkl').strpath
+ data = 'test data'
+ numpy_pickle.dump(data, fname, compress=compressor)
+
+ with open(fname, 'rb') as f:
+ assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
+ assert numpy_pickle.load(fname) == data
+
+ # Test that LZ4 is applied based on file extension
+ numpy_pickle.dump(data, fname + '.lz4')
+ with open(fname, 'rb') as f:
+ assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
+ assert numpy_pickle.load(fname) == data
+
+
+@without_lz4
+def test_lz4_compression_without_lz4(tmpdir):
+ # Check that lz4 cannot be used when dependency is not available.
+ fname = tmpdir.join('test.nolz4').strpath
+ data = 'test data'
+ msg = LZ4_NOT_INSTALLED_ERROR
+ with raises(ValueError) as excinfo:
+ numpy_pickle.dump(data, fname, compress='lz4')
+ excinfo.match(msg)
+
+ with raises(ValueError) as excinfo:
+ numpy_pickle.dump(data, fname + '.lz4')
+ excinfo.match(msg)
+
+
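+# Test with the default pickle protocol and, when it differs, the highest
+# available protocol.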
+protocols = [pickle.DEFAULT_PROTOCOL]
+if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL:
+ protocols.append(pickle.HIGHEST_PROTOCOL)
+
+
+@with_numpy
+@parametrize('protocol', protocols)
+def test_memmap_alignment_padding(tmpdir, protocol):
+ # Test that memmaped arrays returned by numpy.load are correctly aligned
+ fname = tmpdir.join('test.mmap').strpath
+
+ a = np.random.randn(2)
+ numpy_pickle.dump(a, fname, protocol=protocol)
+ memmap = numpy_pickle.load(fname, mmap_mode='r')
+ assert isinstance(memmap, np.memmap)
+ np.testing.assert_array_equal(a, memmap)
+ assert (
+ memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
+ assert memmap.flags.aligned
+
+ array_list = [
+ np.random.randn(2), np.random.randn(2),
+ np.random.randn(2), np.random.randn(2)
+ ]
+
+ # On Windows OSError 22 if reusing the same path for memmap ...
+ fname = tmpdir.join('test1.mmap').strpath
+ numpy_pickle.dump(array_list, fname, protocol=protocol)
+ l_reloaded = numpy_pickle.load(fname, mmap_mode='r')
+
+ for idx, memmap in enumerate(l_reloaded):
+ assert isinstance(memmap, np.memmap)
+ np.testing.assert_array_equal(array_list[idx], memmap)
+ assert (
+ memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
+ assert memmap.flags.aligned
+
+ array_dict = {
+ 'a0': np.arange(2, dtype=np.uint8),
+ 'a1': np.arange(3, dtype=np.uint8),
+ 'a2': np.arange(5, dtype=np.uint8),
+ 'a3': np.arange(7, dtype=np.uint8),
+ 'a4': np.arange(11, dtype=np.uint8),
+ 'a5': np.arange(13, dtype=np.uint8),
+ 'a6': np.arange(17, dtype=np.uint8),
+ 'a7': np.arange(19, dtype=np.uint8),
+ 'a8': np.arange(23, dtype=np.uint8),
+ }
+
+ # On Windows OSError 22 if reusing the same path for memmap ...
+ fname = tmpdir.join('test2.mmap').strpath
+ numpy_pickle.dump(array_dict, fname, protocol=protocol)
+ d_reloaded = numpy_pickle.load(fname, mmap_mode='r')
+
+ for key, memmap in d_reloaded.items():
+ assert isinstance(memmap, np.memmap)
+ np.testing.assert_array_equal(array_dict[key], memmap)
+ assert (
+ memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
+ assert memmap.flags.aligned
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py b/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e95393b19e979593c7037d0d4fb740e47131dde
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py
@@ -0,0 +1,16 @@
+"""Test the old numpy pickler, compatibility version."""
+
+# numpy_pickle is not a drop-in replacement of pickle, as it takes
+# filenames instead of open files as arguments.
+from joblib import numpy_pickle_compat
+
+
+def test_z_file(tmpdir):
+ # Test saving and loading data with Zfiles.
+ filename = tmpdir.join('test.pkl').strpath
+ data = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar')
+ with open(filename, 'wb') as f:
+ numpy_pickle_compat.write_zfile(f, data)
+ with open(filename, 'rb') as f:
+ data_read = numpy_pickle_compat.read_zfile(f)
+ assert data == data_read
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py b/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c414a2227cbf6094dc594f6712139d4fd397a9d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py
@@ -0,0 +1,9 @@
+from joblib.compressor import BinaryZlibFile
+from joblib.testing import parametrize
+
+
+@parametrize('filename', ['test', u'test']) # testing str and unicode names
+def test_binary_zlib_file(tmpdir, filename):
+ """Testing creation of files depending on the type of the filenames."""
+ binary_file = BinaryZlibFile(tmpdir.join(filename).strpath, mode='wb')
+ binary_file.close()
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_parallel.py b/venv/lib/python3.10/site-packages/joblib/test/test_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..21f06faa01c3ed93b80accad494e6a0482cc2e72
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_parallel.py
@@ -0,0 +1,2034 @@
+"""
+Test the parallel module.
+"""
+
+# Author: Gael Varoquaux
+# Copyright (c) 2010-2011 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import os
+import sys
+import time
+import mmap
+import weakref
+import warnings
+import threading
+from traceback import format_exception
+from math import sqrt
+from time import sleep
+from pickle import PicklingError
+from contextlib import nullcontext
+from multiprocessing import TimeoutError
+import pytest
+
+import joblib
+from joblib import parallel
+from joblib import dump, load
+
+from joblib._multiprocessing_helpers import mp
+
+from joblib.test.common import np, with_numpy
+from joblib.test.common import with_multiprocessing
+from joblib.test.common import IS_PYPY, force_gc_pypy
+from joblib.testing import (parametrize, raises, check_subprocess_call,
+ skipif, warns)
+
+if mp is not None:
+ # Loky is not available if multiprocessing is not
+ from joblib.externals.loky import get_reusable_executor
+
+from queue import Queue
+
+try:
+ import posix
+except ImportError:
+ posix = None
+
+try:
+ from ._openmp_test_helper.parallel_sum import parallel_sum
+except ImportError:
+ parallel_sum = None
+
+try:
+ import distributed
+except ImportError:
+ distributed = None
+
+from joblib._parallel_backends import SequentialBackend
+from joblib._parallel_backends import ThreadingBackend
+from joblib._parallel_backends import MultiprocessingBackend
+from joblib._parallel_backends import ParallelBackendBase
+from joblib._parallel_backends import LokyBackend
+
+from joblib.parallel import Parallel, delayed
+from joblib.parallel import parallel_config
+from joblib.parallel import parallel_backend
+from joblib.parallel import register_parallel_backend
+from joblib.parallel import effective_n_jobs, cpu_count
+
+from joblib.parallel import mp, BACKENDS, DEFAULT_BACKEND
+
+
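+# Backends usable with return_as="generator": the multiprocessing backend
+# does not support it.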
+RETURN_GENERATOR_BACKENDS = BACKENDS.copy()
+RETURN_GENERATOR_BACKENDS.pop("multiprocessing", None)
+
+ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys())
+# Add instances of backend classes deriving from ParallelBackendBase
+ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS]
+if mp is None:
+ PROCESS_BACKENDS = []
+else:
+ PROCESS_BACKENDS = ['multiprocessing', 'loky']
+PARALLEL_BACKENDS = PROCESS_BACKENDS + ['threading']
+
+if hasattr(mp, 'get_context'):
+ # Custom multiprocessing context in Python 3.4+
+ ALL_VALID_BACKENDS.append(mp.get_context('spawn'))
+
+DefaultBackend = BACKENDS[DEFAULT_BACKEND]
+
+
+def get_workers(backend):
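+ # The worker pool is stored as '_pool' (multiprocessing/threading) or
+ # '_workers' (loky) depending on the backend.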
+ return getattr(backend, '_pool', getattr(backend, '_workers', None))
+
+
+def division(x, y):
+ return x / y
+
+
+def square(x):
+ return x ** 2
+
+
+class MyExceptionWithFinickyInit(Exception):
+ """An exception class with non trivial __init__
+ """
+ def __init__(self, a, b, c, d):
+ pass
+
+
+def exception_raiser(x, custom_exception=False):
+ if x == 7:
+ raise (MyExceptionWithFinickyInit('a', 'b', 'c', 'd')
+ if custom_exception else ValueError)
+ return x
+
+
+def interrupt_raiser(x):
+ time.sleep(.05)
+ raise KeyboardInterrupt
+
+
+def f(x, y=0, z=0):
+ """ A module-level function so that it can be spawn with
+ multiprocessing.
+ """
+ return x ** 2 + y + z
+
+
+def _active_backend_type():
+ return type(parallel.get_active_backend()[0])
+
+
+def parallel_func(inner_n_jobs, backend):
+ return Parallel(n_jobs=inner_n_jobs, backend=backend)(
+ delayed(square)(i) for i in range(3))
+
+
+###############################################################################
+def test_cpu_count():
+ assert cpu_count() > 0
+
+
+def test_effective_n_jobs():
+ assert effective_n_jobs() > 0
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+@pytest.mark.parametrize(
+ "backend_n_jobs, expected_n_jobs",
+ [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)],
+ ids=["positive-int", "negative-int", "None"]
+)
+@with_multiprocessing
+def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs):
+ # check the number of effective jobs when `n_jobs=None`
+ # non-regression test for https://github.com/joblib/joblib/issues/984
+ with context("threading", n_jobs=backend_n_jobs):
+ # when using a backend, the default number of jobs will be the one set
+ # in the backend
+ assert effective_n_jobs(n_jobs=None) == expected_n_jobs
+ # without any backend, None will default to a single job
+ assert effective_n_jobs(n_jobs=None) == 1
+
+
+###############################################################################
+# Test parallel
+
+@parametrize('backend', ALL_VALID_BACKENDS)
+@parametrize('n_jobs', [1, 2, -1, -2])
+@parametrize('verbose', [2, 11, 100])
+def test_simple_parallel(backend, n_jobs, verbose):
+ assert ([square(x) for x in range(5)] ==
+ Parallel(n_jobs=n_jobs, backend=backend,
+ verbose=verbose)(
+ delayed(square)(x) for x in range(5)))
+
+
+@parametrize('backend', ALL_VALID_BACKENDS)
+def test_main_thread_renamed_no_warning(backend, monkeypatch):
+ # Check that no default backend relies on the name of the main thread:
+ # https://github.com/joblib/joblib/issues/180#issuecomment-253266247
+ # Some programs use a different name for the main thread. This is the case
+ # for uWSGI apps for instance.
+ monkeypatch.setattr(target=threading.current_thread(), name='name',
+ value='some_new_name_for_the_main_thread')
+
+ with warnings.catch_warnings(record=True) as warninfo:
+ results = Parallel(n_jobs=2, backend=backend)(
+ delayed(square)(x) for x in range(3))
+ assert results == [0, 1, 4]
+
+ # Due to the default parameters of LokyBackend, there is a chance that
+ # warninfo catches Warnings from worker timeouts. We remove it if it exists
+ warninfo = [w for w in warninfo if "worker timeout" not in str(w.message)]
+
+ # The multiprocessing backend will raise a warning when detecting that it
+ # is started from a non-main thread. Let's check that there is no false
+ # positive because of the name change.
+ assert len(warninfo) == 0
+
+
+def _assert_warning_nested(backend, inner_n_jobs, expected):
+ with warnings.catch_warnings(record=True) as warninfo:
+ warnings.simplefilter("always")
+ parallel_func(backend=backend, inner_n_jobs=inner_n_jobs)
+
+ warninfo = [w.message for w in warninfo]
+ if expected:
+ if warninfo:
+ warnings_are_correct = all(
+ 'backed parallel loops cannot' in each.args[0]
+ for each in warninfo
+ )
+ # With Python nogil, when the outer backend is threading, we might
+ # see more than one warning
+ warnings_have_the_right_length = (
+ len(warninfo) >= 1 if getattr(sys.flags, 'nogil', False)
+ else len(warninfo) == 1)
+ return warnings_are_correct and warnings_have_the_right_length
+
+ return False
+ else:
+ assert not warninfo
+ return True
+
+
+@with_multiprocessing
+@parametrize('parent_backend,child_backend,expected', [
+ ('loky', 'multiprocessing', True),
+ ('loky', 'loky', False),
+ ('multiprocessing', 'multiprocessing', True),
+ ('multiprocessing', 'loky', True),
+ ('threading', 'multiprocessing', True),
+ ('threading', 'loky', True),
+])
+def test_nested_parallel_warnings(parent_backend, child_backend, expected):
+
+ # no warnings if inner_n_jobs=1
+ Parallel(n_jobs=2, backend=parent_backend)(
+ delayed(_assert_warning_nested)(
+ backend=child_backend, inner_n_jobs=1,
+ expected=False)
+ for _ in range(5))
+
+ # warnings if inner_n_jobs != 1 and expected
+ res = Parallel(n_jobs=2, backend=parent_backend)(
+ delayed(_assert_warning_nested)(
+ backend=child_backend, inner_n_jobs=2,
+ expected=expected)
+ for _ in range(5))
+
+ # warning handling is not thread safe. One thread might see multiple
+ # warnings or no warning at all.
+ if parent_backend == "threading":
+ if IS_PYPY and not any(res):
+ # Related to joblib#1426, should be removed once it is solved.
+ pytest.xfail(reason="This test often fails in PyPy.")
+ assert any(res)
+ else:
+ assert all(res)
+
+
+@with_multiprocessing
+@parametrize('backend', ['loky', 'multiprocessing', 'threading'])
+def test_background_thread_parallelism(backend):
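+ # Running Parallel from a background (non-main) thread should work
+ # without emitting any warning.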
+ is_run_parallel = [False]
+
+ def background_thread(is_run_parallel):
+ with warnings.catch_warnings(record=True) as warninfo:
+ Parallel(n_jobs=2)(
+ delayed(sleep)(.1) for _ in range(4))
+ print(len(warninfo))
+ is_run_parallel[0] = len(warninfo) == 0
+
+ t = threading.Thread(target=background_thread, args=(is_run_parallel,))
+ t.start()
+ t.join()
+ assert is_run_parallel[0]
+
+
+def nested_loop(backend):
+ Parallel(n_jobs=2, backend=backend)(
+ delayed(square)(.01) for _ in range(2))
+
+
+@parametrize('child_backend', BACKENDS)
+@parametrize('parent_backend', BACKENDS)
+def test_nested_loop(parent_backend, child_backend):
+ Parallel(n_jobs=2, backend=parent_backend)(
+ delayed(nested_loop)(child_backend) for _ in range(2))
+
+
+def raise_exception(backend):
+ raise ValueError
+
+
+@with_multiprocessing
+def test_nested_loop_with_exception_with_loky():
+ with raises(ValueError):
+ with Parallel(n_jobs=2, backend="loky") as parallel:
+ parallel([delayed(nested_loop)("loky"),
+ delayed(raise_exception)("loky")])
+
+
+def test_mutate_input_with_threads():
+ """Input is mutable when using the threading backend"""
+ q = Queue(maxsize=5)
+ Parallel(n_jobs=2, backend="threading")(
+ delayed(q.put)(1) for _ in range(5))
+ assert q.full()
+
+
+@parametrize('n_jobs', [1, 2, 3])
+def test_parallel_kwargs(n_jobs):
+ """Check the keyword argument processing of pmap."""
+ lst = range(10)
+ assert ([f(x, y=1) for x in lst] ==
+ Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst))
+
+
+@parametrize('backend', PARALLEL_BACKENDS)
+def test_parallel_as_context_manager(backend):
+ lst = range(10)
+ expected = [f(x, y=1) for x in lst]
+
+ with Parallel(n_jobs=4, backend=backend) as p:
+ # Internally a pool instance has been eagerly created and is managed
+ # via the context manager protocol
+ managed_backend = p._backend
+
+ # We make call with the managed parallel object several times inside
+ # the managed block:
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
+
+ # Those calls have all used the same pool instance:
+ if mp is not None:
+ assert get_workers(managed_backend) is get_workers(p._backend)
+
+ # As soon as we exit the context manager block, the pool is terminated and
+ # no longer referenced from the parallel object:
+ if mp is not None:
+ assert get_workers(p._backend) is None
+
+ # It's still possible to use the parallel instance in non-managed mode:
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
+ if mp is not None:
+ assert get_workers(p._backend) is None
+
+
+@with_multiprocessing
+def test_parallel_pickling():
+ """ Check that pmap captures the errors when it is passed an object
+ that cannot be pickled.
+ """
+ class UnpicklableObject(object):
+ def __reduce__(self):
+ raise RuntimeError('123')
+
+ with raises(PicklingError, match=r"the task to send"):
+ Parallel(n_jobs=2, backend='loky')(delayed(id)(
+ UnpicklableObject()) for _ in range(10))
+
+
+@parametrize('backend', PARALLEL_BACKENDS)
+def test_parallel_timeout_success(backend):
+ # Check that timeout isn't thrown when function is fast enough
+ assert len(Parallel(n_jobs=2, backend=backend, timeout=30)(
+ delayed(sleep)(0.001) for x in range(10))) == 10
+
+
+@with_multiprocessing
+@parametrize('backend', PARALLEL_BACKENDS)
+def test_parallel_timeout_fail(backend):
+ # Check that timeout properly fails when function is too slow
+ with raises(TimeoutError):
+ Parallel(n_jobs=2, backend=backend, timeout=0.01)(
+ delayed(sleep)(10) for x in range(10))
+
+
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_error_capture(backend):
+ # Check that errors are captured, and that the correct exceptions
+ # are raised.
+ if mp is not None:
+ with raises(ZeroDivisionError):
+ Parallel(n_jobs=2, backend=backend)(
+ [delayed(division)(x, y)
+ for x, y in zip((0, 1), (1, 0))])
+
+ with raises(KeyboardInterrupt):
+ Parallel(n_jobs=2, backend=backend)(
+ [delayed(interrupt_raiser)(x) for x in (1, 0)])
+
+ # Try again with the context manager API
+ with Parallel(n_jobs=2, backend=backend) as parallel:
+ assert get_workers(parallel._backend) is not None
+ original_workers = get_workers(parallel._backend)
+
+ with raises(ZeroDivisionError):
+ parallel([delayed(division)(x, y)
+ for x, y in zip((0, 1), (1, 0))])
+
+ # The managed pool should still be available and be in a working
+ # state despite the previously raised (and caught) exception
+ assert get_workers(parallel._backend) is not None
+
+ # The pool should have been interrupted and restarted:
+ assert get_workers(parallel._backend) is not original_workers
+
+ assert ([f(x, y=1) for x in range(10)] ==
+ parallel(delayed(f)(x, y=1) for x in range(10)))
+
+ original_workers = get_workers(parallel._backend)
+ with raises(KeyboardInterrupt):
+ parallel([delayed(interrupt_raiser)(x) for x in (1, 0)])
+
+ # The pool should still be available despite the exception
+ assert get_workers(parallel._backend) is not None
+
+ # The pool should have been interrupted and restarted:
+ assert get_workers(parallel._backend) is not original_workers
+
+ assert ([f(x, y=1) for x in range(10)] ==
+ parallel(delayed(f)(x, y=1) for x in range(10))), (
+ parallel._iterating, parallel.n_completed_tasks,
+ parallel.n_dispatched_tasks, parallel._aborting
+ )
+
+ # Check that the inner pool has been terminated when exiting the
+ # context manager
+ assert get_workers(parallel._backend) is None
+ else:
+ with raises(KeyboardInterrupt):
+ Parallel(n_jobs=2)(
+ [delayed(interrupt_raiser)(x) for x in (1, 0)])
+
+ # wrapped exceptions should inherit from the class of the original
+ # exception to make it easy to catch them
+ with raises(ZeroDivisionError):
+ Parallel(n_jobs=2)(
+ [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))])
+
+ with raises(MyExceptionWithFinickyInit):
+ Parallel(n_jobs=2, verbose=0)(
+ (delayed(exception_raiser)(i, custom_exception=True)
+ for i in range(30)))
+
+
+@with_multiprocessing
+@parametrize('backend', BACKENDS)
+def test_error_in_task_iterator(backend):
+
+ def my_generator(raise_at=0):
+ for i in range(20):
+ if i == raise_at:
+ raise ValueError("Iterator Raising Error")
+ yield i
+
+ with Parallel(n_jobs=2, backend=backend) as p:
+ # The error is raised in the pre-dispatch phase
+ with raises(ValueError, match="Iterator Raising Error"):
+ p(delayed(square)(i) for i in my_generator(raise_at=0))
+
+ # The error is raised when dispatching a new task after the
+ # pre-dispatch (likely to happen in a different thread)
+ with raises(ValueError, match="Iterator Raising Error"):
+ p(delayed(square)(i) for i in my_generator(raise_at=5))
+
+ # Same, but raises long after the pre-dispatch phase
+ with raises(ValueError, match="Iterator Raising Error"):
+ p(delayed(square)(i) for i in my_generator(raise_at=19))
+
+
+def consumer(queue, item):
+ queue.append('Consumed %s' % item)
+
+
+@parametrize('backend', BACKENDS)
+@parametrize('batch_size, expected_queue',
+ [(1, ['Produced 0', 'Consumed 0',
+ 'Produced 1', 'Consumed 1',
+ 'Produced 2', 'Consumed 2',
+ 'Produced 3', 'Consumed 3',
+ 'Produced 4', 'Consumed 4',
+ 'Produced 5', 'Consumed 5']),
+ (4, [ # First Batch
+ 'Produced 0', 'Produced 1', 'Produced 2', 'Produced 3',
+ 'Consumed 0', 'Consumed 1', 'Consumed 2', 'Consumed 3',
+ # Second batch
+ 'Produced 4', 'Produced 5', 'Consumed 4', 'Consumed 5'])])
+def test_dispatch_one_job(backend, batch_size, expected_queue):
+ """ Test that with only one job, Parallel does act as a iterator.
+ """
+ queue = list()
+
+ def producer():
+ for i in range(6):
+ queue.append('Produced %i' % i)
+ yield i
+
+ Parallel(n_jobs=1, batch_size=batch_size, backend=backend)(
+ delayed(consumer)(queue, x) for x in producer())
+ assert queue == expected_queue
+ assert len(queue) == 12
+
+
+@with_multiprocessing
+@parametrize('backend', PARALLEL_BACKENDS)
+def test_dispatch_multiprocessing(backend):
+ """ Check that using pre_dispatch Parallel does indeed dispatch items
+ lazily.
+ """
+ manager = mp.Manager()
+ queue = manager.list()
+
+ def producer():
+ for i in range(6):
+ queue.append('Produced %i' % i)
+ yield i
+
+ Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)(
+ delayed(consumer)(queue, 'any') for _ in producer())
+
+ queue_contents = list(queue)
+ assert queue_contents[0] == 'Produced 0'
+
+ # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only
+ # after any of the first 3 jobs have completed.
+ first_consumption_index = queue_contents[:4].index('Consumed any')
+ assert first_consumption_index > -1
+
+ produced_3_index = queue_contents.index('Produced 3') # 4th task produced
+ assert produced_3_index > first_consumption_index
+
+ assert len(queue) == 12
+
+
+def test_batching_auto_threading():
+ # batching='auto' with the threading backend leaves the effective batch
+ # size to 1 (no batching) as it has been found to never be beneficial with
+ # this low-overhead backend.
+
+ with Parallel(n_jobs=2, batch_size='auto', backend='threading') as p:
+ p(delayed(id)(i) for i in range(5000)) # many very fast tasks
+ assert p._backend.compute_batch_size() == 1
+
+
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_batching_auto_subprocesses(backend):
+ with Parallel(n_jobs=2, batch_size='auto', backend=backend) as p:
+ p(delayed(id)(i) for i in range(5000)) # many very fast tasks
+
+ # It should be strictly larger than 1 but, as we don't want heisen
+ # failures on clogged CI worker environments, be safe and only check that
+ # it's a strictly positive number.
+ assert p._backend.compute_batch_size() > 0
+
+
+def test_exception_dispatch():
+ """Make sure that exception raised during dispatch are indeed captured"""
+ with raises(ValueError):
+ Parallel(n_jobs=2, pre_dispatch=16, verbose=0)(
+ delayed(exception_raiser)(i) for i in range(30))
+
+
+def nested_function_inner(i):
+ Parallel(n_jobs=2)(
+ delayed(exception_raiser)(j) for j in range(30))
+
+
+def nested_function_outer(i):
+ Parallel(n_jobs=2)(
+ delayed(nested_function_inner)(j) for j in range(30))
+
+
+@with_multiprocessing
+@parametrize('backend', PARALLEL_BACKENDS)
+@pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255")
+def test_nested_exception_dispatch(backend):
+ """Ensure errors for nested joblib cases gets propagated
+
+ We rely on the Python 3 built-in __cause__ system that already
+ report this kind of information to the user.
+ """
+ with raises(ValueError) as excinfo:
+ Parallel(n_jobs=2, backend=backend)(
+ delayed(nested_function_outer)(i) for i in range(30))
+
+ # Check that important information such as function names are visible
+ # in the final error message reported to the user
+ report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb)
+ report = "".join(report_lines)
+ assert 'nested_function_outer' in report
+ assert 'nested_function_inner' in report
+ assert 'exception_raiser' in report
+
+ assert type(excinfo.value) is ValueError
+
+
+class FakeParallelBackend(SequentialBackend):
+ """Pretends to run concurrently while running sequentially."""
+
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
+ self.n_jobs = self.effective_n_jobs(n_jobs)
+ self.parallel = parallel
+ return n_jobs
+
+ def effective_n_jobs(self, n_jobs=1):
+ if n_jobs < 0:
+ n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
+ return n_jobs
+
+
+def test_invalid_backend():
+ with raises(ValueError, match="Invalid backend:"):
+ Parallel(backend='unit-testing')
+
+ with raises(ValueError, match="Invalid backend:"):
+ with parallel_config(backend='unit-testing'):
+ pass
+
+ with raises(ValueError, match="Invalid backend:"):
+ with parallel_config(backend='unit-testing'):
+ pass
+
+
+@parametrize('backend', ALL_VALID_BACKENDS)
+def test_invalid_njobs(backend):
+ with raises(ValueError) as excinfo:
+ Parallel(n_jobs=0, backend=backend)._initialize_backend()
+ assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value)
+
+ with raises(ValueError) as excinfo:
+ Parallel(n_jobs=0.5, backend=backend)._initialize_backend()
+ assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value)
+
+ with raises(ValueError) as excinfo:
+ Parallel(n_jobs="2.3", backend=backend)._initialize_backend()
+ assert "n_jobs could not be converted to int" in str(excinfo.value)
+
+ with raises(ValueError) as excinfo:
+ Parallel(n_jobs="invalid_str", backend=backend)._initialize_backend()
+ assert "n_jobs could not be converted to int" in str(excinfo.value)
+
+
+@with_multiprocessing
+@parametrize('backend', PARALLEL_BACKENDS)
+@parametrize('n_jobs', ['2', 2.3, 2])
+def test_njobs_converted_to_int(backend, n_jobs):
+ p = Parallel(n_jobs=n_jobs, backend=backend)
+ assert p._effective_n_jobs() == 2
+
+ res = p(delayed(square)(i) for i in range(10))
+ assert all(r == square(i) for i, r in enumerate(res))
+
+
+def test_register_parallel_backend():
+ try:
+ register_parallel_backend("test_backend", FakeParallelBackend)
+ assert "test_backend" in BACKENDS
+ assert BACKENDS["test_backend"] == FakeParallelBackend
+ finally:
+ del BACKENDS["test_backend"]
+
+
+def test_overwrite_default_backend():
+ assert _active_backend_type() == DefaultBackend
+ try:
+ register_parallel_backend("threading", BACKENDS["threading"],
+ make_default=True)
+ assert _active_backend_type() == ThreadingBackend
+ finally:
+ # Restore the global default manually
+ parallel.DEFAULT_BACKEND = DEFAULT_BACKEND
+ assert _active_backend_type() == DefaultBackend
+
+
+@skipif(mp is not None, reason="Only without multiprocessing")
+def test_backend_no_multiprocessing():
+ with warns(UserWarning,
+ match="joblib backend '.*' is not available on.*"):
+ Parallel(backend='loky')(delayed(square)(i) for i in range(3))
+
+ # The below should now work without problems
+ with parallel_config(backend='loky'):
+ Parallel()(delayed(square)(i) for i in range(3))
+
+
+def check_backend_context_manager(context, backend_name):
+ with context(backend_name, n_jobs=3):
+ active_backend, active_n_jobs = parallel.get_active_backend()
+ assert active_n_jobs == 3
+ assert effective_n_jobs(3) == 3
+ p = Parallel()
+ assert p.n_jobs == 3
+ if backend_name == 'multiprocessing':
+ assert type(active_backend) is MultiprocessingBackend
+ assert type(p._backend) is MultiprocessingBackend
+ elif backend_name == 'loky':
+ assert type(active_backend) is LokyBackend
+ assert type(p._backend) is LokyBackend
+ elif backend_name == 'threading':
+ assert type(active_backend) is ThreadingBackend
+ assert type(p._backend) is ThreadingBackend
+ elif backend_name.startswith('test_'):
+ assert type(active_backend) is FakeParallelBackend
+ assert type(p._backend) is FakeParallelBackend
+
+
+all_backends_for_context_manager = PARALLEL_BACKENDS[:]
+all_backends_for_context_manager.extend(
+ ['test_backend_%d' % i for i in range(3)]
+)
+
+
+@with_multiprocessing
+@parametrize('backend', all_backends_for_context_manager)
+@parametrize('context', [parallel_backend, parallel_config])
+def test_backend_context_manager(monkeypatch, backend, context):
+ if backend not in BACKENDS:
+ monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend)
+
+ assert _active_backend_type() == DefaultBackend
+ # check that it is possible to switch parallel backends sequentially
+ check_backend_context_manager(context, backend)
+
+ # The default backend is restored
+ assert _active_backend_type() == DefaultBackend
+
+ # Check that context manager switching is thread safe:
+ Parallel(n_jobs=2, backend='threading')(
+ delayed(check_backend_context_manager)(context, b)
+ for b in all_backends_for_context_manager if not b)
+
+ # The default backend is again restored
+ assert _active_backend_type() == DefaultBackend
+
+
+class ParameterizedParallelBackend(SequentialBackend):
+ """Pretends to run conncurrently while running sequentially."""
+
+ def __init__(self, param=None):
+ if param is None:
+ raise ValueError('param should not be None')
+ self.param = param
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_parameterized_backend_context_manager(monkeypatch, context):
+ monkeypatch.setitem(BACKENDS, 'param_backend',
+ ParameterizedParallelBackend)
+ assert _active_backend_type() == DefaultBackend
+
+ with context('param_backend', param=42, n_jobs=3):
+ active_backend, active_n_jobs = parallel.get_active_backend()
+ assert type(active_backend) is ParameterizedParallelBackend
+ assert active_backend.param == 42
+ assert active_n_jobs == 3
+ p = Parallel()
+ assert p.n_jobs == 3
+ assert p._backend is active_backend
+ results = p(delayed(sqrt)(i) for i in range(5))
+ assert results == [sqrt(i) for i in range(5)]
+
+ # The default backend is again restored
+ assert _active_backend_type() == DefaultBackend
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_directly_parameterized_backend_context_manager(context):
+ assert _active_backend_type() == DefaultBackend
+
+ # Check that it's possible to pass a backend instance directly,
+ # without registration
+ with context(ParameterizedParallelBackend(param=43), n_jobs=5):
+ active_backend, active_n_jobs = parallel.get_active_backend()
+ assert type(active_backend) is ParameterizedParallelBackend
+ assert active_backend.param == 43
+ assert active_n_jobs == 5
+ p = Parallel()
+ assert p.n_jobs == 5
+ assert p._backend is active_backend
+ results = p(delayed(sqrt)(i) for i in range(5))
+ assert results == [sqrt(i) for i in range(5)]
+
+ # The default backend is again restored
+ assert _active_backend_type() == DefaultBackend
+
+
+def sleep_and_return_pid():
+ sleep(.1)
+ return os.getpid()
+
+
+def get_nested_pids():
+ assert _active_backend_type() == ThreadingBackend
+ # Assert that the nested backend does not change the default number of
+ # jobs used in Parallel
+ assert Parallel()._effective_n_jobs() == 1
+
+ # Assert that the tasks are running only on one process
+ return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)()
+ for _ in range(2))
+
+
+class MyBackend(joblib._parallel_backends.LokyBackend):
+ """Backend to test backward compatibility with older backends"""
+ def get_nested_backend(self, ):
+ # Older backends only return a backend, without n_jobs indications.
+ return super(MyBackend, self).get_nested_backend()[0]
+
+
+register_parallel_backend('back_compat_backend', MyBackend)
+
+
+@with_multiprocessing
+@parametrize('backend', ['threading', 'loky', 'multiprocessing',
+ 'back_compat_backend'])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_nested_backend_context_manager(context, backend):
+ # Check that by default, nested parallel calls will always use the
+ # ThreadingBackend
+
+ with context(backend):
+ pid_groups = Parallel(n_jobs=2)(
+ delayed(get_nested_pids)()
+ for _ in range(10)
+ )
+ for pid_group in pid_groups:
+ assert len(set(pid_group)) == 1
+
+
+@with_multiprocessing
+@parametrize('n_jobs', [2, -1, None])
+@parametrize('backend', PARALLEL_BACKENDS)
+@parametrize("context", [parallel_config, parallel_backend])
+def test_nested_backend_in_sequential(backend, n_jobs, context):
+ # Check that by default, nested parallel calls will always use the
+ # ThreadingBackend
+
+ def check_nested_backend(expected_backend_type, expected_n_job):
+ # Assert that the sequential backend at top level, does not change the
+ # backend for nested calls.
+ assert _active_backend_type() == BACKENDS[expected_backend_type]
+
+ # Assert that the nested backend in SequentialBackend does not change
+ # the default number of jobs used in Parallel
+ expected_n_job = effective_n_jobs(expected_n_job)
+ assert Parallel()._effective_n_jobs() == expected_n_job
+
+ Parallel(n_jobs=1)(
+ delayed(check_nested_backend)(DEFAULT_BACKEND, 1)
+ for _ in range(10)
+ )
+
+ with context(backend, n_jobs=n_jobs):
+ Parallel(n_jobs=1)(
+ delayed(check_nested_backend)(backend, n_jobs)
+ for _ in range(10)
+ )
+
+
+def check_nesting_level(context, inner_backend, expected_level):
+ with context(inner_backend) as ctx:
+ if context is parallel_config:
+ backend = ctx["backend"]
+ if context is parallel_backend:
+ backend = ctx[0]
+ assert backend.nesting_level == expected_level
+
+
+@with_multiprocessing
+@parametrize('outer_backend', PARALLEL_BACKENDS)
+@parametrize('inner_backend', PARALLEL_BACKENDS)
+@parametrize("context", [parallel_config, parallel_backend])
+def test_backend_nesting_level(context, outer_backend, inner_backend):
+ # Check that the nesting level for the backend is correctly set
+ check_nesting_level(context, outer_backend, 0)
+
+ Parallel(n_jobs=2, backend=outer_backend)(
+ delayed(check_nesting_level)(context, inner_backend, 1)
+ for _ in range(10)
+ )
+
+ with context(inner_backend, n_jobs=2):
+ Parallel()(delayed(check_nesting_level)(context, inner_backend, 1)
+ for _ in range(10))
+
+
+@with_multiprocessing
+@parametrize("context", [parallel_config, parallel_backend])
+@parametrize('with_retrieve_callback', [True, False])
+def test_retrieval_context(context, with_retrieve_callback):
+ import contextlib
+
+ class MyBackend(ThreadingBackend):
+ i = 0
+ supports_retrieve_callback = with_retrieve_callback
+
+ @contextlib.contextmanager
+ def retrieval_context(self):
+ self.i += 1
+ yield
+
+ register_parallel_backend("retrieval", MyBackend)
+
+ def nested_call(n):
+ return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n))
+
+ with context("retrieval") as ctx:
+ Parallel(n_jobs=2)(
+ delayed(nested_call)(i)
+ for i in range(5)
+ )
+ if context is parallel_config:
+ assert ctx["backend"].i == 1
+ if context is parallel_backend:
+ assert ctx[0].i == 1
+
+
+###############################################################################
+# Test helpers
+
+@parametrize('batch_size', [0, -1, 1.42])
+def test_invalid_batch_size(batch_size):
+ with raises(ValueError):
+ Parallel(batch_size=batch_size)
+
+
+@parametrize('n_tasks, n_jobs, pre_dispatch, batch_size',
+ [(2, 2, 'all', 'auto'),
+ (2, 2, 'n_jobs', 'auto'),
+ (10, 2, 'n_jobs', 'auto'),
+ (517, 2, 'n_jobs', 'auto'),
+ (10, 2, 'n_jobs', 'auto'),
+ (10, 4, 'n_jobs', 'auto'),
+ (200, 12, 'n_jobs', 'auto'),
+ (25, 12, '2 * n_jobs', 1),
+ (250, 12, 'all', 1),
+ (250, 12, '2 * n_jobs', 7),
+ (200, 12, '2 * n_jobs', 'auto')])
+def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size):
+ # Check that using (async-)dispatch does not yield a race condition on the
+ # iterable generator that is not thread-safe natively.
+ # This is a non-regression test for the "Pool seems closed" class of error
+ params = {'n_jobs': n_jobs, 'pre_dispatch': pre_dispatch,
+ 'batch_size': batch_size}
+ expected = [square(i) for i in range(n_tasks)]
+ results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks))
+ assert results == expected
+
+
+@with_multiprocessing
+def test_default_mp_context():
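+ # The 'multiprocessing' backend should default to the platform's
+ # multiprocessing start method.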
+ mp_start_method = mp.get_start_method()
+ p = Parallel(n_jobs=2, backend='multiprocessing')
+ context = p._backend_args.get('context')
+ start_method = context.get_start_method()
+ assert start_method == mp_start_method
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_no_blas_crash_or_freeze_with_subprocesses(backend):
+ if backend == 'multiprocessing':
+ # Use the spawn backend that is both robust and available on all
+ # platforms
+ backend = mp.get_context('spawn')
+
+ # Check that on recent Python versions, the 'spawn' start method makes it
+ # possible to use multiprocessing in conjunction with any BLAS
+ # implementation that happens to be used by numpy, without causing a
+ # freeze or a crash
+ rng = np.random.RandomState(42)
+
+ # call BLAS DGEMM to force the initialization of the internal thread-pool
+ # in the main process
+ a = rng.randn(1000, 1000)
+ np.dot(a, a.T)
+
+ # check that the internal BLAS thread-pool is not in an inconsistent state
+ # in the worker processes managed by multiprocessing
+ Parallel(n_jobs=2, backend=backend)(
+ delayed(np.dot)(a, a.T) for i in range(2))
+
+
+UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\
+from joblib import Parallel, delayed
+
+def square(x):
+ return x ** 2
+
+backend = "{}"
+if backend == "spawn":
+ from multiprocessing import get_context
+ backend = get_context(backend)
+
+print(Parallel(n_jobs=2, backend=backend)(
+ delayed(square)(i) for i in range(5)))
+"""
+
+
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_parallel_with_interactively_defined_functions(backend):
+ # When using the "-c" flag, interactive functions defined in __main__
+ # should work with any backend.
+ if backend == "multiprocessing" and mp.get_start_method() != "fork":
+ pytest.skip("Require fork start method to use interactively defined "
+ "functions with multiprocessing.")
+ code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend)
+ check_subprocess_call(
+ [sys.executable, '-c', code], timeout=10,
+ stdout_regex=r'\[0, 1, 4, 9, 16\]')
+
+
+UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\
+import sys
+# Make sure that joblib is importable in the subprocess launching this
+# script. This is needed in case we run the tests from the joblib root
+# folder without having installed joblib
+sys.path.insert(0, {joblib_root_folder!r})
+
+from joblib import Parallel, delayed
+
+def run(f, x):
+ return f(x)
+
+{define_func}
+
+if __name__ == "__main__":
+ backend = "{backend}"
+ if backend == "spawn":
+ from multiprocessing import get_context
+ backend = get_context(backend)
+
+ callable_position = "{callable_position}"
+ if callable_position == "delayed":
+ print(Parallel(n_jobs=2, backend=backend)(
+ delayed(square)(i) for i in range(5)))
+ elif callable_position == "args":
+ print(Parallel(n_jobs=2, backend=backend)(
+ delayed(run)(square, i) for i in range(5)))
+ else:
+ print(Parallel(n_jobs=2, backend=backend)(
+ delayed(run)(f=square, x=i) for i in range(5)))
+"""
+
+SQUARE_MAIN = """\
+def square(x):
+ return x ** 2
+"""
+SQUARE_LOCAL = """\
+def gen_square():
+ def square(x):
+ return x ** 2
+ return square
+square = gen_square()
+"""
+SQUARE_LAMBDA = """\
+square = lambda x: x ** 2
+"""
+
+
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS + ([] if mp is None else ['spawn']))
+@parametrize('define_func', [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA])
+@parametrize('callable_position', ['delayed', 'args', 'kwargs'])
+def test_parallel_with_unpicklable_functions_in_args(
+ backend, define_func, callable_position, tmpdir):
+ if backend in ['multiprocessing', 'spawn'] and (
+ define_func != SQUARE_MAIN or sys.platform == "win32"):
+ pytest.skip("Not picklable with pickle")
+ code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format(
+ define_func=define_func, backend=backend,
+ callable_position=callable_position,
+ joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__)))
+ code_file = tmpdir.join("unpicklable_func_script.py")
+ code_file.write(code)
+ check_subprocess_call(
+ [sys.executable, code_file.strpath], timeout=10,
+ stdout_regex=r'\[0, 1, 4, 9, 16\]')
+
+
+INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\
+import sys
+import faulthandler
+# Make sure that joblib is importable in the subprocess launching this
+# script. This is needed in case we run the tests from the joblib root
+# folder without having installed joblib
+sys.path.insert(0, {joblib_root_folder!r})
+
+from joblib import Parallel, delayed
+from functools import partial
+
+class MyClass:
+ '''Class defined in the __main__ namespace'''
+ def __init__(self, value):
+ self.value = value
+
+
+def square(x, ignored=None, ignored2=None):
+ '''Function defined in the __main__ namespace'''
+ return x.value ** 2
+
+
+square2 = partial(square, ignored2='something')
+
+# Here, we do not need the `if __name__ == "__main__":` safeguard when
+# using the default `loky` backend (even on Windows).
+
+# To make debugging easier
+faulthandler.dump_traceback_later(30, exit=True)
+
+# The following baroque function call is meant to check that joblib
+# introspection rightfully uses cloudpickle instead of the (faster) pickle
+# module of the standard library when necessary. In particular cloudpickle is
+# necessary for functions and instances of classes interactively defined in the
+# __main__ module.
+
+print(Parallel(backend="loky", n_jobs=2)(
+ delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))])
+ for i in range(5)
+))
+""".format(joblib_root_folder=os.path.dirname(
+ os.path.dirname(joblib.__file__)))
+
+
+@with_multiprocessing
+def test_parallel_with_interactively_defined_functions_loky(tmpdir):
+ # loky accepts interactive functions defined in __main__ and does not
+ # require if __name__ == '__main__' even when the __main__ module is
+ # defined by the result of the execution of a filesystem script.
+ script = tmpdir.join('joblib_interactively_defined_function.py')
+ script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT)
+ check_subprocess_call(
+ [sys.executable, script.strpath],
+ stdout_regex=r'\[0, 1, 4, 9, 16\]',
+ timeout=None, # rely on faulthandler to kill the process
+ )
+
+
+INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\
+import sys
+# Make sure that joblib is importable in the subprocess launching this
+# script. This is needed in case we run the tests from the joblib root
+# folder without having installed joblib
+sys.path.insert(0, {joblib_root_folder!r})
+
+from joblib import Parallel, delayed, hash
+import multiprocessing as mp
+mp.util.log_to_stderr(5)
+
+class MyList(list):
+ '''MyList is interactively defined but MyList.append is a built-in'''
+ def __hash__(self):
+ # XXX: workaround limitation in cloudpickle
+ return hash(self).__hash__()
+
+l = MyList()
+
+print(Parallel(backend="loky", n_jobs=2)(
+ delayed(l.append)(i) for i in range(3)
+))
+""".format(joblib_root_folder=os.path.dirname(
+ os.path.dirname(joblib.__file__)))
+
+
+@with_multiprocessing
+def test_parallel_with_interactively_defined_bound_method_loky(tmpdir):
+ script = tmpdir.join('joblib_interactive_bound_method_script.py')
+ script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT)
+ check_subprocess_call([sys.executable, script.strpath],
+ stdout_regex=r'\[None, None, None\]',
+ stderr_regex=r'LokyProcess',
+ timeout=15)
+
+
+def test_parallel_with_exhausted_iterator():
+ exhausted_iterator = iter([])
+ assert Parallel(n_jobs=2)(exhausted_iterator) == []
+
+
+def _cleanup_worker():
+ """Helper function to force gc in each worker."""
+ force_gc_pypy()
+ time.sleep(.1)
+
+
+def check_memmap(a):
+ if not isinstance(a, np.memmap):
+ raise TypeError('Expected np.memmap instance, got %r',
+ type(a))
+ return a.copy() # return a regular array instead of a memmap
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_auto_memmap_on_arrays_from_generator(backend):
+ # Non-regression test for a problem with a bad interaction between the
+ # GC collecting arrays recently created during iteration inside the
+ # parallel dispatch loop and the auto-memmap feature of Parallel.
+ # See: https://github.com/joblib/joblib/pull/294
+ def generate_arrays(n):
+ for i in range(n):
+ yield np.ones(10, dtype=np.float32) * i
+ # Use max_nbytes=1 to force the use of memory-mapping even for small
+ # arrays
+ results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)(
+ delayed(check_memmap)(a) for a in generate_arrays(100))
+ for result, expected in zip(results, generate_arrays(len(results))):
+ np.testing.assert_array_equal(expected, result)
+
+ # Second call to force loky to adapt the executor by growing the number
+ # of worker processes. This is a non-regression test for:
+ # https://github.com/joblib/joblib/issues/629.
+ results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)(
+ delayed(check_memmap)(a) for a in generate_arrays(100))
+ for result, expected in zip(results, generate_arrays(len(results))):
+ np.testing.assert_array_equal(expected, result)
+
+
+def identity(arg):
+ return arg
+
+
+@with_numpy
+@with_multiprocessing
+def test_memmap_with_big_offset(tmpdir):
+ fname = tmpdir.join('test.mmap').strpath
+ size = mmap.ALLOCATIONGRANULARITY
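+ # The second array starts more than mmap.ALLOCATIONGRANULARITY bytes into
+ # the file, exercising the big-offset code path.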
+ obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
+ dump(obj, fname)
+ memmap = load(fname, mmap_mode='r')
+ result, = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0])
+ assert isinstance(memmap[1], np.memmap)
+ assert memmap[1].offset > size
+ np.testing.assert_array_equal(obj, result)
+
+
+def test_warning_about_timeout_not_supported_by_backend():
+ with warnings.catch_warnings(record=True) as warninfo:
+ Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50))
+ assert len(warninfo) == 1
+ w = warninfo[0]
+ assert isinstance(w.message, UserWarning)
+ assert str(w.message) == (
+ "The backend class 'SequentialBackend' does not support timeout. "
+ "You have set 'timeout=1' in Parallel but the 'timeout' parameter "
+ "will not be used.")
+
+
+def set_list_value(input_list, index, value):
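+ # Write `value` at position `index` of `input_list` and return it, so that
+ # the side-effect order can be compared with the order of the results.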
+ input_list[index] = value
+ return value
+
+
+@pytest.mark.parametrize('n_jobs', [1, 2, 4])
+def test_parallel_return_order_with_return_as_generator_parameter(n_jobs):
+ # This test inserts values into a list in a known order and then checks
+ # that the Parallel output generator returns the results in that same
+ # order.
+ input_list = [0] * 5
+ result = Parallel(n_jobs=n_jobs, return_as="generator",
+ backend='threading')(
+ delayed(set_list_value)(input_list, i, i) for i in range(5))
+
+ # Ensure that all the tasks are completed before checking the result
+ result = list(result)
+
+ assert all(v == r for v, r in zip(input_list, result))
+
+
+def _sqrt_with_delay(e, delay):
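+ # Return sqrt(e), sleeping 30 seconds first when `delay` is truthy, to
+ # emulate one very slow task in the generator_unordered tests below.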
+ if delay:
+ sleep(30)
+ return sqrt(e)
+
+
+def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
+ # This test submits 10 tasks, the second of which is very slow, and checks
+ # that the 9 other tasks are returned before the slow one completes when
+ # the `return_as` parameter is set to 'generator_unordered'.
+ result = Parallel(n_jobs=n_jobs, return_as="generator_unordered",
+ backend=backend)(
+ delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10))
+
+ quickly_returned = sorted(next(result) for _ in range(9))
+
+ expected_quickly_returned = [0] + list(range(2, 10))
+
+ assert all(
+ v == r for v, r in zip(expected_quickly_returned, quickly_returned)
+ )
+
+ del result
+ force_gc_pypy()
+
+
+@pytest.mark.parametrize('n_jobs', [2, 4])
+# NB: for this test to work, the backend must be allowed to process tasks
+# concurrently, so at least two jobs with a non-sequential backend are
+# mandatory.
+@with_multiprocessing
+@parametrize('backend', set(RETURN_GENERATOR_BACKENDS) - {"sequential"})
+def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
+ _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs)
+
+
+@pytest.mark.parametrize('n_jobs', [2, -1])
+@parametrize("context", [parallel_config, parallel_backend])
+@skipif(distributed is None, reason='This test requires dask')
+def test_parallel_unordered_generator_returns_fastest_first_with_dask(
+ n_jobs, context
+):
+ with distributed.Client(
+ n_workers=2, threads_per_worker=2
+ ), context("dask"):
+ _test_parallel_unordered_generator_returns_fastest_first(None, n_jobs)
+
+
+@parametrize('backend', ALL_VALID_BACKENDS)
+@parametrize('n_jobs', [1, 2, -2, -1])
+def test_abort_backend(n_jobs, backend):
+ delays = ["a"] + [10] * 100
+ with raises(TypeError):
+ t_start = time.time()
+ Parallel(n_jobs=n_jobs, backend=backend)(
+ delayed(time.sleep)(i) for i in delays)
+ dt = time.time() - t_start
+ assert dt < 20
+
+
+def get_large_object(arg):
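+ # Return a ~500 kB boolean array so that sending the result back to the
+ # parent takes a noticeable amount of time.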
+ result = np.ones(int(5 * 1e5), dtype=bool)
+ result[0] = False
+ return result
+
+
+def _test_deadlock_with_generator(backend, return_as, n_jobs):
+ # Non-regression test for a race condition in the backends when the pickler
+ # is delayed by a large object.
+ with Parallel(n_jobs=n_jobs, backend=backend,
+ return_as=return_as) as parallel:
+ result = parallel(delayed(get_large_object)(i) for i in range(10))
+ next(result)
+ next(result)
+ del result
+ # The gc in pypy can be delayed. Force it to make sure this test does
+ # not cause timeout on the CI.
+ force_gc_pypy()
+
+
+@with_numpy
+@parametrize('backend', RETURN_GENERATOR_BACKENDS)
+@parametrize('return_as', ["generator", "generator_unordered"])
+@parametrize('n_jobs', [1, 2, -2, -1])
+def test_deadlock_with_generator(backend, return_as, n_jobs):
+ _test_deadlock_with_generator(backend, return_as, n_jobs)
+
+
+@with_numpy
+@pytest.mark.parametrize('n_jobs', [2, -1])
+@parametrize('return_as', ["generator", "generator_unordered"])
+@parametrize("context", [parallel_config, parallel_backend])
+@skipif(distributed is None, reason='This test requires dask')
+def test_deadlock_with_generator_and_dask(context, return_as, n_jobs):
+ with distributed.Client(
+ n_workers=2, threads_per_worker=2
+ ), context("dask"):
+ _test_deadlock_with_generator(None, return_as, n_jobs)
+
+
+@parametrize('backend', RETURN_GENERATOR_BACKENDS)
+@parametrize('return_as', ["generator", "generator_unordered"])
+@parametrize('n_jobs', [1, 2, -2, -1])
+def test_multiple_generator_call(backend, return_as, n_jobs):
+ # Non-regression test that ensures the dispatch of the tasks starts
+ # immediately when Parallel.__call__ is called. This test relies on the
+ # assumption that only one generator can be submitted at a time.
+ with raises(RuntimeError,
+ match="This Parallel instance is already running"):
+ parallel = Parallel(n_jobs, backend=backend, return_as=return_as)
+ g = parallel(delayed(sleep)(1) for _ in range(10)) # noqa: F841
+ t_start = time.time()
+ gen2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841
+
+ # Make sure that the error is raised quickly
+ assert time.time() - t_start < 2, (
+ "The error should be raised immediatly when submitting a new task "
+ "but it took more than 2s."
+ )
+
+ del g
+ # The gc in pypy can be delayed. Force it to make sure this test does not
+ # cause timeout on the CI.
+ force_gc_pypy()
+
+
+@parametrize('backend', RETURN_GENERATOR_BACKENDS)
+@parametrize('return_as', ["generator", "generator_unordered"])
+@parametrize('n_jobs', [1, 2, -2, -1])
+def test_multiple_generator_call_managed(backend, return_as, n_jobs):
+ # Non-regression test that ensures the dispatch of the tasks starts
+ # immediately when Parallel.__call__ is called. This test relies on the
+ # assumption that only one generator can be submitted at a time.
+ with Parallel(n_jobs, backend=backend,
+ return_as=return_as) as parallel:
+ g = parallel(delayed(sleep)(10) for _ in range(10)) # noqa: F841
+ t_start = time.time()
+ with raises(RuntimeError,
+ match="This Parallel instance is already running"):
+ g2 = parallel(delayed(id)(i) for i in range(100)) # noqa: F841
+
+ # Make sure that the error is raised quickly
+ assert time.time() - t_start < 2, (
+ "The error should be raised immediatly when submitting a new task "
+ "but it took more than 2s."
+ )
+
+ # The gc in pypy can be delayed. Force it to make sure this test does not
+ # cause timeout on the CI.
+ del g
+ force_gc_pypy()
+
+
+@parametrize('backend', RETURN_GENERATOR_BACKENDS)
+@parametrize('return_as_1', ["generator", "generator_unordered"])
+@parametrize('return_as_2', ["generator", "generator_unordered"])
+@parametrize('n_jobs', [1, 2, -2, -1])
+def test_multiple_generator_call_separated(
+ backend, return_as_1, return_as_2, n_jobs
+):
+ # Check that two separate Parallel instances both return their results
+ # correctly.
+ g = Parallel(n_jobs, backend=backend, return_as=return_as_1)(
+ delayed(sqrt)(i ** 2) for i in range(10)
+ )
+ g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)(
+ delayed(sqrt)(i ** 2) for i in range(10, 20)
+ )
+
+ if return_as_1 == "generator_unordered":
+ g = sorted(g)
+
+ if return_as_2 == "generator_unordered":
+ g2 = sorted(g2)
+
+ assert all(res == i for res, i in zip(g, range(10)))
+ assert all(res == i for res, i in zip(g2, range(10, 20)))
+
+
+@parametrize('backend, error', [
+ ('loky', True),
+ ('threading', False),
+ ('sequential', False),
+])
+@parametrize('return_as_1', ["generator", "generator_unordered"])
+@parametrize('return_as_2', ["generator", "generator_unordered"])
+def test_multiple_generator_call_separated_gc(
+ backend, return_as_1, return_as_2, error
+):
+
+ if (backend == 'loky') and (mp is None):
+ pytest.skip("Requires multiprocessing")
+
+ # Check that in loky, only one call can be run at a time with
+ # a single executor.
+ parallel = Parallel(2, backend=backend, return_as=return_as_1)
+ g = parallel(delayed(sleep)(10) for i in range(10))
+ g_wr = weakref.finalize(g, lambda: print("Generator collected"))
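+ # The finalizer stays alive until `g` is garbage collected, which lets the
+ # loop below detect when the first generator has actually been collected.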
+ ctx = (
+ raises(RuntimeError, match="The executor underlying Parallel")
+ if error else nullcontext()
+ )
+ with ctx:
+ # For loky, this call will raise an error as the gc of the previous
+ # generator will shut down the shared executor.
+ # For the other backends, as the worker pools are not shared between
+ # the two calls, this should proceed correctly.
+ t_start = time.time()
+ g = Parallel(2, backend=backend, return_as=return_as_2)(
+ delayed(sqrt)(i ** 2) for i in range(10, 20)
+ )
+
+ # The gc in pypy can be delayed. Force it to test the behavior when it
+ # will eventually be collected.
+ force_gc_pypy()
+
+ if return_as_2 == "generator_unordered":
+ g = sorted(g)
+
+ assert all(res == i for res, i in zip(g, range(10, 20)))
+
+ assert time.time() - t_start < 5
+
+ # Make sure that the computations are stopped for the gc'ed generator
+ retry = 0
+ while g_wr.alive and retry < 3:
+ retry += 1
+ time.sleep(.5)
+ assert time.time() - t_start < 5
+
+ if parallel._effective_n_jobs() != 1:
+ # check that the first parallel object is aborting (the final _aborted
+ # state might be delayed).
+ assert parallel._aborting
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_memmapping_leaks(backend, tmpdir):
+ # Non-regression test for memmapping backends: ensure that the temporary
+ # memmapped data is cleaned up promptly and does not linger around.
+ tmpdir = tmpdir.strpath
+
+ # Use max_nbytes=1 to force the use of memory-mapping even for small
+ # arrays
+ with Parallel(n_jobs=2, max_nbytes=1, backend=backend,
+ temp_folder=tmpdir) as p:
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+
+ # The memmap folder should not be empty while inside the context scope
+ assert len(os.listdir(tmpdir)) > 0
+
+ # Cleaning of the memmap folder is triggered by the garbage
+ # collection. With pypy the garbage collection has been observed to be
+ # delayed, sometimes up until the shutdown of the interpreter. This
+ # cleanup job executed in the worker ensures that it's triggered
+ # immediately.
+ p(delayed(_cleanup_worker)() for _ in range(2))
+
+ # Make sure that the shared memory is cleaned at the end when we exit
+ # the context
+ for _ in range(100):
+ if not os.listdir(tmpdir):
+ break
+ sleep(.1)
+ else:
+ raise AssertionError('temporary directory of Parallel was not removed')
+
+ # Make sure that the shared memory is cleaned at the end of a call
+ p = Parallel(n_jobs=2, max_nbytes=1, backend=backend)
+ p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+ p(delayed(_cleanup_worker)() for _ in range(2))
+
+ for _ in range(100):
+ if not os.listdir(tmpdir):
+ break
+ sleep(.1)
+ else:
+ raise AssertionError('temporary directory of Parallel was not removed')
+
+
+@parametrize('backend',
+ ([None, 'threading'] if mp is None
+ else [None, 'loky', 'threading'])
+ )
+def test_lambda_expression(backend):
+ # cloudpickle is used to pickle delayed callables
+ results = Parallel(n_jobs=2, backend=backend)(
+ delayed(lambda x: x ** 2)(i) for i in range(10))
+ assert results == [i ** 2 for i in range(10)]
+
+
+@with_multiprocessing
+@parametrize('backend', PROCESS_BACKENDS)
+def test_backend_batch_statistics_reset(backend):
+ """Test that a parallel backend correctly resets its batch statistics."""
+ n_jobs = 2
+ n_inputs = 500
+ task_time = 2. / n_inputs
+
+ p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend)
+ p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+ assert (p._backend._effective_batch_size ==
+ p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
+ assert (p._backend._smoothed_batch_duration ==
+ p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
+
+ p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+ assert (p._backend._effective_batch_size ==
+ p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
+ assert (p._backend._smoothed_batch_duration ==
+ p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
+
+
+@with_multiprocessing
+@parametrize("context", [parallel_config, parallel_backend])
+def test_backend_hinting_and_constraints(context):
+ for n_jobs in [1, 2, -1]:
+ assert type(Parallel(n_jobs=n_jobs)._backend) == DefaultBackend
+
+ p = Parallel(n_jobs=n_jobs, prefer='threads')
+ assert type(p._backend) is ThreadingBackend
+
+ p = Parallel(n_jobs=n_jobs, prefer='processes')
+ assert type(p._backend) is DefaultBackend
+
+ p = Parallel(n_jobs=n_jobs, require='sharedmem')
+ assert type(p._backend) is ThreadingBackend
+
+ # Explicit backend selection can override backend hinting although it
+ # is useless to pass a hint when selecting a backend.
+ p = Parallel(n_jobs=2, backend='loky', prefer='threads')
+ assert type(p._backend) is LokyBackend
+
+ with context('loky', n_jobs=2):
+ # Explicit backend selection by the user with the context manager
+ # should be respected when combined with backend hints only.
+ p = Parallel(prefer='threads')
+ assert type(p._backend) is LokyBackend
+ assert p.n_jobs == 2
+
+ with context('loky', n_jobs=2):
+ # Locally hard-coded n_jobs value is respected.
+ p = Parallel(n_jobs=3, prefer='threads')
+ assert type(p._backend) is LokyBackend
+ assert p.n_jobs == 3
+
+ with context('loky', n_jobs=2):
+ # Explicit backend selection by the user with the context manager
+ # should be ignored when the Parallel call has hard constraints.
+ # In this case, the default backend that supports shared mem is
+ # used and the default number of processes is used.
+ p = Parallel(require='sharedmem')
+ assert type(p._backend) is ThreadingBackend
+ assert p.n_jobs == 1
+
+ with context('loky', n_jobs=2):
+ p = Parallel(n_jobs=3, require='sharedmem')
+ assert type(p._backend) is ThreadingBackend
+ assert p.n_jobs == 3
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_backend_hinting_and_constraints_with_custom_backends(
+ capsys, context
+):
+ # Custom backends can declare that they use threads and have shared memory
+ # semantics:
+ class MyCustomThreadingBackend(ParallelBackendBase):
+ supports_sharedmem = True
+ use_threads = True
+
+ def apply_async(self):
+ pass
+
+ def effective_n_jobs(self, n_jobs):
+ return n_jobs
+
+ with context(MyCustomThreadingBackend()):
+ p = Parallel(n_jobs=2, prefer='processes') # ignored
+ assert type(p._backend) is MyCustomThreadingBackend
+
+ p = Parallel(n_jobs=2, require='sharedmem')
+ assert type(p._backend) is MyCustomThreadingBackend
+
+ class MyCustomProcessingBackend(ParallelBackendBase):
+ supports_sharedmem = False
+ use_threads = False
+
+ def apply_async(self):
+ pass
+
+ def effective_n_jobs(self, n_jobs):
+ return n_jobs
+
+ with context(MyCustomProcessingBackend()):
+ p = Parallel(n_jobs=2, prefer='processes')
+ assert type(p._backend) is MyCustomProcessingBackend
+
+ out, err = capsys.readouterr()
+ assert out == ""
+ assert err == ""
+
+ p = Parallel(n_jobs=2, require='sharedmem', verbose=10)
+ assert type(p._backend) is ThreadingBackend
+
+ out, err = capsys.readouterr()
+ expected = ("Using ThreadingBackend as joblib backend "
+ "instead of MyCustomProcessingBackend as the latter "
+ "does not provide shared memory semantics.")
+ assert out.strip() == expected
+ assert err == ""
+
+ with raises(ValueError):
+ Parallel(backend=MyCustomProcessingBackend(), require='sharedmem')
+
+
+def test_invalid_backend_hinting_and_constraints():
+ with raises(ValueError):
+ Parallel(prefer='invalid')
+
+ with raises(ValueError):
+ Parallel(require='invalid')
+
+ with raises(ValueError):
+ # It is inconsistent to prefer process-based parallelism while
+ # requiring shared memory semantics.
+ Parallel(prefer='processes', require='sharedmem')
+
+ if mp is not None:
+ # It is inconsistent to explicitly ask for process-based
+ # parallelism while requiring shared memory semantics.
+ with raises(ValueError):
+ Parallel(backend='loky', require='sharedmem')
+ with raises(ValueError):
+ Parallel(backend='multiprocessing', require='sharedmem')
+
+
+def _recursive_backend_info(limit=3, **kwargs):
+ """Perform nested parallel calls and introspect the backend on the way"""
+
+ with Parallel(n_jobs=2) as p:
+ this_level = [(type(p._backend).__name__, p._backend.nesting_level)]
+ if limit == 0:
+ return this_level
+ results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs)
+ for i in range(1))
+ return this_level + results[0]
+
+
+@with_multiprocessing
+@parametrize('backend', ['loky', 'threading'])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_nested_parallelism_limit(context, backend):
+ with context(backend, n_jobs=2):
+ backend_types_and_levels = _recursive_backend_info()
+
+ if cpu_count() == 1:
+ second_level_backend_type = 'SequentialBackend'
+ max_level = 1
+ else:
+ second_level_backend_type = 'ThreadingBackend'
+ max_level = 2
+
+ top_level_backend_type = backend.title() + 'Backend'
+ expected_types_and_levels = [
+ (top_level_backend_type, 0),
+ (second_level_backend_type, 1),
+ ('SequentialBackend', max_level),
+ ('SequentialBackend', max_level)
+ ]
+ assert backend_types_and_levels == expected_types_and_levels
+
+
+@with_numpy
+@parametrize("context", [parallel_config, parallel_backend])
+@skipif(distributed is None, reason='This test requires dask')
+def test_nested_parallelism_with_dask(context):
+ with distributed.Client(n_workers=2, threads_per_worker=2):
+ # 10 MB of data as argument to trigger implicit scattering
+ data = np.ones(int(1e7), dtype=np.uint8)
+ for i in range(2):
+ with context('dask'):
+ backend_types_and_levels = _recursive_backend_info(data=data)
+ assert len(backend_types_and_levels) == 4
+ assert all(name == 'DaskDistributedBackend'
+ for name, _ in backend_types_and_levels)
+
+ # No argument
+ with context('dask'):
+ backend_types_and_levels = _recursive_backend_info()
+ assert len(backend_types_and_levels) == 4
+ assert all(name == 'DaskDistributedBackend'
+ for name, _ in backend_types_and_levels)
+
+
+def _recursive_parallel(nesting_limit=None):
+ """A horrible function that does recursive parallel calls"""
+ return Parallel()(delayed(_recursive_parallel)() for i in range(2))
+
+
+@pytest.mark.no_cover
+@parametrize("context", [parallel_config, parallel_backend])
+@parametrize(
+ 'backend', (['threading'] if mp is None else ['loky', 'threading'])
+)
+def test_thread_bomb_mitigation(context, backend):
+ # Test that recursive parallelism raises a recursion error rather than
+ # saturating the operating system resources by creating an unbounded
+ # number of threads.
+ with context(backend, n_jobs=2):
+ with raises(BaseException) as excinfo:
+ _recursive_parallel()
+ exc = excinfo.value
+ if backend == "loky":
+ # Local import because loky may not be importable for lack of
+ # multiprocessing
+ from joblib.externals.loky.process_executor import TerminatedWorkerError # noqa
+ if isinstance(exc, (TerminatedWorkerError, PicklingError)):
+ # The recursion exception can itself cause an error when
+ # pickling it to be sent back to the parent process. In this
+ # case the worker crashes but the original traceback is still
+ # printed on stderr. This could be improved but does not seem
+ # simple to do and this is not critical for users (as long
+ # as there is no process or thread bomb happening).
+ pytest.xfail("Loky worker crash when serializing RecursionError")
+
+ assert isinstance(exc, RecursionError)
+
+
+def _run_parallel_sum():
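+ # Executed in the workers: record the threadpool-related environment
+ # variables and run the compiled OpenMP helper, whose reported thread
+ # count is checked by the caller.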
+ env_vars = {}
+ for var in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
+ 'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS',
+ 'NUMBA_NUM_THREADS', 'ENABLE_IPC']:
+ env_vars[var] = os.environ.get(var)
+ return env_vars, parallel_sum(100)
+
+
+@parametrize("backend", ([None, 'loky'] if mp is not None else [None]))
+@skipif(parallel_sum is None, reason="Need OpenMP helper compiled")
+def test_parallel_thread_limit(backend):
+ results = Parallel(n_jobs=2, backend=backend)(
+ delayed(_run_parallel_sum)() for _ in range(2)
+ )
+ expected_num_threads = max(cpu_count() // 2, 1)
+ for worker_env_vars, omp_num_threads in results:
+ assert omp_num_threads == expected_num_threads
+ for name, value in worker_env_vars.items():
+ if name.endswith("_THREADS"):
+ assert value == str(expected_num_threads)
+ else:
+ assert name == "ENABLE_IPC"
+ assert value == "1"
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+@skipif(distributed is not None,
+ reason='This test requires dask to not be installed')
+def test_dask_backend_when_dask_not_installed(context):
+ with raises(ValueError, match='Please install dask'):
+ context('dask')
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_zero_worker_backend(context):
+ # joblib.Parallel should raise an explicit error message when given a
+ # parallel backend that has no worker.
+ class ZeroWorkerBackend(ThreadingBackend):
+ def configure(self, *args, **kwargs):
+ return 0
+
+ def apply_async(self, func, callback=None): # pragma: no cover
+ raise TimeoutError("No worker available")
+
+ def effective_n_jobs(self, n_jobs): # pragma: no cover
+ return 0
+
+ expected_msg = "ZeroWorkerBackend has no active worker"
+ with context(ZeroWorkerBackend()):
+ with pytest.raises(RuntimeError, match=expected_msg):
+ Parallel(n_jobs=2)(delayed(id)(i) for i in range(2))
+
+
+def test_globals_update_at_each_parallel_call():
+ # This is a non-regression test related to joblib issues #836 and #833.
+ # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where
+ # changes to global variables made in the parent process between two calls
+ # to joblib.Parallel would not be propagated to the workers.
+ global MY_GLOBAL_VARIABLE
+ MY_GLOBAL_VARIABLE = "original value"
+
+ def check_globals():
+ global MY_GLOBAL_VARIABLE
+ return MY_GLOBAL_VARIABLE
+
+ assert check_globals() == "original value"
+
+ workers_global_variable = Parallel(n_jobs=2)(
+ delayed(check_globals)() for i in range(2))
+ assert set(workers_global_variable) == {"original value"}
+
+ # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets
+ # propagated into the workers environment
+ MY_GLOBAL_VARIABLE = "changed value"
+ assert check_globals() == "changed value"
+
+ workers_global_variable = Parallel(n_jobs=2)(
+ delayed(check_globals)() for i in range(2))
+ assert set(workers_global_variable) == {"changed value"}
+
+
+##############################################################################
+# Test environment variables in the child environment, in particular those
+# limiting the maximal number of threads in C-library threadpools.
+#
+
+def _check_numpy_threadpool_limits():
+ import numpy as np
+ # Call BLAS on a matrix-matrix multiplication with dimensions large enough
+ # to ensure that the threadpool managed by the underlying BLAS
+ # implementation is actually used, so as to force its initialization.
+ a = np.random.randn(100, 100)
+ np.dot(a, a)
+ from threadpoolctl import threadpool_info
+ return threadpool_info()
+
+
+def _parent_max_num_threads_for(child_module, parent_info):
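+ # Return the parent's thread count for the library that backs
+ # `child_module`, matching entries by shared library filepath.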
+ for parent_module in parent_info:
+ if parent_module['filepath'] == child_module['filepath']:
+ return parent_module['num_threads']
+ raise ValueError("An unexpected module was loaded in child:\n{}"
+ .format(child_module))
+
+
+def check_child_num_threads(workers_info, parent_info, num_threads):
+ # Check that the number of threads reported in workers_info is consistent
+ # with the expectation. We need to be careful to handle the cases where
+ # the library caps its threadpool below the requested number of threads.
+ for child_threadpool_info in workers_info:
+ for child_module in child_threadpool_info:
+ parent_max_num_threads = _parent_max_num_threads_for(
+ child_module, parent_info)
+ expected = {min(num_threads, parent_max_num_threads), num_threads}
+ assert child_module['num_threads'] in expected
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('n_jobs', [2, 4, -2, -1])
+def test_threadpool_limitation_in_child_loky(n_jobs):
+ # Check that the protection against oversubscription in the workers works,
+ # using threadpoolctl to introspect the worker threadpools.
+
+ # Skip this test if numpy is not linked to a BLAS library
+ parent_info = _check_numpy_threadpool_limits()
+ if len(parent_info) == 0:
+ pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+ workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)(
+ delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+ n_jobs = effective_n_jobs(n_jobs)
+ expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+
+ check_child_num_threads(workers_threadpool_infos, parent_info,
+ expected_child_num_threads)
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize('inner_max_num_threads', [1, 2, 4, None])
+@parametrize('n_jobs', [2, -1])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_context(
+ context, n_jobs, inner_max_num_threads
+):
+ # Check that the protection against oversubscription in the workers works,
+ # using threadpoolctl to introspect the worker threadpools.
+
+ # Skip this test if numpy is not linked to a BLAS library
+ parent_info = _check_numpy_threadpool_limits()
+ if len(parent_info) == 0:
+ pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+ with context('loky', inner_max_num_threads=inner_max_num_threads):
+ workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
+ delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+ n_jobs = effective_n_jobs(n_jobs)
+ if inner_max_num_threads is None:
+ expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+ else:
+ expected_child_num_threads = inner_max_num_threads
+
+ check_child_num_threads(workers_threadpool_infos, parent_info,
+ expected_child_num_threads)
+
+
+@with_multiprocessing
+@parametrize('n_jobs', [2, -1])
+@parametrize('var_name', ["OPENBLAS_NUM_THREADS",
+ "MKL_NUM_THREADS",
+ "OMP_NUM_THREADS"])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_override(context, n_jobs, var_name):
+ # Check that environment variables set by the user on the main process
+ # always have the priority.
+
+ # Clean up the existing executor because we change the environment of the
+ # parent at runtime, and loky intentionally does not detect such changes.
+ get_reusable_executor(reuse=True).shutdown()
+
+ def _get_env(var_name):
+ return os.environ.get(var_name)
+
+ original_var_value = os.environ.get(var_name)
+ try:
+ os.environ[var_name] = "4"
+ results = Parallel(n_jobs=n_jobs)(
+ delayed(_get_env)(var_name) for i in range(2))
+ assert results == ["4", "4"]
+
+ with context('loky', inner_max_num_threads=1):
+ results = Parallel(n_jobs=n_jobs)(
+ delayed(_get_env)(var_name) for i in range(2))
+ assert results == ["1", "1"]
+
+ finally:
+ if original_var_value is None:
+ del os.environ[var_name]
+ else:
+ os.environ[var_name] = original_var_value
+
+
+@with_multiprocessing
+@parametrize('n_jobs', [2, 4, -1])
+def test_loky_reuse_workers(n_jobs):
+ # Non-regression test for issue #967 where the workers were not reused
+ # across consecutive Parallel calls.
+
+ def parallel_call(n_jobs):
+ x = range(10)
+ Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10))
+
+ # Run a parallel loop and get the workers used for computations
+ parallel_call(n_jobs)
+ first_executor = get_reusable_executor(reuse=True)
+
+ # Ensure that the workers are reused for the next calls, as the executor is
+ # not restarted.
+ for _ in range(10):
+ parallel_call(n_jobs)
+ executor = get_reusable_executor(reuse=True)
+ assert executor == first_executor
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_store_backends.py b/venv/lib/python3.10/site-packages/joblib/test/test_store_backends.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5db16757ea45fd6761e1b8cfd35e5f5a920752f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_store_backends.py
@@ -0,0 +1,94 @@
+
+try:
+ # Python 2.7: use the C pickle to speed up
+ # test_concurrency_safe_write which pickles big python objects
+ import cPickle as cpickle
+except ImportError:
+ import pickle as cpickle
+import functools
+from pickle import PicklingError
+import time
+
+import pytest
+
+from joblib.testing import parametrize, timeout
+from joblib.test.common import with_multiprocessing
+from joblib.backports import concurrency_safe_rename
+from joblib import Parallel, delayed
+from joblib._store_backends import (
+ concurrency_safe_write,
+ FileSystemStoreBackend,
+ CacheWarning,
+)
+
+
+def write_func(output, filename):
+ with open(filename, 'wb') as f:
+ cpickle.dump(output, f)
+
+
+def load_func(expected, filename):
+ for i in range(10):
+ try:
+ with open(filename, 'rb') as f:
+ reloaded = cpickle.load(f)
+ break
+ except (OSError, IOError):
+ # On Windows you can have WindowsError ([Error 5] Access
+ # is denied or [Error 13] Permission denied) when reading the file,
+ # probably because a writer process has a lock on the file
+ time.sleep(0.1)
+ else:
+ raise
+ assert expected == reloaded
+
+
+def concurrency_safe_write_rename(to_write, filename, write_func):
+ temporary_filename = concurrency_safe_write(to_write,
+ filename, write_func)
+ concurrency_safe_rename(temporary_filename, filename)
+
+
+@timeout(0) # No timeout as this test can be long
+@with_multiprocessing
+@parametrize('backend', ['multiprocessing', 'loky', 'threading'])
+def test_concurrency_safe_write(tmpdir, backend):
+ # Add one item to cache
+ filename = tmpdir.join('test.pkl').strpath
+
+ obj = {str(i): i for i in range(int(1e5))}
+ funcs = [functools.partial(concurrency_safe_write_rename,
+ write_func=write_func)
+ if i % 3 != 2 else load_func for i in range(12)]
+ Parallel(n_jobs=2, backend=backend)(
+ delayed(func)(obj, filename) for func in funcs)
+
+
+def test_warning_on_dump_failure(tmpdir):
+ # Check that a warning is raised when the dump fails for any reason but
+ # a PicklingError.
+ class UnpicklableObject(object):
+ def __reduce__(self):
+ raise RuntimeError("some exception")
+
+ backend = FileSystemStoreBackend()
+ backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+ backend.compress = None
+
+ with pytest.warns(CacheWarning, match="some exception"):
+ backend.dump_item("testpath", UnpicklableObject())
+
+
+def test_warning_on_pickling_error(tmpdir):
+ # This is separate from test_warning_on_dump_failure because in the
+ # future we will turn this into an exception.
+ class UnpicklableObject(object):
+ def __reduce__(self):
+ raise PicklingError("not picklable")
+
+ backend = FileSystemStoreBackend()
+ backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+ backend.compress = None
+
+ with pytest.warns(FutureWarning, match="not picklable"):
+ backend.dump_item("testpath", UnpicklableObject())
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_testing.py b/venv/lib/python3.10/site-packages/joblib/test/test_testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8095aa67040ce868849b89927b325895b5d8e34
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_testing.py
@@ -0,0 +1,75 @@
+import sys
+import re
+
+from joblib.testing import raises, check_subprocess_call
+
+
+def test_check_subprocess_call():
+ code = '\n'.join(['result = 1 + 2 * 3',
+ 'print(result)',
+ 'my_list = [1, 2, 3]',
+ 'print(my_list)'])
+
+ check_subprocess_call([sys.executable, '-c', code])
+
+ # Now checking stdout with a regex
+ check_subprocess_call([sys.executable, '-c', code],
+ # Regex needed for platform-specific line endings
+ stdout_regex=r'7\s{1,2}\[1, 2, 3\]')
+
+
+def test_check_subprocess_call_non_matching_regex():
+ code = '42'
+ non_matching_pattern = '_no_way_this_matches_anything_'
+
+ with raises(ValueError) as excinfo:
+ check_subprocess_call([sys.executable, '-c', code],
+ stdout_regex=non_matching_pattern)
+ excinfo.match('Unexpected stdout.+{}'.format(non_matching_pattern))
+
+
+def test_check_subprocess_call_wrong_command():
+ wrong_command = '_a_command_that_does_not_exist_'
+ with raises(OSError):
+ check_subprocess_call([wrong_command])
+
+
+def test_check_subprocess_call_non_zero_return_code():
+ code_with_non_zero_exit = '\n'.join([
+ 'import sys',
+ 'print("writing on stdout")',
+ 'sys.stderr.write("writing on stderr")',
+ 'sys.exit(123)'])
+
+ pattern = re.compile('Non-zero return code: 123.+'
+ 'Stdout:\nwriting on stdout.+'
+ 'Stderr:\nwriting on stderr', re.DOTALL)
+
+ with raises(ValueError) as excinfo:
+ check_subprocess_call([sys.executable, '-c', code_with_non_zero_exit])
+ excinfo.match(pattern)
+
+
+def test_check_subprocess_call_timeout():
+ code_timing_out = '\n'.join([
+ 'import time',
+ 'import sys',
+ 'print("before sleep on stdout")',
+ 'sys.stdout.flush()',
+ 'sys.stderr.write("before sleep on stderr")',
+ 'sys.stderr.flush()',
+ # We need to sleep for at least 2 * timeout seconds in case the SIGKILL
+ # is triggered.
+ 'time.sleep(10)',
+ 'print("process should have be killed before")',
+ 'sys.stdout.flush()'])
+
+ pattern = re.compile('Non-zero return code:.+'
+ 'Stdout:\nbefore sleep on stdout\\s+'
+ 'Stderr:\nbefore sleep on stderr',
+ re.DOTALL)
+
+ with raises(ValueError) as excinfo:
+ check_subprocess_call([sys.executable, '-c', code_timing_out],
+ timeout=1)
+ excinfo.match(pattern)
diff --git a/venv/lib/python3.10/site-packages/joblib/test/test_utils.py b/venv/lib/python3.10/site-packages/joblib/test/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4999a212c462bdb6c10e9e08fdfba74d03b05294
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/test_utils.py
@@ -0,0 +1,27 @@
+import pytest
+
+from joblib._utils import eval_expr
+
+
+@pytest.mark.parametrize(
+ "expr",
+ ["exec('import os')", "print(1)", "import os", "1+1; import os", "1^1"],
+)
+def test_eval_expr_invalid(expr):
+ with pytest.raises(
+ ValueError, match="is not a valid or supported arithmetic"
+ ):
+ eval_expr(expr)
+
+
+@pytest.mark.parametrize(
+ "expr, result",
+ [
+ ("2*6", 12),
+ ("2**6", 64),
+ ("1 + 2*3**(4) / (6 + -7)", -161.0),
+ ("(20 // 3) % 5", 1),
+ ],
+)
+def test_eval_expr_valid(expr, result):
+ assert eval_expr(expr) == result
diff --git a/venv/lib/python3.10/site-packages/joblib/test/testutils.py b/venv/lib/python3.10/site-packages/joblib/test/testutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ec8c1ba0a50da6be9fce3686ac1950b1a55ab5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/joblib/test/testutils.py
@@ -0,0 +1,8 @@
+def return_slice_of_data(arr, start_idx, end_idx):
+ return arr[start_idx:end_idx]
+
+
+def print_filename_and_raise(arr):
+ from joblib._memmapping_reducer import _get_backing_memmap
+ print(_get_backing_memmap(arr).filename)
+ raise ValueError
diff --git a/venv/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..36383e0e30515781c61b9ea2626dee77eca0ee16
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6ac5108d14d41652e4483cdf8433568a6ac233591bdc5fd57df294ede1ab9c2
+size 5026584
diff --git a/venv/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..cfd29701a6d022512849a387cf73c3adc51c0632
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6345c716b5d67adb2d3f2477c07c0b1a214a70aa7cb71101d99327aba0bfaa0
+size 4438576
diff --git a/venv/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..e8de0b967ff0bac9c48182e52cef924a2af6f011
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:719f8c4c68bbb75108c6968d2ba4160a2a35d69eb787ee348943a884ea629827
+size 3827072
diff --git a/venv/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..d72bd9fd40fcbd1a35b3fd58fecd785ae25b07a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffd895ac049ab508104855e529e09edae2b64f22b093d9ddeb50acffe7ea0073
+size 2226000
diff --git a/venv/lib/python3.10/site-packages/zstandard/_cffi.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/zstandard/_cffi.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..359b6c40271ef34fbb0b8b867ac329a25407a031
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/zstandard/_cffi.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b92eff670a98342c68e4fad475a81c93f6ab0bbd01b7d047d738f653a1542886
+size 11882792
diff --git a/venv/lib/python3.10/site-packages/zstandard/backend_c.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/zstandard/backend_c.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7546bfbcc4b91bc09e744571afd7d539b99ed592
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/zstandard/backend_c.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7703b6add7c04133294d31b7f80f02520fb233028cd1feb6fb75477b6d02971
+size 11287920