diff --git a/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9d227a0cc43c3268d15722b763bd94ad298645a1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..dfe37d52dfbbfdffc5b3181923e51c610046ff12
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/METADATA
@@ -0,0 +1,93 @@
+Metadata-Version: 2.1
+Name: MarkupSafe
+Version: 2.1.5
+Summary: Safely add untrusted strings to HTML/XML markup.
+Home-page: https://palletsprojects.com/p/markupsafe/
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/markupsafe/
+Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
+Project-URL: Chat, https://discord.gg/pallets
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE.rst
+
+MarkupSafe
+==========
+
+MarkupSafe implements a text object that escapes characters so it is
+safe to use in HTML and XML. Characters that have special meanings are
+replaced so that they display as the actual characters. This mitigates
+injection attacks, meaning untrusted user input can safely be displayed
+on a page.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+ pip install -U MarkupSafe
+
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+Examples
+--------
+
+.. code-block:: pycon
+
+ >>> from markupsafe import Markup, escape
+
+ >>> # escape replaces special characters and wraps in Markup
+ >>> escape("<script>alert(document.cookie);</script>")
+ Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
+ >>> Markup("<strong>Hello</strong>")
+ Markup('<strong>hello</strong>')
+
+ >>> escape(Markup("<strong>Hello</strong>"))
+ Markup('<strong>hello</strong>')
+
+ >>> # Markup is a str subclass
+ >>> # methods and operators escape their arguments
+ >>> template = Markup("Hello <em>{name}</em>")
+ >>> template.format(name='"World"')
+ Markup('Hello <em>&#34;World&#34;</em>')
+
+
+Donate
+------
+
+The Pallets organization develops and supports MarkupSafe and other
+popular packages. In order to grow the community of contributors and
+users, and allow the maintainers to devote more time to the projects,
+`please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Documentation: https://markupsafe.palletsprojects.com/
+- Changes: https://markupsafe.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/MarkupSafe/
+- Source Code: https://github.com/pallets/markupsafe/
+- Issue Tracker: https://github.com/pallets/markupsafe/issues/
+- Chat: https://discord.gg/pallets
diff --git a/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..1a961ed7a4d3a2d38028bbd9c5997c3bef5ad107
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/RECORD
@@ -0,0 +1,14 @@
+MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
+MarkupSafe-2.1.5.dist-info/METADATA,sha256=2dRDPam6OZLfpX0wg1JN5P3u9arqACxVSfdGmsJU7o8,3003
+MarkupSafe-2.1.5.dist-info/RECORD,,
+MarkupSafe-2.1.5.dist-info/WHEEL,sha256=1FEjxEYgybphwh9S0FO9IcZ0B-NIeM2ko8OzhFZeOeQ,152
+MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=r7VOTjUq7EMQ4v3p4R1LoVOGJg6ysfYRncLr34laRBs,10958
+markupsafe/__pycache__/__init__.cpython-310.pyc,,
+markupsafe/__pycache__/_native.cpython-310.pyc,,
+markupsafe/_native.py,sha256=GR86Qvo_GcgKmKreA1WmYN9ud17OFwkww8E-fiW-57s,1713
+markupsafe/_speedups.c,sha256=X2XvQVtIdcK4Usz70BvkzoOfjTCmQlDkkjYSn-swE0g,7083
+markupsafe/_speedups.cpython-310-x86_64-linux-gnu.so,sha256=kPt-fhZ_RG7PUbDvwmyC26ZvRJ9DvUlF3hszBIB6_xs,44240
+markupsafe/_speedups.pyi,sha256=vfMCsOgbAXRNLUXkyuyonG8uEWKYU4PDqNuMaDELAYw,229
+markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..1d812513305907d2ee59b95d161fdb54d1ab559c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: false
+Tag: cp310-cp310-manylinux_2_17_x86_64
+Tag: cp310-cp310-manylinux2014_x86_64
+
diff --git a/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..75bf729258f9daef77370b6df1a57940f90fc23f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__diff.py b/llmeval-env/lib/python3.10/site-packages/dill/__diff.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a8937534fac7f4791a2883cbc65eee9d45b56e5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/__diff.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+"""
+Module to show if an object has changed since it was memorised
+"""
+
+import builtins
+import os
+import sys
+import types
+try:
+ import numpy
+ HAS_NUMPY = True
+except ImportError:
+ HAS_NUMPY = False
+
+# pypy doesn't use reference counting
+getrefcount = getattr(sys, 'getrefcount', lambda x:0)
+
+# memo of objects indexed by id to a tuple (attributes, sequence items)
+# attributes is a dict indexed by attribute name to attribute id
+# sequence items is either a list of ids, or a dictionary of keys to ids
+memo = {}
+id_to_obj = {}
+# types that cannot have changing attributes
+builtins_types = set((str, list, dict, set, frozenset, int))
+dont_memo = set(id(i) for i in (memo, sys.modules, sys.path_importer_cache,
+ os.environ, id_to_obj))
+
+
+def get_attrs(obj):
+ """
+ Gets all the attributes of an object through its __dict__, or returns None
+ """
+ if type(obj) in builtins_types \
+ or type(obj) is type and obj in builtins_types:
+ return
+ return getattr(obj, '__dict__', None)
+
+
+def get_seq(obj, cache={str: False, frozenset: False, list: True, set: True,
+ dict: True, tuple: True, type: False,
+ types.ModuleType: False, types.FunctionType: False,
+ types.BuiltinFunctionType: False}):
+ """
+ Gets all the items in a sequence, or returns None
+ """
+ try:
+ o_type = obj.__class__
+ except AttributeError:
+ o_type = type(obj)
+ hsattr = hasattr
+ if o_type in cache:
+ if cache[o_type]:
+ if hsattr(obj, "copy"):
+ return obj.copy()
+ return obj
+ elif HAS_NUMPY and o_type in (numpy.ndarray, numpy.ma.core.MaskedConstant):
+ if obj.shape and obj.size:
+ return obj
+ else:
+ return []
+ elif hsattr(obj, "__contains__") and hsattr(obj, "__iter__") \
+ and hsattr(obj, "__len__") and hsattr(o_type, "__contains__") \
+ and hsattr(o_type, "__iter__") and hsattr(o_type, "__len__"):
+ cache[o_type] = True
+ if hsattr(obj, "copy"):
+ return obj.copy()
+ return obj
+ else:
+ cache[o_type] = False
+ return None
+
+
+def memorise(obj, force=False):
+ """
+ Adds an object to the memo, and recursively adds all the object's
+ attributes and, if it is a container, its items. Use force=True to update
+ an object already in the memo. Updating is not done recursively.
+ """
+ obj_id = id(obj)
+ if obj_id in memo and not force or obj_id in dont_memo:
+ return
+ id_ = id
+ g = get_attrs(obj)
+ if g is None:
+ attrs_id = None
+ else:
+ attrs_id = dict((key,id_(value)) for key, value in g.items())
+
+ s = get_seq(obj)
+ if s is None:
+ seq_id = None
+ elif hasattr(s, "items"):
+ seq_id = dict((id_(key),id_(value)) for key, value in s.items())
+ elif not hasattr(s, "__len__"): #XXX: avoid TypeError from unexpected case
+ seq_id = None
+ else:
+ seq_id = [id_(i) for i in s]
+
+ memo[obj_id] = attrs_id, seq_id
+ id_to_obj[obj_id] = obj
+ mem = memorise
+ if g is not None:
+ [mem(value) for key, value in g.items()]
+
+ if s is not None:
+ if hasattr(s, "items"):
+ [(mem(key), mem(item))
+ for key, item in s.items()]
+ else:
+ if hasattr(s, '__len__'):
+ [mem(item) for item in s]
+ else: mem(s)
+
+
+def release_gone():
+ itop, mp, src = id_to_obj.pop, memo.pop, getrefcount
+ [(itop(id_), mp(id_)) for id_, obj in list(id_to_obj.items())
+ if src(obj) < 4] #XXX: correct for pypy?
+
+
+def whats_changed(obj, seen=None, simple=False, first=True):
+ """
+ Check an object against the memo. Returns a pair of the form
+ (attribute changes, container changed). Attribute changes is a dict
+ mapping attribute names to attribute values; container changed is a
+ boolean. If simple is true, just returns a boolean. None for either
+ item means that it has not been checked yet.
+ """
+ # Special cases
+ if first:
+ # ignore the _ variable, which only appears in interactive sessions
+ if "_" in builtins.__dict__:
+ del builtins._
+ if seen is None:
+ seen = {}
+
+ obj_id = id(obj)
+
+ if obj_id in seen:
+ if simple:
+ return any(seen[obj_id])
+ return seen[obj_id]
+
+ # Safety checks
+ if obj_id in dont_memo:
+ seen[obj_id] = [{}, False]
+ if simple:
+ return False
+ return seen[obj_id]
+ elif obj_id not in memo:
+ if simple:
+ return True
+ else:
+ raise RuntimeError("Object not memorised " + str(obj))
+
+ seen[obj_id] = ({}, False)
+
+ chngd = whats_changed
+ id_ = id
+
+ # compare attributes
+ attrs = get_attrs(obj)
+ if attrs is None:
+ changed = {}
+ else:
+ obj_attrs = memo[obj_id][0]
+ obj_get = obj_attrs.get
+ changed = dict((key,None) for key in obj_attrs if key not in attrs)
+ for key, o in attrs.items():
+ if id_(o) != obj_get(key, None) or chngd(o, seen, True, False):
+ changed[key] = o
+
+ # compare sequence
+ items = get_seq(obj)
+ seq_diff = False
+ if (items is not None) and (hasattr(items, '__len__')):
+ obj_seq = memo[obj_id][1]
+ if (len(items) != len(obj_seq)):
+ seq_diff = True
+ elif hasattr(obj, "items"): # dict type obj
+ obj_get = obj_seq.get
+ for key, item in items.items():
+ if id_(item) != obj_get(id_(key)) \
+ or chngd(key, seen, True, False) \
+ or chngd(item, seen, True, False):
+ seq_diff = True
+ break
+ else:
+ for i, j in zip(items, obj_seq): # list type obj
+ if id_(i) != j or chngd(i, seen, True, False):
+ seq_diff = True
+ break
+ seen[obj_id] = changed, seq_diff
+ if simple:
+ return changed or seq_diff
+ return changed, seq_diff
+
+
+def has_changed(*args, **kwds):
+ kwds['simple'] = True # ignore simple if passed in
+ return whats_changed(*args, **kwds)
+
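+# Usage sketch (illustrative, not part of the original module): memorise
+# an object, mutate it, then check it against the memo.
+#
+#   >>> from dill.__diff import memorise, has_changed
+#   >>> obj = {'a': 1}
+#   >>> memorise(obj)
+#   >>> has_changed(obj)
+#   False
+#   >>> obj['b'] = 2
+#   >>> has_changed(obj)   # the container's items changed
+#   True
+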
+__import__ = __import__
+
+
+def _imp(*args, **kwds):
+ """
+ Replaces the default __import__, to allow a module to be memorised
+ before the user can change it
+ """
+ before = set(sys.modules.keys())
+ mod = __import__(*args, **kwds)
+ after = set(sys.modules.keys()).difference(before)
+ for m in after:
+ memorise(sys.modules[m])
+ return mod
+
+builtins.__import__ = _imp
+if hasattr(builtins, "_"):
+ del builtins._
+
+# memorise all already-imported modules. This implies that this module must
+# be imported first for any changes to be recorded.
+for mod in list(sys.modules.values()):
+ memorise(mod)
+release_gone()
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__info__.py b/llmeval-env/lib/python3.10/site-packages/dill/__info__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4592c4eff9961d8b9a6c14af7f1cfa45bc4d9ab
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/__info__.py
@@ -0,0 +1,291 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+'''
+-----------------------------
+dill: serialize all of Python
+-----------------------------
+
+About Dill
+==========
+
+``dill`` extends Python's ``pickle`` module for serializing and de-serializing
+Python objects to the majority of the built-in Python types. Serialization
+is the process of converting an object to a byte stream, the inverse
+of which is converting a byte stream back to a Python object hierarchy.
+
+``dill`` provides the user the same interface as the ``pickle`` module, and
+also includes some additional features. In addition to pickling Python
+objects, ``dill`` provides the ability to save the state of an interpreter
+session in a single command. Hence, it would be feasible to save an
+interpreter session, close the interpreter, ship the pickled file to
+another computer, open a new interpreter, unpickle the session and
+thus continue from the 'saved' state of the original interpreter
+session.
+
+``dill`` can be used to store Python objects to a file, but the primary
+usage is to send Python objects across the network as a byte stream.
+``dill`` is quite flexible, and allows arbitrary user defined classes
+and functions to be serialized. Thus ``dill`` is not intended to be
+secure against erroneously or maliciously constructed data. It is
+left to the user to decide whether the data they unpickle is from
+a trustworthy source.
+
+``dill`` is part of ``pathos``, a Python framework for heterogeneous computing.
+``dill`` is in active development, so any user feedback, bug reports, comments,
+or suggestions are highly appreciated. A list of issues is located at
+https://github.com/uqfoundation/dill/issues, with a legacy list maintained at
+https://uqfoundation.github.io/project/pathos/query.
+
+
+Major Features
+==============
+
+``dill`` can pickle the following standard types:
+
+ - none, type, bool, int, float, complex, bytes, str,
+ - tuple, list, dict, file, buffer, builtin,
+ - Python classes, namedtuples, dataclasses, metaclasses,
+ - instances of classes,
+ - set, frozenset, array, functions, exceptions
+
+``dill`` can also pickle more 'exotic' standard types:
+
+ - functions with yields, nested functions, lambdas,
+ - cell, method, unboundmethod, module, code, methodwrapper,
+ - methoddescriptor, getsetdescriptor, memberdescriptor, wrapperdescriptor,
+ - dictproxy, slice, notimplemented, ellipsis, quit
+
+``dill`` cannot yet pickle these standard types:
+
+ - frame, generator, traceback
+
+``dill`` also provides the capability to:
+
+ - save and load Python interpreter sessions
+ - save and extract the source code from functions and classes
+ - interactively diagnose pickling errors
+
+
+Current Release
+===============
+
+The latest released version of ``dill`` is available from:
+
+ https://pypi.org/project/dill
+
+``dill`` is distributed under a 3-clause BSD license.
+
+
+Development Version
+===================
+
+You can get the latest development version with all the shiny new features at:
+
+ https://github.com/uqfoundation
+
+If you have a new contribution, please submit a pull request.
+
+
+Installation
+============
+
+``dill`` can be installed with ``pip``::
+
+ $ pip install dill
+
+To optionally include the ``objgraph`` diagnostic tool in the install::
+
+ $ pip install dill[graph]
+
+To optionally include the ``gprof2dot`` diagnostic tool in the install::
+
+ $ pip install dill[profile]
+
+For Windows users, to optionally install session history tools::
+
+ $ pip install dill[readline]
+
+
+Requirements
+============
+
+``dill`` requires:
+
+ - ``python`` (or ``pypy``), **>=3.8**
+ - ``setuptools``, **>=42**
+
+Optional requirements:
+
+ - ``objgraph``, **>=1.7.2**
+ - ``gprof2dot``, **>=2022.7.29**
+ - ``pyreadline``, **>=1.7.1** (on Windows)
+
+
+Basic Usage
+===========
+
+``dill`` is a drop-in replacement for ``pickle``. Existing code can be
+updated to allow complete pickling using::
+
+ >>> import dill as pickle
+
+or::
+
+ >>> from dill import dumps, loads
+
+``dumps`` converts the object to a unique byte string, and ``loads`` performs
+the inverse operation::
+
+ >>> squared = lambda x: x**2
+ >>> loads(dumps(squared))(3)
+ 9
+
+There are a number of options to control serialization which are provided
+as keyword arguments to several ``dill`` functions:
+
+* with *protocol*, the pickle protocol level can be set. This uses the
+ same value as the ``pickle`` module, *DEFAULT_PROTOCOL*.
+* with *byref=True*, ``dill`` behaves a lot more like pickle, with
+  certain objects (like modules) pickled by reference as opposed to
+  attempting to pickle the object itself.
+* with *recurse=True*, objects referred to in the global dictionary are
+ recursively traced and pickled, instead of the default behavior of
+ attempting to store the entire global dictionary.
+* with *fmode*, the contents of the file can be pickled along with the file
+ handle, which is useful if the object is being sent over the wire to a
+ remote system which does not have the original file on disk. Options are
+ *HANDLE_FMODE* for just the handle, *CONTENTS_FMODE* for the file content
+ and *FILE_FMODE* for content and handle.
+* with *ignore=False*, objects reconstructed with types defined in the
+ top-level script environment use the existing type in the environment
+ rather than a possibly different reconstructed type.
+
+The default serialization can also be set globally in *dill.settings*.
+Thus, we can modify how ``dill`` handles references to the global dictionary
+locally or globally::
+
+ >>> import dill.settings
+ >>> dumps(absolute) == dumps(absolute, recurse=True)
+ False
+ >>> dill.settings['recurse'] = True
+ >>> dumps(absolute) == dumps(absolute, recurse=True)
+ True
+
+``dill`` also includes source code inspection, as an alternative to pickling::
+
+ >>> import dill.source
+ >>> print(dill.source.getsource(squared))
+ squared = lambda x:x**2
+
+To aid in debugging pickling issues, use *dill.detect* which provides
+tools like pickle tracing::
+
+ >>> import dill.detect
+ >>> with dill.detect.trace():
+ >>> dumps(squared)
+ ┬ F1: <function <lambda> at 0x7fe074f8c280>
+ ├┬ F2: <function _create_function at 0x7fe074c49c10>
+ │└ # F2 [34 B]
+ ├┬ Co: <code object <lambda> at 0x7fe07501eb30, file "<stdin>", line 1>
+ │├┬ F2: <function _create_code at 0x7fe074c49ca0>
+ ││└ # F2 [19 B]
+ │└ # Co [87 B]
+ ├┬ D1: <dict object at 0x7fe0750d4680>
+ │└ # D1 [22 B]
+ ├┬ D2: <dict object at 0x7fe074c5a1c0>
+ │└ # D2 [2 B]
+ ├┬ D2: <dict object at 0x7fe074f903c0>
+ │├┬ D2: <dict object at 0x7fe074f8ebc0>
+ ││└ # D2 [2 B]
+ │└ # D2 [23 B]
+ └ # F1 [180 B]
+
+With trace, we see how ``dill`` stored the lambda (``F1``): it first stores
+``_create_function``, the underlying code object (``Co``) and ``_create_code``
+(which is used to handle code objects), then it handles the reference to
+the global dict (``D2``) plus the other dictionaries (``D1`` and ``D2``) that
+save the lambda object's state. A ``#`` marks when the object is actually stored.
+
+
+More Information
+================
+
+Probably the best way to get started is to look at the documentation at
+http://dill.rtfd.io. Also see ``dill.tests`` for a set of scripts that
+demonstrate how ``dill`` can serialize different Python objects. You can
+run the test suite with ``python -m dill.tests``. The contents of any
+pickle file can be examined with ``undill``. As ``dill`` conforms to
+the ``pickle`` interface, the examples and documentation found at
+http://docs.python.org/library/pickle.html also apply to ``dill``
+if one will ``import dill as pickle``. The source code is also generally
+well documented, so further questions may be resolved by inspecting the
+code itself. Please feel free to submit a ticket on github, or ask a
+question on stackoverflow (**@Mike McKerns**).
+If you would like to share how you use ``dill`` in your work, please send
+an email (to **mmckerns at uqfoundation dot org**).
+
+
+Citation
+========
+
+If you use ``dill`` to do research that leads to publication, we ask that you
+acknowledge use of ``dill`` by citing the following in your publication::
+
+ M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
+ "Building a framework for predictive science", Proceedings of
+ the 10th Python in Science Conference, 2011;
+ http://arxiv.org/pdf/1202.1056
+
+ Michael McKerns and Michael Aivazis,
+ "pathos: a framework for heterogeneous computing", 2010- ;
+ https://uqfoundation.github.io/project/pathos
+
+Please see https://uqfoundation.github.io/project/pathos or
+http://arxiv.org/pdf/1202.1056 for further information.
+
+'''
+
+__version__ = '0.3.8'
+__author__ = 'Mike McKerns'
+
+__license__ = '''
+Copyright (c) 2004-2016 California Institute of Technology.
+Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+All rights reserved.
+
+This software is available subject to the conditions and terms laid
+out below. By downloading and using this software you are agreeing
+to the following conditions.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ - Neither the names of the copyright holders nor the names of any of
+ the contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__init__.py b/llmeval-env/lib/python3.10/site-packages/dill/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..549048a438d42547473768f914d13dba3a4a7b71
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/__init__.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+# author, version, license, and long description
+try: # the package is installed
+ from .__info__ import __version__, __author__, __doc__, __license__
+except: # pragma: no cover
+ import os
+ import sys
+ parent = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
+ sys.path.append(parent)
+ # get distribution meta info
+ from version import (__version__, __author__,
+ get_license_text, get_readme_as_rst)
+ __license__ = get_license_text(os.path.join(parent, 'LICENSE'))
+ __license__ = "\n%s" % __license__
+ __doc__ = get_readme_as_rst(os.path.join(parent, 'README.md'))
+ del os, sys, parent, get_license_text, get_readme_as_rst
+
+
+from ._dill import (
+ dump, dumps, load, loads, copy,
+ Pickler, Unpickler, register, pickle, pickles, check,
+ DEFAULT_PROTOCOL, HIGHEST_PROTOCOL, HANDLE_FMODE, CONTENTS_FMODE, FILE_FMODE,
+ PickleError, PickleWarning, PicklingError, PicklingWarning, UnpicklingError,
+ UnpicklingWarning,
+)
+from .session import (
+ dump_module, load_module, load_module_asdict,
+ dump_session, load_session # backward compatibility
+)
+from . import detect, logger, session, source, temp
+
+# get global settings
+from .settings import settings
+
+# make sure "trace" is turned off
+logger.trace(False)
+
+objects = {}
+# local import of dill._objects
+#from . import _objects
+#objects.update(_objects.succeeds)
+#del _objects
+
+# local import of dill.objtypes
+from . import objtypes as types
+
+def load_types(pickleable=True, unpickleable=True):
+ """load pickleable and/or unpickleable types to ``dill.types``
+
+ ``dill.types`` is meant to mimic the ``types`` module, providing a
+ registry of object types. By default, the module is empty (for import
+ speed purposes). Use the ``load_types`` function to load selected object
+ types to the ``dill.types`` module.
+
+ Args:
+ pickleable (bool, default=True): if True, load pickleable types.
+ unpickleable (bool, default=True): if True, load unpickleable types.
+
+ Returns:
+ None
+ """
+ from importlib import reload
+ # local import of dill.objects
+ from . import _objects
+ if pickleable:
+ objects.update(_objects.succeeds)
+ else:
+ [objects.pop(obj,None) for obj in _objects.succeeds]
+ if unpickleable:
+ objects.update(_objects.failures)
+ else:
+ [objects.pop(obj,None) for obj in _objects.failures]
+ objects.update(_objects.registered)
+ del _objects
+ # reset contents of types to 'empty'
+ [types.__dict__.pop(obj) for obj in list(types.__dict__.keys()) \
+ if obj.find('Type') != -1]
+ # add corresponding types from objects to types
+ reload(types)
+
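+# Usage sketch (illustrative): load only the pickleable object types into
+# the ``dill.types`` registry.
+#
+#   >>> import dill
+#   >>> dill.load_types(pickleable=True, unpickleable=False)
+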
+def extend(use_dill=True):
+ '''add (or remove) dill types to/from the pickle registry
+
+ by default, ``dill`` populates its types to ``pickle.Pickler.dispatch``.
+ Thus, all ``dill`` types are available upon calling ``'import pickle'``.
+ To drop all ``dill`` types from the ``pickle`` dispatch, *use_dill=False*.
+
+ Args:
+ use_dill (bool, default=True): if True, extend the dispatch table.
+
+ Returns:
+ None
+ '''
+ from ._dill import _revert_extension, _extend
+ if use_dill: _extend()
+ else: _revert_extension()
+ return
+
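+# Usage sketch (illustrative): toggle dill's additions to the stock
+# ``pickle.Pickler.dispatch`` table.
+#
+#   >>> import dill
+#   >>> dill.extend(False)  # drop dill-registered types from pickle
+#   >>> dill.extend(True)   # restore them
+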
+extend()
+
+
+def license():
+ """print license"""
+ print (__license__)
+ return
+
+def citation():
+ """print citation"""
+ print (__doc__[-491:-118])
+ return
+
+# end of file
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/_dill.py b/llmeval-env/lib/python3.10/site-packages/dill/_dill.py
new file mode 100644
index 0000000000000000000000000000000000000000..d42432ff3703f3f49fd760340b77bf84ce3e3a95
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/_dill.py
@@ -0,0 +1,2198 @@
+# -*- coding: utf-8 -*-
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2015 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+dill: a utility for serialization of python objects
+
+The primary functions in `dill` are :func:`dump` and
+:func:`dumps` for serialization ("pickling") to a
+file or to a string, respectively, and :func:`load`
+and :func:`loads` for deserialization ("unpickling"),
+similarly, from a file or from a string. Other notable
+functions are :func:`~dill.dump_module` and
+:func:`~dill.load_module`, which are used to save and
+restore module objects, including an interpreter session.
+
+Based on code written by Oren Tirosh and Armin Ronacher.
+Extended to a (near) full set of the builtin types (in types module),
+and coded to the pickle interface, by <mmckerns@caltech.edu>.
+Initial port to python3 by Jonathan Dobson, continued by mmckerns.
+Tested against "all" python types (Std. Lib. CH 1-15 @ 2.7) by mmckerns.
+Tested against CH16+ Std. Lib. ... TBD.
+"""
+
+from __future__ import annotations
+
+__all__ = [
+ 'dump','dumps','load','loads','copy',
+ 'Pickler','Unpickler','register','pickle','pickles','check',
+ 'DEFAULT_PROTOCOL','HIGHEST_PROTOCOL','HANDLE_FMODE','CONTENTS_FMODE','FILE_FMODE',
+ 'PickleError','PickleWarning','PicklingError','PicklingWarning','UnpicklingError',
+ 'UnpicklingWarning',
+]
+
+__module__ = 'dill'
+
+import warnings
+from .logger import adapter as logger
+from .logger import trace as _trace
+log = logger # backward compatibility (see issue #582)
+
+import os
+import sys
+diff = None
+_use_diff = False
+OLD38 = (sys.hexversion < 0x3080000)
+OLD39 = (sys.hexversion < 0x3090000)
+OLD310 = (sys.hexversion < 0x30a0000)
+OLD312a7 = (sys.hexversion < 0x30c00a7)
+#XXX: get types from .objtypes ?
+import builtins as __builtin__
+from pickle import _Pickler as StockPickler, Unpickler as StockUnpickler
+from pickle import GLOBAL, POP
+from _thread import LockType
+from _thread import RLock as RLockType
+#from io import IOBase
+from types import CodeType, FunctionType, MethodType, GeneratorType, \
+ TracebackType, FrameType, ModuleType, BuiltinMethodType
+BufferType = memoryview #XXX: unregistered
+ClassType = type # no 'old-style' classes
+EllipsisType = type(Ellipsis)
+#FileType = IOBase
+NotImplementedType = type(NotImplemented)
+SliceType = slice
+TypeType = type # 'new-style' classes #XXX: unregistered
+XRangeType = range
+from types import MappingProxyType as DictProxyType, new_class
+from pickle import DEFAULT_PROTOCOL, HIGHEST_PROTOCOL, PickleError, PicklingError, UnpicklingError
+import __main__ as _main_module
+import marshal
+import gc
+# import zlib
+import abc
+import dataclasses
+from weakref import ReferenceType, ProxyType, CallableProxyType
+from collections import OrderedDict
+from enum import Enum, EnumMeta
+from functools import partial
+from operator import itemgetter, attrgetter
+GENERATOR_FAIL = False
+import importlib.machinery
+EXTENSION_SUFFIXES = tuple(importlib.machinery.EXTENSION_SUFFIXES)
+try:
+ import ctypes
+ HAS_CTYPES = True
+ # if using `pypy`, pythonapi is not found
+ IS_PYPY = not hasattr(ctypes, 'pythonapi')
+except ImportError:
+ HAS_CTYPES = False
+ IS_PYPY = False
+NumpyUfuncType = None
+NumpyDType = None
+NumpyArrayType = None
+try:
+ if not importlib.machinery.PathFinder().find_spec('numpy'):
+ raise ImportError("No module named 'numpy'")
+ NumpyUfuncType = True
+ NumpyDType = True
+ NumpyArrayType = True
+except ImportError:
+ pass
+def __hook__():
+ global NumpyArrayType, NumpyDType, NumpyUfuncType
+ from numpy import ufunc as NumpyUfuncType
+ from numpy import ndarray as NumpyArrayType
+ from numpy import dtype as NumpyDType
+ return True
+if NumpyArrayType: # then has numpy
+ def ndarraysubclassinstance(obj_type):
+ if all((c.__module__, c.__name__) != ('numpy', 'ndarray') for c in obj_type.__mro__):
+ return False
+ # anything below here is a numpy array (or subclass) instance
+ __hook__() # import numpy (so the following works!!!)
+ # verify that __reduce__ has not been overridden
+ if obj_type.__reduce_ex__ is not NumpyArrayType.__reduce_ex__ \
+ or obj_type.__reduce__ is not NumpyArrayType.__reduce__:
+ return False
+ return True
+ def numpyufunc(obj_type):
+ return any((c.__module__, c.__name__) == ('numpy', 'ufunc') for c in obj_type.__mro__)
+ def numpydtype(obj_type):
+ if all((c.__module__, c.__name__) != ('numpy', 'dtype') for c in obj_type.__mro__):
+ return False
+ # anything below here is a numpy dtype
+ __hook__() # import numpy (so the following works!!!)
+ return obj_type is type(NumpyDType) # handles subclasses
+else:
+ def ndarraysubclassinstance(obj): return False
+ def numpyufunc(obj): return False
+ def numpydtype(obj): return False
+
+from types import GetSetDescriptorType, ClassMethodDescriptorType, \
+ WrapperDescriptorType, MethodDescriptorType, MemberDescriptorType, \
+ MethodWrapperType #XXX: unused
+
+# make sure to add these 'hand-built' types to _typemap
+CellType = type((lambda x: lambda y: x)(0).__closure__[0])
+PartialType = type(partial(int, base=2))
+SuperType = type(super(Exception, TypeError()))
+ItemGetterType = type(itemgetter(0))
+AttrGetterType = type(attrgetter('__repr__'))
+
+try:
+ from functools import _lru_cache_wrapper as LRUCacheType
+except ImportError:
+ LRUCacheType = None
+
+if not isinstance(LRUCacheType, type):
+ LRUCacheType = None
+
+def get_file_type(*args, **kwargs):
+ open = kwargs.pop("open", __builtin__.open)
+ f = open(os.devnull, *args, **kwargs)
+ t = type(f)
+ f.close()
+ return t
+
+IS_PYODIDE = sys.platform == 'emscripten'
+
+FileType = get_file_type('rb', buffering=0)
+TextWrapperType = get_file_type('r', buffering=-1)
+BufferedRandomType = None if IS_PYODIDE else get_file_type('r+b', buffering=-1)
+BufferedReaderType = get_file_type('rb', buffering=-1)
+BufferedWriterType = get_file_type('wb', buffering=-1)
+try:
+ from _pyio import open as _open
+ PyTextWrapperType = get_file_type('r', buffering=-1, open=_open)
+ PyBufferedRandomType = None if IS_PYODIDE else get_file_type('r+b', buffering=-1, open=_open)
+ PyBufferedReaderType = get_file_type('rb', buffering=-1, open=_open)
+ PyBufferedWriterType = get_file_type('wb', buffering=-1, open=_open)
+except ImportError:
+ PyTextWrapperType = PyBufferedRandomType = PyBufferedReaderType = PyBufferedWriterType = None
+from io import BytesIO as StringIO
+InputType = OutputType = None
+from socket import socket as SocketType
+#FIXME: additionally calls ForkingPickler.register several times
+from multiprocessing.reduction import _reduce_socket as reduce_socket
+try: #pragma: no cover
+ IS_IPYTHON = __IPYTHON__ # is True
+ ExitType = None # IPython.core.autocall.ExitAutocall
+ IPYTHON_SINGLETONS = ('exit', 'quit', 'get_ipython')
+except NameError:
+ IS_IPYTHON = False
+ try: ExitType = type(exit) # apparently 'exit' can be removed
+ except NameError: ExitType = None
+ IPYTHON_SINGLETONS = ()
+
+import inspect
+import typing
+
+
+### Shims for different versions of Python and dill
+class Sentinel(object):
+ """
+ Create a unique sentinel object that is pickled as a constant.
+ """
+ def __init__(self, name, module_name=None):
+ self.name = name
+ if module_name is None:
+ # Use the calling frame's module
+ self.__module__ = inspect.currentframe().f_back.f_globals['__name__']
+ else:
+ self.__module__ = module_name # pragma: no cover
+ def __repr__(self):
+ return self.__module__ + '.' + self.name # pragma: no cover
+ def __copy__(self):
+ return self # pragma: no cover
+ def __deepcopy__(self, memo):
+ return self # pragma: no cover
+ def __reduce__(self):
+ return self.name
+ def __reduce_ex__(self, protocol):
+ return self.name
+
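+# Illustrative sketch (not part of the original source): since __reduce__
+# returns just the sentinel's name, pickling emits a global reference, so
+# unpickling resolves back to the same module-level object (dumps/loads
+# are defined later in this module).
+#
+#   >>> MISSING = Sentinel('MISSING')      # hypothetical module-level sentinel
+#   >>> loads(dumps(MISSING)) is MISSING   # looked up by name on load
+#   True
+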
+from . import _shims
+from ._shims import Reduce, Getattr
+
+### File modes
+#: Pickles the file handle, preserving mode. The position of the unpickled
+#: object is as for a new file handle.
+HANDLE_FMODE = 0
+#: Pickles the file contents, creating a new file if on load the file does
+#: not exist. The position = min(pickled position, EOF) and mode is chosen
+#: as such that "best" preserves behavior of the original file.
+CONTENTS_FMODE = 1
+#: Pickles the entire file (handle and contents), preserving mode and position.
+FILE_FMODE = 2
+
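+# Minimal sketch (assumes a readable 'data.txt' exists) of passing a
+# file-mode constant to dumps, which is defined below:
+#
+#   >>> with open('data.txt', 'r') as f:
+#   ...     payload = dumps(f, fmode=FILE_FMODE)  # contents travel with handle
+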
+### Shorthands (modified from python2.5/lib/pickle.py)
+def copy(obj, *args, **kwds):
+ """
+ Use pickling to 'copy' an object (i.e. `loads(dumps(obj))`).
+
+ See :func:`dumps` and :func:`loads` for keyword arguments.
+ """
+ ignore = kwds.pop('ignore', Unpickler.settings['ignore'])
+ return loads(dumps(obj, *args, **kwds), ignore=ignore)
+
+def dump(obj, file, protocol=None, byref=None, fmode=None, recurse=None, **kwds):#, strictio=None):
+ """
+ Pickle an object to a file.
+
+ See :func:`dumps` for keyword arguments.
+ """
+ from .settings import settings
+ protocol = settings['protocol'] if protocol is None else int(protocol)
+ _kwds = kwds.copy()
+ _kwds.update(dict(byref=byref, fmode=fmode, recurse=recurse))
+ Pickler(file, protocol, **_kwds).dump(obj)
+ return
+
+def dumps(obj, protocol=None, byref=None, fmode=None, recurse=None, **kwds):#, strictio=None):
+ """
+ Pickle an object to a string.
+
+ *protocol* is the pickler protocol, as defined for Python *pickle*.
+
+ If *byref=True*, then dill behaves a lot more like pickle as certain
+ objects (like modules) are pickled by reference as opposed to attempting
+ to pickle the object itself.
+
+ If *recurse=True*, then objects referred to in the global dictionary
+ are recursively traced and pickled, instead of the default behavior
+ of attempting to store the entire global dictionary. This is needed for
+ functions defined via *exec()*.
+
+ *fmode* (:const:`HANDLE_FMODE`, :const:`CONTENTS_FMODE`,
+ or :const:`FILE_FMODE`) indicates how file handles will be pickled.
+ For example, when pickling a data file handle for transfer to a remote
+ compute service, *FILE_FMODE* will include the file contents in the
+ pickle and cursor position so that a remote method can operate
+ transparently on an object with an open file handle.
+
+ Default values for keyword arguments can be set in :mod:`dill.settings`.
+ """
+ file = StringIO()
+ dump(obj, file, protocol, byref, fmode, recurse, **kwds)#, strictio)
+ return file.getvalue()
+
+def load(file, ignore=None, **kwds):
+ """
+ Unpickle an object from a file.
+
+ See :func:`loads` for keyword arguments.
+ """
+ return Unpickler(file, ignore=ignore, **kwds).load()
+
+def loads(str, ignore=None, **kwds):
+ """
+ Unpickle an object from a string.
+
+ If *ignore=False* then objects whose class is defined in the module
+ *__main__* are updated to reference the existing class in *__main__*,
+ otherwise they are left to refer to the reconstructed type, which may
+ be different.
+
+ Default values for keyword arguments can be set in :mod:`dill.settings`.
+ """
+ file = StringIO(str)
+ return load(file, ignore, **kwds)
+
+# def dumpzs(obj, protocol=None):
+# """pickle an object to a compressed string"""
+# return zlib.compress(dumps(obj, protocol))
+
+# def loadzs(str):
+# """unpickle an object from a compressed string"""
+# return loads(zlib.decompress(str))
+
+### End: Shorthands ###
+
+class MetaCatchingDict(dict):
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __missing__(self, key):
+ if issubclass(key, type):
+ return save_type
+ else:
+ raise KeyError()
+
+class PickleWarning(Warning, PickleError):
+ pass
+
+class PicklingWarning(PickleWarning, PicklingError):
+ pass
+
+class UnpicklingWarning(PickleWarning, UnpicklingError):
+ pass
+
+### Extend the Picklers
+class Pickler(StockPickler):
+ """python's Pickler extended to interpreter sessions"""
+ dispatch: typing.Dict[type, typing.Callable[[Pickler, typing.Any], None]] \
+ = MetaCatchingDict(StockPickler.dispatch.copy())
+ """The dispatch table, a dictionary of serializing functions used
+ by Pickler to save objects of specific types. Use :func:`pickle`
+ or :func:`register` to associate types to custom functions.
+
+ :meta hide-value:
+ """
+ _session = False
+ from .settings import settings
+
+ def __init__(self, file, *args, **kwds):
+ settings = Pickler.settings
+ _byref = kwds.pop('byref', None)
+ #_strictio = kwds.pop('strictio', None)
+ _fmode = kwds.pop('fmode', None)
+ _recurse = kwds.pop('recurse', None)
+ StockPickler.__init__(self, file, *args, **kwds)
+ self._main = _main_module
+ self._diff_cache = {}
+ self._byref = settings['byref'] if _byref is None else _byref
+ self._strictio = False #_strictio
+ self._fmode = settings['fmode'] if _fmode is None else _fmode
+ self._recurse = settings['recurse'] if _recurse is None else _recurse
+ self._postproc = OrderedDict()
+ self._file = file
+
+ def save(self, obj, save_persistent_id=True):
+ # numpy hack
+ obj_type = type(obj)
+ if NumpyArrayType and not (obj_type is type or obj_type in Pickler.dispatch):
+ # register if the object is a numpy ufunc
+ # thanks to Paul Kienzle for pointing out ufuncs didn't pickle
+ if numpyufunc(obj_type):
+ @register(obj_type)
+ def save_numpy_ufunc(pickler, obj):
+ logger.trace(pickler, "Nu: %s", obj)
+ name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+ StockPickler.save_global(pickler, obj, name=name)
+ logger.trace(pickler, "# Nu")
+ return
+ # NOTE: the above 'save' performs like:
+ # import copy_reg
+ # def udump(f): return f.__name__
+ # def uload(name): return getattr(numpy, name)
+ # copy_reg.pickle(NumpyUfuncType, udump, uload)
+ # register if the object is a numpy dtype
+ if numpydtype(obj_type):
+ @register(obj_type)
+ def save_numpy_dtype(pickler, obj):
+ logger.trace(pickler, "Dt: %s", obj)
+ pickler.save_reduce(_create_dtypemeta, (obj.type,), obj=obj)
+ logger.trace(pickler, "# Dt")
+ return
+ # NOTE: the above 'save' performs like:
+ # import copy_reg
+ # def uload(name): return type(NumpyDType(name))
+ # def udump(f): return uload, (f.type,)
+ # copy_reg.pickle(NumpyDTypeType, udump, uload)
+ # register if the object is a subclassed numpy array instance
+ if ndarraysubclassinstance(obj_type):
+ @register(obj_type)
+ def save_numpy_array(pickler, obj):
+ logger.trace(pickler, "Nu: (%s, %s)", obj.shape, obj.dtype)
+ npdict = getattr(obj, '__dict__', None)
+ f, args, state = obj.__reduce__()
+ pickler.save_reduce(_create_array, (f,args,state,npdict), obj=obj)
+ logger.trace(pickler, "# Nu")
+ return
+ # end numpy hack
+
+ if GENERATOR_FAIL and obj_type is GeneratorType:
+ msg = "Can't pickle %s: attribute lookup builtins.generator failed" % GeneratorType
+ raise PicklingError(msg)
+ StockPickler.save(self, obj, save_persistent_id)
+
+ save.__doc__ = StockPickler.save.__doc__
+
+ def dump(self, obj): #NOTE: if settings change, need to update attributes
+ logger.trace_setup(self)
+ StockPickler.dump(self, obj)
+ dump.__doc__ = StockPickler.dump.__doc__
+
+class Unpickler(StockUnpickler):
+ """python's Unpickler extended to interpreter sessions and more types"""
+ from .settings import settings
+ _session = False
+
+ def find_class(self, module, name):
+ if (module, name) == ('__builtin__', '__main__'):
+ return self._main.__dict__ #XXX: above set w/save_module_dict
+ elif (module, name) == ('__builtin__', 'NoneType'):
+ return type(None) #XXX: special case: NoneType missing
+ if module == 'dill.dill': module = 'dill._dill'
+ return StockUnpickler.find_class(self, module, name)
+
+ def __init__(self, *args, **kwds):
+ settings = Pickler.settings
+ _ignore = kwds.pop('ignore', None)
+ StockUnpickler.__init__(self, *args, **kwds)
+ self._main = _main_module
+ self._ignore = settings['ignore'] if _ignore is None else _ignore
+
+ def load(self): #NOTE: if settings change, need to update attributes
+ obj = StockUnpickler.load(self)
+ if type(obj).__module__ == getattr(_main_module, '__name__', '__main__'):
+ if not self._ignore:
+ # point obj class to main
+ try: obj.__class__ = getattr(self._main, type(obj).__name__)
+ except (AttributeError,TypeError): pass # defined in a file
+ #_main_module.__dict__.update(obj.__dict__) #XXX: should update globals ?
+ return obj
+ load.__doc__ = StockUnpickler.load.__doc__
+ pass
+
+'''
+def dispatch_table():
+ """get the dispatch table of registered types"""
+ return Pickler.dispatch
+'''
+
+pickle_dispatch_copy = StockPickler.dispatch.copy()
+
+def pickle(t, func):
+ """expose :attr:`~Pickler.dispatch` table for user-created extensions"""
+ Pickler.dispatch[t] = func
+ return
+
+def register(t):
+ """decorator to register types to Pickler's :attr:`~Pickler.dispatch` table"""
+ def proxy(func):
+ Pickler.dispatch[t] = func
+ return func
+ return proxy
+
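+# Example sketch (MyType is a hypothetical user-defined class): the
+# decorator form adds a reducer to Pickler.dispatch.
+#
+#   >>> @register(MyType)
+#   ... def save_mytype(pickler, obj):
+#   ...     pickler.save_reduce(MyType, (obj.value,), obj=obj)
+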
+def _revert_extension():
+ """drop dill-registered types from pickle's dispatch table"""
+ for type, func in list(StockPickler.dispatch.items()):
+ if func.__module__ == __name__:
+ del StockPickler.dispatch[type]
+ if type in pickle_dispatch_copy:
+ StockPickler.dispatch[type] = pickle_dispatch_copy[type]
+
+def use_diff(on=True):
+ """
+ Reduces size of pickles by only including objects which have changed.
+
+ Decreases pickle size but increases CPU time needed.
+ Also helps avoid some unpickleable objects.
+ MUST be called at start of script, otherwise changes will not be recorded.
+ """
+ global _use_diff, diff
+ _use_diff = on
+ if _use_diff and diff is None:
+ try:
+ from . import diff as d
+ except ImportError:
+ import diff as d
+ diff = d
+
+def _create_typemap():
+ import types
+ d = dict(list(__builtin__.__dict__.items()) + \
+ list(types.__dict__.items())).items()
+ for key, value in d:
+ if getattr(value, '__module__', None) == 'builtins' \
+ and type(value) is type:
+ yield key, value
+ return
+_reverse_typemap = dict(_create_typemap())
+_reverse_typemap.update({
+ 'PartialType': PartialType,
+ 'SuperType': SuperType,
+ 'ItemGetterType': ItemGetterType,
+ 'AttrGetterType': AttrGetterType,
+})
+if sys.hexversion < 0x30800a2:
+ _reverse_typemap.update({
+ 'CellType': CellType,
+ })
+
+# "Incidental" implementation specific types. Unpickling these types in another
+# implementation of Python (PyPy -> CPython) is not guaranteed to work
+
+# This dictionary should contain all types that appear in Python implementations
+# but are not defined in https://docs.python.org/3/library/types.html#standard-interpreter-types
+x=OrderedDict()
+_incedental_reverse_typemap = {
+ 'FileType': FileType,
+ 'BufferedRandomType': BufferedRandomType,
+ 'BufferedReaderType': BufferedReaderType,
+ 'BufferedWriterType': BufferedWriterType,
+ 'TextWrapperType': TextWrapperType,
+ 'PyBufferedRandomType': PyBufferedRandomType,
+ 'PyBufferedReaderType': PyBufferedReaderType,
+ 'PyBufferedWriterType': PyBufferedWriterType,
+ 'PyTextWrapperType': PyTextWrapperType,
+}
+
+_incedental_reverse_typemap.update({
+ "DictKeysType": type({}.keys()),
+ "DictValuesType": type({}.values()),
+ "DictItemsType": type({}.items()),
+
+ "OdictKeysType": type(x.keys()),
+ "OdictValuesType": type(x.values()),
+ "OdictItemsType": type(x.items()),
+})
+
+if ExitType:
+ _incedental_reverse_typemap['ExitType'] = ExitType
+if InputType:
+ _incedental_reverse_typemap['InputType'] = InputType
+ _incedental_reverse_typemap['OutputType'] = OutputType
+
+'''
+try:
+ import symtable
+ _incedental_reverse_typemap["SymtableEntryType"] = type(symtable.symtable("", "string", "exec")._table)
+except: #FIXME: fails to pickle
+ pass
+
+if sys.hexversion >= 0x30a00a0:
+ _incedental_reverse_typemap['LineIteratorType'] = type(compile('3', '', 'eval').co_lines())
+'''
+
+if sys.hexversion >= 0x30b00b0:
+ from types import GenericAlias
+ _incedental_reverse_typemap["GenericAliasIteratorType"] = type(iter(GenericAlias(list, (int,))))
+ '''
+ _incedental_reverse_typemap['PositionsIteratorType'] = type(compile('3', '', 'eval').co_positions())
+ '''
+
+try:
+ import winreg
+ _incedental_reverse_typemap["HKEYType"] = winreg.HKEYType
+except ImportError:
+ pass
+
+_reverse_typemap.update(_incedental_reverse_typemap)
+_incedental_types = set(_incedental_reverse_typemap.values())
+
+del x
+
+_typemap = dict((v, k) for k, v in _reverse_typemap.items())
+
+def _unmarshal(string):
+ return marshal.loads(string)
+
+def _load_type(name):
+ return _reverse_typemap[name]
+
+def _create_type(typeobj, *args):
+ return typeobj(*args)
+
+def _create_function(fcode, fglobals, fname=None, fdefaults=None,
+ fclosure=None, fdict=None, fkwdefaults=None):
+ # same as FunctionType, but enable passing __dict__ to new function,
+ # __dict__ is the storehouse for attributes added after function creation
+ func = FunctionType(fcode, fglobals or dict(), fname, fdefaults, fclosure)
+ if fdict is not None:
+ func.__dict__.update(fdict) #XXX: better copy? option to copy?
+ if fkwdefaults is not None:
+ func.__kwdefaults__ = fkwdefaults
+ # 'recurse' only stores referenced modules/objects in fglobals,
+ # thus we need to make sure that we have __builtins__ as well
+ if "__builtins__" not in func.__globals__:
+ func.__globals__["__builtins__"] = globals()["__builtins__"]
+ # assert id(fglobals) == id(func.__globals__)
+ return func
+
+class match:
+ """
+ Make available a limited structural pattern-matching-like syntax for Python < 3.10
+
+ Patterns can be only tuples (without types) currently.
+ Inspired by the package pattern-matching-PEP634.
+
+ Usage:
+ >>> with match(args) as m:
+ >>> if m.case(('x', 'y')):
+ >>> # use m.x and m.y
+ >>> elif m.case(('x', 'y', 'z')):
+ >>> # use m.x, m.y and m.z
+
+ Equivalent native code for Python >= 3.10:
+ >>> match args:
+ >>> case (x, y):
+ >>> # use x and y
+ >>> case (x, y, z):
+ >>> # use x, y and z
+ """
+ def __init__(self, value):
+ self.value = value
+ self._fields = None
+ def __enter__(self):
+ return self
+ def __exit__(self, *exc_info):
+ return False
+ def case(self, args): # *args, **kwargs):
+ """just handles tuple patterns"""
+ if len(self.value) != len(args): # + len(kwargs):
+ return False
+ #if not all(isinstance(arg, pat) for arg, pat in zip(self.value[len(args):], kwargs.values())):
+ # return False
+ self.args = args # (*args, *kwargs)
+ return True
+ @property
+ def fields(self):
+ # Only bind names to values if necessary.
+ if self._fields is None:
+ self._fields = dict(zip(self.args, self.value))
+ return self._fields
+ def __getattr__(self, item):
+ return self.fields[item]
+
+ALL_CODE_PARAMS = [
+ # Version New attribute CodeType parameters
+ ((3,11,'a'), 'co_endlinetable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable endlinetable columntable exceptiontable freevars cellvars'),
+ ((3,11), 'co_exceptiontable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable exceptiontable freevars cellvars'),
+ ((3,10), 'co_linetable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno linetable freevars cellvars'),
+ ((3,8), 'co_posonlyargcount', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno lnotab freevars cellvars'),
+ ((3,7), 'co_kwonlyargcount', 'argcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno lnotab freevars cellvars'),
+ ]
+for version, new_attr, params in ALL_CODE_PARAMS:
+ if hasattr(CodeType, new_attr):
+ CODE_VERSION = version
+ CODE_PARAMS = params.split()
+ break
+ENCODE_PARAMS = set(CODE_PARAMS).intersection(
+ ['code', 'lnotab', 'linetable', 'endlinetable', 'columntable', 'exceptiontable'])
+
+def _create_code(*args):
+ if not isinstance(args[0], int): # co_lnotab stored from >= 3.10
+ LNOTAB, *args = args
+ else: # from < 3.10 (or pre-LNOTAB storage)
+ LNOTAB = b''
+
+ with match(args) as m:
+ # Python 3.11/3.12a (18 members)
+ if m.case((
+ 'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6]
+ 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', 'firstlineno', # args[6:14]
+ 'linetable', 'exceptiontable', 'freevars', 'cellvars' # args[14:]
+ )):
+ if CODE_VERSION == (3,11):
+ return CodeType(
+ *args[:6],
+ args[6].encode() if hasattr(args[6], 'encode') else args[6], # code
+ *args[7:14],
+ args[14].encode() if hasattr(args[14], 'encode') else args[14], # linetable
+ args[15].encode() if hasattr(args[15], 'encode') else args[15], # exceptiontable
+ args[16],
+ args[17],
+ )
+ fields = m.fields
+ # Python 3.10 or 3.8/3.9 (16 members)
+ elif m.case((
+ 'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6]
+ 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'firstlineno', # args[6:13]
+ 'LNOTAB_OR_LINETABLE', 'freevars', 'cellvars' # args[13:]
+ )):
+ if CODE_VERSION == (3,10) or CODE_VERSION == (3,8):
+ return CodeType(
+ *args[:6],
+ args[6].encode() if hasattr(args[6], 'encode') else args[6], # code
+ *args[7:13],
+ args[13].encode() if hasattr(args[13], 'encode') else args[13], # lnotab/linetable
+ args[14],
+ args[15],
+ )
+ fields = m.fields
+ if CODE_VERSION >= (3,10):
+ fields['linetable'] = m.LNOTAB_OR_LINETABLE
+ else:
+ fields['lnotab'] = LNOTAB if LNOTAB else m.LNOTAB_OR_LINETABLE
+ # Python 3.7 (15 args)
+ elif m.case((
+ 'argcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:5]
+ 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'firstlineno', # args[5:12]
+ 'lnotab', 'freevars', 'cellvars' # args[12:]
+ )):
+ if CODE_VERSION == (3,7):
+ return CodeType(
+ *args[:5],
+ args[5].encode() if hasattr(args[5], 'encode') else args[5], # code
+ *args[6:12],
+ args[12].encode() if hasattr(args[12], 'encode') else args[12], # lnotab
+ args[13],
+ args[14],
+ )
+ fields = m.fields
+ # Python 3.11a (20 members)
+ elif m.case((
+ 'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6]
+ 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', 'firstlineno', # args[6:14]
+ 'linetable', 'endlinetable', 'columntable', 'exceptiontable', 'freevars', 'cellvars' # args[14:]
+ )):
+ if CODE_VERSION == (3,11,'a'):
+ return CodeType(
+ *args[:6],
+ args[6].encode() if hasattr(args[6], 'encode') else args[6], # code
+ *args[7:14],
+ *(a.encode() if hasattr(a, 'encode') else a for a in args[14:18]), # linetable-exceptiontable
+ args[18],
+ args[19],
+ )
+ fields = m.fields
+ else:
+ raise UnpicklingError("pattern match for code object failed")
+
+ # The args format doesn't match this version.
+ fields.setdefault('posonlyargcount', 0) # from python <= 3.7
+ fields.setdefault('lnotab', LNOTAB) # from python >= 3.10
+ fields.setdefault('linetable', b'') # from python <= 3.9
+ fields.setdefault('qualname', fields['name']) # from python <= 3.10
+ fields.setdefault('exceptiontable', b'') # from python <= 3.10
+ fields.setdefault('endlinetable', None) # from python != 3.11a
+ fields.setdefault('columntable', None) # from python != 3.11a
+
+ args = (fields[k].encode() if k in ENCODE_PARAMS and hasattr(fields[k], 'encode') else fields[k]
+ for k in CODE_PARAMS)
+ return CodeType(*args)
+
+def _create_ftype(ftypeobj, func, args, kwds):
+ if kwds is None:
+ kwds = {}
+ if args is None:
+ args = ()
+ return ftypeobj(func, *args, **kwds)
+
+def _create_typing_tuple(argz, *args): #NOTE: workaround python/cpython#94245
+ if not argz:
+ return typing.Tuple[()].copy_with(())
+ if argz == ((),):
+ return typing.Tuple[()]
+ return typing.Tuple[argz]
+
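+# Illustrative sketch (not used by dill): on CPython versions affected by
+# python/cpython#94245, typing.Tuple[()] reports __args__ == ((),), so
+# rebuilding it naively as Tuple[__args__] would not round-trip -- hence the
+# special cases above.
+def _example_typing_tuple_roundtrip(): # hypothetical name, illustrative only
+    empty = typing.Tuple[()]
+    return _create_typing_tuple(empty.__args__) == empty # expected: True
+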
+def _create_lock(locked, *args): #XXX: ignores 'blocking'
+ from threading import Lock
+ lock = Lock()
+ if locked:
+ if not lock.acquire(False):
+ raise UnpicklingError("Cannot acquire lock")
+ return lock
+
+def _create_rlock(count, owner, *args): #XXX: ignores 'blocking'
+ lock = RLockType()
+ if owner is not None:
+ lock._acquire_restore((count, owner))
+ if owner and not lock._is_owned():
+ raise UnpicklingError("Cannot acquire lock")
+ return lock
+
+# thanks to matsjoyce for adding all the different file modes
+def _create_filehandle(name, mode, position, closed, open, strictio, fmode, fdata): # buffering=0
+ # only pickles the handle, not the file contents... good? or StringIO(data)?
+ # (for file contents see: http://effbot.org/librarybook/copy-reg.htm)
+ # NOTE: handle special cases first (are there more special cases?)
+    names = {'<stdin>':sys.__stdin__, '<stdout>':sys.__stdout__,
+             '<stderr>':sys.__stderr__} #XXX: better fileno=(0,1,2) ?
+    if name in list(names.keys()):
+        f = names[name] #XXX: safer "f=sys.stdin"
+    elif name == '<tmpfile>':
+        f = os.tmpfile()
+    elif name == '<fdopen>':
+        import tempfile
+        f = tempfile.TemporaryFile(mode)
+ else:
+ try:
+ exists = os.path.exists(name)
+ except Exception:
+ exists = False
+ if not exists:
+ if strictio:
+ raise FileNotFoundError("[Errno 2] No such file or directory: '%s'" % name)
+ elif "r" in mode and fmode != FILE_FMODE:
+                name = '<fdopen>' # or os.devnull?
+ current_size = 0 # or maintain position?
+ else:
+ current_size = os.path.getsize(name)
+
+ if position > current_size:
+ if strictio:
+ raise ValueError("invalid buffer size")
+ elif fmode == CONTENTS_FMODE:
+ position = current_size
+ # try to open the file by name
+ # NOTE: has different fileno
+ try:
+ #FIXME: missing: *buffering*, encoding, softspace
+ if fmode == FILE_FMODE:
+ f = open(name, mode if "w" in mode else "w")
+ f.write(fdata)
+ if "w" not in mode:
+ f.close()
+ f = open(name, mode)
+            elif name == '<fdopen>': # file did not exist
+ import tempfile
+ f = tempfile.TemporaryFile(mode)
+ # treat x mode as w mode
+ elif fmode == CONTENTS_FMODE \
+ and ("w" in mode or "x" in mode):
+ # stop truncation when opening
+ flags = os.O_CREAT
+ if "+" in mode:
+ flags |= os.O_RDWR
+ else:
+ flags |= os.O_WRONLY
+ f = os.fdopen(os.open(name, flags), mode)
+ # set name to the correct value
+ r = getattr(f, "buffer", f)
+ r = getattr(r, "raw", r)
+ r.name = name
+ assert f.name == name
+ else:
+ f = open(name, mode)
+ except (IOError, FileNotFoundError):
+ err = sys.exc_info()[1]
+ raise UnpicklingError(err)
+ if closed:
+ f.close()
+ elif position >= 0 and fmode != HANDLE_FMODE:
+ f.seek(position)
+ return f
+
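+# Illustrative sketch (not used by dill) of the three file modes handled
+# above: HANDLE_FMODE restores just the handle, CONTENTS_FMODE tracks the
+# file by name and position, and FILE_FMODE ships the contents in the pickle.
+def _example_file_fmodes(path): # hypothetical helper; uses dill's public API
+    import dill
+    with open(path, 'r') as f:
+        f.read(5) # advance the position so the modes differ observably
+        handle = dill.dumps(f, fmode=dill.HANDLE_FMODE)
+        contents = dill.dumps(f, fmode=dill.CONTENTS_FMODE)
+        snapshot = dill.dumps(f, fmode=dill.FILE_FMODE)
+    return handle, contents, snapshot
+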
+def _create_stringi(value, position, closed):
+ f = StringIO(value)
+ if closed: f.close()
+ else: f.seek(position)
+ return f
+
+def _create_stringo(value, position, closed):
+ f = StringIO()
+ if closed: f.close()
+ else:
+ f.write(value)
+ f.seek(position)
+ return f
+
+class _itemgetter_helper(object):
+ def __init__(self):
+ self.items = []
+ def __getitem__(self, item):
+ self.items.append(item)
+ return
+
+class _attrgetter_helper(object):
+ def __init__(self, attrs, index=None):
+ self.attrs = attrs
+ self.index = index
+ def __getattribute__(self, attr):
+ attrs = object.__getattribute__(self, "attrs")
+ index = object.__getattribute__(self, "index")
+ if index is None:
+ index = len(attrs)
+ attrs.append(attr)
+ else:
+ attrs[index] = ".".join([attrs[index], attr])
+ return type(self)(attrs, index)
+
+class _dictproxy_helper(dict):
+ def __ror__(self, a):
+ return a
+
+_dictproxy_helper_instance = _dictproxy_helper()
+
+__d = {}
+try:
+ # In CPython 3.9 and later, this trick can be used to exploit the
+ # implementation of the __or__ function of MappingProxyType to get the true
+ # mapping referenced by the proxy. It may work for other implementations,
+ # but is not guaranteed.
+ MAPPING_PROXY_TRICK = __d is (DictProxyType(__d) | _dictproxy_helper_instance)
+except Exception:
+ MAPPING_PROXY_TRICK = False
+del __d
+
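+# Illustrative sketch (not used by dill) of the trick above: since
+# _dictproxy_helper subclasses dict, `d | _dictproxy_helper_instance` resolves
+# to the helper's __ror__, which returns the left operand unchanged; and
+# MappingProxyType.__or__ delegates to the wrapped mapping, so the proxied
+# dict itself comes back.
+def _example_mapping_proxy_trick(): # hypothetical name, illustrative only
+    d = {'a': 1}
+    proxy = DictProxyType(d)
+    return (proxy | _dictproxy_helper_instance) is d # True on CPython >= 3.9
+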
+# _CELL_REF and _CELL_EMPTY are used to stay compatible with versions of dill
+# whose _create_cell functions do not have a default value.
+# _CELL_REF can be safely removed entirely (replaced by empty tuples for calls
+# to _create_cell) once breaking changes are allowed.
+_CELL_REF = None
+_CELL_EMPTY = Sentinel('_CELL_EMPTY')
+
+def _create_cell(contents=None):
+ if contents is not _CELL_EMPTY:
+ value = contents
+ return (lambda: value).__closure__[0]
+
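+# Illustrative sketch (not used by dill): a cell is recreated by closing over
+# the stored value, exactly as _create_cell does above.
+def _example_create_cell(): # hypothetical name, illustrative only
+    cell = _create_cell(42)
+    return cell.cell_contents # expected: 42
+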
+def _create_weakref(obj, *args):
+ from weakref import ref
+ if obj is None: # it's dead
+ from collections import UserDict
+ return ref(UserDict(), *args)
+ return ref(obj, *args)
+
+def _create_weakproxy(obj, callable=False, *args):
+ from weakref import proxy
+ if obj is None: # it's dead
+ if callable: return proxy(lambda x:x, *args)
+ from collections import UserDict
+ return proxy(UserDict(), *args)
+ return proxy(obj, *args)
+
+def _eval_repr(repr_str):
+ return eval(repr_str)
+
+def _create_array(f, args, state, npdict=None):
+ #array = numpy.core.multiarray._reconstruct(*args)
+ array = f(*args)
+ array.__setstate__(state)
+ if npdict is not None: # we also have saved state in __dict__
+ array.__dict__.update(npdict)
+ return array
+
+def _create_dtypemeta(scalar_type):
+ if NumpyDType is True: __hook__() # a bit hacky I think
+ if scalar_type is None:
+ return NumpyDType
+ return type(NumpyDType(scalar_type))
+
+def _create_namedtuple(name, fieldnames, modulename, defaults=None):
+ class_ = _import_module(modulename + '.' + name, safe=True)
+ if class_ is not None:
+ return class_
+ import collections
+ t = collections.namedtuple(name, fieldnames, defaults=defaults, module=modulename)
+ return t
+
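+# Illustrative sketch (not used by dill): when the named tuple class cannot be
+# re-imported from its module, it is rebuilt from its name, fields and defaults.
+def _example_create_namedtuple(): # hypothetical name, illustrative only
+    # assumes no class named _ExamplePoint actually exists in __main__
+    Point = _create_namedtuple('_ExamplePoint', ('x', 'y'), '__main__', defaults=(0,))
+    return Point(1) # expected: _ExamplePoint(x=1, y=0)
+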
+def _create_capsule(pointer, name, context, destructor):
+ attr_found = False
+ try:
+ # based on https://github.com/python/cpython/blob/f4095e53ab708d95e019c909d5928502775ba68f/Objects/capsule.c#L209-L231
+ uname = name.decode('utf8')
+ for i in range(1, uname.count('.')+1):
+ names = uname.rsplit('.', i)
+ try:
+ module = __import__(names[0])
+ except ImportError:
+ pass
+ obj = module
+ for attr in names[1:]:
+ obj = getattr(obj, attr)
+ capsule = obj
+ attr_found = True
+ break
+ except Exception:
+ pass
+
+ if attr_found:
+ if _PyCapsule_IsValid(capsule, name):
+ return capsule
+ raise UnpicklingError("%s object exists at %s but a PyCapsule object was expected." % (type(capsule), name))
+ else:
+ #warnings.warn('Creating a new PyCapsule %s for a C data structure that may not be present in memory. Segmentation faults or other memory errors are possible.' % (name,), UnpicklingWarning)
+ capsule = _PyCapsule_New(pointer, name, destructor)
+ _PyCapsule_SetContext(capsule, context)
+ return capsule
+
+def _getattr(objclass, name, repr_str):
+ # hack to grab the reference directly
+ try: #XXX: works only for __builtin__ ?
+ attr = repr_str.split("'")[3]
+ return eval(attr+'.__dict__["'+name+'"]')
+ except Exception:
+ try:
+ attr = objclass.__dict__
+ if type(attr) is DictProxyType:
+ attr = attr[name]
+ else:
+ attr = getattr(objclass,name)
+ except (AttributeError, KeyError):
+ attr = getattr(objclass,name)
+ return attr
+
+def _get_attr(self, name):
+ # stop recursive pickling
+ return getattr(self, name, None) or getattr(__builtin__, name)
+
+def _import_module(import_name, safe=False):
+ try:
+ if import_name.startswith('__runtime__.'):
+ return sys.modules[import_name]
+ elif '.' in import_name:
+ items = import_name.split('.')
+ module = '.'.join(items[:-1])
+ obj = items[-1]
+ submodule = getattr(__import__(module, None, None, [obj]), obj)
+ if isinstance(submodule, (ModuleType, type)):
+ return submodule
+ return __import__(import_name, None, None, [obj])
+ else:
+ return __import__(import_name)
+ except (ImportError, AttributeError, KeyError):
+ if safe:
+ return None
+ raise
+
+# https://github.com/python/cpython/blob/a8912a0f8d9eba6d502c37d522221f9933e976db/Lib/pickle.py#L322-L333
+def _getattribute(obj, name):
+ for subpath in name.split('.'):
+ if subpath == '':
+ raise AttributeError("Can't get local attribute {!r} on {!r}"
+ .format(name, obj))
+ try:
+ parent = obj
+ obj = getattr(obj, subpath)
+ except AttributeError:
+ raise AttributeError("Can't get attribute {!r} on {!r}"
+ .format(name, obj))
+ return obj, parent
+
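+# Illustrative sketch (not used by dill): _getattribute walks a dotted
+# __qualname__ and also returns the enclosing object.
+def _example_getattribute(): # hypothetical name, illustrative only
+    import collections
+    obj, parent = _getattribute(collections, 'OrderedDict.fromkeys')
+    return parent is collections.OrderedDict and obj.__name__ == 'fromkeys'
+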
+def _locate_function(obj, pickler=None):
+ module_name = getattr(obj, '__module__', None)
+ if module_name in ['__main__', None] or \
+ pickler and is_dill(pickler, child=False) and pickler._session and module_name == pickler._main.__name__:
+ return False
+ if hasattr(obj, '__qualname__'):
+ module = _import_module(module_name, safe=True)
+ try:
+ found, _ = _getattribute(module, obj.__qualname__)
+ return found is obj
+ except AttributeError:
+ return False
+ else:
+ found = _import_module(module_name + '.' + obj.__name__, safe=True)
+ return found is obj
+
+
+def _setitems(dest, source):
+ for k, v in source.items():
+ dest[k] = v
+
+
+def _save_with_postproc(pickler, reduction, is_pickler_dill=None, obj=Getattr.NO_DEFAULT, postproc_list=None):
+ if obj is Getattr.NO_DEFAULT:
+ obj = Reduce(reduction) # pragma: no cover
+
+ if is_pickler_dill is None:
+ is_pickler_dill = is_dill(pickler, child=True)
+ if is_pickler_dill:
+ # assert id(obj) not in pickler._postproc, str(obj) + ' already pushed on stack!'
+ # if not hasattr(pickler, 'x'): pickler.x = 0
+ # print(pickler.x*' ', 'push', obj, id(obj), pickler._recurse)
+ # pickler.x += 1
+ if postproc_list is None:
+ postproc_list = []
+
+ # Recursive object not supported. Default to a global instead.
+ if id(obj) in pickler._postproc:
+ name = '%s.%s ' % (obj.__module__, getattr(obj, '__qualname__', obj.__name__)) if hasattr(obj, '__module__') else ''
+ warnings.warn('Cannot pickle %r: %shas recursive self-references that trigger a RecursionError.' % (obj, name), PicklingWarning)
+ pickler.save_global(obj)
+ return
+ pickler._postproc[id(obj)] = postproc_list
+
+ # TODO: Use state_setter in Python 3.8 to allow for faster cPickle implementations
+ pickler.save_reduce(*reduction, obj=obj)
+
+ if is_pickler_dill:
+ # pickler.x -= 1
+ # print(pickler.x*' ', 'pop', obj, id(obj))
+ postproc = pickler._postproc.pop(id(obj))
+ # assert postproc_list == postproc, 'Stack tampered!'
+ for reduction in reversed(postproc):
+ if reduction[0] is _setitems:
+ # use the internal machinery of pickle.py to speedup when
+ # updating a dictionary in postproc
+ dest, source = reduction[1]
+ if source:
+ pickler.write(pickler.get(pickler.memo[id(dest)][0]))
+ pickler._batch_setitems(iter(source.items()))
+ else:
+ # Updating with an empty dictionary. Same as doing nothing.
+ continue
+ else:
+ pickler.save_reduce(*reduction)
+ # pop None created by calling preprocessing step off stack
+ pickler.write(POP)
+
+#@register(CodeType)
+#def save_code(pickler, obj):
+# logger.trace(pickler, "Co: %s", obj)
+# pickler.save_reduce(_unmarshal, (marshal.dumps(obj),), obj=obj)
+# logger.trace(pickler, "# Co")
+# return
+
+# The following function is based on 'save_codeobject' from 'cloudpickle'
+# Copyright (c) 2012, Regents of the University of California.
+# Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
+# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE
+@register(CodeType)
+def save_code(pickler, obj):
+ logger.trace(pickler, "Co: %s", obj)
+ if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount, obj.co_posonlyargcount,
+ obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
+ obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
+ obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname,
+ obj.co_firstlineno, obj.co_linetable, obj.co_endlinetable,
+ obj.co_columntable, obj.co_exceptiontable, obj.co_freevars,
+ obj.co_cellvars
+ )
+ elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)
+ with warnings.catch_warnings():
+ if not OLD312a7: # issue 597
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount, obj.co_posonlyargcount,
+ obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
+ obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
+ obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname,
+ obj.co_firstlineno, obj.co_linetable, obj.co_exceptiontable,
+ obj.co_freevars, obj.co_cellvars
+ )
+ elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount, obj.co_posonlyargcount,
+ obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
+ obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
+ obj.co_varnames, obj.co_filename, obj.co_name,
+ obj.co_firstlineno, obj.co_linetable, obj.co_freevars,
+ obj.co_cellvars
+ )
+ elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)
+ args = (
+ obj.co_argcount, obj.co_posonlyargcount,
+ obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
+ obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
+ obj.co_varnames, obj.co_filename, obj.co_name,
+ obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
+ obj.co_cellvars
+ )
+ else: # python 3.7 (15 args)
+ args = (
+ obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
+ obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
+ obj.co_names, obj.co_varnames, obj.co_filename,
+ obj.co_name, obj.co_firstlineno, obj.co_lnotab,
+ obj.co_freevars, obj.co_cellvars
+ )
+
+ pickler.save_reduce(_create_code, args, obj=obj)
+ logger.trace(pickler, "# Co")
+ return
+
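+# Illustrative sketch (not used by dill): round-tripping a code object through
+# dill exercises the save_code/_create_code pair above.
+def _example_code_roundtrip(): # hypothetical name, illustrative only
+    import dill
+    code = compile('x + 1', '<example>', 'eval')
+    return dill.loads(dill.dumps(code)).co_code == code.co_code # expected: True
+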
+def _repr_dict(obj):
+ """Make a short string representation of a dictionary."""
+ return "<%s object at %#012x>" % (type(obj).__name__, id(obj))
+
+@register(dict)
+def save_module_dict(pickler, obj):
+ if is_dill(pickler, child=False) and obj == pickler._main.__dict__ and \
+ not (pickler._session and pickler._first_pass):
+ logger.trace(pickler, "D1: %s", _repr_dict(obj)) # obj
+ pickler.write(bytes('c__builtin__\n__main__\n', 'UTF-8'))
+ logger.trace(pickler, "# D1")
+ elif (not is_dill(pickler, child=False)) and (obj == _main_module.__dict__):
+ logger.trace(pickler, "D3: %s", _repr_dict(obj)) # obj
+ pickler.write(bytes('c__main__\n__dict__\n', 'UTF-8')) #XXX: works in general?
+ logger.trace(pickler, "# D3")
+ elif '__name__' in obj and obj != _main_module.__dict__ \
+ and type(obj['__name__']) is str \
+ and obj is getattr(_import_module(obj['__name__'],True), '__dict__', None):
+ logger.trace(pickler, "D4: %s", _repr_dict(obj)) # obj
+ pickler.write(bytes('c%s\n__dict__\n' % obj['__name__'], 'UTF-8'))
+ logger.trace(pickler, "# D4")
+ else:
+ logger.trace(pickler, "D2: %s", _repr_dict(obj)) # obj
+ if is_dill(pickler, child=False) and pickler._session:
+ # we only care about session the first pass thru
+ pickler._first_pass = False
+ StockPickler.save_dict(pickler, obj)
+ logger.trace(pickler, "# D2")
+ return
+
+
+if not OLD310 and MAPPING_PROXY_TRICK:
+ def save_dict_view(dicttype):
+ def save_dict_view_for_function(func):
+ def _save_dict_view(pickler, obj):
+ logger.trace(pickler, "Dkvi: <%s>", obj)
+ mapping = obj.mapping | _dictproxy_helper_instance
+ pickler.save_reduce(func, (mapping,), obj=obj)
+ logger.trace(pickler, "# Dkvi")
+ return _save_dict_view
+ return [
+ (funcname, save_dict_view_for_function(getattr(dicttype, funcname)))
+ for funcname in ('keys', 'values', 'items')
+ ]
+else:
+ # The following functions are based on 'cloudpickle'
+ # https://github.com/cloudpipe/cloudpickle/blob/5d89947288a18029672596a4d719093cc6d5a412/cloudpickle/cloudpickle.py#L922-L940
+ # Copyright (c) 2012, Regents of the University of California.
+    # Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
+ # License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE
+ def save_dict_view(dicttype):
+ def save_dict_keys(pickler, obj):
+ logger.trace(pickler, "Dk: <%s>", obj)
+ dict_constructor = _shims.Reduce(dicttype.fromkeys, (list(obj),))
+ pickler.save_reduce(dicttype.keys, (dict_constructor,), obj=obj)
+ logger.trace(pickler, "# Dk")
+
+ def save_dict_values(pickler, obj):
+ logger.trace(pickler, "Dv: <%s>", obj)
+ dict_constructor = _shims.Reduce(dicttype, (enumerate(obj),))
+ pickler.save_reduce(dicttype.values, (dict_constructor,), obj=obj)
+ logger.trace(pickler, "# Dv")
+
+ def save_dict_items(pickler, obj):
+ logger.trace(pickler, "Di: <%s>", obj)
+ pickler.save_reduce(dicttype.items, (dicttype(obj),), obj=obj)
+ logger.trace(pickler, "# Di")
+
+ return (
+ ('keys', save_dict_keys),
+ ('values', save_dict_values),
+ ('items', save_dict_items)
+ )
+
+for __dicttype in (
+ dict,
+ OrderedDict
+):
+ __obj = __dicttype()
+ for __funcname, __savefunc in save_dict_view(__dicttype):
+ __tview = type(getattr(__obj, __funcname)())
+ if __tview not in Pickler.dispatch:
+ Pickler.dispatch[__tview] = __savefunc
+del __dicttype, __obj, __funcname, __tview, __savefunc
+
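+# Illustrative sketch (not used by dill): with the registrations above, dict
+# views round-trip through dill as views reconstructed over an equivalent dict.
+def _example_dict_view_roundtrip(): # hypothetical name, illustrative only
+    import dill
+    view = {'a': 1, 'b': 2}.keys()
+    return list(dill.loads(dill.dumps(view))) == list(view) # expected: True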
+
+@register(ClassType)
+def save_classobj(pickler, obj): #FIXME: enable pickler._byref
+ if not _locate_function(obj, pickler):
+ logger.trace(pickler, "C1: %s", obj)
+ pickler.save_reduce(ClassType, (obj.__name__, obj.__bases__,
+ obj.__dict__), obj=obj)
+ #XXX: or obj.__dict__.copy()), obj=obj) ?
+ logger.trace(pickler, "# C1")
+ else:
+ logger.trace(pickler, "C2: %s", obj)
+ name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+ StockPickler.save_global(pickler, obj, name=name)
+ logger.trace(pickler, "# C2")
+ return
+
+@register(typing._GenericAlias)
+def save_generic_alias(pickler, obj):
+ args = obj.__args__
+ if type(obj.__reduce__()) is str:
+ logger.trace(pickler, "Ga0: %s", obj)
+ StockPickler.save_global(pickler, obj, name=obj.__reduce__())
+ logger.trace(pickler, "# Ga0")
+ elif obj.__origin__ is tuple and (not args or args == ((),)):
+ logger.trace(pickler, "Ga1: %s", obj)
+ pickler.save_reduce(_create_typing_tuple, (args,), obj=obj)
+ logger.trace(pickler, "# Ga1")
+ else:
+ logger.trace(pickler, "Ga2: %s", obj)
+ StockPickler.save_reduce(pickler, *obj.__reduce__(), obj=obj)
+ logger.trace(pickler, "# Ga2")
+ return
+
+@register(LockType)
+def save_lock(pickler, obj):
+ logger.trace(pickler, "Lo: %s", obj)
+ pickler.save_reduce(_create_lock, (obj.locked(),), obj=obj)
+ logger.trace(pickler, "# Lo")
+ return
+
+@register(RLockType)
+def save_rlock(pickler, obj):
+ logger.trace(pickler, "RL: %s", obj)
+ r = obj.__repr__() # don't use _release_save as it unlocks the lock
+ count = int(r.split('count=')[1].split()[0].rstrip('>'))
+ owner = int(r.split('owner=')[1].split()[0])
+ pickler.save_reduce(_create_rlock, (count,owner,), obj=obj)
+ logger.trace(pickler, "# RL")
+ return
+
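+# Illustrative sketch (not used by dill): on CPython the repr parsed above
+# looks like "<locked _thread.RLock object owner=12345 count=1 at 0x...>",
+# so splitting on 'count=' and 'owner=' recovers the two integers.
+def _example_rlock_repr(): # hypothetical name, illustrative only
+    lock = RLockType()
+    lock.acquire()
+    r = lock.__repr__()
+    count = int(r.split('count=')[1].split()[0].rstrip('>'))
+    lock.release()
+    return count # expected: 1
+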
+#@register(SocketType) #FIXME: causes multiprocess test_pickling FAIL
+def save_socket(pickler, obj):
+ logger.trace(pickler, "So: %s", obj)
+ pickler.save_reduce(*reduce_socket(obj))
+ logger.trace(pickler, "# So")
+ return
+
+def _save_file(pickler, obj, open_):
+ if obj.closed:
+ position = 0
+ else:
+ obj.flush()
+ if obj in (sys.__stdout__, sys.__stderr__, sys.__stdin__):
+ position = -1
+ else:
+ position = obj.tell()
+ if is_dill(pickler, child=True) and pickler._fmode == FILE_FMODE:
+ f = open_(obj.name, "r")
+ fdata = f.read()
+ f.close()
+ else:
+ fdata = ""
+ if is_dill(pickler, child=True):
+ strictio = pickler._strictio
+ fmode = pickler._fmode
+ else:
+ strictio = False
+ fmode = 0 # HANDLE_FMODE
+ pickler.save_reduce(_create_filehandle, (obj.name, obj.mode, position,
+ obj.closed, open_, strictio,
+ fmode, fdata), obj=obj)
+ return
+
+
+@register(FileType) #XXX: in 3.x has buffer=0, needs different _create?
+@register(BufferedReaderType)
+@register(BufferedWriterType)
+@register(TextWrapperType)
+def save_file(pickler, obj):
+ logger.trace(pickler, "Fi: %s", obj)
+ f = _save_file(pickler, obj, open)
+ logger.trace(pickler, "# Fi")
+ return f
+
+if BufferedRandomType:
+ @register(BufferedRandomType)
+ def save_file(pickler, obj):
+ logger.trace(pickler, "Fi: %s", obj)
+ f = _save_file(pickler, obj, open)
+ logger.trace(pickler, "# Fi")
+ return f
+
+if PyTextWrapperType:
+ @register(PyBufferedReaderType)
+ @register(PyBufferedWriterType)
+ @register(PyTextWrapperType)
+ def save_file(pickler, obj):
+ logger.trace(pickler, "Fi: %s", obj)
+ f = _save_file(pickler, obj, _open)
+ logger.trace(pickler, "# Fi")
+ return f
+
+ if PyBufferedRandomType:
+ @register(PyBufferedRandomType)
+ def save_file(pickler, obj):
+ logger.trace(pickler, "Fi: %s", obj)
+ f = _save_file(pickler, obj, _open)
+ logger.trace(pickler, "# Fi")
+ return f
+
+
+# The following two functions are based on 'saveCStringIoInput'
+# and 'saveCStringIoOutput' from spickle
+# Copyright (c) 2011 by science+computing ag
+# License: http://www.apache.org/licenses/LICENSE-2.0
+if InputType:
+ @register(InputType)
+ def save_stringi(pickler, obj):
+ logger.trace(pickler, "Io: %s", obj)
+ if obj.closed:
+ value = ''; position = 0
+ else:
+ value = obj.getvalue(); position = obj.tell()
+ pickler.save_reduce(_create_stringi, (value, position, \
+ obj.closed), obj=obj)
+ logger.trace(pickler, "# Io")
+ return
+
+ @register(OutputType)
+ def save_stringo(pickler, obj):
+ logger.trace(pickler, "Io: %s", obj)
+ if obj.closed:
+ value = ''; position = 0
+ else:
+ value = obj.getvalue(); position = obj.tell()
+ pickler.save_reduce(_create_stringo, (value, position, \
+ obj.closed), obj=obj)
+ logger.trace(pickler, "# Io")
+ return
+
+if LRUCacheType is not None:
+ from functools import lru_cache
+ @register(LRUCacheType)
+ def save_lru_cache(pickler, obj):
+ logger.trace(pickler, "LRU: %s", obj)
+ if OLD39:
+ kwargs = obj.cache_info()
+ args = (kwargs.maxsize,)
+ else:
+ kwargs = obj.cache_parameters()
+ args = (kwargs['maxsize'], kwargs['typed'])
+ if args != lru_cache.__defaults__:
+ wrapper = Reduce(lru_cache, args, is_callable=True)
+ else:
+ wrapper = lru_cache
+ pickler.save_reduce(wrapper, (obj.__wrapped__,), obj=obj)
+ logger.trace(pickler, "# LRU")
+ return
+
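+# Illustrative sketch (not used by dill): an lru_cache-wrapped function
+# round-trips via the registration above, which rebuilds the wrapper around
+# __wrapped__ with the original maxsize/typed parameters.
+def _example_lru_cache_roundtrip(): # hypothetical name, illustrative only
+    import dill
+    from functools import lru_cache
+    cached = lru_cache(maxsize=None)(len)
+    return dill.loads(dill.dumps(cached))('abc') # expected: 3
+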
+@register(SuperType)
+def save_super(pickler, obj):
+ logger.trace(pickler, "Su: %s", obj)
+ pickler.save_reduce(super, (obj.__thisclass__, obj.__self__), obj=obj)
+ logger.trace(pickler, "# Su")
+ return
+
+if IS_PYPY:
+ @register(MethodType)
+ def save_instancemethod0(pickler, obj):
+ code = getattr(obj.__func__, '__code__', None)
+ if code is not None and type(code) is not CodeType \
+ and getattr(obj.__self__, obj.__name__) == obj:
+ # Some PyPy builtin functions have no module name
+ logger.trace(pickler, "Me2: %s", obj)
+ # TODO: verify that this works for all PyPy builtin methods
+ pickler.save_reduce(getattr, (obj.__self__, obj.__name__), obj=obj)
+ logger.trace(pickler, "# Me2")
+ return
+
+ logger.trace(pickler, "Me1: %s", obj)
+ pickler.save_reduce(MethodType, (obj.__func__, obj.__self__), obj=obj)
+ logger.trace(pickler, "# Me1")
+ return
+else:
+ @register(MethodType)
+ def save_instancemethod0(pickler, obj):
+ logger.trace(pickler, "Me1: %s", obj)
+ pickler.save_reduce(MethodType, (obj.__func__, obj.__self__), obj=obj)
+ logger.trace(pickler, "# Me1")
+ return
+
+if not IS_PYPY:
+ @register(MemberDescriptorType)
+ @register(GetSetDescriptorType)
+ @register(MethodDescriptorType)
+ @register(WrapperDescriptorType)
+ @register(ClassMethodDescriptorType)
+ def save_wrapper_descriptor(pickler, obj):
+ logger.trace(pickler, "Wr: %s", obj)
+ pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__,
+ obj.__repr__()), obj=obj)
+ logger.trace(pickler, "# Wr")
+ return
+else:
+ @register(MemberDescriptorType)
+ @register(GetSetDescriptorType)
+ def save_wrapper_descriptor(pickler, obj):
+ logger.trace(pickler, "Wr: %s", obj)
+ pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__,
+ obj.__repr__()), obj=obj)
+ logger.trace(pickler, "# Wr")
+ return
+
+@register(CellType)
+def save_cell(pickler, obj):
+ try:
+ f = obj.cell_contents
+ except ValueError: # cell is empty
+ logger.trace(pickler, "Ce3: %s", obj)
+ # _shims._CELL_EMPTY is defined in _shims.py to support PyPy 2.7.
+ # It unpickles to a sentinel object _dill._CELL_EMPTY, also created in
+ # _shims.py. This object is not present in Python 3 because the cell's
+ # contents can be deleted in newer versions of Python. The reduce object
+ # will instead unpickle to None if unpickled in Python 3.
+
+        # When breaking changes are made to dill, (_shims._CELL_EMPTY,) can
+        # be replaced by () OR the delattr function can be removed, depending
+        # on whichever is more convenient.
+ pickler.save_reduce(_create_cell, (_shims._CELL_EMPTY,), obj=obj)
+ # Call the function _delattr on the cell's cell_contents attribute
+ # The result of this function call will be None
+ pickler.save_reduce(_shims._delattr, (obj, 'cell_contents'))
+ # pop None created by calling _delattr off stack
+ pickler.write(POP)
+ logger.trace(pickler, "# Ce3")
+ return
+ if is_dill(pickler, child=True):
+ if id(f) in pickler._postproc:
+ # Already seen. Add to its postprocessing.
+ postproc = pickler._postproc[id(f)]
+ else:
+ # Haven't seen it. Add to the highest possible object and set its
+ # value as late as possible to prevent cycle.
+ postproc = next(iter(pickler._postproc.values()), None)
+ if postproc is not None:
+ logger.trace(pickler, "Ce2: %s", obj)
+ # _CELL_REF is defined in _shims.py to support older versions of
+ # dill. When breaking changes are made to dill, (_CELL_REF,) can
+ # be replaced by ()
+ pickler.save_reduce(_create_cell, (_CELL_REF,), obj=obj)
+ postproc.append((_shims._setattr, (obj, 'cell_contents', f)))
+ logger.trace(pickler, "# Ce2")
+ return
+ logger.trace(pickler, "Ce1: %s", obj)
+ pickler.save_reduce(_create_cell, (f,), obj=obj)
+ logger.trace(pickler, "# Ce1")
+ return
+
+if MAPPING_PROXY_TRICK:
+ @register(DictProxyType)
+ def save_dictproxy(pickler, obj):
+ logger.trace(pickler, "Mp: %s", _repr_dict(obj)) # obj
+ mapping = obj | _dictproxy_helper_instance
+ pickler.save_reduce(DictProxyType, (mapping,), obj=obj)
+ logger.trace(pickler, "# Mp")
+ return
+else:
+ @register(DictProxyType)
+ def save_dictproxy(pickler, obj):
+ logger.trace(pickler, "Mp: %s", _repr_dict(obj)) # obj
+ pickler.save_reduce(DictProxyType, (obj.copy(),), obj=obj)
+ logger.trace(pickler, "# Mp")
+ return
+
+@register(SliceType)
+def save_slice(pickler, obj):
+ logger.trace(pickler, "Sl: %s", obj)
+ pickler.save_reduce(slice, (obj.start, obj.stop, obj.step), obj=obj)
+ logger.trace(pickler, "# Sl")
+ return
+
+@register(XRangeType)
+@register(EllipsisType)
+@register(NotImplementedType)
+def save_singleton(pickler, obj):
+ logger.trace(pickler, "Si: %s", obj)
+ pickler.save_reduce(_eval_repr, (obj.__repr__(),), obj=obj)
+ logger.trace(pickler, "# Si")
+ return
+
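+# Illustrative sketch (not used by dill): singletons whose repr evaluates back
+# to the object round-trip through _eval_repr.
+def _example_eval_repr(): # hypothetical name, illustrative only
+    return _eval_repr(repr(Ellipsis)) is Ellipsis # expected: True
+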
+def _proxy_helper(obj): # a dead proxy returns a reference to None
+ """get memory address of proxy's reference object"""
+ _repr = repr(obj)
+ try: _str = str(obj)
+ except ReferenceError: # it's a dead proxy
+ return id(None)
+ if _str == _repr: return id(obj) # it's a repr
+ try: # either way, it's a proxy from here
+ address = int(_str.rstrip('>').split(' at ')[-1], base=16)
+ except ValueError: # special case: proxy of a 'type'
+ if not IS_PYPY:
+ address = int(_repr.rstrip('>').split(' at ')[-1], base=16)
+ else:
+ objects = iter(gc.get_objects())
+ for _obj in objects:
+ if repr(_obj) == _str: return id(_obj)
+ # all bad below... nothing found so throw ReferenceError
+ msg = "Cannot reference object for proxy at '%s'" % id(obj)
+ raise ReferenceError(msg)
+ return address
+
+def _locate_object(address, module=None):
+ """get object located at the given memory address (inverse of id(obj))"""
+ special = [None, True, False] #XXX: more...?
+ for obj in special:
+ if address == id(obj): return obj
+ if module:
+ objects = iter(module.__dict__.values())
+ else: objects = iter(gc.get_objects())
+ for obj in objects:
+ if address == id(obj): return obj
+ # all bad below... nothing found so throw ReferenceError or TypeError
+ try: address = hex(address)
+ except TypeError:
+ raise TypeError("'%s' is not a valid memory address" % str(address))
+ raise ReferenceError("Cannot reference object at '%s'" % address)
+
+@register(ReferenceType)
+def save_weakref(pickler, obj):
+ refobj = obj()
+ logger.trace(pickler, "R1: %s", obj)
+ #refobj = ctypes.pythonapi.PyWeakref_GetObject(obj) # dead returns "None"
+ pickler.save_reduce(_create_weakref, (refobj,), obj=obj)
+ logger.trace(pickler, "# R1")
+ return
+
+@register(ProxyType)
+@register(CallableProxyType)
+def save_weakproxy(pickler, obj):
+ # Must do string substitution here and use %r to avoid ReferenceError.
+ logger.trace(pickler, "R2: %r" % obj)
+ refobj = _locate_object(_proxy_helper(obj))
+ pickler.save_reduce(_create_weakproxy, (refobj, callable(obj)), obj=obj)
+ logger.trace(pickler, "# R2")
+ return
+
+def _is_builtin_module(module):
+ if not hasattr(module, "__file__"): return True
+ if module.__file__ is None: return False
+ # If a module file name starts with prefix, it should be a builtin
+ # module, so should always be pickled as a reference.
+ names = ["base_prefix", "base_exec_prefix", "exec_prefix", "prefix", "real_prefix"]
+ rp = os.path.realpath
+ # See https://github.com/uqfoundation/dill/issues/566
+ return (
+ any(
+ module.__file__.startswith(getattr(sys, name))
+ or rp(module.__file__).startswith(rp(getattr(sys, name)))
+ for name in names
+ if hasattr(sys, name)
+ )
+ or module.__file__.endswith(EXTENSION_SUFFIXES)
+ or 'site-packages' in module.__file__
+ )
+
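+# Illustrative sketch (not used by dill): stdlib and site-packages modules are
+# treated as "builtin" here, i.e. pickled by reference rather than by value.
+def _example_is_builtin_module(): # hypothetical name, illustrative only
+    import json
+    return _is_builtin_module(json) # expected: True (lives under sys.prefix)
+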
+def _is_imported_module(module):
+ return getattr(module, '__loader__', None) is not None or module in sys.modules.values()
+
+@register(ModuleType)
+def save_module(pickler, obj):
+ if False: #_use_diff:
+ if obj.__name__.split('.', 1)[0] != "dill":
+ try:
+ changed = diff.whats_changed(obj, seen=pickler._diff_cache)[0]
+            except RuntimeError: # not a memorised module, probably part of dill
+ pass
+ else:
+ logger.trace(pickler, "M2: %s with diff", obj)
+ logger.info("Diff: %s", changed.keys())
+ pickler.save_reduce(_import_module, (obj.__name__,), obj=obj,
+ state=changed)
+ logger.trace(pickler, "# M2")
+ return
+
+ logger.trace(pickler, "M1: %s", obj)
+ pickler.save_reduce(_import_module, (obj.__name__,), obj=obj)
+ logger.trace(pickler, "# M1")
+ else:
+ builtin_mod = _is_builtin_module(obj)
+ is_session_main = is_dill(pickler, child=True) and obj is pickler._main
+ if (obj.__name__ not in ("builtins", "dill", "dill._dill") and not builtin_mod
+ or is_session_main):
+ logger.trace(pickler, "M1: %s", obj)
+ # Hack for handling module-type objects in load_module().
+ mod_name = obj.__name__ if _is_imported_module(obj) else '__runtime__.%s' % obj.__name__
+ # Second references are saved as __builtin__.__main__ in save_module_dict().
+ main_dict = obj.__dict__.copy()
+ for item in ('__builtins__', '__loader__'):
+ main_dict.pop(item, None)
+ for item in IPYTHON_SINGLETONS: #pragma: no cover
+ if getattr(main_dict.get(item), '__module__', '').startswith('IPython'):
+ del main_dict[item]
+ pickler.save_reduce(_import_module, (mod_name,), obj=obj, state=main_dict)
+ logger.trace(pickler, "# M1")
+ elif obj.__name__ == "dill._dill":
+ logger.trace(pickler, "M2: %s", obj)
+ pickler.save_global(obj, name="_dill")
+ logger.trace(pickler, "# M2")
+ else:
+ logger.trace(pickler, "M2: %s", obj)
+ pickler.save_reduce(_import_module, (obj.__name__,), obj=obj)
+ logger.trace(pickler, "# M2")
+ return
+
+# The following function is based on '_extract_class_dict' from 'cloudpickle'
+# Copyright (c) 2012, Regents of the University of California.
+# Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
+# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE
+def _get_typedict_type(cls, clsdict, attrs, postproc_list):
+ """Retrieve a copy of the dict of a class without the inherited methods"""
+ if len(cls.__bases__) == 1:
+ inherited_dict = cls.__bases__[0].__dict__
+ else:
+ inherited_dict = {}
+ for base in reversed(cls.__bases__):
+ inherited_dict.update(base.__dict__)
+ to_remove = []
+ for name, value in dict.items(clsdict):
+ try:
+ base_value = inherited_dict[name]
+ if value is base_value and hasattr(value, '__qualname__'):
+ to_remove.append(name)
+ except KeyError:
+ pass
+ for name in to_remove:
+ dict.pop(clsdict, name)
+
+ if issubclass(type(cls), type):
+ clsdict.pop('__dict__', None)
+ clsdict.pop('__weakref__', None)
+ # clsdict.pop('__prepare__', None)
+ return clsdict, attrs
+
+def _get_typedict_abc(obj, _dict, attrs, postproc_list):
+ if hasattr(abc, '_get_dump'):
+ (registry, _, _, _) = abc._get_dump(obj)
+ register = obj.register
+ postproc_list.extend((register, (reg(),)) for reg in registry)
+ elif hasattr(obj, '_abc_registry'):
+ registry = obj._abc_registry
+ register = obj.register
+ postproc_list.extend((register, (reg,)) for reg in registry)
+ else:
+ raise PicklingError("Cannot find registry of ABC %s", obj)
+
+ if '_abc_registry' in _dict:
+ _dict.pop('_abc_registry', None)
+ _dict.pop('_abc_cache', None)
+ _dict.pop('_abc_negative_cache', None)
+ # _dict.pop('_abc_negative_cache_version', None)
+ else:
+ _dict.pop('_abc_impl', None)
+ return _dict, attrs
+
+@register(TypeType)
+def save_type(pickler, obj, postproc_list=None):
+ if obj in _typemap:
+ logger.trace(pickler, "T1: %s", obj)
+ # if obj in _incedental_types:
+ # warnings.warn('Type %r may only exist on this implementation of Python and cannot be unpickled in other implementations.' % (obj,), PicklingWarning)
+ pickler.save_reduce(_load_type, (_typemap[obj],), obj=obj)
+ logger.trace(pickler, "# T1")
+ elif obj.__bases__ == (tuple,) and all([hasattr(obj, attr) for attr in ('_fields','_asdict','_make','_replace')]):
+ # special case: namedtuples
+ logger.trace(pickler, "T6: %s", obj)
+
+ obj_name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+ if obj.__name__ != obj_name:
+ if postproc_list is None:
+ postproc_list = []
+ postproc_list.append((setattr, (obj, '__qualname__', obj_name)))
+
+ if not obj._field_defaults:
+ _save_with_postproc(pickler, (_create_namedtuple, (obj.__name__, obj._fields, obj.__module__)), obj=obj, postproc_list=postproc_list)
+ else:
+ defaults = [obj._field_defaults[field] for field in obj._fields if field in obj._field_defaults]
+ _save_with_postproc(pickler, (_create_namedtuple, (obj.__name__, obj._fields, obj.__module__, defaults)), obj=obj, postproc_list=postproc_list)
+ logger.trace(pickler, "# T6")
+ return
+
+ # special cases: NoneType, NotImplementedType, EllipsisType, EnumMeta
+ elif obj is type(None):
+ logger.trace(pickler, "T7: %s", obj)
+ #XXX: pickler.save_reduce(type, (None,), obj=obj)
+ pickler.write(GLOBAL + b'__builtin__\nNoneType\n')
+ logger.trace(pickler, "# T7")
+ elif obj is NotImplementedType:
+ logger.trace(pickler, "T7: %s", obj)
+ pickler.save_reduce(type, (NotImplemented,), obj=obj)
+ logger.trace(pickler, "# T7")
+ elif obj is EllipsisType:
+ logger.trace(pickler, "T7: %s", obj)
+ pickler.save_reduce(type, (Ellipsis,), obj=obj)
+ logger.trace(pickler, "# T7")
+ elif obj is EnumMeta:
+ logger.trace(pickler, "T7: %s", obj)
+ pickler.write(GLOBAL + b'enum\nEnumMeta\n')
+ logger.trace(pickler, "# T7")
+
+ else:
+ _byref = getattr(pickler, '_byref', None)
+ obj_recursive = id(obj) in getattr(pickler, '_postproc', ())
+ incorrectly_named = not _locate_function(obj, pickler)
+ if not _byref and not obj_recursive and incorrectly_named: # not a function, but the name was held over
+ if postproc_list is None:
+ postproc_list = []
+
+ # thanks to Tom Stepleton pointing out pickler._session unneeded
+ logger.trace(pickler, "T2: %s", obj)
+ _dict, attrs = _get_typedict_type(obj, obj.__dict__.copy(), None, postproc_list) # copy dict proxy to a dict
+
+ #print (_dict)
+ #print ("%s\n%s" % (type(obj), obj.__name__))
+ #print ("%s\n%s" % (obj.__bases__, obj.__dict__))
+ slots = _dict.get('__slots__', ())
+ if type(slots) == str:
+ # __slots__ accepts a single string
+ slots = (slots,)
+
+ for name in slots:
+ _dict.pop(name, None)
+
+ if isinstance(obj, abc.ABCMeta):
+ logger.trace(pickler, "ABC: %s", obj)
+ _dict, attrs = _get_typedict_abc(obj, _dict, attrs, postproc_list)
+ logger.trace(pickler, "# ABC")
+
+ qualname = getattr(obj, '__qualname__', None)
+ if attrs is not None:
+ for k, v in attrs.items():
+ postproc_list.append((setattr, (obj, k, v)))
+ # TODO: Consider using the state argument to save_reduce?
+ if qualname is not None:
+ postproc_list.append((setattr, (obj, '__qualname__', qualname)))
+
+ if not hasattr(obj, '__orig_bases__'):
+ _save_with_postproc(pickler, (_create_type, (
+ type(obj), obj.__name__, obj.__bases__, _dict
+ )), obj=obj, postproc_list=postproc_list)
+ else:
+ # This case will always work, but might be overkill.
+ _metadict = {
+ 'metaclass': type(obj)
+ }
+
+ if _dict:
+ _dict_update = PartialType(_setitems, source=_dict)
+ else:
+ _dict_update = None
+
+ _save_with_postproc(pickler, (new_class, (
+ obj.__name__, obj.__orig_bases__, _metadict, _dict_update
+ )), obj=obj, postproc_list=postproc_list)
+ logger.trace(pickler, "# T2")
+ else:
+ obj_name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+ logger.trace(pickler, "T4: %s", obj)
+ if incorrectly_named:
+ warnings.warn(
+ "Cannot locate reference to %r." % (obj,),
+ PicklingWarning,
+ stacklevel=3,
+ )
+ if obj_recursive:
+ warnings.warn(
+ "Cannot pickle %r: %s.%s has recursive self-references that "
+ "trigger a RecursionError." % (obj, obj.__module__, obj_name),
+ PicklingWarning,
+ stacklevel=3,
+ )
+ #print (obj.__dict__)
+ #print ("%s\n%s" % (type(obj), obj.__name__))
+ #print ("%s\n%s" % (obj.__bases__, obj.__dict__))
+ StockPickler.save_global(pickler, obj, name=obj_name)
+ logger.trace(pickler, "# T4")
+ return
+
+@register(property)
+@register(abc.abstractproperty)
+def save_property(pickler, obj):
+ logger.trace(pickler, "Pr: %s", obj)
+ pickler.save_reduce(type(obj), (obj.fget, obj.fset, obj.fdel, obj.__doc__),
+ obj=obj)
+ logger.trace(pickler, "# Pr")
+
+@register(staticmethod)
+@register(classmethod)
+@register(abc.abstractstaticmethod)
+@register(abc.abstractclassmethod)
+def save_classmethod(pickler, obj):
+ logger.trace(pickler, "Cm: %s", obj)
+ orig_func = obj.__func__
+
+ # if type(obj.__dict__) is dict:
+ # if obj.__dict__:
+ # state = obj.__dict__
+ # else:
+ # state = None
+ # else:
+ # state = (None, {'__dict__', obj.__dict__})
+
+ pickler.save_reduce(type(obj), (orig_func,), obj=obj)
+ logger.trace(pickler, "# Cm")
+
+@register(FunctionType)
+def save_function(pickler, obj):
+ if not _locate_function(obj, pickler):
+ if type(obj.__code__) is not CodeType:
+ # Some PyPy builtin functions have no module name, and thus are not
+ # able to be located
+ module_name = getattr(obj, '__module__', None)
+ if module_name is None:
+ module_name = __builtin__.__name__
+ module = _import_module(module_name, safe=True)
+ _pypy_builtin = False
+ try:
+ found, _ = _getattribute(module, obj.__qualname__)
+ if getattr(found, '__func__', None) is obj:
+ _pypy_builtin = True
+ except AttributeError:
+ pass
+
+ if _pypy_builtin:
+ logger.trace(pickler, "F3: %s", obj)
+ pickler.save_reduce(getattr, (found, '__func__'), obj=obj)
+ logger.trace(pickler, "# F3")
+ return
+
+ logger.trace(pickler, "F1: %s", obj)
+ _recurse = getattr(pickler, '_recurse', None)
+ _postproc = getattr(pickler, '_postproc', None)
+ _main_modified = getattr(pickler, '_main_modified', None)
+ _original_main = getattr(pickler, '_original_main', __builtin__)#'None'
+ postproc_list = []
+ if _recurse:
+ # recurse to get all globals referred to by obj
+ from .detect import globalvars
+ globs_copy = globalvars(obj, recurse=True, builtin=True)
+
+ # Add the name of the module to the globs dictionary to prevent
+ # the duplication of the dictionary. Pickle the unpopulated
+ # globals dictionary and set the remaining items after the function
+ # is created to correctly handle recursion.
+ globs = {'__name__': obj.__module__}
+ else:
+ globs_copy = obj.__globals__
+
+ # If the globals is the __dict__ from the module being saved as a
+ # session, substitute it by the dictionary being actually saved.
+ if _main_modified and globs_copy is _original_main.__dict__:
+ globs_copy = getattr(pickler, '_main', _original_main).__dict__
+ globs = globs_copy
+ # If the globals is a module __dict__, do not save it in the pickle.
+ elif globs_copy is not None and obj.__module__ is not None and \
+ getattr(_import_module(obj.__module__, True), '__dict__', None) is globs_copy:
+ globs = globs_copy
+ else:
+ globs = {'__name__': obj.__module__}
+
+ if globs_copy is not None and globs is not globs_copy:
+ # In the case that the globals are copied, we need to ensure that
+ # the globals dictionary is updated when all objects in the
+ # dictionary are already created.
+ glob_ids = {id(g) for g in globs_copy.values()}
+ for stack_element in _postproc:
+ if stack_element in glob_ids:
+ _postproc[stack_element].append((_setitems, (globs, globs_copy)))
+ break
+ else:
+ postproc_list.append((_setitems, (globs, globs_copy)))
+
+ closure = obj.__closure__
+ state_dict = {}
+ for fattrname in ('__doc__', '__kwdefaults__', '__annotations__'):
+ fattr = getattr(obj, fattrname, None)
+ if fattr is not None:
+ state_dict[fattrname] = fattr
+ if obj.__qualname__ != obj.__name__:
+ state_dict['__qualname__'] = obj.__qualname__
+ if '__name__' not in globs or obj.__module__ != globs['__name__']:
+ state_dict['__module__'] = obj.__module__
+
+ state = obj.__dict__
+ if type(state) is not dict:
+ state_dict['__dict__'] = state
+ state = None
+ if state_dict:
+ state = state, state_dict
+
+ _save_with_postproc(pickler, (_create_function, (
+ obj.__code__, globs, obj.__name__, obj.__defaults__,
+ closure
+ ), state), obj=obj, postproc_list=postproc_list)
+
+ # Lift closure cell update to earliest function (#458)
+ if _postproc:
+ topmost_postproc = next(iter(_postproc.values()), None)
+ if closure and topmost_postproc:
+ for cell in closure:
+ possible_postproc = (setattr, (cell, 'cell_contents', obj))
+ try:
+ topmost_postproc.remove(possible_postproc)
+ except ValueError:
+ continue
+
+ # Change the value of the cell
+ pickler.save_reduce(*possible_postproc)
+ # pop None created by calling preprocessing step off stack
+ pickler.write(POP)
+
+ logger.trace(pickler, "# F1")
+ else:
+ logger.trace(pickler, "F2: %s", obj)
+ name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+ StockPickler.save_global(pickler, obj, name=name)
+ logger.trace(pickler, "# F2")
+ return
+
+if HAS_CTYPES and hasattr(ctypes, 'pythonapi'):
+ _PyCapsule_New = ctypes.pythonapi.PyCapsule_New
+ _PyCapsule_New.argtypes = (ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p)
+ _PyCapsule_New.restype = ctypes.py_object
+ _PyCapsule_GetPointer = ctypes.pythonapi.PyCapsule_GetPointer
+ _PyCapsule_GetPointer.argtypes = (ctypes.py_object, ctypes.c_char_p)
+ _PyCapsule_GetPointer.restype = ctypes.c_void_p
+ _PyCapsule_GetDestructor = ctypes.pythonapi.PyCapsule_GetDestructor
+ _PyCapsule_GetDestructor.argtypes = (ctypes.py_object,)
+ _PyCapsule_GetDestructor.restype = ctypes.c_void_p
+ _PyCapsule_GetContext = ctypes.pythonapi.PyCapsule_GetContext
+ _PyCapsule_GetContext.argtypes = (ctypes.py_object,)
+ _PyCapsule_GetContext.restype = ctypes.c_void_p
+ _PyCapsule_GetName = ctypes.pythonapi.PyCapsule_GetName
+ _PyCapsule_GetName.argtypes = (ctypes.py_object,)
+ _PyCapsule_GetName.restype = ctypes.c_char_p
+ _PyCapsule_IsValid = ctypes.pythonapi.PyCapsule_IsValid
+ _PyCapsule_IsValid.argtypes = (ctypes.py_object, ctypes.c_char_p)
+ _PyCapsule_IsValid.restype = ctypes.c_bool
+ _PyCapsule_SetContext = ctypes.pythonapi.PyCapsule_SetContext
+ _PyCapsule_SetContext.argtypes = (ctypes.py_object, ctypes.c_void_p)
+ _PyCapsule_SetDestructor = ctypes.pythonapi.PyCapsule_SetDestructor
+ _PyCapsule_SetDestructor.argtypes = (ctypes.py_object, ctypes.c_void_p)
+ _PyCapsule_SetName = ctypes.pythonapi.PyCapsule_SetName
+ _PyCapsule_SetName.argtypes = (ctypes.py_object, ctypes.c_char_p)
+ _PyCapsule_SetPointer = ctypes.pythonapi.PyCapsule_SetPointer
+ _PyCapsule_SetPointer.argtypes = (ctypes.py_object, ctypes.c_void_p)
+ #from _socket import CAPI as _testcapsule
+ _testcapsule_name = b'dill._dill._testcapsule'
+ _testcapsule = _PyCapsule_New(
+ ctypes.cast(_PyCapsule_New, ctypes.c_void_p),
+ ctypes.c_char_p(_testcapsule_name),
+ None
+ )
+ PyCapsuleType = type(_testcapsule)
+ @register(PyCapsuleType)
+ def save_capsule(pickler, obj):
+ logger.trace(pickler, "Cap: %s", obj)
+ name = _PyCapsule_GetName(obj)
+ #warnings.warn('Pickling a PyCapsule (%s) does not pickle any C data structures and could cause segmentation faults or other memory errors when unpickling.' % (name,), PicklingWarning)
+ pointer = _PyCapsule_GetPointer(obj, name)
+ context = _PyCapsule_GetContext(obj)
+ destructor = _PyCapsule_GetDestructor(obj)
+ pickler.save_reduce(_create_capsule, (pointer, name, context, destructor), obj=obj)
+ logger.trace(pickler, "# Cap")
+ _incedental_reverse_typemap['PyCapsuleType'] = PyCapsuleType
+ _reverse_typemap['PyCapsuleType'] = PyCapsuleType
+ _incedental_types.add(PyCapsuleType)
+else:
+ _testcapsule = None
+
+
+#############################
+# A quick fix for issue #500
+# This should be removed when a better solution is found.
+
+if hasattr(dataclasses, "_HAS_DEFAULT_FACTORY_CLASS"):
+ @register(dataclasses._HAS_DEFAULT_FACTORY_CLASS)
+ def save_dataclasses_HAS_DEFAULT_FACTORY_CLASS(pickler, obj):
+ logger.trace(pickler, "DcHDF: %s", obj)
+ pickler.write(GLOBAL + b"dataclasses\n_HAS_DEFAULT_FACTORY\n")
+ logger.trace(pickler, "# DcHDF")
+
+if hasattr(dataclasses, "MISSING"):
+ @register(type(dataclasses.MISSING))
+ def save_dataclasses_MISSING_TYPE(pickler, obj):
+ logger.trace(pickler, "DcM: %s", obj)
+ pickler.write(GLOBAL + b"dataclasses\nMISSING\n")
+ logger.trace(pickler, "# DcM")
+
+if hasattr(dataclasses, "KW_ONLY"):
+ @register(type(dataclasses.KW_ONLY))
+ def save_dataclasses_KW_ONLY_TYPE(pickler, obj):
+ logger.trace(pickler, "DcKWO: %s", obj)
+ pickler.write(GLOBAL + b"dataclasses\nKW_ONLY\n")
+ logger.trace(pickler, "# DcKWO")
+
+if hasattr(dataclasses, "_FIELD_BASE"):
+ @register(dataclasses._FIELD_BASE)
+ def save_dataclasses_FIELD_BASE(pickler, obj):
+ logger.trace(pickler, "DcFB: %s", obj)
+ pickler.write(GLOBAL + b"dataclasses\n" + obj.name.encode() + b"\n")
+ logger.trace(pickler, "# DcFB")
+
+#############################
+
+# quick sanity checking
+def pickles(obj,exact=False,safe=False,**kwds):
+ """
+ Quick check if object pickles with dill.
+
+ If *exact=True* then an equality test is done to check if the reconstructed
+ object matches the original object.
+
+    If *safe=True* then any exception raised in copy will signal that the
+    object is not picklable; otherwise only pickling errors will be trapped.
+
+ Additional keyword arguments are as :func:`dumps` and :func:`loads`.
+ """
+ if safe: exceptions = (Exception,) # RuntimeError, ValueError
+ else:
+ exceptions = (TypeError, AssertionError, NotImplementedError, PicklingError, UnpicklingError)
+ try:
+ pik = copy(obj, **kwds)
+ #FIXME: should check types match first, then check content if "exact"
+ try:
+ #FIXME: should be "(pik == obj).all()" for numpy comparison, though that'll fail if shapes differ
+ result = bool(pik.all() == obj.all())
+ except (AttributeError, TypeError):
+ warnings.filterwarnings('ignore') #FIXME: be specific
+ result = pik == obj
+ if warnings.filters: del warnings.filters[0]
+ if hasattr(result, 'toarray'): # for unusual types like sparse matrix
+ result = result.toarray().all()
+ if result: return True
+ if not exact:
+ result = type(pik) == type(obj)
+ if result: return result
+ # class instances might have been dumped with byref=False
+ return repr(type(pik)) == repr(type(obj)) #XXX: InstanceType?
+ return False
+ except exceptions:
+ return False
+
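+# Illustrative usage (not executed at import): pickles() round-trips an object
+# through dill's copy and falls back to a type comparison when equality is
+# inconclusive.
+def _example_pickles(): # hypothetical name, illustrative only
+    assert pickles([1, 2, 3]) # simple containers compare equal after the trip
+    assert pickles(lambda x: x) # functions fall back to the type comparison
+    return True
+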
+def check(obj, *args, **kwds):
+ """
+ Check pickling of an object across another process.
+
+ *python* is the path to the python interpreter (defaults to sys.executable)
+
+ Set *verbose=True* to print the unpickled object in the other process.
+
+ Additional keyword arguments are as :func:`dumps` and :func:`loads`.
+ """
+ # == undocumented ==
+ # python -- the string path or executable name of the selected python
+ # verbose -- if True, be verbose about printing warning messages
+ # all other args and kwds are passed to dill.dumps #FIXME: ignore on load
+ verbose = kwds.pop('verbose', False)
+ python = kwds.pop('python', None)
+ if python is None:
+ import sys
+ python = sys.executable
+ # type check
+    assert isinstance(python, str)
+ import subprocess
+ fail = True
+ try:
+ _obj = dumps(obj, *args, **kwds)
+ fail = False
+ finally:
+ if fail and verbose:
+ print("DUMP FAILED")
+ #FIXME: fails if python interpreter path contains spaces
+ # Use the following instead (which also processes the 'ignore' keyword):
+ # ignore = kwds.pop('ignore', None)
+ # unpickle = "dill.loads(%s, ignore=%s)"%(repr(_obj), repr(ignore))
+ # cmd = [python, "-c", "import dill; print(%s)"%unpickle]
+ # msg = "SUCCESS" if not subprocess.call(cmd) else "LOAD FAILED"
+ msg = "%s -c import dill; print(dill.loads(%s))" % (python, repr(_obj))
+ msg = "SUCCESS" if not subprocess.call(msg.split(None,2)) else "LOAD FAILED"
+ if verbose:
+ print(msg)
+ return
+
+# use to protect against missing attributes
+def is_dill(pickler, child=None):
+ "check the dill-ness of your pickler"
+ if child is False or not hasattr(pickler.__class__, 'mro'):
+ return 'dill' in pickler.__module__
+ return Pickler in pickler.__class__.mro()
+
+def _extend():
+ """extend pickle with all of dill's registered types"""
+ # need to have pickle not choke on _main_module? use is_dill(pickler)
+ for t,func in Pickler.dispatch.items():
+ try:
+ StockPickler.dispatch[t] = func
+ except Exception: #TypeError, PicklingError, UnpicklingError
+            logger.info("skip: %s", t) # note: no pickler instance in scope here
+ return
+
+del diff, _use_diff, use_diff
+
+# EOF
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/_objects.py b/llmeval-env/lib/python3.10/site-packages/dill/_objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2c02d3bbcf025144c9296c5d3ef30d5318a1870
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/_objects.py
@@ -0,0 +1,537 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+all Python Standard Library objects (currently: CH 1-15 @ 2.7)
+and some other common objects (e.g. numpy.ndarray)
+"""
+
+__all__ = ['registered','failures','succeeds']
+
+# helper imports
+import warnings; warnings.filterwarnings("ignore", category=DeprecationWarning)
+import sys
+import queue as Queue
+import dbm as anydbm
+from io import BytesIO as StringIO
+import re
+import array
+import collections
+import codecs
+import struct
+import dataclasses
+import datetime
+import calendar
+import weakref
+import pprint
+import decimal
+import numbers
+import functools
+import itertools
+import operator
+import tempfile
+import shelve
+import zlib
+import gzip
+import zipfile
+import tarfile
+import csv
+import hashlib
+import hmac
+import os
+import logging
+import logging.handlers
+import optparse
+#import __hello__
+import threading
+import socket
+import contextlib
+try:
+ import bz2
+ import sqlite3
+ import dbm.ndbm as dbm
+ HAS_ALL = True
+except ImportError: # Ubuntu
+ HAS_ALL = False
+try:
+ #import curses
+ #from curses import textpad, panel
+ HAS_CURSES = True
+except ImportError: # Windows
+ HAS_CURSES = False
+try:
+ import ctypes
+ HAS_CTYPES = True
+ # if using `pypy`, pythonapi is not found
+ IS_PYPY = not hasattr(ctypes, 'pythonapi')
+except ImportError: # MacPorts
+ HAS_CTYPES = False
+ IS_PYPY = False
+
+# helper objects
+class _class:
+ def _method(self):
+ pass
+# @classmethod
+# def _clsmethod(cls): #XXX: test me
+# pass
+# @staticmethod
+# def _static(self): #XXX: test me
+# pass
+class _class2:
+ def __call__(self):
+ pass
+_instance2 = _class2()
+class _newclass(object):
+ def _method(self):
+ pass
+# @classmethod
+# def _clsmethod(cls): #XXX: test me
+# pass
+# @staticmethod
+# def _static(self): #XXX: test me
+# pass
+class _newclass2(object):
+ __slots__ = ['descriptor']
+def _function(x): yield x
+def _function2():
+ try: raise
+ except Exception:
+ from sys import exc_info
+ e, er, tb = exc_info()
+ return er, tb
+if HAS_CTYPES:
+ class _Struct(ctypes.Structure):
+ pass
+ _Struct._fields_ = [("_field", ctypes.c_int),("next", ctypes.POINTER(_Struct))]
+_filedescrip, _tempfile = tempfile.mkstemp('r') # deleted in cleanup
+if sys.hexversion < 0x30d00a1:
+    _tmpf = tempfile.TemporaryFile('w') # TemporaryFile emits OSError 9 in python >= 3.13
+else:
+    _tmpf = tempfile.NamedTemporaryFile('w').file # for python >= 3.13
+
+# objects used by dill for type declaration
+registered = d = {}
+# objects dill fails to pickle
+failures = x = {}
+# all other type objects
+succeeds = a = {}
+
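+# Illustrative sketch (not used here): the three registries above drive dill's
+# test suite, e.g. by pairing each registered type name with the type of its
+# representative instance.
+def _example_iter_registry(): # hypothetical name, illustrative only
+    return [(name, type(obj).__name__) for name, obj in registered.items()]
+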
+# types module (part of CH 8)
+a['BooleanType'] = bool(1)
+a['BuiltinFunctionType'] = len
+a['BuiltinMethodType'] = a['BuiltinFunctionType']
+a['BytesType'] = _bytes = codecs.latin_1_encode('\x00')[0] # bytes(1)
+a['ClassType'] = _class
+a['ComplexType'] = complex(1)
+a['DictType'] = _dict = {}
+a['DictionaryType'] = a['DictType']
+a['FloatType'] = float(1)
+a['FunctionType'] = _function
+a['InstanceType'] = _instance = _class()
+a['IntType'] = _int = int(1)
+a['ListType'] = _list = []
+a['NoneType'] = None
+a['ObjectType'] = object()
+a['StringType'] = _str = str(1)
+a['TupleType'] = _tuple = ()
+a['TypeType'] = type
+a['LongType'] = _int
+a['UnicodeType'] = _str
+# built-in constants (CH 4)
+a['CopyrightType'] = copyright
+# built-in types (CH 5)
+a['ClassObjectType'] = _newclass #
+a['ClassInstanceType'] = _newclass() #
+a['SetType'] = _set = set()
+a['FrozenSetType'] = frozenset()
+# built-in exceptions (CH 6)
+a['ExceptionType'] = _exception = _function2()[0]
+# string services (CH 7)
+a['SREPatternType'] = _srepattern = re.compile('')
+# data types (CH 8)
+a['ArrayType'] = array.array("f")
+a['DequeType'] = collections.deque([0])
+a['DefaultDictType'] = collections.defaultdict(_function, _dict)
+a['TZInfoType'] = datetime.tzinfo()
+a['DateTimeType'] = datetime.datetime.today()
+a['CalendarType'] = calendar.Calendar()
+# numeric and mathematical types (CH 9)
+a['DecimalType'] = decimal.Decimal(1)
+a['CountType'] = itertools.count(0)
+# data compression and archiving (CH 12)
+a['TarInfoType'] = tarfile.TarInfo()
+# generic operating system services (CH 15)
+a['LoggerType'] = _logger = logging.getLogger()
+a['FormatterType'] = logging.Formatter() # pickle ok
+a['FilterType'] = logging.Filter() # pickle ok
+a['LogRecordType'] = logging.makeLogRecord(_dict) # pickle ok
+a['OptionParserType'] = _oparser = optparse.OptionParser() # pickle ok
+a['OptionGroupType'] = optparse.OptionGroup(_oparser,"foo") # pickle ok
+a['OptionType'] = optparse.Option('--foo') # pickle ok
+if HAS_CTYPES:
+ z = x if IS_PYPY else a
+ z['CCharType'] = _cchar = ctypes.c_char()
+ z['CWCharType'] = ctypes.c_wchar() # fail == 2.6
+ z['CByteType'] = ctypes.c_byte()
+ z['CUByteType'] = ctypes.c_ubyte()
+ z['CShortType'] = ctypes.c_short()
+ z['CUShortType'] = ctypes.c_ushort()
+ z['CIntType'] = ctypes.c_int()
+ z['CUIntType'] = ctypes.c_uint()
+ z['CLongType'] = ctypes.c_long()
+ z['CULongType'] = ctypes.c_ulong()
+ z['CLongLongType'] = ctypes.c_longlong()
+ z['CULongLongType'] = ctypes.c_ulonglong()
+ z['CFloatType'] = ctypes.c_float()
+ z['CDoubleType'] = ctypes.c_double()
+ z['CSizeTType'] = ctypes.c_size_t()
+ del z
+ a['CLibraryLoaderType'] = ctypes.cdll
+ a['StructureType'] = _Struct
+ # if not IS_PYPY:
+ # a['BigEndianStructureType'] = ctypes.BigEndianStructure()
+#NOTE: also LittleEndianStructureType and UnionType... abstract classes
+#NOTE: remember for ctypesobj.contents creates a new python object
+#NOTE: ctypes.c_int._objects is memberdescriptor for object's __dict__
+#NOTE: base class of all ctypes data types is non-public _CData
+
+import fractions
+import io
+from io import StringIO as TextIO
+# built-in functions (CH 2)
+a['ByteArrayType'] = bytearray([1])
+# numeric and mathematical types (CH 9)
+a['FractionType'] = fractions.Fraction()
+a['NumberType'] = numbers.Number()
+# generic operating system services (CH 15)
+a['IOBaseType'] = io.IOBase()
+a['RawIOBaseType'] = io.RawIOBase()
+a['TextIOBaseType'] = io.TextIOBase()
+a['BufferedIOBaseType'] = io.BufferedIOBase()
+a['UnicodeIOType'] = TextIO() # the new StringIO
+a['LoggerAdapterType'] = logging.LoggerAdapter(_logger,_dict) # pickle ok
+if HAS_CTYPES:
+ z = x if IS_PYPY else a
+ z['CBoolType'] = ctypes.c_bool(1)
+ z['CLongDoubleType'] = ctypes.c_longdouble()
+ del z
+import argparse
+# data types (CH 8)
+a['OrderedDictType'] = collections.OrderedDict(_dict)
+a['CounterType'] = collections.Counter(_dict)
+if HAS_CTYPES:
+ z = x if IS_PYPY else a
+ z['CSSizeTType'] = ctypes.c_ssize_t()
+ del z
+# generic operating system services (CH 15)
+a['NullHandlerType'] = logging.NullHandler() # pickle ok # new 2.7
+a['ArgParseFileType'] = argparse.FileType() # pickle ok
+
+# -- pickle fails on all below here -----------------------------------------
+# types module (part of CH 8)
+a['CodeType'] = compile('','','exec')
+a['DictProxyType'] = type.__dict__
+a['DictProxyType2'] = _newclass.__dict__
+a['EllipsisType'] = Ellipsis
+a['ClosedFileType'] = open(os.devnull, 'wb', buffering=0).close()
+a['GetSetDescriptorType'] = array.array.typecode
+a['LambdaType'] = _lambda = lambda x: lambda y: x #XXX: works when not imported!
+a['MemberDescriptorType'] = _newclass2.descriptor
+if not IS_PYPY:
+ a['MemberDescriptorType2'] = datetime.timedelta.days
+a['MethodType'] = _method = _class()._method #XXX: works when not imported!
+a['ModuleType'] = datetime
+a['NotImplementedType'] = NotImplemented
+a['SliceType'] = slice(1)
+a['UnboundMethodType'] = _class._method #XXX: works when not imported!
+d['TextWrapperType'] = open(os.devnull, 'r') # same as mode='w','w+','r+'
+d['BufferedRandomType'] = open(os.devnull, 'r+b') # same as mode='w+b'
+d['BufferedReaderType'] = open(os.devnull, 'rb') # (default: buffering=-1)
+d['BufferedWriterType'] = open(os.devnull, 'wb')
+try: # oddities: deprecated
+ from _pyio import open as _open
+ d['PyTextWrapperType'] = _open(os.devnull, 'r', buffering=-1)
+ d['PyBufferedRandomType'] = _open(os.devnull, 'r+b', buffering=-1)
+ d['PyBufferedReaderType'] = _open(os.devnull, 'rb', buffering=-1)
+ d['PyBufferedWriterType'] = _open(os.devnull, 'wb', buffering=-1)
+except ImportError:
+ pass
+# other (concrete) object types
+z = d if sys.hexversion < 0x30800a2 else a
+z['CellType'] = (_lambda)(0).__closure__[0]
+del z
+a['XRangeType'] = _xrange = range(1)
+a['MethodDescriptorType'] = type.__dict__['mro']
+a['WrapperDescriptorType'] = type.__repr__
+#a['WrapperDescriptorType2'] = type.__dict__['__module__']#XXX: GetSetDescriptor
+a['ClassMethodDescriptorType'] = type.__dict__['__prepare__']
+# built-in functions (CH 2)
+_methodwrap = (1).__lt__
+a['MethodWrapperType'] = _methodwrap
+a['StaticMethodType'] = staticmethod(_method)
+a['ClassMethodType'] = classmethod(_method)
+a['PropertyType'] = property()
+d['SuperType'] = super(Exception, _exception)
+# string services (CH 7)
+_in = _bytes
+a['InputType'] = _cstrI = StringIO(_in)
+a['OutputType'] = _cstrO = StringIO()
+# data types (CH 8)
+a['WeakKeyDictionaryType'] = weakref.WeakKeyDictionary()
+a['WeakValueDictionaryType'] = weakref.WeakValueDictionary()
+a['ReferenceType'] = weakref.ref(_instance)
+a['DeadReferenceType'] = weakref.ref(_class())
+a['ProxyType'] = weakref.proxy(_instance)
+a['DeadProxyType'] = weakref.proxy(_class())
+a['CallableProxyType'] = weakref.proxy(_instance2)
+a['DeadCallableProxyType'] = weakref.proxy(_class2())
+a['QueueType'] = Queue.Queue()
+# numeric and mathematical types (CH 9)
+d['PartialType'] = functools.partial(int,base=2)
+a['IzipType'] = zip('0','1')
+a['ChainType'] = itertools.chain('0','1')
+d['ItemGetterType'] = operator.itemgetter(0)
+d['AttrGetterType'] = operator.attrgetter('__repr__')
+# file and directory access (CH 10)
+_fileW = _cstrO
+# data persistence (CH 11)
+if HAS_ALL:
+ x['ConnectionType'] = _conn = sqlite3.connect(':memory:')
+ x['CursorType'] = _conn.cursor()
+a['ShelveType'] = shelve.Shelf({})
+# data compression and archiving (CH 12)
+if HAS_ALL:
+ x['BZ2FileType'] = bz2.BZ2File(os.devnull)
+ x['BZ2CompressorType'] = bz2.BZ2Compressor()
+ x['BZ2DecompressorType'] = bz2.BZ2Decompressor()
+#x['ZipFileType'] = _zip = zipfile.ZipFile(os.devnull,'w')
+#_zip.write(_tempfile,'x') [causes annoying warning/error printed on import]
+#a['ZipInfoType'] = _zip.getinfo('x')
+a['TarFileType'] = tarfile.open(fileobj=_fileW,mode='w')
+# file formats (CH 13)
+x['DialectType'] = csv.get_dialect('excel')
+if sys.hexversion < 0x30d00a1:
+ import xdrlib
+ a['PackerType'] = xdrlib.Packer()
+# optional operating system services (CH 16)
+a['LockType'] = threading.Lock()
+a['RLockType'] = threading.RLock()
+# generic operating system services (CH 15) # also closed/open and r/w/etc...
+a['NamedLoggerType'] = _logger = logging.getLogger(__name__)
+#a['FrozenModuleType'] = __hello__ #FIXME: prints "Hello world..."
+# interprocess communication (CH 17)
+x['SocketType'] = _socket = socket.socket()
+x['SocketPairType'] = socket.socketpair()[0]
+# python runtime services (CH 27)
+a['GeneratorContextManagerType'] = contextlib.contextmanager(max)([1])
+
+try: # ipython
+ __IPYTHON__ is True # is ipython
+except NameError:
+ # built-in constants (CH 4)
+ a['QuitterType'] = quit
+ d['ExitType'] = a['QuitterType']
+try: # numpy #FIXME: slow... 0.05 to 0.1 sec to import numpy
+ from numpy import ufunc as _numpy_ufunc
+ from numpy import array as _numpy_array
+ from numpy import int32 as _numpy_int32
+ a['NumpyUfuncType'] = _numpy_ufunc
+ a['NumpyArrayType'] = _numpy_array
+ a['NumpyInt32Type'] = _numpy_int32
+except ImportError:
+ pass
+# numeric and mathematical types (CH 9)
+a['ProductType'] = itertools.product('0','1')
+# generic operating system services (CH 15)
+a['FileHandlerType'] = logging.FileHandler(os.devnull)
+a['RotatingFileHandlerType'] = logging.handlers.RotatingFileHandler(os.devnull)
+a['SocketHandlerType'] = logging.handlers.SocketHandler('localhost',514)
+a['MemoryHandlerType'] = logging.handlers.MemoryHandler(1)
+# data types (CH 8)
+a['WeakSetType'] = weakref.WeakSet() # 2.7
+# generic operating system services (CH 15) [errors when dill is imported]
+#a['ArgumentParserType'] = _parser = argparse.ArgumentParser('PROG')
+#a['NamespaceType'] = _parser.parse_args() # pickle ok
+#a['SubParsersActionType'] = _parser.add_subparsers()
+#a['MutuallyExclusiveGroupType'] = _parser.add_mutually_exclusive_group()
+#a['ArgumentGroupType'] = _parser.add_argument_group()
+
+# -- dill fails in some versions below here ---------------------------------
+# types module (part of CH 8)
+d['FileType'] = open(os.devnull, 'rb', buffering=0) # same 'wb','wb+','rb+'
+# built-in functions (CH 2)
+# Iterators:
+a['ListIteratorType'] = iter(_list) # empty vs non-empty
+a['SetIteratorType'] = iter(_set) #XXX: empty vs non-empty #FIXME: list_iterator
+a['TupleIteratorType']= iter(_tuple) # empty vs non-empty
+a['XRangeIteratorType'] = iter(_xrange) # empty vs non-empty
+a["BytesIteratorType"] = iter(b'')
+a["BytearrayIteratorType"] = iter(bytearray(b''))
+z = x if IS_PYPY else a
+z["CallableIteratorType"] = iter(iter, None)
+del z
+x["MemoryIteratorType"] = iter(memoryview(b''))
+a["ListReverseiteratorType"] = reversed([])
+X = a['OrderedDictType']
+d["OdictKeysType"] = X.keys()
+d["OdictValuesType"] = X.values()
+d["OdictItemsType"] = X.items()
+a["OdictIteratorType"] = iter(X.keys()) #FIXME: list_iterator
+del X
+#FIXME: list_iterator
+a['DictionaryItemIteratorType'] = iter(type.__dict__.items())
+a['DictionaryKeyIteratorType'] = iter(type.__dict__.keys())
+a['DictionaryValueIteratorType'] = iter(type.__dict__.values())
+if sys.hexversion >= 0x30800a0:
+ a["DictReversekeyiteratorType"] = reversed({}.keys())
+ a["DictReversevalueiteratorType"] = reversed({}.values())
+ a["DictReverseitemiteratorType"] = reversed({}.items())
+
+try:
+ import symtable
+ #FIXME: fails to pickle
+ x["SymtableEntryType"] = symtable.symtable("", "string", "exec")._table
+except ImportError:
+ pass
+
+if sys.hexversion >= 0x30a00a0 and not IS_PYPY:
+ x['LineIteratorType'] = compile('3', '', 'eval').co_lines()
+
+if sys.hexversion >= 0x30b00b0:
+ from types import GenericAlias
+ d["GenericAliasIteratorType"] = iter(GenericAlias(list, (int,)))
+ x['PositionsIteratorType'] = compile('3', '', 'eval').co_positions()
+
+# data types (CH 8)
+a['PrettyPrinterType'] = pprint.PrettyPrinter()
+# numeric and mathematical types (CH 9)
+a['CycleType'] = itertools.cycle('0')
+# file and directory access (CH 10)
+a['TemporaryFileType'] = _tmpf
+# data compression and archiving (CH 12)
+x['GzipFileType'] = gzip.GzipFile(fileobj=_fileW)
+# generic operating system services (CH 15)
+a['StreamHandlerType'] = logging.StreamHandler()
+# numeric and mathematical types (CH 9)
+a['PermutationsType'] = itertools.permutations('0')
+a['CombinationsType'] = itertools.combinations('0',1)
+a['RepeatType'] = itertools.repeat(0)
+a['CompressType'] = itertools.compress('0',[1])
+#XXX: ...and etc
+
+# -- dill fails on all below here -------------------------------------------
+# types module (part of CH 8)
+x['GeneratorType'] = _generator = _function(1) #XXX: priority
+x['FrameType'] = _generator.gi_frame #XXX: inspect.currentframe()
+x['TracebackType'] = _function2()[1] #(see: inspect.getouterframes,getframeinfo)
+# other (concrete) object types
+# (also: Capsule / CObject ?)
+# built-in functions (CH 2)
+# built-in types (CH 5)
+# string services (CH 7)
+x['StructType'] = struct.Struct('c')
+x['CallableIteratorType'] = _srepattern.finditer('')
+x['SREMatchType'] = _srepattern.match('')
+x['SREScannerType'] = _srepattern.scanner('')
+x['StreamReader'] = codecs.StreamReader(_cstrI) #XXX: ... and etc
+# python object persistence (CH 11)
+# x['DbShelveType'] = shelve.open('foo','n')#,protocol=2) #XXX: delete foo
+if HAS_ALL:
+ z = a if IS_PYPY else x
+ z['DbmType'] = dbm.open(_tempfile,'n')
+ del z
+# x['DbCursorType'] = _dbcursor = anydbm.open('foo','n') #XXX: delete foo
+# x['DbType'] = _dbcursor.db
+# data compression and archiving (CH 12)
+x['ZlibCompressType'] = zlib.compressobj()
+x['ZlibDecompressType'] = zlib.decompressobj()
+# file formats (CH 13)
+x['CSVReaderType'] = csv.reader(_cstrI)
+x['CSVWriterType'] = csv.writer(_cstrO)
+x['CSVDictReaderType'] = csv.DictReader(_cstrI)
+x['CSVDictWriterType'] = csv.DictWriter(_cstrO,{})
+# cryptographic services (CH 14)
+x['HashType'] = hashlib.md5()
+if (sys.hexversion < 0x30800a1):
+ x['HMACType'] = hmac.new(_in)
+else:
+ x['HMACType'] = hmac.new(_in, digestmod='md5')
+# generic operating system services (CH 15)
+if HAS_CURSES: pass
+ #x['CursesWindowType'] = _curwin = curses.initscr() #FIXME: messes up tty
+ #x['CursesTextPadType'] = textpad.Textbox(_curwin)
+ #x['CursesPanelType'] = panel.new_panel(_curwin)
+if HAS_CTYPES:
+ x['CCharPType'] = ctypes.c_char_p()
+ x['CWCharPType'] = ctypes.c_wchar_p()
+ x['CVoidPType'] = ctypes.c_void_p()
+ if sys.platform[:3] == 'win':
+ x['CDLLType'] = _cdll = ctypes.cdll.msvcrt
+ else:
+ x['CDLLType'] = _cdll = ctypes.CDLL(None)
+ if not IS_PYPY:
+ x['PyDLLType'] = _pydll = ctypes.pythonapi
+ x['FuncPtrType'] = _cdll._FuncPtr()
+ x['CCharArrayType'] = ctypes.create_string_buffer(1)
+ x['CWCharArrayType'] = ctypes.create_unicode_buffer(1)
+ x['CParamType'] = ctypes.byref(_cchar)
+ x['LPCCharType'] = ctypes.pointer(_cchar)
+ x['LPCCharObjType'] = _lpchar = ctypes.POINTER(ctypes.c_char)
+ x['NullPtrType'] = _lpchar()
+ x['NullPyObjectType'] = ctypes.py_object()
+ x['PyObjectType'] = ctypes.py_object(lambda :None)
+ z = a if IS_PYPY else x
+ z['FieldType'] = _field = _Struct._field
+ z['CFUNCTYPEType'] = _cfunc = ctypes.CFUNCTYPE(ctypes.c_char)
+ if sys.hexversion < 0x30c00b3:
+ x['CFunctionType'] = _cfunc(str)
+ del z
+# numeric and mathematical types (CH 9)
+a['MethodCallerType'] = operator.methodcaller('mro') # 2.6
+# built-in types (CH 5)
+x['MemoryType'] = memoryview(_in) # 2.7
+x['MemoryType2'] = memoryview(bytearray(_in)) # 2.7
+d['DictItemsType'] = _dict.items() # 2.7
+d['DictKeysType'] = _dict.keys() # 2.7
+d['DictValuesType'] = _dict.values() # 2.7
+# generic operating system services (CH 15)
+a['RawTextHelpFormatterType'] = argparse.RawTextHelpFormatter('PROG')
+a['RawDescriptionHelpFormatterType'] = argparse.RawDescriptionHelpFormatter('PROG')
+a['ArgDefaultsHelpFormatterType'] = argparse.ArgumentDefaultsHelpFormatter('PROG')
+z = a if IS_PYPY else x
+z['CmpKeyType'] = _cmpkey = functools.cmp_to_key(_methodwrap) # 2.7, >=3.2
+z['CmpKeyObjType'] = _cmpkey('0') #2.7, >=3.2
+del z
+# oddities: removed, etc
+x['BufferType'] = x['MemoryType']
+
+from dill._dill import _testcapsule
+if _testcapsule is not None:
+ d['PyCapsuleType'] = _testcapsule
+del _testcapsule
+
+if hasattr(dataclasses, '_HAS_DEFAULT_FACTORY'):
+ a['DataclassesHasDefaultFactoryType'] = dataclasses._HAS_DEFAULT_FACTORY
+
+if hasattr(dataclasses, 'MISSING'):
+ a['DataclassesMissingType'] = dataclasses.MISSING
+
+if hasattr(dataclasses, 'KW_ONLY'):
+ a['DataclassesKWOnlyType'] = dataclasses.KW_ONLY
+
+if hasattr(dataclasses, '_FIELD_BASE'):
+ a['DataclassesFieldBaseType'] = dataclasses._FIELD
+
+# -- cleanup ----------------------------------------------------------------
+a.update(d) # registered also succeed
+if sys.platform[:3] == 'win':
+ os.close(_filedescrip) # required on win32
+os.remove(_tempfile)
+
+
+# EOF
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/_shims.py b/llmeval-env/lib/python3.10/site-packages/dill/_shims.py
new file mode 100644
index 0000000000000000000000000000000000000000..da1abbecc834859271d5e096f041fe299e04e072
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/_shims.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Author: Anirudh Vegesana (avegesan@cs.stanford.edu)
+# Copyright (c) 2021-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+Provides shims for compatibility between versions of dill and Python.
+
+Compatibility shims should be provided in this file. Here are two simple example
+use cases.
+
+Deprecation of constructor function:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Assume that we were transitioning _import_module in _dill.py to
+the builtin function importlib.import_module when present.
+
+@move_to(_dill)
+def _import_module(import_name):
+ ... # code already in _dill.py
+
+_import_module = Getattr(importlib, 'import_module', Getattr(_dill, '_import_module', None))
+
+The code will attempt to find import_module in the importlib module. If not
+present, it will use the _import_module function in _dill.
+
+Emulate new Python behavior in older Python versions:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+CellType.cell_contents behaves differently in Python 3.6 and 3.7. It is
+read-only in Python 3.6 and writable and deletable in 3.7.
+
+if _dill.OLD37 and _dill.HAS_CTYPES and ...:
+ @move_to(_dill)
+ def _setattr(object, name, value):
+ if type(object) is _dill.CellType and name == 'cell_contents':
+ _PyCell_Set.argtypes = (ctypes.py_object, ctypes.py_object)
+ _PyCell_Set(object, value)
+ else:
+ setattr(object, name, value)
+... # more cases below
+
+_setattr = Getattr(_dill, '_setattr', setattr)
+
+_dill._setattr will be used when present to emulate Python 3.7 functionality in
+older versions of Python while defaulting to the standard setattr in 3.7+.
+
+See this PR for the discussion that led to this system:
+https://github.com/uqfoundation/dill/pull/443
+"""
+
+import inspect
+import sys
+
+_dill = sys.modules['dill._dill']
+
+
+class Reduce(object):
+ """
+ Reduce objects are wrappers used for compatibility enforcement during
+ unpickle-time. They should only be used in calls to pickler.save and
+ other Reduce objects. They are only evaluated within unpickler.load.
+
+ Pickling a Reduce object makes the two implementations equivalent:
+
+ pickler.save(Reduce(*reduction))
+
+ pickler.save_reduce(*reduction, obj=reduction)
+ """
+ __slots__ = ['reduction']
+ def __new__(cls, *reduction, **kwargs):
+ """
+ Args:
+ *reduction: a tuple that matches the format given here:
+ https://docs.python.org/3/library/pickle.html#object.__reduce__
+ is_callable: a bool to indicate that the object created by
+ unpickling `reduction` is callable. If true, the current Reduce
+ is allowed to be used as the function in further save_reduce calls
+ or Reduce objects.
+ """
+ is_callable = kwargs.get('is_callable', False) # Pleases Py2. Can be removed later
+ if is_callable:
+ self = object.__new__(_CallableReduce)
+ else:
+ self = object.__new__(Reduce)
+ self.reduction = reduction
+ return self
+ def __repr__(self):
+ return 'Reduce%s' % (self.reduction,)
+ def __copy__(self):
+ return self # pragma: no cover
+ def __deepcopy__(self, memo):
+ return self # pragma: no cover
+ def __reduce__(self):
+ return self.reduction
+ def __reduce_ex__(self, protocol):
+ return self.__reduce__()
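+# Hedged illustration of the equivalence documented above; 'pickler' stands
+# for any dill Pickler instance (a sketch, not executed at import time):
+#
+#   >>> reduction = (complex, (1.0, 2.0))
+#   >>> pickler.save(Reduce(*reduction))            # equivalent to...
+#   >>> pickler.save_reduce(*reduction, obj=reduction)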
+
+class _CallableReduce(Reduce):
+ # A version of Reduce for functions. Used to trick pickler.save_reduce into
+ # thinking that Reduce objects of functions are themselves meaningful functions.
+ def __call__(self, *args, **kwargs):
+ reduction = self.__reduce__()
+ func = reduction[0]
+ f_args = reduction[1]
+ obj = func(*f_args)
+ return obj(*args, **kwargs)
+
+__NO_DEFAULT = _dill.Sentinel('Getattr.NO_DEFAULT')
+
+def Getattr(object, name, default=__NO_DEFAULT):
+ """
+ A Reduce object that represents the getattr operation. When unpickled, the
+ Getattr will access an attribute 'name' of 'object' and return the value
+ stored there. If the attribute doesn't exist, the default value will be
+ returned if present.
+
+ The following statements are equivalent:
+
+ Getattr(collections, 'OrderedDict')
+ Getattr(collections, 'spam', None)
+ Getattr(*args)
+
+ Reduce(getattr, (collections, 'OrderedDict'))
+ Reduce(getattr, (collections, 'spam', None))
+ Reduce(getattr, args)
+
+ During unpickling, the first two will result in collections.OrderedDict and
+ None respectively because the first attribute exists and the second one does
+ not, forcing it to use the default value given in the third argument.
+ """
+
+ if default is Getattr.NO_DEFAULT:
+ reduction = (getattr, (object, name))
+ else:
+ reduction = (getattr, (object, name, default))
+
+ return Reduce(*reduction, is_callable=callable(default))
+
+Getattr.NO_DEFAULT = __NO_DEFAULT
+del __NO_DEFAULT
+
+def move_to(module, name=None):
+ def decorator(func):
+ if name is None:
+ fname = func.__name__
+ else:
+ fname = name
+ module.__dict__[fname] = func
+ func.__module__ = module.__name__
+ return func
+ return decorator
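+# Sketch of move_to in use ('_example' is a hypothetical helper):
+#
+#   >>> @move_to(_dill)
+#   ... def _example(x):
+#   ...     return x
+#   >>> _dill._example is _example   # the function now also lives in _dill
+#   True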
+
+def register_shim(name, default):
+ """
+    An easier-to-understand and more compact way of "softly" defining a function.
+ These two pieces of code are equivalent:
+
+ if _dill.OLD3X:
+ def _create_class():
+ ...
+ _create_class = register_shim('_create_class', types.new_class)
+
+ if _dill.OLD3X:
+ @move_to(_dill)
+ def _create_class():
+ ...
+ _create_class = Getattr(_dill, '_create_class', types.new_class)
+
+ Intuitively, it creates a function or object in the versions of dill/python
+    that require special reimplementations, and uses a core library or default
+ implementation if that function or object does not exist.
+ """
+ func = globals().get(name)
+ if func is not None:
+ _dill.__dict__[name] = func
+ func.__module__ = _dill.__name__
+
+ if default is Getattr.NO_DEFAULT:
+ reduction = (getattr, (_dill, name))
+ else:
+ reduction = (getattr, (_dill, name, default))
+
+ return Reduce(*reduction, is_callable=callable(default))
+
+######################
+## Compatibility Shims are defined below
+######################
+
+_CELL_EMPTY = register_shim('_CELL_EMPTY', None)
+
+_setattr = register_shim('_setattr', setattr)
+_delattr = register_shim('_delattr', delattr)
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/detect.py b/llmeval-env/lib/python3.10/site-packages/dill/detect.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f76e729d2469fa3f028a189314977c11397edd7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/detect.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+Methods for detecting objects leading to pickling failures.
+"""
+
+import dis
+from inspect import ismethod, isfunction, istraceback, isframe, iscode
+
+from .pointers import parent, reference, at, parents, children
+from .logger import trace
+
+__all__ = ['baditems','badobjects','badtypes','code','errors','freevars',
+ 'getmodule','globalvars','nestedcode','nestedglobals','outermost',
+ 'referredglobals','referrednested','trace','varnames']
+
+def getmodule(object, _filename=None, force=False):
+ """get the module of the object"""
+ from inspect import getmodule as getmod
+ module = getmod(object, _filename)
+ if module or not force: return module
+ import builtins
+ from .source import getname
+ name = getname(object, force=True)
+ return builtins if name in vars(builtins).keys() else None
+
+def outermost(func): # is analogous to getsource(func,enclosing=True)
+ """get outermost enclosing object (i.e. the outer function in a closure)
+
+ NOTE: this is the object-equivalent of getsource(func, enclosing=True)
+ """
+ if ismethod(func):
+ _globals = func.__func__.__globals__ or {}
+ elif isfunction(func):
+ _globals = func.__globals__ or {}
+ else:
+ return #XXX: or raise? no matches
+ _globals = _globals.items()
+ # get the enclosing source
+ from .source import getsourcelines
+ try: lines,lnum = getsourcelines(func, enclosing=True)
+ except Exception: #TypeError, IOError
+ lines,lnum = [],None
+ code = ''.join(lines)
+ # get all possible names,objects that are named in the enclosing source
+ _locals = ((name,obj) for (name,obj) in _globals if name in code)
+ # now only save the objects that generate the enclosing block
+ for name,obj in _locals: #XXX: don't really need 'name'
+ try:
+ if getsourcelines(obj) == (lines,lnum): return obj
+ except Exception: #TypeError, IOError
+ pass
+ return #XXX: or raise? no matches
+
+def nestedcode(func, recurse=True): #XXX: or return dict of {co_name: co} ?
+ """get the code objects for any nested functions (e.g. in a closure)"""
+ func = code(func)
+ if not iscode(func): return [] #XXX: or raise? no matches
+ nested = set()
+ for co in func.co_consts:
+ if co is None: continue
+ co = code(co)
+ if co:
+ nested.add(co)
+ if recurse: nested |= set(nestedcode(co, recurse=True))
+ return list(nested)
+
+def code(func):
+ """get the code object for the given function or method
+
+ NOTE: use dill.source.getsource(CODEOBJ) to get the source code
+ """
+ if ismethod(func): func = func.__func__
+ if isfunction(func): func = func.__code__
+ if istraceback(func): func = func.tb_frame
+ if isframe(func): func = func.f_code
+ if iscode(func): return func
+ return
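+# Quick illustration (a hedged sketch):
+#
+#   >>> f = lambda x: x
+#   >>> code(f) is f.__code__
+#   True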
+
+#XXX: ugly: parse dis.dis for name after "<code object" in line and in globals?
+def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ?
+    """get functions defined inside of func (e.g. inner functions in a closure)
+
+    NOTE: results may differ if the function has been executed or not.
+    If len(nestedcode(func)) > len(referrednested(func)), try calling func().
+    If possible, python builds code objects, but delays building functions
+    until func() is called.
+    """
+ import gc
+ funcs = set()
+    # get the code objects, and try to track down by reference
+ for co in nestedcode(func, recurse):
+ # look for function objects that refer to the code object
+ for obj in gc.get_referrers(co):
+ # get methods
+ _ = getattr(obj, '__func__', None) # ismethod
+ if getattr(_, '__code__', None) is co: funcs.add(obj)
+ # get functions
+ elif getattr(obj, '__code__', None) is co: funcs.add(obj)
+ # get frame objects
+ elif getattr(obj, 'f_code', None) is co: funcs.add(obj)
+ # get code objects
+ elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj)
+# frameobjs => func.__code__.co_varnames not in func.__code__.co_cellvars
+# funcobjs => func.__code__.co_cellvars not in func.__code__.co_varnames
+# frameobjs are not found, however funcobjs are...
+# (see: test_mixins.quad ... and test_mixins.wtf)
+# after execution, code objects get compiled, and then may be found by gc
+ return list(funcs)
+
+
+def freevars(func):
+ """get objects defined in enclosing code that are referred to by func
+
+ returns a dict of {name:object}"""
+ if ismethod(func): func = func.__func__
+ if isfunction(func):
+ closures = func.__closure__ or ()
+ func = func.__code__.co_freevars # get freevars
+ else:
+ return {}
+
+ def get_cell_contents():
+ for name, c in zip(func, closures):
+ try:
+ cell_contents = c.cell_contents
+ except ValueError: # cell is empty
+ continue
+            yield name, cell_contents
+
+ return dict(get_cell_contents())
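+# Illustration of freevars on a simple closure (a hedged sketch):
+#
+#   >>> def outer():
+#   ...     y = 1
+#   ...     def inner(x):
+#   ...         return x + y
+#   ...     return inner
+#   >>> freevars(outer())
+#   {'y': 1}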
+
+# thanks to Davies Liu for recursion of globals
+def nestedglobals(func, recurse=True):
+ """get the names of any globals found within func"""
+ func = code(func)
+ if func is None: return list()
+ import sys
+ from .temp import capture
+ CAN_NULL = sys.hexversion >= 0x30b00a7 # NULL may be prepended >= 3.11a7
+ names = set()
+ with capture('stdout') as out:
+ dis.dis(func) #XXX: dis.dis(None) disassembles last traceback
+ for line in out.getvalue().splitlines():
+ if '_GLOBAL' in line:
+ name = line.split('(')[-1].split(')')[0]
+ if CAN_NULL:
+ names.add(name.replace('NULL + ', '').replace(' + NULL', ''))
+ else:
+ names.add(name)
+ for co in getattr(func, 'co_consts', tuple()):
+ if co and recurse and iscode(co):
+ names.update(nestedglobals(co, recurse=True))
+ return list(names)
+
+def referredglobals(func, recurse=True, builtin=False):
+ """get the names of objects in the global scope referred to by func"""
+ return globalvars(func, recurse, builtin).keys()
+
+def globalvars(func, recurse=True, builtin=False):
+ """get objects defined in global scope that are referred to by func
+
+ return a dict of {name:object}"""
+ if ismethod(func): func = func.__func__
+ if isfunction(func):
+ globs = vars(getmodule(sum)).copy() if builtin else {}
+ # get references from within closure
+ orig_func, func = func, set()
+ for obj in orig_func.__closure__ or {}:
+ try:
+ cell_contents = obj.cell_contents
+ except ValueError: # cell is empty
+ pass
+ else:
+ _vars = globalvars(cell_contents, recurse, builtin) or {}
+            func.update(_vars) #XXX: (above) be wary of infinite recursion?
+ globs.update(_vars)
+ # get globals
+ globs.update(orig_func.__globals__ or {})
+ # get names of references
+ if not recurse:
+ func.update(orig_func.__code__.co_names)
+ else:
+ func.update(nestedglobals(orig_func.__code__))
+ # find globals for all entries of func
+ for key in func.copy(): #XXX: unnecessary...?
+ nested_func = globs.get(key)
+ if nested_func is orig_func:
+ #func.remove(key) if key in func else None
+ continue #XXX: globalvars(func, False)?
+ func.update(globalvars(nested_func, True, builtin))
+ elif iscode(func):
+ globs = vars(getmodule(sum)).copy() if builtin else {}
+ #globs.update(globals())
+ if not recurse:
+ func = func.co_names # get names
+ else:
+ orig_func = func.co_name # to stop infinite recursion
+ func = set(nestedglobals(func))
+ # find globals for all entries of func
+ for key in func.copy(): #XXX: unnecessary...?
+ if key is orig_func:
+ #func.remove(key) if key in func else None
+ continue #XXX: globalvars(func, False)?
+ nested_func = globs.get(key)
+ func.update(globalvars(nested_func, True, builtin))
+ else:
+ return {}
+ #NOTE: if name not in __globals__, then we skip it...
+ return dict((name,globs[name]) for name in func if name in globs)
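+# Illustration of globalvars (names here are examples only):
+#
+#   >>> GLOBAL = 42
+#   >>> def f(): return GLOBAL
+#   >>> globalvars(f)
+#   {'GLOBAL': 42}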
+
+
+def varnames(func):
+ """get names of variables defined by func
+
+    returns a tuple (local vars, local vars referenced by nested functions)"""
+ func = code(func)
+ if not iscode(func):
+ return () #XXX: better ((),())? or None?
+ return func.co_varnames, func.co_cellvars
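+# e.g. (a hedged sketch): 'c' is a cell variable, so it is reported separately
+#
+#   >>> def f(a, b):
+#   ...     c = a + b
+#   ...     def g(): return c
+#   ...     return g
+#   >>> varnames(f)
+#   (('a', 'b', 'g'), ('c',))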
+
+
+def baditems(obj, exact=False, safe=False): #XXX: obj=globals() ?
+ """get items in object that fail to pickle"""
+ if not hasattr(obj,'__iter__'): # is not iterable
+ return [j for j in (badobjects(obj,0,exact,safe),) if j is not None]
+ obj = obj.values() if getattr(obj,'values',None) else obj
+ _obj = [] # can't use a set, as items may be unhashable
+    for i in obj:
+        if i not in _obj:
+            _obj.append(badobjects(i,0,exact,safe))
+ return [j for j in _obj if j is not None]
+
+
+def badobjects(obj, depth=0, exact=False, safe=False):
+ """get objects that fail to pickle"""
+ from dill import pickles
+ if not depth:
+ if pickles(obj,exact,safe): return None
+ return obj
+ return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \
+ for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
+
+def badtypes(obj, depth=0, exact=False, safe=False):
+ """get types for objects that fail to pickle"""
+ from dill import pickles
+ if not depth:
+ if pickles(obj,exact,safe): return None
+ return type(obj)
+ return dict(((attr, badtypes(getattr(obj,attr),depth-1,exact,safe)) \
+ for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
+
+def errors(obj, depth=0, exact=False, safe=False):
+ """get errors for objects that fail to pickle"""
+ from dill import pickles, copy
+ if not depth:
+ try:
+ pik = copy(obj)
+ if exact:
+ assert pik == obj, \
+ "Unpickling produces %s instead of %s" % (pik,obj)
+ assert type(pik) == type(obj), \
+ "Unpickling produces %s instead of %s" % (type(pik),type(obj))
+ return None
+ except Exception:
+ import sys
+ return sys.exc_info()[1]
+ _dict = {}
+ for attr in dir(obj):
+ try:
+ _attr = getattr(obj,attr)
+ except Exception:
+ import sys
+ _dict[attr] = sys.exc_info()[1]
+ continue
+ if not pickles(_attr,exact,safe):
+ _dict[attr] = errors(_attr,depth-1,exact,safe)
+ return _dict
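+# A hedged sketch of the typical detection workflow; generators are a classic
+# unpicklable case for dill, so the output shapes are indicative only:
+#
+#   >>> gen = (i for i in range(3))
+#   >>> baditems([1, 2, gen])   # -> [<generator object ...>]
+#   >>> badtypes(gen)           # -> <class 'generator'>
+#   >>> errors(gen)             # -> the exception raised while pickling gen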
+
+
+# EOF
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/logger.py b/llmeval-env/lib/python3.10/site-packages/dill/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..d975e8efb53efbea714616cf8ad49c3020c9d9d3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/logger.py
@@ -0,0 +1,285 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Author: Leonardo Gama (@leogama)
+# Copyright (c) 2022-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+Logging utilities for dill.
+
+The 'logger' object is dill's top-level logger.
+
+The 'adapter' object wraps the logger and implements a 'trace()' method that
+generates a detailed tree-style trace for the pickling call at log level INFO.
+
+The 'trace()' function sets and resets dill's logger log level, enabling and
+disabling the pickling trace.
+
+The trace shows a tree structure depicting the depth of each object serialized
+*with dill save functions*, but not the ones that use save functions from
+'pickle._Pickler.dispatch'. If the information is available, it also displays
+the size in bytes that the object contributed to the pickle stream (including
+its child objects). Sample trace output:
+
+ >>> import dill, dill.tests
+ >>> dill.detect.trace(True)
+ >>> dill.dump_session(main=dill.tests)
+ ┬ M1:
+ ├┬ F2:
+ │└ # F2 [32 B]
+ ├┬ D2:
+ │├┬ T4:
+ ││└ # T4 [35 B]
+ │├┬ D2:
+ ││├┬ T4:
+ │││└ # T4 [50 B]
+ ││├┬ D2:
+ │││└ # D2 [84 B]
+ ││└ # D2 [413 B]
+ │└ # D2 [763 B]
+ └ # M1 [813 B]
+"""
+
+__all__ = ['adapter', 'logger', 'trace']
+
+import codecs
+import contextlib
+import locale
+import logging
+import math
+import os
+from functools import partial
+from typing import TextIO, Union
+
+import dill
+
+# Tree drawing characters: Unicode to ASCII map.
+ASCII_MAP = str.maketrans({"│": "|", "├": "|", "┬": "+", "└": "`"})
+
+## Notes about the design choices ##
+
+# Here is some documentation of the Standard Library's logging internals that
+# can't be found completely in the official documentation. dill's logger is
+# obtained by calling logging.getLogger('dill') and therefore is an instance of
+# logging.getLoggerClass() at the call time. As this is controlled by the user,
+# in order to add some functionality to it it's necessary to use a LoggerAdapter
+# to wrap it, overriding some of the adapter's methods and creating new ones.
+#
+# Basic calling sequence
+# ======================
+#
+# Python's logging functionality can be conceptually divided into five steps:
+# 0. Check logging level -> abort if call level is below the logger's level
+# 1. Gather information -> construct a LogRecord from passed arguments and context
+# 2. Filter (optional) -> discard message if the record matches a filter
+# 3. Format -> format message with args, then format output string with message plus record
+# 4. Handle -> write the formatted string to output as defined in the handler
+#
+# dill.logging.logger.log -> # or logger.info, etc.
+# Logger.log -> \
+# Logger._log -> }- accept 'extra' parameter for custom record entries
+# Logger.makeRecord -> /
+# LogRecord.__init__
+# Logger.handle ->
+# Logger.callHandlers ->
+# Handler.handle ->
+# Filterer.filter ->
+# Filter.filter
+# StreamHandler.emit ->
+# Handler.format ->
+# Formatter.format ->
+# LogRecord.getMessage # does: record.message = msg % args
+# Formatter.formatMessage ->
+# PercentStyle.format # does: self._fmt % vars(record)
+#
+# NOTE: All methods from the second line on are from logging.__init__.py
+
+class TraceAdapter(logging.LoggerAdapter):
+ """
+ Tracks object tree depth and calculates pickled object size.
+
+ A single instance of this wraps the module's logger, as the logging API
+ doesn't allow setting it directly with a custom Logger subclass. The added
+ 'trace()' method receives a pickle instance as the first argument and
+ creates extra values to be added in the LogRecord from it, then calls
+ 'info()'.
+
+ Usage of logger with 'trace()' method:
+
+ >>> from dill.logger import adapter as logger #NOTE: not dill.logger.logger
+ >>> ...
+ >>> def save_atype(pickler, obj):
+ >>> logger.trace(pickler, "Message with %s and %r etc. placeholders", 'text', obj)
+ >>> ...
+ """
+ def __init__(self, logger):
+ self.logger = logger
+ def addHandler(self, handler):
+ formatter = TraceFormatter("%(prefix)s%(message)s%(suffix)s", handler=handler)
+ handler.setFormatter(formatter)
+ self.logger.addHandler(handler)
+ def removeHandler(self, handler):
+ self.logger.removeHandler(handler)
+ def process(self, msg, kwargs):
+ # A no-op override, as we don't have self.extra.
+ return msg, kwargs
+ def trace_setup(self, pickler):
+ # Called by Pickler.dump().
+ if not dill._dill.is_dill(pickler, child=False):
+ return
+ if self.isEnabledFor(logging.INFO):
+ pickler._trace_depth = 1
+ pickler._size_stack = []
+ else:
+ pickler._trace_depth = None
+ def trace(self, pickler, msg, *args, **kwargs):
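+        # Messages starting with '#' close an object that an earlier message
+        # opened; pairing the open/close stream positions on _size_stack
+        # gives the bytes each object (with its children) added to the pickle.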
+ if not hasattr(pickler, '_trace_depth'):
+ logger.info(msg, *args, **kwargs)
+ return
+ if pickler._trace_depth is None:
+ return
+ extra = kwargs.get('extra', {})
+ pushed_obj = msg.startswith('#')
+ size = None
+ try:
+ # Streams are not required to be tellable.
+ size = pickler._file.tell()
+ frame = pickler.framer.current_frame
+ try:
+ size += frame.tell()
+ except AttributeError:
+ # PyPy may use a BytesBuilder as frame
+ size += len(frame)
+ except (AttributeError, TypeError):
+ pass
+ if size is not None:
+ if not pushed_obj:
+ pickler._size_stack.append(size)
+ else:
+ size -= pickler._size_stack.pop()
+ extra['size'] = size
+ if pushed_obj:
+ pickler._trace_depth -= 1
+ extra['depth'] = pickler._trace_depth
+ kwargs['extra'] = extra
+ self.info(msg, *args, **kwargs)
+ if not pushed_obj:
+ pickler._trace_depth += 1
+
+class TraceFormatter(logging.Formatter):
+ """
+ Generates message prefix and suffix from record.
+
+ This Formatter adds prefix and suffix strings to the log message in trace
+    mode (and also provides empty string defaults for normal logs).
+ """
+ def __init__(self, *args, handler=None, **kwargs):
+ super().__init__(*args, **kwargs)
+ try:
+ encoding = handler.stream.encoding
+ if encoding is None:
+ raise AttributeError
+ except AttributeError:
+ encoding = locale.getpreferredencoding()
+ try:
+ encoding = codecs.lookup(encoding).name
+ except LookupError:
+ self.is_utf8 = False
+ else:
+ self.is_utf8 = (encoding == codecs.lookup('utf-8').name)
+ def format(self, record):
+ fields = {'prefix': "", 'suffix': ""}
+ if getattr(record, 'depth', 0) > 0:
+ if record.msg.startswith("#"):
+ prefix = (record.depth - 1)*"│" + "└"
+ elif record.depth == 1:
+ prefix = "┬"
+ else:
+ prefix = (record.depth - 2)*"│" + "├┬"
+ if not self.is_utf8:
+ prefix = prefix.translate(ASCII_MAP) + "-"
+ fields['prefix'] = prefix + " "
+ if hasattr(record, 'size') and record.size is not None and record.size >= 1:
+ # Show object size in human-readable form.
+ power = int(math.log(record.size, 2)) // 10
+ size = record.size >> power*10
+            fields['suffix'] = " [%d %sB]" % (size, ("KMGTP"[power-1] + "i") if power else "")
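+            # e.g. record.size == 2500: power = int(log2(2500))//10 = 1,
+            # size = 2500 >> 10 = 2, so the suffix reads " [2 KiB]"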
+ vars(record).update(fields)
+ return super().format(record)
+
+logger = logging.getLogger('dill')
+logger.propagate = False
+adapter = TraceAdapter(logger)
+stderr_handler = logging._StderrHandler()
+adapter.addHandler(stderr_handler)
+
+def trace(arg: Union[bool, TextIO, str, os.PathLike] = None, *, mode: str = 'a') -> None:
+ """print a trace through the stack when pickling; useful for debugging
+
+ With a single boolean argument, enable or disable the tracing.
+
+ Example usage:
+
+ >>> import dill
+ >>> dill.detect.trace(True)
+ >>> dill.dump_session()
+
+ Alternatively, ``trace()`` can be used as a context manager. With no
+ arguments, it just takes care of restoring the tracing state on exit.
+ Either a file handle, or a file name and (optionally) a file mode may be
+    specified to redirect the tracing output in the ``with`` block context. A
+ log function is yielded by the manager so the user can write extra
+ information to the file.
+
+ Example usage:
+
+ >>> from dill import detect
+ >>> D = {'a': 42, 'b': {'x': None}}
+ >>> with detect.trace():
+ >>> dumps(D)
+ ┬ D2:
+ ├┬ D2:
+ │└ # D2 [8 B]
+ └ # D2 [22 B]
+ >>> squared = lambda x: x**2
+ >>> with detect.trace('output.txt', mode='w') as log:
+ >>> log("> D = %r", D)
+ >>> dumps(D)
+ >>> log("> squared = %r", squared)
+ >>> dumps(squared)
+
+ Arguments:
+ arg: a boolean value, or an optional file-like or path-like object for the context manager
+ mode: mode string for ``open()`` if a file name is passed as the first argument
+ """
+ if not isinstance(arg, bool):
+ return TraceManager(file=arg, mode=mode)
+ logger.setLevel(logging.INFO if arg else logging.WARNING)
+
+class TraceManager(contextlib.AbstractContextManager):
+ """context manager version of trace(); can redirect the trace to a file"""
+ def __init__(self, file, mode):
+ self.file = file
+ self.mode = mode
+ self.redirect = file is not None
+ self.file_is_stream = hasattr(file, 'write')
+ def __enter__(self):
+ if self.redirect:
+ stderr_handler.flush()
+ if self.file_is_stream:
+ self.handler = logging.StreamHandler(self.file)
+ else:
+ self.handler = logging.FileHandler(self.file, self.mode)
+ adapter.removeHandler(stderr_handler)
+ adapter.addHandler(self.handler)
+ self.old_level = adapter.getEffectiveLevel()
+ adapter.setLevel(logging.INFO)
+ return adapter.info
+ def __exit__(self, *exc_info):
+ adapter.setLevel(self.old_level)
+ if self.redirect:
+ adapter.removeHandler(self.handler)
+ adapter.addHandler(stderr_handler)
+ if not self.file_is_stream:
+ self.handler.close()
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/objtypes.py b/llmeval-env/lib/python3.10/site-packages/dill/objtypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..526b5835ea213fe08cf81a1d389368bd95a86cc8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/objtypes.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+all Python Standard Library object types (currently: CH 1-15 @ 2.7)
+and some other common object types (i.e. numpy.ndarray)
+
+to load more objects and types, use dill.load_types()
+"""
+
+# non-local import of dill.objects
+from dill import objects
+for _type in objects.keys():
+ exec("%s = type(objects['%s'])" % (_type,_type))
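+# e.g. objects['IntType'] holds int(1), so the exec above binds IntType = int
+# in this module's namespace (a hedged illustration).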
+
+del objects
+try:
+ del _type
+except NameError:
+ pass
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/pointers.py b/llmeval-env/lib/python3.10/site-packages/dill/pointers.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3b48cae00b87830dc15dcd8fc047ca74aae5b66
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/pointers.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+__all__ = ['parent', 'reference', 'at', 'parents', 'children']
+
+import gc
+import sys
+
+from ._dill import _proxy_helper as reference
+from ._dill import _locate_object as at
+
+def parent(obj, objtype, ignore=()):
+ """
+>>> listiter = iter([4,5,6,7])
+>>> obj = parent(listiter, list)
+>>> obj == [4,5,6,7] # actually 'is', but we no longer hold a handle to the original
+True
+
+NOTE: objtype can be a single type (e.g. int or list) or a tuple of types.
+
+WARNING: if obj is a sequence (e.g. list), may produce unexpected results.
+Parent finds *one* parent (e.g. the last member of the sequence).
+ """
+ depth = 1 #XXX: always looking for the parent (only, right?)
+ chain = parents(obj, objtype, depth, ignore)
+ parent = chain.pop()
+ if parent is obj:
+ return None
+ return parent
+
+
+def parents(obj, objtype, depth=1, ignore=()): #XXX: objtype=object ?
+ """Find the chain of referents for obj. Chain will end with obj.
+
+ objtype: an object type or tuple of types to search for
+ depth: search depth (e.g. depth=2 is 'grandparents')
+ ignore: an object or tuple of objects to ignore in the search
+ """
+ edge_func = gc.get_referents # looking for refs, not back_refs
+ predicate = lambda x: isinstance(x, objtype) # looking for parent type
+ #if objtype is None: predicate = lambda x: True #XXX: in obj.mro() ?
+ ignore = (ignore,) if not hasattr(ignore, '__len__') else ignore
+ ignore = (id(obj) for obj in ignore)
+ chain = find_chain(obj, predicate, edge_func, depth)[::-1]
+ #XXX: should pop off obj... ?
+ return chain
+
+
+def children(obj, objtype, depth=1, ignore=()): #XXX: objtype=object ?
+ """Find the chain of referrers for obj. Chain will start with obj.
+
+ objtype: an object type or tuple of types to search for
+ depth: search depth (e.g. depth=2 is 'grandchildren')
+ ignore: an object or tuple of objects to ignore in the search
+
+ NOTE: a common thing to ignore is all globals, 'ignore=(globals(),)'
+
+ NOTE: repeated calls may yield different results, as python stores
+ the last value in the special variable '_'; thus, it is often good
+ to execute something to replace '_' (e.g. >>> 1+1).
+ """
+ edge_func = gc.get_referrers # looking for back_refs, not refs
+ predicate = lambda x: isinstance(x, objtype) # looking for child type
+ #if objtype is None: predicate = lambda x: True #XXX: in obj.mro() ?
+ ignore = (ignore,) if not hasattr(ignore, '__len__') else ignore
+ ignore = (id(obj) for obj in ignore)
+ chain = find_chain(obj, predicate, edge_func, depth, ignore)
+ #XXX: should pop off obj... ?
+ return chain
+
+
+# more generic helper function (cut-n-paste from objgraph)
+# Source at http://mg.pov.lt/objgraph/
+# Copyright (c) 2008-2010 Marius Gedminas
+# Copyright (c) 2010 Stefano Rivera
+# Released under the MIT licence (see objgraph/objgraph.py)
+
+def find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()):
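+    # Breadth-first search out from obj along edge_func (gc referents or
+    # referrers); the 'depth' and 'parent' tables let the chain be rebuilt
+    # from the first object satisfying predicate back to obj.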
+ queue = [obj]
+ depth = {id(obj): 0}
+ parent = {id(obj): None}
+ ignore = set(extra_ignore)
+ ignore.add(id(extra_ignore))
+ ignore.add(id(queue))
+ ignore.add(id(depth))
+ ignore.add(id(parent))
+ ignore.add(id(ignore))
+ ignore.add(id(sys._getframe())) # this function
+ ignore.add(id(sys._getframe(1))) # find_chain/find_backref_chain, likely
+ gc.collect()
+ while queue:
+ target = queue.pop(0)
+ if predicate(target):
+ chain = [target]
+ while parent[id(target)] is not None:
+ target = parent[id(target)]
+ chain.append(target)
+ return chain
+ tdepth = depth[id(target)]
+ if tdepth < max_depth:
+ referrers = edge_func(target)
+ ignore.add(id(referrers))
+ for source in referrers:
+ if id(source) in ignore:
+ continue
+ if id(source) not in depth:
+ depth[id(source)] = tdepth + 1
+ parent[id(source)] = target
+ queue.append(source)
+ return [obj] # not found
+
+
+# backward compatibility
+refobject = at
+
+
+# EOF
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/session.py b/llmeval-env/lib/python3.10/site-packages/dill/session.py
new file mode 100644
index 0000000000000000000000000000000000000000..e91068afc5b77dffe9646f0167ab2af5e97523fb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/session.py
@@ -0,0 +1,613 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Author: Leonardo Gama (@leogama)
+# Copyright (c) 2008-2015 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+Pickle and restore the interpreter session.
+"""
+
+__all__ = [
+ 'dump_module', 'load_module', 'load_module_asdict',
+ 'dump_session', 'load_session' # backward compatibility
+]
+
+import re
+import os
+import sys
+import warnings
+
+from dill import _dill, Pickler, Unpickler
+from ._dill import (
+ BuiltinMethodType, FunctionType, MethodType, ModuleType, TypeType,
+ _import_module, _is_builtin_module, _is_imported_module, _main_module,
+    _reverse_typemap, __builtin__, UnpicklingError,
+)
+
+# Type hints.
+from typing import Optional, Union
+
+import pathlib
+import tempfile
+
+TEMPDIR = pathlib.PurePath(tempfile.gettempdir())
+
+def _module_map():
+ """get map of imported modules"""
+ from collections import defaultdict
+ from types import SimpleNamespace
+ modmap = SimpleNamespace(
+ by_name=defaultdict(list),
+ by_id=defaultdict(list),
+ top_level={},
+ )
+ for modname, module in sys.modules.items():
+ if modname in ('__main__', '__mp_main__') or not isinstance(module, ModuleType):
+ continue
+ if '.' not in modname:
+ modmap.top_level[id(module)] = modname
+ for objname, modobj in module.__dict__.items():
+ modmap.by_name[objname].append((modobj, modname))
+ modmap.by_id[id(modobj)].append((modobj, objname, modname))
+ return modmap
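+# Illustrative shape of the returned map (values vary per session; 'math' is
+# only an example module):
+#   modmap.by_name['sin']      -> [(<built-in function sin>, 'math'), ...]
+#   modmap.by_id[id(math.sin)] -> [(<built-in function sin>, 'sin', 'math'), ...]
+#   modmap.top_level[id(math)] -> 'math'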
+
+IMPORTED_AS_TYPES = (ModuleType, TypeType, FunctionType, MethodType, BuiltinMethodType)
+if 'PyCapsuleType' in _reverse_typemap:
+ IMPORTED_AS_TYPES += (_reverse_typemap['PyCapsuleType'],)
+IMPORTED_AS_MODULES = ('ctypes', 'typing', 'subprocess', 'threading',
+ r'concurrent\.futures(\.\w+)?', r'multiprocessing(\.\w+)?')
+IMPORTED_AS_MODULES = tuple(re.compile(x) for x in IMPORTED_AS_MODULES)
+
+def _lookup_module(modmap, name, obj, main_module):
+ """lookup name or id of obj if module is imported"""
+ for modobj, modname in modmap.by_name[name]:
+ if modobj is obj and sys.modules[modname] is not main_module:
+ return modname, name
+ __module__ = getattr(obj, '__module__', None)
+ if isinstance(obj, IMPORTED_AS_TYPES) or (__module__ is not None
+ and any(regex.fullmatch(__module__) for regex in IMPORTED_AS_MODULES)):
+ for modobj, objname, modname in modmap.by_id[id(obj)]:
+ if sys.modules[modname] is not main_module:
+ return modname, objname
+ return None, None
+
+def _stash_modules(main_module):
+ modmap = _module_map()
+ newmod = ModuleType(main_module.__name__)
+
+ imported = []
+ imported_as = []
+ imported_top_level = [] # keep separated for backward compatibility
+ original = {}
+ for name, obj in main_module.__dict__.items():
+ if obj is main_module:
+ original[name] = newmod # self-reference
+ elif obj is main_module.__dict__:
+ original[name] = newmod.__dict__
+ # Avoid incorrectly matching a singleton value in another package (ex.: __doc__).
+ elif any(obj is singleton for singleton in (None, False, True)) \
+ or isinstance(obj, ModuleType) and _is_builtin_module(obj): # always saved by ref
+ original[name] = obj
+ else:
+ source_module, objname = _lookup_module(modmap, name, obj, main_module)
+ if source_module is not None:
+ if objname == name:
+ imported.append((source_module, name))
+ else:
+ imported_as.append((source_module, objname, name))
+ else:
+ try:
+ imported_top_level.append((modmap.top_level[id(obj)], name))
+ except KeyError:
+ original[name] = obj
+
+ if len(original) < len(main_module.__dict__):
+ newmod.__dict__.update(original)
+ newmod.__dill_imported = imported
+ newmod.__dill_imported_as = imported_as
+ newmod.__dill_imported_top_level = imported_top_level
+ if getattr(newmod, '__loader__', None) is None and _is_imported_module(main_module):
+ # Trick _is_imported_module() to force saving as an imported module.
+ newmod.__loader__ = True # will be discarded by save_module()
+ return newmod
+ else:
+ return main_module
+
+def _restore_modules(unpickler, main_module):
+ try:
+ for modname, name in main_module.__dict__.pop('__dill_imported'):
+ main_module.__dict__[name] = unpickler.find_class(modname, name)
+ for modname, objname, name in main_module.__dict__.pop('__dill_imported_as'):
+ main_module.__dict__[name] = unpickler.find_class(modname, objname)
+ for modname, name in main_module.__dict__.pop('__dill_imported_top_level'):
+ main_module.__dict__[name] = __import__(modname)
+ except KeyError:
+ pass
+
+#NOTE: 06/03/15 renamed main_module to main
+def dump_module(
+ filename: Union[str, os.PathLike] = None,
+ module: Optional[Union[ModuleType, str]] = None,
+ refimported: bool = False,
+ **kwds
+) -> None:
+ """Pickle the current state of :py:mod:`__main__` or another module to a file.
+
+ Save the contents of :py:mod:`__main__` (e.g. from an interactive
+ interpreter session), an imported module, or a module-type object (e.g.
+ built with :py:class:`~types.ModuleType`), to a file. The pickled
+ module can then be restored with the function :py:func:`load_module`.
+
+ Args:
+ filename: a path-like object or a writable stream. If `None`
+ (the default), write to a named file in a temporary directory.
+ module: a module object or the name of an importable module. If `None`
+ (the default), :py:mod:`__main__` is saved.
+ refimported: if `True`, all objects identified as having been imported
+ into the module's namespace are saved by reference. *Note:* this is
+        similar to, but independent from, ``dill.settings['byref']``, as
+ ``refimported`` refers to virtually all imported objects, while
+ ``byref`` only affects select objects.
+ **kwds: extra keyword arguments passed to :py:class:`Pickler()`.
+
+ Raises:
+ :py:exc:`PicklingError`: if pickling fails.
+
+ Examples:
+
+ - Save current interpreter session state:
+
+ >>> import dill
+ >>> squared = lambda x: x*x
+ >>> dill.dump_module() # save state of __main__ to /tmp/session.pkl
+
+ - Save the state of an imported/importable module:
+
+ >>> import dill
+ >>> import pox
+ >>> pox.plus_one = lambda x: x+1
+ >>> dill.dump_module('pox_session.pkl', module=pox)
+
+ - Save the state of a non-importable, module-type object:
+
+ >>> import dill
+ >>> from types import ModuleType
+ >>> foo = ModuleType('foo')
+ >>> foo.values = [1,2,3]
+ >>> import math
+ >>> foo.sin = math.sin
+ >>> dill.dump_module('foo_session.pkl', module=foo, refimported=True)
+
+ - Restore the state of the saved modules:
+
+ >>> import dill
+ >>> dill.load_module()
+ >>> squared(2)
+ 4
+ >>> pox = dill.load_module('pox_session.pkl')
+ >>> pox.plus_one(1)
+ 2
+ >>> foo = dill.load_module('foo_session.pkl')
+ >>> [foo.sin(x) for x in foo.values]
+ [0.8414709848078965, 0.9092974268256817, 0.1411200080598672]
+
+ - Use `refimported` to save imported objects by reference:
+
+ >>> import dill
+ >>> from html.entities import html5
+ >>> type(html5), len(html5)
+ (dict, 2231)
+ >>> import io
+ >>> buf = io.BytesIO()
+ >>> dill.dump_module(buf) # saves __main__, with html5 saved by value
+ >>> len(buf.getvalue()) # pickle size in bytes
+ 71665
+ >>> buf = io.BytesIO()
+ >>> dill.dump_module(buf, refimported=True) # html5 saved by reference
+ >>> len(buf.getvalue())
+ 438
+
+ *Changed in version 0.3.6:* Function ``dump_session()`` was renamed to
+ ``dump_module()``. Parameters ``main`` and ``byref`` were renamed to
+ ``module`` and ``refimported``, respectively.
+
+ Note:
+ Currently, ``dill.settings['byref']`` and ``dill.settings['recurse']``
+ don't apply to this function.
+ """
+ for old_par, par in [('main', 'module'), ('byref', 'refimported')]:
+ if old_par in kwds:
+ message = "The argument %r has been renamed %r" % (old_par, par)
+ if old_par == 'byref':
+ message += " to distinguish it from dill.settings['byref']"
+ warnings.warn(message + ".", PendingDeprecationWarning)
+ if locals()[par]: # the defaults are None and False
+ raise TypeError("both %r and %r arguments were used" % (par, old_par))
+ refimported = kwds.pop('byref', refimported)
+ module = kwds.pop('main', module)
+
+ from .settings import settings
+ protocol = settings['protocol']
+ main = module
+ if main is None:
+ main = _main_module
+ elif isinstance(main, str):
+ main = _import_module(main)
+ if not isinstance(main, ModuleType):
+ raise TypeError("%r is not a module" % main)
+ if hasattr(filename, 'write'):
+ file = filename
+ else:
+ if filename is None:
+ filename = str(TEMPDIR/'session.pkl')
+ file = open(filename, 'wb')
+ try:
+ pickler = Pickler(file, protocol, **kwds)
+ pickler._original_main = main
+ if refimported:
+ main = _stash_modules(main)
+ pickler._main = main #FIXME: dill.settings are disabled
+ pickler._byref = False # disable pickling by name reference
+ pickler._recurse = False # disable pickling recursion for globals
+ pickler._session = True # is best indicator of when pickling a session
+ pickler._first_pass = True
+ pickler._main_modified = main is not pickler._original_main
+ pickler.dump(main)
+ finally:
+ if file is not filename: # if newly opened file
+ file.close()
+ return
+
+# Backward compatibility.
+def dump_session(filename=None, main=None, byref=False, **kwds):
+ warnings.warn("dump_session() has been renamed dump_module()", PendingDeprecationWarning)
+ dump_module(filename, module=main, refimported=byref, **kwds)
+dump_session.__doc__ = dump_module.__doc__
+
+class _PeekableReader:
+ """lightweight stream wrapper that implements peek()"""
+ def __init__(self, stream):
+ self.stream = stream
+ def read(self, n):
+ return self.stream.read(n)
+ def readline(self):
+ return self.stream.readline()
+ def tell(self):
+ return self.stream.tell()
+ def close(self):
+ return self.stream.close()
+ def peek(self, n):
+ stream = self.stream
+ try:
+ if hasattr(stream, 'flush'): stream.flush()
+ position = stream.tell()
+ stream.seek(position) # assert seek() works before reading
+ chunk = stream.read(n)
+ stream.seek(position)
+ return chunk
+ except (AttributeError, OSError):
+            raise NotImplementedError("stream is not peekable: %r" % (stream,)) from None
+
+def _make_peekable(stream):
+ """return stream as an object with a peek() method"""
+ import io
+ if hasattr(stream, 'peek'):
+ return stream
+ if not (hasattr(stream, 'tell') and hasattr(stream, 'seek')):
+ try:
+ return io.BufferedReader(stream)
+ except Exception:
+ pass
+ return _PeekableReader(stream)
+
+def _identify_module(file, main=None):
+ """identify the name of the module stored in the given file-type object"""
+ from pickletools import genops
+ UNICODE = {'UNICODE', 'BINUNICODE', 'SHORT_BINUNICODE'}
+ found_import = False
+ try:
+ for opcode, arg, pos in genops(file.peek(256)):
+ if not found_import:
+ if opcode.name in ('GLOBAL', 'SHORT_BINUNICODE') and \
+ arg.endswith('_import_module'):
+ found_import = True
+ else:
+ if opcode.name in UNICODE:
+ return arg
+ else:
+ raise UnpicklingError("reached STOP without finding main module")
+ except (NotImplementedError, ValueError) as error:
+    # ValueError occurs when the end of the chunk is reached (without a STOP).
+ if isinstance(error, NotImplementedError) and main is not None:
+ # file is not peekable, but we have main.
+ return None
+ raise UnpicklingError("unable to identify main module") from error
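+# Hedged sketch: a stream produced by dump_module() typically begins with a
+# GLOBAL-style opcode referencing '_import_module' followed by a UNICODE
+# opcode carrying the module's name, which is what the scan above recovers.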
+
+def load_module(
+ filename: Union[str, os.PathLike] = None,
+ module: Optional[Union[ModuleType, str]] = None,
+ **kwds
+) -> Optional[ModuleType]:
+ """Update the selected module (default is :py:mod:`__main__`) with
+ the state saved at ``filename``.
+
+ Restore a module to the state saved with :py:func:`dump_module`. The
+ saved module can be :py:mod:`__main__` (e.g. an interpreter session),
+ an imported module, or a module-type object (e.g. created with
+ :py:class:`~types.ModuleType`).
+
+    When restoring the state of a non-importable, module-type object, the
+    current instance of this module may be passed as the argument ``module``.
+    Otherwise, a new instance is created with :py:class:`~types.ModuleType`
+    and returned.
+
+ Args:
+ filename: a path-like object or a readable stream. If `None`
+ (the default), read from a named file in a temporary directory.
+ module: a module object or the name of an importable module;
+ the module name and kind (i.e. imported or non-imported) must
+ match the name and kind of the module stored at ``filename``.
+ **kwds: extra keyword arguments passed to :py:class:`Unpickler()`.
+
+ Raises:
+ :py:exc:`UnpicklingError`: if unpickling fails.
+        :py:exc:`ValueError`: if the argument ``module`` and the module
+            saved at ``filename`` are incompatible.
+
+ Returns:
+        A module object, if the saved module is not :py:mod:`__main__` and
+        a module instance wasn't provided with the argument ``module``.
+
+ Examples:
+
+ - Save the state of some modules:
+
+ >>> import dill
+ >>> squared = lambda x: x*x
+ >>> dill.dump_module() # save state of __main__ to /tmp/session.pkl
+ >>>
+ >>> import pox # an imported module
+ >>> pox.plus_one = lambda x: x+1
+ >>> dill.dump_module('pox_session.pkl', module=pox)
+ >>>
+ >>> from types import ModuleType
+ >>> foo = ModuleType('foo') # a module-type object
+ >>> foo.values = [1,2,3]
+ >>> import math
+ >>> foo.sin = math.sin
+ >>> dill.dump_module('foo_session.pkl', module=foo, refimported=True)
+
+ - Restore the state of the interpreter:
+
+ >>> import dill
+ >>> dill.load_module() # updates __main__ from /tmp/session.pkl
+ >>> squared(2)
+ 4
+
+ - Load the saved state of an importable module:
+
+ >>> import dill
+ >>> pox = dill.load_module('pox_session.pkl')
+ >>> pox.plus_one(1)
+ 2
+ >>> import sys
+ >>> pox in sys.modules.values()
+ True
+
+ - Load the saved state of a non-importable module-type object:
+
+ >>> import dill
+ >>> foo = dill.load_module('foo_session.pkl')
+ >>> [foo.sin(x) for x in foo.values]
+ [0.8414709848078965, 0.9092974268256817, 0.1411200080598672]
+ >>> import math
+ >>> foo.sin is math.sin # foo.sin was saved by reference
+ True
+ >>> import sys
+ >>> foo in sys.modules.values()
+ False
+
+ - Update the state of a non-importable module-type object:
+
+ >>> import dill
+ >>> from types import ModuleType
+ >>> foo = ModuleType('foo')
+ >>> foo.values = ['a','b']
+ >>> foo.sin = lambda x: x*x
+ >>> dill.load_module('foo_session.pkl', module=foo)
+ >>> [foo.sin(x) for x in foo.values]
+ [0.8414709848078965, 0.9092974268256817, 0.1411200080598672]
+
+ *Changed in version 0.3.6:* Function ``load_session()`` was renamed to
+ ``load_module()``. Parameter ``main`` was renamed to ``module``.
+
+ See also:
+ :py:func:`load_module_asdict` to load the contents of module saved
+ with :py:func:`dump_module` into a dictionary.
+ """
+ if 'main' in kwds:
+ warnings.warn(
+ "The argument 'main' has been renamed 'module'.",
+ PendingDeprecationWarning
+ )
+ if module is not None:
+ raise TypeError("both 'module' and 'main' arguments were used")
+ module = kwds.pop('main')
+ main = module
+ if hasattr(filename, 'read'):
+ file = filename
+ else:
+ if filename is None:
+ filename = str(TEMPDIR/'session.pkl')
+ file = open(filename, 'rb')
+ try:
+ file = _make_peekable(file)
+ #FIXME: dill.settings are disabled
+ unpickler = Unpickler(file, **kwds)
+ unpickler._session = True
+
+ # Resolve unpickler._main
+ pickle_main = _identify_module(file, main)
+ if main is None and pickle_main is not None:
+ main = pickle_main
+ if isinstance(main, str):
+ if main.startswith('__runtime__.'):
+ # Create runtime module to load the session into.
+ main = ModuleType(main.partition('.')[-1])
+ else:
+ main = _import_module(main)
+ if main is not None:
+ if not isinstance(main, ModuleType):
+ raise TypeError("%r is not a module" % main)
+ unpickler._main = main
+ else:
+ main = unpickler._main
+
+ # Check against the pickle's main.
+ is_main_imported = _is_imported_module(main)
+ if pickle_main is not None:
+ is_runtime_mod = pickle_main.startswith('__runtime__.')
+ if is_runtime_mod:
+ pickle_main = pickle_main.partition('.')[-1]
+ error_msg = "can't update{} module{} %r with the saved state of{} module{} %r"
+ if is_runtime_mod and is_main_imported:
+ raise ValueError(
+ error_msg.format(" imported", "", "", "-type object")
+ % (main.__name__, pickle_main)
+ )
+ if not is_runtime_mod and not is_main_imported:
+ raise ValueError(
+ error_msg.format("", "-type object", " imported", "")
+ % (pickle_main, main.__name__)
+ )
+ if main.__name__ != pickle_main:
+ raise ValueError(error_msg.format("", "", "", "") % (main.__name__, pickle_main))
+
+ # This is for find_class() to be able to locate it.
+ if not is_main_imported:
+ runtime_main = '__runtime__.%s' % main.__name__
+ sys.modules[runtime_main] = main
+
+ loaded = unpickler.load()
+ finally:
+ if not hasattr(filename, 'read'): # if newly opened file
+ file.close()
+ try:
+ del sys.modules[runtime_main]
+ except (KeyError, NameError):
+ pass
+ assert loaded is main
+ _restore_modules(unpickler, main)
+ if main is _main_module or main is module:
+ return None
+ else:
+ return main
+
+# Backward compatibility.
+def load_session(filename=None, main=None, **kwds):
+ warnings.warn("load_session() has been renamed load_module().", PendingDeprecationWarning)
+ load_module(filename, module=main, **kwds)
+load_session.__doc__ = load_module.__doc__
+
+def load_module_asdict(
+ filename: Union[str, os.PathLike] = None,
+ update: bool = False,
+ **kwds
+) -> dict:
+ """
+ Load the contents of a saved module into a dictionary.
+
+ ``load_module_asdict()`` is the near-equivalent of::
+
+ lambda filename: vars(dill.load_module(filename)).copy()
+
+    however, it does not alter the original module. Also, the path of
+    the loaded module is stored in the ``__session__`` attribute.
+
+ Args:
+ filename: a path-like object or a readable stream. If `None`
+ (the default), read from a named file in a temporary directory.
+ update: if `True`, initialize the dictionary with the current state
+ of the module prior to loading the state stored at filename.
+ **kwds: extra keyword arguments passed to :py:class:`Unpickler()`
+
+ Raises:
+ :py:exc:`UnpicklingError`: if unpickling fails
+
+ Returns:
+ A copy of the restored module's dictionary.
+
+ Note:
+        If ``update`` is True, the corresponding module may first be imported
+        into the current namespace before the saved state is loaded from
+        ``filename`` into the dictionary. Any module imported into the current
+        namespace as a side effect of using ``update`` is not itself modified
+        by loading the saved module into the dictionary.
+
+ Example:
+ >>> import dill
+ >>> alist = [1, 2, 3]
+ >>> anum = 42
+ >>> dill.dump_module()
+ >>> anum = 0
+ >>> new_var = 'spam'
+ >>> main = dill.load_module_asdict()
+ >>> main['__name__'], main['__session__']
+ ('__main__', '/tmp/session.pkl')
+ >>> main is globals() # loaded objects don't reference globals
+ False
+ >>> main['alist'] == alist
+ True
+ >>> main['alist'] is alist # was saved by value
+ False
+ >>> main['anum'] == anum # changed after the session was saved
+ False
+ >>> new_var in main # would be True if the option 'update' was set
+ False
+ """
+ if 'module' in kwds:
+ raise TypeError("'module' is an invalid keyword argument for load_module_asdict()")
+ if hasattr(filename, 'read'):
+ file = filename
+ else:
+ if filename is None:
+ filename = str(TEMPDIR/'session.pkl')
+ file = open(filename, 'rb')
+ try:
+ file = _make_peekable(file)
+ main_name = _identify_module(file)
+ old_main = sys.modules.get(main_name)
+ main = ModuleType(main_name)
+ if update:
+ if old_main is None:
+ old_main = _import_module(main_name)
+ main.__dict__.update(old_main.__dict__)
+ else:
+ main.__builtins__ = __builtin__
+ sys.modules[main_name] = main
+ load_module(file, **kwds)
+ finally:
+ if not hasattr(filename, 'read'): # if newly opened file
+ file.close()
+ try:
+ if old_main is None:
+ del sys.modules[main_name]
+ else:
+ sys.modules[main_name] = old_main
+ except NameError: # failed before setting old_main
+ pass
+ main.__session__ = str(filename)
+ return main.__dict__
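+
+# Sketch (illustrative), continuing the docstring example above: with
+# update=True the dictionary is seeded from the module's current state, so
+# names created after dump_module() was called also appear in the result,
+# while the saved state still wins for names present in both.
+#   >>> main = dill.load_module_asdict(update=True)
+#   >>> 'new_var' in main   # contrast with the update=False run above
+#   True
+#   >>> main['anum']        # the saved 42 overrides the current 0
+#   42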
+
+
+# Internal exports for backward compatibility with dill v0.3.5.1
+# Can't be placed in dill._dill because of circular import problems.
+for name in (
+ '_lookup_module', '_module_map', '_restore_modules', '_stash_modules',
+ 'dump_session', 'load_session' # backward compatibility functions
+):
+ setattr(_dill, name, globals()[name])
+del name
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/settings.py b/llmeval-env/lib/python3.10/site-packages/dill/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..19c18fc5f5159c99ef80759bde406bf15dfecc63
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/settings.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+global settings for Pickler
+"""
+
+from pickle import DEFAULT_PROTOCOL
+
+settings = {
+ #'main' : None,
+ 'protocol' : DEFAULT_PROTOCOL,
+ 'byref' : False,
+ #'strictio' : False,
+ 'fmode' : 0, #HANDLE_FMODE
+ 'recurse' : False,
+ 'ignore' : False,
+}
+
+del DEFAULT_PROTOCOL
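+
+# Usage sketch (illustrative): these defaults are read at call time by the
+# Pickler, so mutating the dict changes behavior process-wide.
+#   >>> import dill
+#   >>> dill.settings['recurse'] = True    # recurse into a function's globals
+#   >>> payload = dill.dumps(lambda x: x + 1)  # picks up the new default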
+
diff --git a/llmeval-env/lib/python3.10/site-packages/dill/source.py b/llmeval-env/lib/python3.10/site-packages/dill/source.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d7bd0d3733c5a76847c4066fe56713145c8a438
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/dill/source.py
@@ -0,0 +1,1017 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+#
+# inspired by inspect.py from Python-2.7.6
+# inspect.py author: 'Ka-Ping Yee <ping@lfw.org>'
+# inspect.py merged into original dill.source by Mike McKerns 4/13/14
+"""
+Extensions to python's 'inspect' module, which can be used
+to retrieve information from live python objects. The methods
+defined in this module are augmented to facilitate access to
+source code of interactively defined functions and classes,
+as well as provide access to source code for objects defined
+in a file.
+"""
+
+__all__ = ['findsource', 'getsourcelines', 'getsource', 'indent', 'outdent', \
+ '_wrap', 'dumpsource', 'getname', '_namespace', 'getimport', \
+ '_importable', 'importable','isdynamic', 'isfrommain']
+
+import linecache
+import re
+from inspect import (getblock, getfile, getmodule, getsourcefile, indentsize,
+ isbuiltin, isclass, iscode, isframe, isfunction, ismethod,
+ ismodule, istraceback)
+from tokenize import TokenError
+
+from ._dill import IS_IPYTHON
+
+
+def isfrommain(obj):
+ "check if object was built in __main__"
+ module = getmodule(obj)
+ if module and module.__name__ == '__main__':
+ return True
+ return False
+
+
+def isdynamic(obj):
+ "check if object was built in the interpreter"
+ try: file = getfile(obj)
+ except TypeError: file = None
+    if file == '<stdin>' and isfrommain(obj):
+ return True
+ return False
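+
+# Example (illustrative): a function typed at the >>> prompt reports its file
+# as '<stdin>' and lives in __main__, so isdynamic() is True for it; objects
+# defined in a file (or builtins, where getfile() fails) return False.
+#   >>> import math
+#   >>> isdynamic(math.sin)
+#   False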
+
+
+def _matchlambda(func, line):
+ """check if lambda object 'func' matches raw line of code 'line'"""
+ from .detect import code as getcode
+ from .detect import freevars, globalvars, varnames
+ dummy = lambda : '__this_is_a_big_dummy_function__'
+ # process the line (removing leading whitespace, etc)
+ lhs,rhs = line.split('lambda ',1)[-1].split(":", 1) #FIXME: if !1 inputs
+ try: #FIXME: unsafe
+ _ = eval("lambda %s : %s" % (lhs,rhs), globals(),locals())
+ except Exception: _ = dummy
+ # get code objects, for comparison
+ _, code = getcode(_).co_code, getcode(func).co_code
+ # check if func is in closure
+ _f = [line.count(i) for i in freevars(func).keys()]
+ if not _f: # not in closure
+ # check if code matches
+ if _ == code: return True
+ return False
+ # weak check on freevars
+ if not all(_f): return False #XXX: VERY WEAK
+ # weak check on varnames and globalvars
+ _f = varnames(func)
+ _f = [line.count(i) for i in _f[0]+_f[1]]
+ if _f and not all(_f): return False #XXX: VERY WEAK
+ _f = [line.count(i) for i in globalvars(func).keys()]
+ if _f and not all(_f): return False #XXX: VERY WEAK
+ # check if func is a double lambda
+ if (line.count('lambda ') > 1) and (lhs in freevars(func).keys()):
+ _lhs,_rhs = rhs.split('lambda ',1)[-1].split(":",1) #FIXME: if !1 inputs
+ try: #FIXME: unsafe
+ _f = eval("lambda %s : %s" % (_lhs,_rhs), globals(),locals())
+ except Exception: _f = dummy
+ # get code objects, for comparison
+ _, code = getcode(_f).co_code, getcode(func).co_code
+ if len(_) != len(code): return False
+ #NOTE: should be same code same order, but except for 't' and '\x88'
+ _ = set((i,j) for (i,j) in zip(_,code) if i != j)
+ if len(_) != 1: return False #('t','\x88')
+ return True
+ # check indentsize
+ if not indentsize(line): return False #FIXME: is this a good check???
+ # check if code 'pattern' matches
+ #XXX: or pattern match against dis.dis(code)? (or use uncompyle2?)
+ _ = _.split(_[0]) # 't' #XXX: remove matching values if starts the same?
+ _f = code.split(code[0]) # '\x88'
+ #NOTE: should be same code different order, with different first element
+ _ = dict(re.match(r'([\W\D\S])(.*)', _[i]).groups() for i in range(1,len(_)))
+ _f = dict(re.match(r'([\W\D\S])(.*)', _f[i]).groups() for i in range(1,len(_f)))
+ if (_.keys() == _f.keys()) and (sorted(_.values()) == sorted(_f.values())):
+ return True
+ return False
+
+
+def findsource(object):
+ """Return the entire source file and starting line number for an object.
+ For interactively-defined objects, the 'file' is the interpreter's history.
+
+ The argument may be a module, class, method, function, traceback, frame,
+ or code object. The source code is returned as a list of all the lines
+ in the file and the line number indexes a line in that list. An IOError
+ is raised if the source code cannot be retrieved, while a TypeError is
+ raised for objects where the source code is unavailable (e.g. builtins)."""
+
+ module = getmodule(object)
+ try: file = getfile(module)
+ except TypeError: file = None
+ is_module_main = (module and module.__name__ == '__main__' and not file)
+ if IS_IPYTHON and is_module_main:
+ #FIXME: quick fix for functions and classes in IPython interpreter
+ try:
+ file = getfile(object)
+ sourcefile = getsourcefile(object)
+ except TypeError:
+ if isclass(object):
+ for object_method in filter(isfunction, object.__dict__.values()):
+ # look for a method of the class
+ file_candidate = getfile(object_method)
+                    if not file_candidate.startswith('<ipython-input-'):
+                        continue
+                    file = file_candidate
+                    sourcefile = getsourcefile(object_method)
+        if file:
+            lines = linecache.getlines(file)
+        else:
+            raise IOError('source code not available')
+    # use readline when working in interpreter (i.e. __main__ and not file)
+    elif is_module_main:
+        try:
+            import readline
+            err = ''
+        except ImportError:
+            import sys
+            err = sys.exc_info()[1].args[0]
+            if sys.platform[:3] == 'win':
+                err += ", please install 'pyreadline'"
+        if err:
+            raise IOError(err)
+        lbuf = readline.get_current_history_length()
+        lines = [readline.get_history_item(i)+'\n' for i in range(1,lbuf)]
+    else:
+        try: # special handling for class instances
+            if not isclass(object) and isclass(type(object)): # __class__
+                file = getfile(module)
+                sourcefile = getsourcefile(module)
+            else: # builtins fail with a TypeError
+                file = getfile(object)
+                sourcefile = getsourcefile(object)
+        except TypeError: # fail with either one
+            raise IOError('source code not available')
+        if not sourcefile and file[:1] + file[-1:] != '<>':
+            raise IOError('source code not available')
+        file = sourcefile if sourcefile else file
+
+        module = getmodule(object, file)
+        if module:
+            lines = linecache.getlines(file, module.__dict__)
+        else:
+            lines = linecache.getlines(file)
+
+ if not lines:
+ raise IOError('could not extract source code')
+
+ #FIXME: all below may fail if exec used (i.e. exec('f = lambda x:x') )
+ if ismodule(object):
+ return lines, 0
+
+ #NOTE: beneficial if search goes from end to start of buffer history
+ name = pat1 = obj = ''
+ pat2 = r'^(\s*@)'
+# pat1b = r'^(\s*%s\W*=)' % name #FIXME: finds 'f = decorate(f)', not exec
+    if ismethod(object):
+        name = object.__name__
+        if name == '<lambda>': pat1 = r'(.*(?<!\w)lambda(:|\s))'
+        else: pat1 = r'^(\s*def\s)'
+        object = object.__func__
+    if isfunction(object):
+        name = object.__name__
+        if name == '<lambda>':
+            pat1 = r'(.*(?<!\w)lambda(:|\s))'
+            obj = object #XXX: better a copy?
+        else: pat1 = r'^(\s*def\s)'
+        object = object.__code__
+    if istraceback(object):
+        object = object.tb_frame
+    if isframe(object):
+        object = object.f_code
+    if iscode(object):
+        if not hasattr(object, 'co_firstlineno'):
+            raise IOError('could not find function definition')
+        stdin = object.co_filename == '<stdin>'
+        if stdin:
+            lnum = len(lines) - 1 # can't get lnum easily, so leverage pat
+            if not pat1: pat1 = r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)'
+        else:
+            lnum = object.co_firstlineno - 1
+            pat1 = r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)'
+        pat1 = re.compile(pat1); pat2 = re.compile(pat2)
+        while lnum > 0: #XXX: won't find decorators in <stdin> ?
+            line = lines[lnum]
+            if pat1.match(line):
+                if not stdin: break # co_firstlineno does the job
+                if name == '<lambda>': # hackery needed to confirm a match
+ if _matchlambda(obj, line): break
+ else: # not a lambda, just look for the name
+ if name in line: # need to check for decorator...
+ hats = 0
+ for _lnum in range(lnum-1,-1,-1):
+ if pat2.match(lines[_lnum]): hats += 1
+ else: break
+ lnum = lnum - hats
+ break
+ lnum = lnum - 1
+ return lines, lnum
+
+ try: # turn instances into classes
+ if not isclass(object) and isclass(type(object)): # __class__
+ object = object.__class__ #XXX: sometimes type(class) is better?
+ #XXX: we don't find how the instance was built
+ except AttributeError: pass
+ if isclass(object):
+ name = object.__name__
+ pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
+ # make some effort to find the best matching class definition:
+ # use the one with the least indentation, which is the one
+ # that's most probably not inside a function definition.
+ candidates = []
+ for i in range(len(lines)-1,-1,-1):
+ match = pat.match(lines[i])
+ if match:
+ # if it's at toplevel, it's already the best one
+ if lines[i][0] == 'c':
+ return lines, i
+ # else add whitespace to candidate list
+ candidates.append((match.group(1), i))
+ if candidates:
+ # this will sort by whitespace, and by line number,
+ # less whitespace first #XXX: should sort high lnum before low
+ candidates.sort()
+ return lines, candidates[0][1]
+ else:
+ raise IOError('could not find class definition')
+ raise IOError('could not find code object')
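+
+# Usage sketch (illustrative): like inspect.findsource(), but also able to
+# search the interpreter's history for interactively-defined objects.
+#   >>> from dill.source import findsource
+#   >>> lines, lnum = findsource(findsource)  # inspect this very function
+#   >>> lines[lnum].startswith('def findsource')
+#   True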
+
+
+def getblocks(object, lstrip=False, enclosing=False, locate=False):
+ """Return a list of source lines and starting line number for an object.
+ Interactively-defined objects refer to lines in the interpreter's history.
+
+ If enclosing=True, then also return any enclosing code.
+ If lstrip=True, ensure there is no indentation in the first line of code.
+ If locate=True, then also return the line number for the block of code.
+
+ DEPRECATED: use 'getsourcelines' instead
+ """
+ lines, lnum = findsource(object)
+
+ if ismodule(object):
+ if lstrip: lines = _outdent(lines)
+ return ([lines], [0]) if locate is True else [lines]
+
+ #XXX: 'enclosing' means: closures only? or classes and files?
+ indent = indentsize(lines[lnum])
+ block = getblock(lines[lnum:]) #XXX: catch any TokenError here?
+
+ if not enclosing or not indent:
+ if lstrip: block = _outdent(block)
+ return ([block], [lnum]) if locate is True else [block]
+
+    pat1 = r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))'; pat1 = re.compile(pat1)
+    pat2 = r'^(\s*@)'; pat2 = re.compile(pat2)
+
+    skip = 0
+    line = 0
+    blocks = []; _lnum = []
+    target = ''.join(block)
+    while line <= lnum: #XXX: equivalent to: while line < lnum+1
+        # see if starts with ('def','lambda') and contains our target block
+        if pat1.match(lines[line]):
+            if not skip:
+                try: code = getblock(lines[line:])
+                except TokenError: code = [lines[line]]
+            if indentsize(lines[line]) > indent: #XXX: should be >= ?
+ line += len(code) - skip
+ elif target in ''.join(code):
+ blocks.append(code) # save code block as the potential winner
+ _lnum.append(line - skip) # save the line number for the match
+ line += len(code) - skip
+ else:
+ line += 1
+ skip = 0
+ # find skip: the number of consecutive decorators
+ elif pat2.match(lines[line]):
+ try: code = getblock(lines[line:])
+ except TokenError: code = [lines[line]]
+ skip = 1
+ for _line in code[1:]: # skip lines that are decorators
+ if not pat2.match(_line): break
+ skip += 1
+ line += skip
+ # no match: reset skip and go to the next line
+ else:
+ line +=1
+ skip = 0
+
+ if not blocks:
+ blocks = [block]
+ _lnum = [lnum]
+ if lstrip: blocks = [_outdent(block) for block in blocks]
+ # return last match
+ return (blocks, _lnum) if locate is True else blocks
+
+
+def getsourcelines(object, lstrip=False, enclosing=False):
+ """Return a list of source lines and starting line number for an object.
+ Interactively-defined objects refer to lines in the interpreter's history.
+
+ The argument may be a module, class, method, function, traceback, frame,
+ or code object. The source code is returned as a list of the lines
+ corresponding to the object and the line number indicates where in the
+ original source file the first line of code was found. An IOError is
+ raised if the source code cannot be retrieved, while a TypeError is
+ raised for objects where the source code is unavailable (e.g. builtins).
+
+ If lstrip=True, ensure there is no indentation in the first line of code.
+ If enclosing=True, then also return any enclosing code."""
+ code, n = getblocks(object, lstrip=lstrip, enclosing=enclosing, locate=True)
+ return code[-1], n[-1]
+
+
+#NOTE: broke backward compatibility 4/16/14 (was lstrip=True, force=True)
+def getsource(object, alias='', lstrip=False, enclosing=False, \
+ force=False, builtin=False):
+ """Return the text of the source code for an object. The source code for
+ interactively-defined objects are extracted from the interpreter's history.
+
+ The argument may be a module, class, method, function, traceback, frame,
+ or code object. The source code is returned as a single string. An
+ IOError is raised if the source code cannot be retrieved, while a
+ TypeError is raised for objects where the source code is unavailable
+ (e.g. builtins).
+
+ If alias is provided, then add a line of code that renames the object.
+ If lstrip=True, ensure there is no indentation in the first line of code.
+ If enclosing=True, then also return any enclosing code.
+ If force=True, catch (TypeError,IOError) and try to use import hooks.
+ If builtin=True, force an import for any builtins
+ """
+ # hascode denotes a callable
+ hascode = _hascode(object)
+ # is a class instance type (and not in builtins)
+ instance = _isinstance(object)
+
+ # get source lines; if fail, try to 'force' an import
+ try: # fails for builtins, and other assorted object types
+ lines, lnum = getsourcelines(object, enclosing=enclosing)
+ except (TypeError, IOError): # failed to get source, resort to import hooks
+ if not force: # don't try to get types that findsource can't get
+ raise
+ if not getmodule(object): # get things like 'None' and '1'
+ if not instance: return getimport(object, alias, builtin=builtin)
+ # special handling (numpy arrays, ...)
+ _import = getimport(object, builtin=builtin)
+ name = getname(object, force=True)
+ _alias = "%s = " % alias if alias else ""
+ if alias == name: _alias = ""
+ return _import+_alias+"%s\n" % name
+ else: #FIXME: could use a good bit of cleanup, since using getimport...
+ if not instance: return getimport(object, alias, builtin=builtin)
+ # now we are dealing with an instance...
+ name = object.__class__.__name__
+ module = object.__module__
+ if module in ['builtins','__builtin__']:
+ return getimport(object, alias, builtin=builtin)
+ else: #FIXME: leverage getimport? use 'from module import name'?
+ lines, lnum = ["%s = __import__('%s', fromlist=['%s']).%s\n" % (name,module,name,name)], 0
+ obj = eval(lines[0].lstrip(name + ' = '))
+ lines, lnum = getsourcelines(obj, enclosing=enclosing)
+
+ # strip leading indent (helps ensure can be imported)
+ if lstrip or alias:
+ lines = _outdent(lines)
+
+ # instantiate, if there's a nice repr #XXX: BAD IDEA???
+ if instance: #and force: #XXX: move into findsource or getsourcelines ?
+ if '(' in repr(object): lines.append('%r\n' % object)
+ #else: #XXX: better to somehow to leverage __reduce__ ?
+ # reconstructor,args = object.__reduce__()
+ # _ = reconstructor(*args)
+ else: # fall back to serialization #XXX: bad idea?
+ #XXX: better not duplicate work? #XXX: better new/enclose=True?
+ lines = dumpsource(object, alias='', new=force, enclose=False)
+ lines, lnum = [line+'\n' for line in lines.split('\n')][:-1], 0
+ #else: object.__code__ # raise AttributeError
+
+ # add an alias to the source code
+ if alias:
+ if hascode:
+ skip = 0
+ for line in lines: # skip lines that are decorators
+ if not line.startswith('@'): break
+ skip += 1
+ #XXX: use regex from findsource / getsourcelines ?
+ if lines[skip].lstrip().startswith('def '): # we have a function
+ if alias != object.__name__:
+ lines.append('\n%s = %s\n' % (alias, object.__name__))
+ elif 'lambda ' in lines[skip]: # we have a lambda
+ if alias != lines[skip].split('=')[0].strip():
+ lines[skip] = '%s = %s' % (alias, lines[skip])
+ else: # ...try to use the object's name
+ if alias != object.__name__:
+ lines.append('\n%s = %s\n' % (alias, object.__name__))
+ else: # class or class instance
+ if instance:
+ if alias != lines[-1].split('=')[0].strip():
+ lines[-1] = ('%s = ' % alias) + lines[-1]
+ else:
+ name = getname(object, force=True) or object.__name__
+ if alias != name:
+ lines.append('\n%s = %s\n' % (alias, name))
+ return ''.join(lines)
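+
+# Usage sketch (illustrative): alias= appends a rebinding line, which is
+# handy when the returned text is exec'd into a fresh namespace.
+#   >>> from dill.source import getsource, isfrommain
+#   >>> src = getsource(isfrommain, alias='check_main')
+#   >>> src.endswith('check_main = isfrommain\n')
+#   True
+#   >>> ns = {}
+#   >>> exec(src, ns)   # defines both isfrommain and the alias in ns
+#   >>> ns['check_main'].__name__
+#   'isfrommain'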
+
+
+def _hascode(object):
+    '''True if object has an attribute that stores its __code__'''
+ return getattr(object,'__code__',None) or getattr(object,'func_code',None)
+
+def _isinstance(object):
+ '''True if object is a class instance type (and is not a builtin)'''
+ if _hascode(object) or isclass(object) or ismodule(object):
+ return False
+ if istraceback(object) or isframe(object) or iscode(object):
+ return False
+ # special handling (numpy arrays, ...)
+ if not getmodule(object) and getmodule(type(object)).__name__ in ['numpy']:
+ return True
+# # check if is instance of a builtin
+# if not getmodule(object) and getmodule(type(object)).__name__ in ['__builtin__','builtins']:
+# return False
+ _types = ('")
+ if not repr(type(object)).startswith(_types): #FIXME: weak hack
+ return False
+ if not getmodule(object) or object.__module__ in ['builtins','__builtin__'] or getname(object, force=True) in ['array']:
+ return False
+ return True # by process of elimination... it's what we want
+
+
+def _intypes(object):
+ '''check if object is in the 'types' module'''
+ import types
+ # allow user to pass in object or object.__name__
+ if type(object) is not type(''):
+ object = getname(object, force=True)
+ if object == 'ellipsis': object = 'EllipsisType'
+ return True if hasattr(types, object) else False
+
+
+def _isstring(object): #XXX: isstringlike better?
+ '''check if object is a string-like type'''
+ return isinstance(object, (str, bytes))
+
+
+def indent(code, spaces=4):
+ '''indent a block of code with whitespace (default is 4 spaces)'''
+ indent = indentsize(code)
+ if type(spaces) is int: spaces = ' '*spaces
+ # if '\t' is provided, will indent with a tab
+ nspaces = indentsize(spaces)
+ # blank lines (etc) need to be ignored
+ lines = code.split('\n')
+## stq = "'''"; dtq = '"""'
+## in_stq = in_dtq = False
+ for i in range(len(lines)):
+ #FIXME: works... but shouldn't indent 2nd+ lines of multiline doc
+ _indent = indentsize(lines[i])
+ if indent > _indent: continue
+ lines[i] = spaces+lines[i]
+## #FIXME: may fail when stq and dtq in same line (depends on ordering)
+## nstq, ndtq = lines[i].count(stq), lines[i].count(dtq)
+## if not in_dtq and not in_stq:
+## lines[i] = spaces+lines[i] # we indent
+## # entering a comment block
+## if nstq%2: in_stq = not in_stq
+## if ndtq%2: in_dtq = not in_dtq
+## # leaving a comment block
+## elif in_dtq and ndtq%2: in_dtq = not in_dtq
+## elif in_stq and nstq%2: in_stq = not in_stq
+## else: pass
+ if lines[-1].strip() == '': lines[-1] = ''
+ return '\n'.join(lines)
+
+
+def _outdent(lines, spaces=None, all=True):
+ '''outdent lines of code, accounting for docs and line continuations'''
+ indent = indentsize(lines[0])
+ if spaces is None or spaces > indent or spaces < 0: spaces = indent
+ for i in range(len(lines) if all else 1):
+ #FIXME: works... but shouldn't outdent 2nd+ lines of multiline doc
+ _indent = indentsize(lines[i])
+ if spaces > _indent: _spaces = _indent
+ else: _spaces = spaces
+ lines[i] = lines[i][_spaces:]
+ return lines
+
+def outdent(code, spaces=None, all=True):
+ '''outdent a block of code (default is to strip all leading whitespace)'''
+ indent = indentsize(code)
+ if spaces is None or spaces > indent or spaces < 0: spaces = indent
+ #XXX: will this delete '\n' in some cases?
+ if not all: return code[spaces:]
+ return '\n'.join(_outdent(code.split('\n'), spaces=spaces, all=all))
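+
+# Example (illustrative): indent() and outdent() are line-wise whitespace
+# transforms over a block of code, and roughly invert each other.
+#   >>> from dill.source import indent, outdent
+#   >>> indent('x = 1\ny = 2\n', 2)
+#   '  x = 1\n  y = 2\n'
+#   >>> outdent('  x = 1\n  y = 2\n')
+#   'x = 1\ny = 2\n'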
+
+
+#XXX: not sure what the point of _wrap is...
+__globals__ = globals()
+__locals__ = locals()
+def _wrap(f):
+ """ encapsulate a function and it's __import__ """
+ def func(*args, **kwds):
+ try:
+ # _ = eval(getsource(f, force=True)) #XXX: safer but less robust
+ exec(getimportable(f, alias='_'), __globals__, __locals__)
+ except Exception:
+ raise ImportError('cannot import name ' + f.__name__)
+ return _(*args, **kwds)
+ func.__name__ = f.__name__
+ func.__doc__ = f.__doc__
+ return func
+
+
+def _enclose(object, alias=''): #FIXME: needs alias to hold returned object
+ """create a function enclosure around the source of some object"""
+ #XXX: dummy and stub should append a random string
+ dummy = '__this_is_a_big_dummy_enclosing_function__'
+ stub = '__this_is_a_stub_variable__'
+ code = 'def %s():\n' % dummy
+ code += indent(getsource(object, alias=stub, lstrip=True, force=True))
+ code += indent('return %s\n' % stub)
+ if alias: code += '%s = ' % alias
+ code += '%s(); del %s\n' % (dummy, dummy)
+ #code += "globals().pop('%s',lambda :None)()\n" % dummy
+ return code
+
+
+def dumpsource(object, alias='', new=False, enclose=True):
+ """'dump to source', where the code includes a pickled object.
+
+ If new=True and object is a class instance, then create a new
+ instance using the unpacked class source code. If enclose, then
+ create the object inside a function enclosure (thus minimizing
+ any global namespace pollution).
+ """
+ from dill import dumps
+ pik = repr(dumps(object))
+ code = 'import dill\n'
+ if enclose:
+ stub = '__this_is_a_stub_variable__' #XXX: *must* be same _enclose.stub
+ pre = '%s = ' % stub
+ new = False #FIXME: new=True doesn't work with enclose=True
+ else:
+ stub = alias
+ pre = '%s = ' % stub if alias else alias
+
+ # if a 'new' instance is not needed, then just dump and load
+ if not new or not _isinstance(object):
+ code += pre + 'dill.loads(%s)\n' % pik
+ else: #XXX: other cases where source code is needed???
+ code += getsource(object.__class__, alias='', lstrip=True, force=True)
+ mod = repr(object.__module__) # should have a module (no builtins here)
+ code += pre + 'dill.loads(%s.replace(b%s,bytes(__name__,"UTF-8")))\n' % (pik,mod)
+ #code += 'del %s' % object.__class__.__name__ #NOTE: kills any existing!
+
+ if enclose:
+ # generation of the 'enclosure'
+ dummy = '__this_is_a_big_dummy_object__'
+ dummy = _enclose(dummy, alias=alias)
+ # hack to replace the 'dummy' with the 'real' code
+ dummy = dummy.split('\n')
+ code = dummy[0]+'\n' + indent(code) + '\n'.join(dummy[-3:])
+
+ return code #XXX: better 'dumpsourcelines', returning list of lines?
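+
+# Usage sketch (illustrative): the emitted string embeds the pickled bytes
+# plus the code to reload them, so exec'ing it recreates the object; with
+# the default enclose=True only the alias leaks into the target namespace.
+#   >>> from dill.source import dumpsource
+#   >>> src = dumpsource([1, 2, 3], alias='xs')
+#   >>> ns = {}
+#   >>> exec(src, ns)
+#   >>> ns['xs']
+#   [1, 2, 3]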
+
+
+def getname(obj, force=False, fqn=False): #XXX: throw(?) to raise error on fail?
+ """get the name of the object. for lambdas, get the name of the pointer """
+ if fqn: return '.'.join(_namespace(obj))
+ module = getmodule(obj)
+ if not module: # things like "None" and "1"
+ if not force: return None
+ return repr(obj)
+ try:
+ #XXX: 'wrong' for decorators and curried functions ?
+ # if obj.func_closure: ...use logic from getimportable, etc ?
+ name = obj.__name__
+        if name == '<lambda>':
+ return getsource(obj).split('=',1)[0].strip()
+ # handle some special cases
+ if module.__name__ in ['builtins','__builtin__']:
+ if name == 'ellipsis': name = 'EllipsisType'
+ return name
+ except AttributeError: #XXX: better to just throw AttributeError ?
+ if not force: return None
+ name = repr(obj)
+ if name.startswith('<'): # or name.split('('):
+ return None
+ return name
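+
+# Example (illustrative):
+#   >>> from dill.source import getname
+#   >>> import math
+#   >>> getname(math.sin)
+#   'sin'
+#   >>> getname(math.sin, fqn=True)
+#   'math.sin'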
+
+
+def _namespace(obj):
+ """_namespace(obj); return namespace hierarchy (as a list of names)
+ for the given object. For an instance, find the class hierarchy.
+
+ For example:
+
+ >>> from functools import partial
+ >>> p = partial(int, base=2)
+ >>> _namespace(p)
+    ['functools', 'partial']
+ """
+ # mostly for functions and modules and such
+ #FIXME: 'wrong' for decorators and curried functions
+ try: #XXX: needs some work and testing on different types
+ module = qual = str(getmodule(obj)).split()[1].strip('>').strip('"').strip("'")
+ qual = qual.split('.')
+ if ismodule(obj):
+ return qual
+ # get name of a lambda, function, etc
+ name = getname(obj) or obj.__name__ # failing, raise AttributeError
+ # check special cases (NoneType, ...)
+ if module in ['builtins','__builtin__']: # BuiltinFunctionType
+ if _intypes(name): return ['types'] + [name]
+ return qual + [name] #XXX: can be wrong for some aliased objects
+ except Exception: pass
+ # special case: numpy.inf and numpy.nan (we don't want them as floats)
+ if str(obj) in ['inf','nan','Inf','NaN']: # is more, but are they needed?
+ return ['numpy'] + [str(obj)]
+ # mostly for classes and class instances and such
+ module = getattr(obj.__class__, '__module__', None)
+ qual = str(obj.__class__)
+ try: qual = qual[qual.index("'")+1:-2]
+ except ValueError: pass # str(obj.__class__) made the 'try' unnecessary
+ qual = qual.split(".")
+ if module in ['builtins','__builtin__']:
+ # check special cases (NoneType, Ellipsis, ...)
+ if qual[-1] == 'ellipsis': qual[-1] = 'EllipsisType'
+ if _intypes(qual[-1]): module = 'types' #XXX: BuiltinFunctionType
+ qual = [module] + qual
+ return qual
+
+
+#NOTE: 05/25/14 broke backward compatibility: added 'alias' as 3rd argument
+def _getimport(head, tail, alias='', verify=True, builtin=False):
+ """helper to build a likely import string from head and tail of namespace.
+ ('head','tail') are used in the following context: "from head import tail"
+
+ If verify=True, then test the import string before returning it.
+ If builtin=True, then force an import for builtins where possible.
+ If alias is provided, then rename the object on import.
+ """
+ # special handling for a few common types
+ if tail in ['Ellipsis', 'NotImplemented'] and head in ['types']:
+ head = len.__module__
+ elif tail in ['None'] and head in ['types']:
+ _alias = '%s = ' % alias if alias else ''
+ if alias == tail: _alias = ''
+ return _alias+'%s\n' % tail
+ # we don't need to import from builtins, so return ''
+# elif tail in ['NoneType','int','float','long','complex']: return '' #XXX: ?
+ if head in ['builtins','__builtin__']:
+ # special cases (NoneType, Ellipsis, ...) #XXX: BuiltinFunctionType
+ if tail == 'ellipsis': tail = 'EllipsisType'
+ if _intypes(tail): head = 'types'
+ elif not builtin:
+ _alias = '%s = ' % alias if alias else ''
+ if alias == tail: _alias = ''
+ return _alias+'%s\n' % tail
+ else: pass # handle builtins below
+ # get likely import string
+ if not head: _str = "import %s" % tail
+ else: _str = "from %s import %s" % (head, tail)
+ _alias = " as %s\n" % alias if alias else "\n"
+ if alias == tail: _alias = "\n"
+ _str += _alias
+ # FIXME: fails on most decorators, currying, and such...
+ # (could look for magic __wrapped__ or __func__ attr)
+ # (could fix in 'namespace' to check obj for closure)
+ if verify and not head.startswith('dill.'):# weird behavior for dill
+ #print(_str)
+ try: exec(_str) #XXX: check if == obj? (name collision)
+ except ImportError: #XXX: better top-down or bottom-up recursion?
+ _head = head.rsplit(".",1)[0] #(or get all, then compare == obj?)
+ if not _head: raise
+ if _head != head:
+ _str = _getimport(_head, tail, alias, verify)
+ return _str
+
+
+#XXX: rename builtin to force? vice versa? verify to force? (as in getsource)
+#NOTE: 05/25/14 broke backward compatibility: added 'alias' as 2nd argument
+def getimport(obj, alias='', verify=True, builtin=False, enclosing=False):
+ """get the likely import string for the given object
+
+ obj is the object to inspect
+ If verify=True, then test the import string before returning it.
+ If builtin=True, then force an import for builtins where possible.
+ If enclosing=True, get the import for the outermost enclosing callable.
+ If alias is provided, then rename the object on import.
+ """
+ if enclosing:
+ from .detect import outermost
+ _obj = outermost(obj)
+ obj = _obj if _obj else obj
+ # get the namespace
+ qual = _namespace(obj)
+ head = '.'.join(qual[:-1])
+ tail = qual[-1]
+ # for named things... with a nice repr #XXX: move into _namespace?
+ try: # look for '<...>' and be mindful it might be in lists, dicts, etc...
+ name = repr(obj).split('<',1)[1].split('>',1)[1]
+ name = None # we have a 'object'-style repr
+ except Exception: # it's probably something 'importable'
+ if head in ['builtins','__builtin__']:
+ name = repr(obj) #XXX: catch [1,2], (1,2), set([1,2])... others?
+ else:
+ name = repr(obj).split('(')[0]
+ #if not repr(obj).startswith('<'): name = repr(obj).split('(')[0]
+ #else: name = None
+ if name: # try using name instead of tail
+ try: return _getimport(head, name, alias, verify, builtin)
+ except ImportError: pass
+ except SyntaxError:
+ if head in ['builtins','__builtin__']:
+ _alias = '%s = ' % alias if alias else ''
+ if alias == name: _alias = ''
+ return _alias+'%s\n' % name
+ else: pass
+ try:
+ #if type(obj) is type(abs): _builtin = builtin # BuiltinFunctionType
+ #else: _builtin = False
+ return _getimport(head, tail, alias, verify, builtin)
+ except ImportError:
+ raise # could do some checking against obj
+ except SyntaxError:
+ if head in ['builtins','__builtin__']:
+ _alias = '%s = ' % alias if alias else ''
+ if alias == tail: _alias = ''
+ return _alias+'%s\n' % tail
+ raise # could do some checking against obj
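+
+# Example (illustrative):
+#   >>> from dill.source import getimport
+#   >>> from collections import OrderedDict
+#   >>> getimport(OrderedDict)
+#   'from collections import OrderedDict\n'
+#   >>> getimport(OrderedDict, alias='OD')
+#   'from collections import OrderedDict as OD\n'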
+
+
+def _importable(obj, alias='', source=None, enclosing=False, force=True, \
+ builtin=True, lstrip=True):
+ """get an import string (or the source code) for the given object
+
+ This function will attempt to discover the name of the object, or the repr
+ of the object, or the source code for the object. To attempt to force
+ discovery of the source code, use source=True, to attempt to force the
+ use of an import, use source=False; otherwise an import will be sought
+ for objects not defined in __main__. The intent is to build a string
+ that can be imported from a python file. obj is the object to inspect.
+ If alias is provided, then rename the object with the given alias.
+
+ If source=True, use these options:
+ If enclosing=True, then also return any enclosing code.
+ If force=True, catch (TypeError,IOError) and try to use import hooks.
+ If lstrip=True, ensure there is no indentation in the first line of code.
+
+ If source=False, use these options:
+ If enclosing=True, get the import for the outermost enclosing callable.
+ If force=True, then don't test the import string before returning it.
+ If builtin=True, then force an import for builtins where possible.
+ """
+ if source is None:
+ source = True if isfrommain(obj) else False
+ if source: # first try to get the source
+ try:
+ return getsource(obj, alias, enclosing=enclosing, \
+ force=force, lstrip=lstrip, builtin=builtin)
+ except Exception: pass
+ try:
+ if not _isinstance(obj):
+ return getimport(obj, alias, enclosing=enclosing, \
+ verify=(not force), builtin=builtin)
+ # first 'get the import', then 'get the instance'
+ _import = getimport(obj, enclosing=enclosing, \
+ verify=(not force), builtin=builtin)
+ name = getname(obj, force=True)
+ if not name:
+            raise AttributeError("object has no attribute '__name__'")
+ _alias = "%s = " % alias if alias else ""
+ if alias == name: _alias = ""
+ return _import+_alias+"%s\n" % name
+
+ except Exception: pass
+ if not source: # try getsource, only if it hasn't been tried yet
+ try:
+ return getsource(obj, alias, enclosing=enclosing, \
+ force=force, lstrip=lstrip, builtin=builtin)
+ except Exception: pass
+ # get the name (of functions, lambdas, and classes)
+ # or hope that obj can be built from the __repr__
+ #XXX: what to do about class instances and such?
+ obj = getname(obj, force=force)
+ # we either have __repr__ or __name__ (or None)
+ if not obj or obj.startswith('<'):
+        raise AttributeError("object has no attribute '__name__'")
+ _alias = '%s = ' % alias if alias else ''
+ if alias == obj: _alias = ''
+ return _alias+'%s\n' % obj
+ #XXX: possible failsafe... (for example, for instances when source=False)
+ # "import dill; result = dill.loads(); # repr(