diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/LICENSE.rst b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..d2612b11b6fdd5ee2155d6f19aeefd5eb438588a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/LICENSE.rst @@ -0,0 +1,30 @@ +*(This is the OSI approved 3-clause "New BSD License".)* + +Copyright © 2016, wouter bolsterlee + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +* Neither the name of the author nor the names of the contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..25140023c3ca440ba43ef4248360efb9527901cc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/METADATA @@ -0,0 +1,48 @@ +Metadata-Version: 2.1 +Name: jsonlines +Version: 4.0.0 +Summary: Library with helpers for the jsonlines file format +Home-page: https://github.com/wbolster/jsonlines +Author: wouter bolsterlee +Author-email: wouter@bolsterl.ee +License: BSD +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Internet :: Log Analysis +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Logging +Classifier: Topic :: Utilities +Requires-Python: >=3.8 +License-File: LICENSE.rst +Requires-Dist: attrs >=19.2.0 + +.. image:: https://pepy.tech/badge/jsonlines + :target: https://pepy.tech/project/jsonlines + +.. image:: https://pepy.tech/badge/jsonlines/month + :target: https://pepy.tech/project/jsonlines + +.. image:: https://anaconda.org/anaconda/anaconda/badges/installer/conda.svg + :target: https://anaconda.org/anaconda/jsonlines + +========= +jsonlines +========= + +``jsonlines`` is a Python library to simplify working with jsonlines_ +and ndjson_ data. + +.. _jsonlines: http://jsonlines.org/ +.. _ndjson: http://ndjson.org/ + +* Documentation: https://jsonlines.readthedocs.io/ + +* Python Package Index (PyPI): https://pypi.python.org/pypi/jsonlines/ + +* Source code and issue tracker: https://github.com/wbolster/jsonlines + diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f75f38bdfe1bad71254cfa4b29b6f77e21dcb6af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/RECORD @@ -0,0 +1,11 @@ +jsonlines-4.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jsonlines-4.0.0.dist-info/LICENSE.rst,sha256=vKNU5jkrJCH_sfHiNFRcUVQzuSkTYsG7n9EAkiuQ60I,1543 +jsonlines-4.0.0.dist-info/METADATA,sha256=XDMhu0s_WdlpRSAcseysBZnpSInKa5EEMwyEZ-5ZtHE,1565 +jsonlines-4.0.0.dist-info/RECORD,, +jsonlines-4.0.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +jsonlines-4.0.0.dist-info/top_level.txt,sha256=Y-KWmwRS4_Ci-mje2h6XC6xPeGV191NA6XhnbPot6eE,10 +jsonlines/__init__.py,sha256=7R6ohpIk95mz93rtkWQLSK1_1UQWUd9ckaVxgFyfhsA,258 +jsonlines/__pycache__/__init__.cpython-310.pyc,, +jsonlines/__pycache__/jsonlines.cpython-310.pyc,, +jsonlines/jsonlines.py,sha256=PpLVYlWwGiB4UoTVq2hkdp9oJ0ioSO6gW0k8_-P97-w,19895 +jsonlines/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..c9ad36fe960977ebebfaac0b6682d2373ec0ffb5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/jsonlines-4.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +jsonlines diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/__init__.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ff0f1ac4d92706328b55fc8be649bc107092bbc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/__init__.py @@ -0,0 +1,66 @@ +# +# Package analogous to 'threading.py' but using processes +# +# multiprocessing/__init__.py +# +# This package is intended to duplicate the functionality (and much of +# the API) of threading.py but uses processes instead of threads. A +# subpackage 'multiprocessing.dummy' has the same API but is a simple +# wrapper for 'threading'. +# +# Original: Copyright (c) 2006-2008, R Oudkerk +# Original: Licensed to PSF under a Contributor Agreement. +# Forked by Mike McKerns, to support enhanced serialization. + +# author, version, license, and long description +try: # the package is installed + from .__info__ import __version__, __author__, __doc__, __license__ +except: # pragma: no cover + import os + import sys + root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) + sys.path.append(root) + # get distribution meta info + from version import (__version__, __author__, + get_license_text, get_readme_as_rst) + __license__ = get_license_text(os.path.join(root, 'LICENSE')) + __license__ = "\n%s" % __license__ + __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) + del os, sys, root, get_license_text, get_readme_as_rst + + +import sys +from . import context + +# +# Copy stuff from default context +# + +__all__ = [x for x in dir(context._default_context) if not x.startswith('_')] +globals().update((name, getattr(context._default_context, name)) for name in __all__) + +# +# XXX These should not really be documented or public. +# + +SUBDEBUG = 5 +SUBWARNING = 25 + +# +# Alias for main module -- will be reset by bootstrapping child processes +# + +if '__main__' in sys.modules: + sys.modules['__mp_main__'] = sys.modules['__main__'] + + +def license(): + """print license""" + print (__license__) + return + +def citation(): + """print citation""" + print (__doc__[-491:-118]) + return + diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/connection.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..aa23c04ac0ac8cd3264e28b7721360a7fb20516e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/connection.py @@ -0,0 +1,976 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
+# + +__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] + +import io +import os +import sys +import socket +import struct +import time +import tempfile +import itertools + +try: + import _multiprocess as _multiprocessing +except ImportError: + import _multiprocessing + +from . import util + +from . import AuthenticationError, BufferTooShort +from .context import reduction +_ForkingPickler = reduction.ForkingPickler + +try: + import _winapi + from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +# +# +# + +BUFSIZE = 8192 +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. + +_mmap_counter = itertools.count() + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return getattr(time,'monotonic',time.time)() + timeout + +def _check_timeout(t): + return getattr(time,'monotonic',time.time)() > t + +# +# +# + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) + elif family == 'AF_PIPE': + return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % + (os.getpid(), next(_mmap_counter)), dir="") + else: + raise ValueError('unrecognized family') + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' % family) + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str or util.is_abstract_socket_namespace(address): + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? 
+ + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def send_bytes(self, buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + # HACK for byte-indexing of non-bytewise buffers (e.g. array.array) + if m.itemsize > 1: + m = memoryview(bytes(m)) + n = len(m) + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(_ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable bytes-like object. + Return the number of bytes read. + """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto(m[offset // itemsize : + (offset + size) // itemsize]) + return size + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return _ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. 
+ Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. + """ + _got_empty_message = False + + def _close(self, _CloseHandle=_winapi.CloseHandle): + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nwritten, err = ov.GetOverlappedResult(True) + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return f + elif err == _winapi.ERROR_MORE_DATA: + return self._get_more_data(ov, maxsize) + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). + """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): + _close(self._handle) + _write = os.write + _read = os.read + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + n = write(self._handle, buf) + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + chunk = read(handle, remaining) + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + n = len(buf) + if n > 0x7fffffff: + pre_header = struct.pack("!i", -1) + header = struct.pack("!Q", n) + self._send(pre_header) + self._send(header) + self._send(buf) + else: + # For wire compatibility with 3.7 and lower + header = struct.pack("!i", n) + if n > 16384: + # The payload is large so Nagle's algorithm won't be triggered + # and we'd better avoid the cost of concatenation. + self._send(header) + self._send(buf) + else: + # Issue #20540: concatenate before sending, to avoid delays due + # to Nagle's algorithm on a TCP socket. 
+ # Also note we want to avoid sending a 0-length buffer separately, + # to avoid "broken pipe" errors if the other end closed the pipe. + self._send(header + buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if size == -1: + buf = self._recv(8) + size, = struct.unpack("!Q", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = family or (address and address_type(address)) \ + or default_family + address = address or arbitrary_address(family) + + _validate_family(family) + if family == 'AF_PIPE': + self._listener = PipeListener(address, backlog) + else: + self._listener = SocketListener(address, family, backlog) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + c = self._listener.accept() + if self._authkey: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. + ''' + listener = self._listener + if listener is not None: + self._listener = None + listener.close() + + @property + def address(self): + return self._listener._address + + @property + def last_accepted(self): + return self._listener._last_accepted + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(True) + s2.setblocking(True) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + address = arbitrary_address('AF_PIPE') + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, 
obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, + # default security descriptor: the handle cannot be inherited + _winapi.NULL + ) + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) + + return c1, c2 + +# +# Definitions for connections based on sockets +# + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). + if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): + # Linux abstract socket namespaces do not need to be explicitly unlinked + self._unlink = util.Finalize( + self, os.unlink, args=(address,), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + s, self._last_accepted = self._socket.accept() + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + try: + self._socket.close() + finally: + unlink = self._unlink + if unlink is not None: + self._unlink = None + unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket( getattr(socket, family) ) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# +# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + util.sub_debug('listener created with address=%r', self._address) + self.close = util.Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
+ else: + try: + res = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + util.sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 20 + +CHALLENGE = b'#CHALLENGE#' +WELCOME = b'#WELCOME#' +FAILURE = b'#FAILURE#' + +def deliver_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = os.urandom(MESSAGE_LENGTH) + connection.send_bytes(CHALLENGE + message) + digest = hmac.new(authkey, message, 'md5').digest() + response = connection.recv_bytes(256) # reject large message + if response == digest: + connection.send_bytes(WELCOME) + else: + connection.send_bytes(FAILURE) + raise AuthenticationError('digest received was wrong') + +def answer_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = connection.recv_bytes(256) # reject large message + assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message + message = message[len(CHALLENGE):] + digest = hmac.new(authkey, message, 'md5').digest() + connection.send_bytes(digest) + response = connection.recv_bytes(256) # reject large message + if response != WELCOME: + raise AuthenticationError('digest sent was rejected') + +# +# Support for using xmlrpclib for serialization +# + +class ConnectionWrapper(object): + def __init__(self, conn, dumps, loads): + self._conn = conn + self._dumps = dumps + self._loads = loads + for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): + obj = getattr(conn, attr) + setattr(self, attr, obj) + def send(self, obj): + s = self._dumps(obj) + self._conn.send_bytes(s) + def recv(self): + s = self._conn.recv_bytes() + return self._loads(s) + +def _xml_dumps(obj): + return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') + +def _xml_loads(s): + (obj,), method = xmlrpclib.loads(s.decode('utf-8')) + return obj + +class XmlListener(Listener): + def accept(self): + global xmlrpclib + import xmlrpc.client as xmlrpclib + obj = Listener.accept(self) + return ConnectionWrapper(obj, _xml_dumps, _xml_loads) + +def XmlClient(*args, **kwds): + global xmlrpclib + import xmlrpc.client as xmlrpclib + return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) + +# +# Wait +# + +if sys.platform == 'win32': + + def _exhaustive_wait(handles, timeout): + # Return ALL handles which are currently signalled. (Only + # returning the first signalled might create starvation issues.) 
+ L = list(handles) + ready = [] + while L: + res = _winapi.WaitForMultipleObjects(L, False, timeout) + if res == WAIT_TIMEOUT: + break + elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): + res -= WAIT_OBJECT_0 + elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): + res -= WAIT_ABANDONED_0 + else: + raise RuntimeError('Should not get here') + ready.append(L[res]) + L = L[res+1:] + timeout = 0 + return ready + + _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + ov, err = None, e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed... + if ov and sys.getwindowsversion()[:2] >= (6, 2): + # ... except on Windows 8 and later, where + # the message HAS been consumed. + try: + _, err = ov.GetOverlappedResult(False) + except OSError as e: + err = e.winerror + if not err and hasattr(o, '_got_empty_message'): + o._got_empty_message = True + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [o for o in object_list if o in ready_objects] + +else: + + import selectors + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _WaitSelector = selectors.PollSelector + else: + _WaitSelector = selectors.SelectSelector + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. 
+ ''' + with _WaitSelector() as selector: + for obj in object_list: + selector.register(obj, selectors.EVENT_READ) + + if timeout is not None: + deadline = getattr(time,'monotonic',time.time)() + timeout + + while True: + ready = selector.select(timeout) + if ready: + return [key.fileobj for (key, events) in ready] + else: + if timeout is not None: + timeout = deadline - getattr(time,'monotonic',time.time)() + if timeout < 0: + return ready + +# +# Make connection and socket objects sharable if possible +# + +if sys.platform == 'win32': + def reduce_connection(conn): + handle = conn.fileno() + with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: + from . import resource_sharer + ds = resource_sharer.DupSocket(s) + return rebuild_connection, (ds, conn.readable, conn.writable) + def rebuild_connection(ds, readable, writable): + sock = ds.detach() + return Connection(sock.detach(), readable, writable) + reduction.register(Connection, reduce_connection) + + def reduce_pipe_connection(conn): + access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | + (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) + dh = reduction.DupHandle(conn.fileno(), access) + return rebuild_pipe_connection, (dh, conn.readable, conn.writable) + def rebuild_pipe_connection(dh, readable, writable): + handle = dh.detach() + return PipeConnection(handle, readable, writable) + reduction.register(PipeConnection, reduce_pipe_connection) + +else: + def reduce_connection(conn): + df = reduction.DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + reduction.register(Connection, reduce_connection) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/heap.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/heap.py new file mode 100644 index 0000000000000000000000000000000000000000..6217dfe12689b379f2dad6f1e4bc3bbf6af8f60a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/heap.py @@ -0,0 +1,337 @@ +# +# Module which supports allocation of memory from an mmap +# +# multiprocessing/heap.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import bisect +from collections import defaultdict +import mmap +import os +import sys +import tempfile +import threading + +from .context import reduction, assert_spawning +from . import util + +__all__ = ['BufferWrapper'] + +# +# Inheritable class which wraps an mmap, and from which blocks can be allocated +# + +if sys.platform == 'win32': + + import _winapi + + class Arena(object): + """ + A shared memory area backed by anonymous memory (Windows). + """ + + _rand = tempfile._RandomNameSequence() + + def __init__(self, size): + self.size = size + for i in range(100): + name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) + buf = mmap.mmap(-1, size, tagname=name) + if _winapi.GetLastError() == 0: + break + # We have reopened a preexisting mmap. + buf.close() + else: + raise FileExistsError('Cannot find name for new mmap') + self.name = name + self.buffer = buf + self._state = (self.size, self.name) + + def __getstate__(self): + assert_spawning(self) + return self._state + + def __setstate__(self, state): + self.size, self.name = self._state = state + # Reopen existing mmap + self.buffer = mmap.mmap(-1, self.size, tagname=self.name) + # XXX Temporarily preventing buildbot failures while determining + # XXX the correct long-term fix. 
See issue 23060 + #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS + +else: + + class Arena(object): + """ + A shared memory area backed by a temporary file (POSIX). + """ + + if sys.platform == 'linux': + _dir_candidates = ['/dev/shm'] + else: + _dir_candidates = [] + + def __init__(self, size, fd=-1): + self.size = size + self.fd = fd + if fd == -1: + # Arena is created anew (if fd != -1, it means we're coming + # from rebuild_arena() below) + self.fd, name = tempfile.mkstemp( + prefix='pym-%d-'%os.getpid(), + dir=self._choose_dir(size)) + os.unlink(name) + util.Finalize(self, os.close, (self.fd,)) + os.ftruncate(self.fd, size) + self.buffer = mmap.mmap(self.fd, self.size) + + def _choose_dir(self, size): + # Choose a non-storage backed directory if possible, + # to improve performance + for d in self._dir_candidates: + st = os.statvfs(d) + if st.f_bavail * st.f_frsize >= size: # enough free space? + return d + return util.get_temp_dir() + + def reduce_arena(a): + if a.fd == -1: + raise ValueError('Arena is unpicklable because ' + 'forking was enabled when it was created') + return rebuild_arena, (a.size, reduction.DupFd(a.fd)) + + def rebuild_arena(size, dupfd): + return Arena(size, dupfd.detach()) + + reduction.register(Arena, reduce_arena) + +# +# Class allowing allocation of chunks of memory from arenas +# + +class Heap(object): + + # Minimum malloc() alignment + _alignment = 8 + + _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB + _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 + + def __init__(self, size=mmap.PAGESIZE): + self._lastpid = os.getpid() + self._lock = threading.Lock() + # Current arena allocation size + self._size = size + # A sorted list of available block sizes in arenas + self._lengths = [] + + # Free block management: + # - map each block size to a list of `(Arena, start, stop)` blocks + self._len_to_seq = {} + # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block + # starting at that offset + self._start_to_block = {} + # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block + # ending at that offset + self._stop_to_block = {} + + # Map arenas to their `(Arena, start, stop)` blocks in use + self._allocated_blocks = defaultdict(set) + self._arenas = [] + + # List of pending blocks to free - see comment in free() below + self._pending_free_blocks = [] + + # Statistics + self._n_mallocs = 0 + self._n_frees = 0 + + @staticmethod + def _roundup(n, alignment): + # alignment must be a power of 2 + mask = alignment - 1 + return (n + mask) & ~mask + + def _new_arena(self, size): + # Create a new arena with at least the given *size* + length = self._roundup(max(self._size, size), mmap.PAGESIZE) + # We carve larger and larger arenas, for efficiency, until we + # reach a large-ish size (roughly L3 cache-sized) + if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: + self._size *= 2 + util.info('allocating a new mmap of length %d', length) + arena = Arena(length) + self._arenas.append(arena) + return (arena, 0, length) + + def _discard_arena(self, arena): + # Possibly delete the given (unused) arena + length = arena.size + # Reusing an existing arena is faster than creating a new one, so + # we only reclaim space if it's large enough. 
+ if length < self._DISCARD_FREE_SPACE_LARGER_THAN: + return + blocks = self._allocated_blocks.pop(arena) + assert not blocks + del self._start_to_block[(arena, 0)] + del self._stop_to_block[(arena, length)] + self._arenas.remove(arena) + seq = self._len_to_seq[length] + seq.remove((arena, 0, length)) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + def _malloc(self, size): + # returns a large enough block -- it might be much larger + i = bisect.bisect_left(self._lengths, size) + if i == len(self._lengths): + return self._new_arena(size) + else: + length = self._lengths[i] + seq = self._len_to_seq[length] + block = seq.pop() + if not seq: + del self._len_to_seq[length], self._lengths[i] + + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + return block + + def _add_free_block(self, block): + # make block available and try to merge with its neighbours in the arena + (arena, start, stop) = block + + try: + prev_block = self._stop_to_block[(arena, start)] + except KeyError: + pass + else: + start, _ = self._absorb(prev_block) + + try: + next_block = self._start_to_block[(arena, stop)] + except KeyError: + pass + else: + _, stop = self._absorb(next_block) + + block = (arena, start, stop) + length = stop - start + + try: + self._len_to_seq[length].append(block) + except KeyError: + self._len_to_seq[length] = [block] + bisect.insort(self._lengths, length) + + self._start_to_block[(arena, start)] = block + self._stop_to_block[(arena, stop)] = block + + def _absorb(self, block): + # deregister this block so it can be merged with a neighbour + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + + length = stop - start + seq = self._len_to_seq[length] + seq.remove(block) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + return start, stop + + def _remove_allocated_block(self, block): + arena, start, stop = block + blocks = self._allocated_blocks[arena] + blocks.remove((start, stop)) + if not blocks: + # Arena is entirely free, discard it from this process + self._discard_arena(arena) + + def _free_pending_blocks(self): + # Free all the blocks in the pending list - called with the lock held. + while True: + try: + block = self._pending_free_blocks.pop() + except IndexError: + break + self._add_free_block(block) + self._remove_allocated_block(block) + + def free(self, block): + # free a block returned by malloc() + # Since free() can be called asynchronously by the GC, it could happen + # that it's called while self._lock is held: in that case, + # self._lock.acquire() would deadlock (issue #12352). To avoid that, a + # trylock is used instead, and if the lock can't be acquired + # immediately, the block is added to a list of blocks to be freed + # synchronously sometimes later from malloc() or free(), by calling + # _free_pending_blocks() (appending and retrieving from a list is not + # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
+ if os.getpid() != self._lastpid: + raise ValueError( + "My pid ({0:n}) is not last pid {1:n}".format( + os.getpid(),self._lastpid)) + if not self._lock.acquire(False): + # can't acquire the lock right now, add the block to the list of + # pending blocks to free + self._pending_free_blocks.append(block) + else: + # we hold the lock + try: + self._n_frees += 1 + self._free_pending_blocks() + self._add_free_block(block) + self._remove_allocated_block(block) + finally: + self._lock.release() + + def malloc(self, size): + # return a block of right size (possibly rounded up) + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + if os.getpid() != self._lastpid: + self.__init__() # reinitialize after fork + with self._lock: + self._n_mallocs += 1 + # allow pending blocks to be marked available + self._free_pending_blocks() + size = self._roundup(max(size, 1), self._alignment) + (arena, start, stop) = self._malloc(size) + real_stop = start + size + if real_stop < stop: + # if the returned block is larger than necessary, mark + # the remainder available + self._add_free_block((arena, real_stop, stop)) + self._allocated_blocks[arena].add((start, real_stop)) + return (arena, start, real_stop) + +# +# Class wrapping a block allocated out of a Heap -- can be inherited by child process +# + +class BufferWrapper(object): + + _heap = Heap() + + def __init__(self, size): + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + block = BufferWrapper._heap.malloc(size) + self._state = (block, size) + util.Finalize(self, BufferWrapper._heap.free, args=(block,)) + + def create_memoryview(self): + (arena, start, stop), size = self._state + return memoryview(arena.buffer)[start:start+size] diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py new file mode 100644 index 0000000000000000000000000000000000000000..24b8634523e5f2c29cd8bb21022c26d22a4fb13b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py @@ -0,0 +1,72 @@ +import io +import os + +from .context import reduction, set_spawning_popen +from . import popen_fork +from . import spawn +from . import util + +__all__ = ['Popen'] + + +# +# Wrapper for an fd used while launching a process +# + +class _DupFd(object): + def __init__(self, fd): + self.fd = fd + def detach(self): + return self.fd + +# +# Start child process using a fresh interpreter +# + +class Popen(popen_fork.Popen): + method = 'spawn' + DupFd = _DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return fd + + def _launch(self, process_obj): + from . 
import resource_tracker + tracker_fd = resource_tracker.getfd() + self._fds.append(tracker_fd) + prep_data = spawn.get_preparation_data(process_obj._name) + fp = io.BytesIO() + set_spawning_popen(self) + try: + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + finally: + set_spawning_popen(None) + + parent_r = child_w = child_r = parent_w = None + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + cmd = spawn.get_command_line(tracker_fd=tracker_fd, + pipe_handle=child_r) + self._fds.extend([child_r, child_w]) + self.pid = util.spawnv_passfds(spawn.get_executable(), + cmd, self._fds) + self.sentinel = parent_r + with open(parent_w, 'wb', closefd=False) as f: + f.write(fp.getbuffer()) + finally: + fds_to_close = [] + for fd in (parent_r, parent_w): + if fd is not None: + fds_to_close.append(fd) + self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) + + for fd in (child_r, child_w): + if fd is not None: + os.close(fd) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/process.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/process.py new file mode 100644 index 0000000000000000000000000000000000000000..4c887e3b164600f9fee2993315ad8fc2f85f29c8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/process.py @@ -0,0 +1,438 @@ +# +# Module providing the `Process` class which emulates `threading.Thread` +# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['BaseProcess', 'current_process', 'active_children', + 'parent_process'] + +# +# Imports +# + +import os +import sys +import signal +import itertools +import threading +from _weakrefset import WeakSet + +# +# +# + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +# +# Public functions +# + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + +def active_children(): + ''' + Return list of process objects corresponding to live child processes + ''' + _cleanup() + return list(_children) + + +def parent_process(): + ''' + Return process object representing the parent process + ''' + return _parent_process + +# +# +# + +def _cleanup(): + # check for processes which have finished + for p in list(_children): + if p._popen.poll() is not None: + _children.discard(p) + +# +# The `Process` class +# + +class BaseProcess(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analogous to `threading.Thread` + ''' + def _Popen(self): + raise NotImplementedError + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, + *, daemon=None): + assert group is None, 'group argument must be None for now' + count = next(_process_counter) + self._identity = _current_process._identity + (count,) + self._config = _current_process._config.copy() + self._parent_pid = os.getpid() + self._parent_name = _current_process.name + self._popen = None + self._closed = False + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) + self._name = name or type(self).__name__ + '-' + \ + ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon + _dangling.add(self) + + def _check_closed(self): + if self._closed: + raise ValueError("process object is closed") + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: 
+ self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + self._check_closed() + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + assert not _current_process._config.get('daemon'), \ + 'daemonic processes are not allowed to have children' + _cleanup() + self._popen = self._Popen(self) + self._sentinel = self._popen.sentinel + # Avoid a refcycle if the target function holds an indirect + # reference to the process object (see bpo-30775) + del self._target, self._args, self._kwargs + _children.add(self) + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.terminate() + + def kill(self): + ''' + Terminate process; sends SIGKILL signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.kill() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + self._check_closed() + assert self._parent_pid == os.getpid(), 'can only join a child process' + assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + self._check_closed() + if self is _current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + + if self._popen is None: + return False + + returncode = self._popen.poll() + if returncode is None: + return True + else: + _children.discard(self) + return False + + def close(self): + ''' + Close the Process object. + + This method releases resources held by the Process object. It is + an error to call this method if the child process is still running. + ''' + if self._popen is not None: + if self._popen.poll() is None: + raise ValueError("Cannot close a process while it is still running. " + "You should first call join() or terminate().") + self._popen.close() + self._popen = None + del self._sentinel + _children.discard(self) + self._closed = True + + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + assert isinstance(name, str), 'name must be a string' + self._name = name + + @property + def daemon(self): + ''' + Return whether process is a daemon + ''' + return self._config.get('daemon', False) + + @daemon.setter + def daemon(self, daemonic): + ''' + Set whether process is a daemon + ''' + assert self._popen is None, 'process has already started' + self._config['daemon'] = daemonic + + @property + def authkey(self): + return self._config['authkey'] + + @authkey.setter + def authkey(self, authkey): + ''' + Set authorization key of process + ''' + self._config['authkey'] = AuthenticationString(authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + self._check_closed() + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + self._check_closed() + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. 
+ ''' + self._check_closed() + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") from None + + def __repr__(self): + exitcode = None + if self is _current_process: + status = 'started' + elif self._closed: + status = 'closed' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + exitcode = self._popen.poll() + if exitcode is not None: + status = 'stopped' + else: + status = 'started' + + info = [type(self).__name__, 'name=%r' % self._name] + if self._popen is not None: + info.append('pid=%s' % self._popen.pid) + info.append('parent=%s' % self._parent_pid) + info.append(status) + if exitcode is not None: + exitcode = _exitcode_to_name.get(exitcode, exitcode) + info.append('exitcode=%s' % exitcode) + if self.daemon: + info.append('daemon') + return '<%s>' % ' '.join(info) + + ## + + def _bootstrap(self, parent_sentinel=None): + from . import util, context + global _current_process, _parent_process, _process_counter, _children + + try: + if self._start_method is not None: + context._force_start_method(self._start_method) + _process_counter = itertools.count(1) + _children = set() + util._close_stdin() + old_process = _current_process + _current_process = self + _parent_process = _ParentProcess( + self._parent_name, self._parent_pid, parent_sentinel) + if threading._HAVE_THREAD_NATIVE_ID: + threading.main_thread()._set_native_id() + try: + self._after_fork() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process calling self.run()') + try: + self.run() + exitcode = 0 + finally: + util._exit_function() + except SystemExit as e: + if e.code is None: + exitcode = 0 + elif isinstance(e.code, int): + exitcode = e.code + else: + sys.stderr.write(str(e.code) + '\n') + exitcode = 1 + except: + exitcode = 1 + import traceback + sys.stderr.write('Process %s:\n' % self.name) + traceback.print_exc() + finally: + threading._shutdown() + util.info('process exiting with exitcode %d' % exitcode) + util._flush_std_streams() + + return exitcode + + @staticmethod + def _after_fork(): + from . 
import util + util._finalizer_registry.clear() + util._run_after_forkers() + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + +class AuthenticationString(bytes): + def __reduce__(self): + from .context import get_spawning_popen + if get_spawning_popen() is None: + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons' + ) + return AuthenticationString, (bytes(self),) + + +# +# Create object representing the parent process +# + +class _ParentProcess(BaseProcess): + + def __init__(self, name, pid, sentinel): + self._identity = () + self._name = name + self._pid = pid + self._parent_pid = None + self._popen = None + self._closed = False + self._sentinel = sentinel + self._config = {} + + def is_alive(self): + from multiprocess.connection import wait + return not wait([self._sentinel], timeout=0) + + @property + def ident(self): + return self._pid + + def join(self, timeout=None): + ''' + Wait until parent process terminates + ''' + from multiprocess.connection import wait + wait([self._sentinel], timeout=timeout) + + pid = ident + +# +# Create object representing the main process +# + +class _MainProcess(BaseProcess): + + def __init__(self): + self._identity = () + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._closed = False + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': '/mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + # + # On MacOSX in a sandbox it may be necessary to use a + # different prefix -- see #19478. + # + # Everything in self._config will be inherited by descendant + # processes. + + def close(self): + pass + + +_parent_process = None +_current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in list(signal.__dict__.items()): + if name[:3]=='SIG' and '_' not in name: + _exitcode_to_name[-signum] = f'-{name}' + +# For debug and leak testing +_dangling = WeakSet() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/queues.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..72b3a0ccf29dfd34448459696111ceb3355aacef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/queues.py @@ -0,0 +1,382 @@ +# +# Module implementing queues +# +# multiprocessing/queues.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] + +import sys +import os +import threading +import collections +import time +import types +import weakref +import errno + +from queue import Empty, Full + +try: + import _multiprocess as _multiprocessing +except ImportError: + import _multiprocessing + +from . import connection +from . 
import context +_ForkingPickler = context.reduction.ForkingPickler + +from .util import debug, info, Finalize, register_after_fork, is_exiting + +# +# Queue type using a pipe, buffer and thread +# + +class Queue(object): + + def __init__(self, maxsize=0, *, ctx): + if maxsize <= 0: + # Can raise ImportError (see issues #3770 and #23400) + from .synchronize import SEM_VALUE_MAX as maxsize + self._maxsize = maxsize + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._opid = os.getpid() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + self._sem = ctx.BoundedSemaphore(maxsize) + # For use by concurrent.futures + self._ignore_epipe = False + self._reset() + + if sys.platform != 'win32': + register_after_fork(self, Queue._after_fork) + + def __getstate__(self): + context.assert_spawning(self) + return (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) + + def __setstate__(self, state): + (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) = state + self._reset() + + def _after_fork(self): + debug('Queue._after_fork()') + self._reset(after_fork=True) + + def _reset(self, after_fork=False): + if after_fork: + self._notempty._at_fork_reinit() + else: + self._notempty = threading.Condition(threading.Lock()) + self._buffer = collections.deque() + self._thread = None + self._jointhread = None + self._joincancelled = False + self._closed = False + self._close = None + self._send_bytes = self._writer.send_bytes + self._recv_bytes = self._reader.recv_bytes + self._poll = self._reader.poll + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._notempty.notify() + + def get(self, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if block and timeout is None: + with self._rlock: + res = self._recv_bytes() + self._sem.release() + else: + if block: + deadline = getattr(time,'monotonic',time.time)() + timeout + if not self._rlock.acquire(block, timeout): + raise Empty + try: + if block: + timeout = deadline - getattr(time,'monotonic',time.time)() + if not self._poll(timeout): + raise Empty + elif not self._poll(): + raise Empty + res = self._recv_bytes() + self._sem.release() + finally: + self._rlock.release() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def qsize(self): + # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() + return self._maxsize - self._sem._semlock._get_value() + + def empty(self): + return not self._poll() + + def full(self): + return self._sem._semlock._is_zero() + + def get_nowait(self): + return self.get(False) + + def put_nowait(self, obj): + return self.put(obj, False) + + def close(self): + self._closed = True + close = self._close + if close: + self._close = None + close() + + def join_thread(self): + debug('Queue.join_thread()') + assert self._closed, "Queue {0!r} not closed".format(self) + if self._jointhread: + self._jointhread() + + def cancel_join_thread(self): + debug('Queue.cancel_join_thread()') + self._joincancelled = True + try: + self._jointhread.cancel() + except AttributeError: + pass + + def _start_thread(self): + 
debug('Queue._start_thread()') + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=(self._buffer, self._notempty, self._send_bytes, + self._wlock, self._reader.close, self._writer.close, + self._ignore_epipe, self._on_queue_feeder_error, + self._sem), + name='QueueFeederThread' + ) + self._thread.daemon = True + + debug('doing self._thread.start()') + self._thread.start() + debug('... done self._thread.start()') + + if not self._joincancelled: + self._jointhread = Finalize( + self._thread, Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5 + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = Finalize( + self, Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10 + ) + + @staticmethod + def _finalize_join(twr): + debug('joining queue thread') + thread = twr() + if thread is not None: + thread.join() + debug('... queue thread joined') + else: + debug('... queue thread already dead') + + @staticmethod + def _finalize_close(buffer, notempty): + debug('telling queue thread to quit') + with notempty: + buffer.append(_sentinel) + notempty.notify() + + @staticmethod + def _feed(buffer, notempty, send_bytes, writelock, reader_close, + writer_close, ignore_epipe, onerror, queue_sem): + debug('starting thread to feed data to pipe') + nacquire = notempty.acquire + nrelease = notempty.release + nwait = notempty.wait + bpopleft = buffer.popleft + sentinel = _sentinel + if sys.platform != 'win32': + wacquire = writelock.acquire + wrelease = writelock.release + else: + wacquire = None + + while 1: + try: + nacquire() + try: + if not buffer: + nwait() + finally: + nrelease() + try: + while 1: + obj = bpopleft() + if obj is sentinel: + debug('feeder thread got sentinel -- exiting') + reader_close() + writer_close() + return + + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if wacquire is None: + send_bytes(obj) + else: + wacquire() + try: + send_bytes(obj) + finally: + wrelease() + except IndexError: + pass + except Exception as e: + if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: + return + # Since this runs in a daemon thread the resources it uses + # may be become unusable while the process is cleaning up. + # We ignore errors which happen after the process has + # started to cleanup. + if is_exiting(): + info('error in queue thread: %s', e) + return + else: + # Since the object has not been sent in the queue, we need + # to decrease the size of the queue. The error acts as + # if the object had been silently removed from the queue + # and this step is necessary to have a properly working + # queue. + queue_sem.release() + onerror(e, obj) + + @staticmethod + def _on_queue_feeder_error(e, obj): + """ + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. + """ + import traceback + traceback.print_exc() + + +_sentinel = object() + +# +# A queue type which also supports join() and task_done() methods +# +# Note that if you do not call task_done() for each finished task then +# eventually the counter's semaphore may overflow causing Bad Things +# to happen. 
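+#
+# A minimal usage sketch of that join()/task_done() protocol (not part of the
+# upstream module; the `worker` function and item count are illustrative
+# assumptions):
+#
+#     import multiprocess as mp
+#
+#     def worker(q):
+#         while True:
+#             item = q.get()
+#             try:
+#                 pass              # process `item` here
+#             finally:
+#                 q.task_done()     # exactly one task_done() per get()
+#
+#     if __name__ == '__main__':
+#         q = mp.JoinableQueue()
+#         for i in range(8):
+#             q.put(i)
+#         mp.Process(target=worker, args=(q,), daemon=True).start()
+#         q.join()                  # returns once every put() item saw task_done()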
+# + +class JoinableQueue(Queue): + + def __init__(self, maxsize=0, *, ctx): + Queue.__init__(self, maxsize, ctx=ctx) + self._unfinished_tasks = ctx.Semaphore(0) + self._cond = ctx.Condition() + + def __getstate__(self): + return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) + + def __setstate__(self, state): + Queue.__setstate__(self, state[:-2]) + self._cond, self._unfinished_tasks = state[-2:] + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty, self._cond: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._unfinished_tasks.release() + self._notempty.notify() + + def task_done(self): + with self._cond: + if not self._unfinished_tasks.acquire(False): + raise ValueError('task_done() called too many times') + if self._unfinished_tasks._semlock._is_zero(): + self._cond.notify_all() + + def join(self): + with self._cond: + if not self._unfinished_tasks._semlock._is_zero(): + self._cond.wait() + +# +# Simplified Queue type -- really just a locked pipe +# + +class SimpleQueue(object): + + def __init__(self, *, ctx): + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._poll = self._reader.poll + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + + def close(self): + self._reader.close() + self._writer.close() + + def empty(self): + return not self._poll() + + def __getstate__(self): + context.assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock) = state + self._poll = self._reader.poll + + def get(self): + with self._rlock: + res = self._reader.recv_bytes() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def put(self, obj): + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/reduction.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..39b132c5e17067a215184866a25654b70b7bea1f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/reduction.py @@ -0,0 +1,284 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from abc import ABCMeta +import copyreg +import functools +import io +import os +try: + import dill as pickle +except ImportError: + import pickle +import socket +import sys + +from . 
import context + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocess.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args, **kwds): + super().__init__(*args, **kwds) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None, *args, **kwds): + buf = io.BytesIO() + cls(buf, protocol, *args, **kwds).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + +def dump(obj, file, protocol=None, *args, **kwds): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol, *args, **kwds).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False, + *, source_process=None): + '''Duplicate a handle. (target_process is a handle not a pid!)''' + current_process = _winapi.GetCurrentProcess() + if source_process is None: + source_process = current_process + if target_process is None: + target_process = current_process + return _winapi.DuplicateHandle( + source_process, handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. + pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. 
+ proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + # On MacOSX we should acknowledge receipt of fds -- see Issue14669 + ACKNOWLEDGE = sys.platform == 'darwin' + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if ACKNOWLEDGE and sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + if ACKNOWLEDGE: + sock.send(b'A') + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + if len(a) % 256 != msg[0]: + raise AssertionError( + "Len is {0:n} but msg[0] is {1!r}".format( + len(a), msg[0])) + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def DupFd(fd): + '''Return a wrapper for an fd.''' + popen_obj = context.get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from . 
import resource_sharer + return resource_sharer.DupFd(fd) + else: + raise ValueError('SCM_RIGHTS appears not to be available') + +# +# Try making some callable types picklable +# + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + def _reduce_socket(s): + from .resource_sharer import DupSocket + return _rebuild_socket, (DupSocket(s),) + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) + + +class AbstractReducer(metaclass=ABCMeta): + '''Abstract base class for use in implementing a Reduction class + suitable for use in replacing the standard reduction mechanism + used in multiprocess.''' + ForkingPickler = ForkingPickler + register = register + dump = dump + send_handle = send_handle + recv_handle = recv_handle + + if sys.platform == 'win32': + steal_handle = steal_handle + duplicate = duplicate + DupHandle = DupHandle + else: + sendfds = sendfds + recvfds = recvfds + DupFd = DupFd + + _reduce_method = _reduce_method + _reduce_method_descriptor = _reduce_method_descriptor + _rebuild_partial = _rebuild_partial + _reduce_socket = _reduce_socket + _rebuild_socket = _rebuild_socket + + def __init__(self, *args): + register(type(_C().f), _reduce_method) + register(type(list.append), _reduce_method_descriptor) + register(type(int.__add__), _reduce_method_descriptor) + register(functools.partial, _reduce_partial) + register(socket.socket, _reduce_socket) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/resource_tracker.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/resource_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..983e57e0f5ba1da0525f1757265b6f8f418afeb9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/resource_tracker.py @@ -0,0 +1,242 @@ +############################################################################### +# Server process to keep track of unlinked resources (like shared memory +# segments, semaphores etc.) and clean them. +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. Every other process of the program has a copy of the writable +# end of the pipe, so we get EOF when all other processes have exited. +# Then the server process unlinks any remaining resource names. +# +# This is important because there may be system limits for such resources: for +# instance, the system only supports a limited number of named semaphores, and +# shared-memory segments live in the RAM. 
If a python process leaks such a +# resource, this resource will not be removed till the next reboot. Without +# this resource tracker process, "killall python" would probably leave unlinked +# resources. + +import os +import signal +import sys +import threading +import warnings + +from . import spawn +from . import util + +__all__ = ['ensure_running', 'register', 'unregister'] + +_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') +_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) + +_CLEANUP_FUNCS = { + 'noop': lambda: None, +} + +if os.name == 'posix': + try: + import _multiprocess as _multiprocessing + except ImportError: + import _multiprocessing + import _posixshmem + + # Use sem_unlink() to clean up named semaphores. + # + # sem_unlink() may be missing if the Python build process detected the + # absence of POSIX named semaphores. In that case, no named semaphores were + # ever opened, so no cleanup would be necessary. + if hasattr(_multiprocessing, 'sem_unlink'): + _CLEANUP_FUNCS.update({ + 'semaphore': _multiprocessing.sem_unlink, + }) + _CLEANUP_FUNCS.update({ + 'shared_memory': _posixshmem.shm_unlink, + }) + + +class ResourceTracker(object): + + def __init__(self): + self._lock = threading.Lock() + self._fd = None + self._pid = None + + def _stop(self): + with self._lock: + if self._fd is None: + # not running + return + + # closing the "alive" file descriptor stops main() + os.close(self._fd) + self._fd = None + + os.waitpid(self._pid, 0) + self._pid = None + + def getfd(self): + self.ensure_running() + return self._fd + + def ensure_running(self): + '''Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.''' + with self._lock: + if self._fd is not None: + # resource tracker was launched before, is it still running? + if self._check_alive(): + # => still alive + return + # => dead, launch it again + os.close(self._fd) + + # Clean-up to avoid dangling processes. + try: + # _pid can be None if this process is a child from another + # python process, which has started the resource_tracker. + if self._pid is not None: + os.waitpid(self._pid, 0) + except ChildProcessError: + # The resource_tracker has already been terminated. + pass + self._fd = None + self._pid = None + + warnings.warn('resource_tracker: process died unexpectedly, ' + 'relaunching. Some resources might leak.') + + fds_to_pass = [] + try: + fds_to_pass.append(sys.stderr.fileno()) + except Exception: + pass + cmd = 'from multiprocess.resource_tracker import main;main(%d)' + r, w = os.pipe() + try: + fds_to_pass.append(r) + # process will out live us, so no need to wait on pid + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd % r] + # bpo-33613: Register a signal mask that will block the signals. + # This signal mask will be inherited by the child that is going + # to be spawned and will protect the child from a race condition + # that can make the child die before it registers signal handlers + # for SIGINT and SIGTERM. The mask is unregistered after spawning + # the child. 
+ try: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) + pid = util.spawnv_passfds(exe, args, fds_to_pass) + finally: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + except: + os.close(w) + raise + else: + self._fd = w + self._pid = pid + finally: + os.close(r) + + def _check_alive(self): + '''Check that the pipe has not been closed by sending a probe.''' + try: + # We cannot use send here as it calls ensure_running, creating + # a cycle. + os.write(self._fd, b'PROBE:0:noop\n') + except OSError: + return False + else: + return True + + def register(self, name, rtype): + '''Register name of resource with resource tracker.''' + self._send('REGISTER', name, rtype) + + def unregister(self, name, rtype): + '''Unregister name of resource with resource tracker.''' + self._send('UNREGISTER', name, rtype) + + def _send(self, cmd, name, rtype): + self.ensure_running() + msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') + if len(msg) > 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF >= 512 + raise ValueError('msg too long') + nbytes = os.write(self._fd, msg) + assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( + nbytes, len(msg)) + + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + +def main(fd): + '''Run resource tracker.''' + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} + try: + # keep track of registered/unregistered resources + with open(fd, 'rb') as f: + for line in f: + try: + cmd, name, rtype = line.strip().decode('ascii').split(':') + cleanup_func = _CLEANUP_FUNCS.get(rtype, None) + if cleanup_func is None: + raise ValueError( + f'Cannot register {name} for automatic cleanup: ' + f'unknown resource type {rtype}') + + if cmd == 'REGISTER': + cache[rtype].add(name) + elif cmd == 'UNREGISTER': + cache[rtype].remove(name) + elif cmd == 'PROBE': + pass + else: + raise RuntimeError('unrecognized command %r' % cmd) + except Exception: + try: + sys.excepthook(*sys.exc_info()) + except: + pass + finally: + # all processes have terminated; cleanup any remaining resources + for rtype, rtype_cache in cache.items(): + if rtype_cache: + try: + warnings.warn('resource_tracker: There appear to be %d ' + 'leaked %s objects to clean up at shutdown' % + (len(rtype_cache), rtype)) + except Exception: + pass + for name in rtype_cache: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore unlink it. 
+ try: + try: + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + warnings.warn('resource_tracker: %r: %s' % (name, e)) + finally: + pass diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/shared_memory.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/shared_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1e5aa17b87a232a831b5d250b8e0c2f15f574d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/shared_memory.py @@ -0,0 +1,534 @@ +"""Provides shared memory for direct access across processes. + +The API of this package is currently provisional. Refer to the +documentation for details. +""" + + +__all__ = [ 'SharedMemory', 'ShareableList' ] + + +from functools import partial +import mmap +import os +import errno +import struct +import secrets +import types + +if os.name == "nt": + import _winapi + _USE_POSIX = False +else: + import _posixshmem + _USE_POSIX = True + +from . import resource_tracker + +_O_CREX = os.O_CREAT | os.O_EXCL + +# FreeBSD (and perhaps other BSDs) limit names to 14 characters. +_SHM_SAFE_NAME_LENGTH = 14 + +# Shared memory block name prefix +if _USE_POSIX: + _SHM_NAME_PREFIX = '/psm_' +else: + _SHM_NAME_PREFIX = 'wnsm_' + + +def _make_filename(): + "Create a random filename for the shared memory object." + # number of random bytes to use for name + nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 + assert nbytes >= 2, '_SHM_NAME_PREFIX too long' + name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) + assert len(name) <= _SHM_SAFE_NAME_LENGTH + return name + + +class SharedMemory: + """Creates a new shared memory block or attaches to an existing + shared memory block. + + Every shared memory block is assigned a unique name. This enables + one process to create a shared memory block with a particular name + so that a different process can attach to that same shared memory + block using that same name. + + As a resource for sharing data across processes, shared memory blocks + may outlive the original process that created them. When one process + no longer needs access to a shared memory block that might still be + needed by other processes, the close() method should be called. + When a shared memory block is no longer needed by any process, the + unlink() method should be called to ensure proper cleanup.""" + + # Defaults; enables close() and unlink() to run without errors. 
+ _name = None + _fd = -1 + _mmap = None + _buf = None + _flags = os.O_RDWR + _mode = 0o600 + _prepend_leading_slash = True if _USE_POSIX else False + + def __init__(self, name=None, create=False, size=0): + if not size >= 0: + raise ValueError("'size' must be a positive integer") + if create: + self._flags = _O_CREX | os.O_RDWR + if size == 0: + raise ValueError("'size' must be a positive number different from zero") + if name is None and not self._flags & os.O_EXCL: + raise ValueError("'name' can only be None if create=True") + + if _USE_POSIX: + + # POSIX Shared Memory + + if name is None: + while True: + name = _make_filename() + try: + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + except FileExistsError: + continue + self._name = name + break + else: + name = "/" + name if self._prepend_leading_slash else name + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + self._name = name + try: + if create and size: + os.ftruncate(self._fd, size) + stats = os.fstat(self._fd) + size = stats.st_size + self._mmap = mmap.mmap(self._fd, size) + except OSError: + self.unlink() + raise + + resource_tracker.register(self._name, "shared_memory") + + else: + + # Windows Named Shared Memory + + if create: + while True: + temp_name = _make_filename() if name is None else name + # Create and reserve shared memory block with this name + # until it can be attached to by mmap. + h_map = _winapi.CreateFileMapping( + _winapi.INVALID_HANDLE_VALUE, + _winapi.NULL, + _winapi.PAGE_READWRITE, + (size >> 32) & 0xFFFFFFFF, + size & 0xFFFFFFFF, + temp_name + ) + try: + last_error_code = _winapi.GetLastError() + if last_error_code == _winapi.ERROR_ALREADY_EXISTS: + if name is not None: + raise FileExistsError( + errno.EEXIST, + os.strerror(errno.EEXIST), + name, + _winapi.ERROR_ALREADY_EXISTS + ) + else: + continue + self._mmap = mmap.mmap(-1, size, tagname=temp_name) + finally: + _winapi.CloseHandle(h_map) + self._name = temp_name + break + + else: + self._name = name + # Dynamically determine the existing named shared memory + # block's size which is likely a multiple of mmap.PAGESIZE. + h_map = _winapi.OpenFileMapping( + _winapi.FILE_MAP_READ, + False, + name + ) + try: + p_buf = _winapi.MapViewOfFile( + h_map, + _winapi.FILE_MAP_READ, + 0, + 0, + 0 + ) + finally: + _winapi.CloseHandle(h_map) + try: + size = _winapi.VirtualQuerySize(p_buf) + finally: + _winapi.UnmapViewOfFile(p_buf) + self._mmap = mmap.mmap(-1, size, tagname=name) + + self._size = size + self._buf = memoryview(self._mmap) + + def __del__(self): + try: + self.close() + except OSError: + pass + + def __reduce__(self): + return ( + self.__class__, + ( + self.name, + False, + self.size, + ), + ) + + def __repr__(self): + return f'{self.__class__.__name__}({self.name!r}, size={self.size})' + + @property + def buf(self): + "A memoryview of contents of the shared memory block." + return self._buf + + @property + def name(self): + "Unique name that identifies the shared memory block." + reported_name = self._name + if _USE_POSIX and self._prepend_leading_slash: + if self._name.startswith("/"): + reported_name = self._name[1:] + return reported_name + + @property + def size(self): + "Size in bytes." 
+ return self._size + + def close(self): + """Closes access to the shared memory from this instance but does + not destroy the shared memory block.""" + if self._buf is not None: + self._buf.release() + self._buf = None + if self._mmap is not None: + self._mmap.close() + self._mmap = None + if _USE_POSIX and self._fd >= 0: + os.close(self._fd) + self._fd = -1 + + def unlink(self): + """Requests that the underlying shared memory block be destroyed. + + In order to ensure proper cleanup of resources, unlink should be + called once (and only once) across all processes which have access + to the shared memory block.""" + if _USE_POSIX and self._name: + _posixshmem.shm_unlink(self._name) + resource_tracker.unregister(self._name, "shared_memory") + + +_encoding = "utf8" + +class ShareableList: + """Pattern for a mutable list-like object shareable via a shared + memory block. It differs from the built-in list type in that these + lists can not change their overall length (i.e. no append, insert, + etc.) + + Because values are packed into a memoryview as bytes, the struct + packing format for any storable value must require no more than 8 + characters to describe its format.""" + + # The shared memory area is organized as follows: + # - 8 bytes: number of items (N) as a 64-bit integer + # - (N + 1) * 8 bytes: offsets of each element from the start of the + # data area + # - K bytes: the data area storing item values (with encoding and size + # depending on their respective types) + # - N * 8 bytes: `struct` format string for each element + # - N bytes: index into _back_transforms_mapping for each element + # (for reconstructing the corresponding Python value) + _types_mapping = { + int: "q", + float: "d", + bool: "xxxxxxx?", + str: "%ds", + bytes: "%ds", + None.__class__: "xxxxxx?x", + } + _alignment = 8 + _back_transforms_mapping = { + 0: lambda value: value, # int, float, bool + 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str + 2: lambda value: value.rstrip(b'\x00'), # bytes + 3: lambda _value: None, # None + } + + @staticmethod + def _extract_recreation_code(value): + """Used in concert with _back_transforms_mapping to convert values + into the appropriate Python objects when retrieving them from + the list as well as when storing them.""" + if not isinstance(value, (str, bytes, None.__class__)): + return 0 + elif isinstance(value, str): + return 1 + elif isinstance(value, bytes): + return 2 + else: + return 3 # NoneType + + def __init__(self, sequence=None, *, name=None): + if name is None or sequence is not None: + sequence = sequence or () + _formats = [ + self._types_mapping[type(item)] + if not isinstance(item, (str, bytes)) + else self._types_mapping[type(item)] % ( + self._alignment * (len(item) // self._alignment + 1), + ) + for item in sequence + ] + self._list_len = len(_formats) + assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len + offset = 0 + # The offsets of each list element into the shared memory's + # data area (0 meaning the start of the data area, not the start + # of the shared memory area). 
+ self._allocated_offsets = [0] + for fmt in _formats: + offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) + self._allocated_offsets.append(offset) + _recreation_codes = [ + self._extract_recreation_code(item) for item in sequence + ] + requested_size = struct.calcsize( + "q" + self._format_size_metainfo + + "".join(_formats) + + self._format_packing_metainfo + + self._format_back_transform_codes + ) + + self.shm = SharedMemory(name, create=True, size=requested_size) + else: + self.shm = SharedMemory(name) + + if sequence is not None: + _enc = _encoding + struct.pack_into( + "q" + self._format_size_metainfo, + self.shm.buf, + 0, + self._list_len, + *(self._allocated_offsets) + ) + struct.pack_into( + "".join(_formats), + self.shm.buf, + self._offset_data_start, + *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) + ) + struct.pack_into( + self._format_packing_metainfo, + self.shm.buf, + self._offset_packing_formats, + *(v.encode(_enc) for v in _formats) + ) + struct.pack_into( + self._format_back_transform_codes, + self.shm.buf, + self._offset_back_transform_codes, + *(_recreation_codes) + ) + + else: + self._list_len = len(self) # Obtains size from offset 0 in buffer. + self._allocated_offsets = list( + struct.unpack_from( + self._format_size_metainfo, + self.shm.buf, + 1 * 8 + ) + ) + + def _get_packing_format(self, position): + "Gets the packing format for a single value stored in the list." + position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + v = struct.unpack_from( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8 + )[0] + fmt = v.rstrip(b'\x00') + fmt_as_str = fmt.decode(_encoding) + + return fmt_as_str + + def _get_back_transform(self, position): + "Gets the back transformation function for a single value." 
+ + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + transform_code = struct.unpack_from( + "b", + self.shm.buf, + self._offset_back_transform_codes + position + )[0] + transform_function = self._back_transforms_mapping[transform_code] + + return transform_function + + def _set_packing_format_and_transform(self, position, fmt_as_str, value): + """Sets the packing format and back transformation code for a + single value in the list at the specified position.""" + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + struct.pack_into( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8, + fmt_as_str.encode(_encoding) + ) + + transform_code = self._extract_recreation_code(value) + struct.pack_into( + "b", + self.shm.buf, + self._offset_back_transform_codes + position, + transform_code + ) + + def __getitem__(self, position): + position = position if position >= 0 else position + self._list_len + try: + offset = self._offset_data_start + self._allocated_offsets[position] + (v,) = struct.unpack_from( + self._get_packing_format(position), + self.shm.buf, + offset + ) + except IndexError: + raise IndexError("index out of range") + + back_transform = self._get_back_transform(position) + v = back_transform(v) + + return v + + def __setitem__(self, position, value): + position = position if position >= 0 else position + self._list_len + try: + item_offset = self._allocated_offsets[position] + offset = self._offset_data_start + item_offset + current_format = self._get_packing_format(position) + except IndexError: + raise IndexError("assignment index out of range") + + if not isinstance(value, (str, bytes)): + new_format = self._types_mapping[type(value)] + encoded_value = value + else: + allocated_length = self._allocated_offsets[position + 1] - item_offset + + encoded_value = (value.encode(_encoding) + if isinstance(value, str) else value) + if len(encoded_value) > allocated_length: + raise ValueError("bytes/str item exceeds available storage") + if current_format[-1] == "s": + new_format = current_format + else: + new_format = self._types_mapping[str] % ( + allocated_length, + ) + + self._set_packing_format_and_transform( + position, + new_format, + value + ) + struct.pack_into(new_format, self.shm.buf, offset, encoded_value) + + def __reduce__(self): + return partial(self.__class__, name=self.shm.name), () + + def __len__(self): + return struct.unpack_from("q", self.shm.buf, 0)[0] + + def __repr__(self): + return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' + + @property + def format(self): + "The struct packing format used by all currently stored items." + return "".join( + self._get_packing_format(i) for i in range(self._list_len) + ) + + @property + def _format_size_metainfo(self): + "The struct packing format used for the items' storage offsets." + return "q" * (self._list_len + 1) + + @property + def _format_packing_metainfo(self): + "The struct packing format used for the items' packing formats." + return "8s" * self._list_len + + @property + def _format_back_transform_codes(self): + "The struct packing format used for the items' back transforms." 
+ return "b" * self._list_len + + @property + def _offset_data_start(self): + # - 8 bytes for the list length + # - (N + 1) * 8 bytes for the element offsets + return (self._list_len + 2) * 8 + + @property + def _offset_packing_formats(self): + return self._offset_data_start + self._allocated_offsets[-1] + + @property + def _offset_back_transform_codes(self): + return self._offset_packing_formats + self._list_len * 8 + + def count(self, value): + "L.count(value) -> integer -- return number of occurrences of value." + + return sum(value == entry for entry in self) + + def index(self, value): + """L.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present.""" + + for position, entry in enumerate(self): + if value == entry: + return position + else: + raise ValueError(f"{value!r} not in this container") + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/sharedctypes.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/sharedctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..6071707027bea46dccfc4d92fb3195060faf07ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/sharedctypes.py @@ -0,0 +1,240 @@ +# +# Module which supports allocation of ctypes objects from shared memory +# +# multiprocessing/sharedctypes.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import ctypes +import weakref + +from . import heap +from . import get_context + +from .context import reduction, assert_spawning +_ForkingPickler = reduction.ForkingPickler + +__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] + +# +# +# + +typecode_to_type = { + 'c': ctypes.c_char, 'u': ctypes.c_wchar, + 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, + 'h': ctypes.c_short, 'H': ctypes.c_ushort, + 'i': ctypes.c_int, 'I': ctypes.c_uint, + 'l': ctypes.c_long, 'L': ctypes.c_ulong, + 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, + 'f': ctypes.c_float, 'd': ctypes.c_double + } + +# +# +# + +def _new_value(type_): + size = ctypes.sizeof(type_) + wrapper = heap.BufferWrapper(size) + return rebuild_ctype(type_, wrapper, None) + +def RawValue(typecode_or_type, *args): + ''' + Returns a ctypes object allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + obj.__init__(*args) + return obj + +def RawArray(typecode_or_type, size_or_initializer): + ''' + Returns a ctypes array allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + if isinstance(size_or_initializer, int): + type_ = type_ * size_or_initializer + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + return obj + else: + type_ = type_ * len(size_or_initializer) + result = _new_value(type_) + result.__init__(*size_or_initializer) + return result + +def Value(typecode_or_type, *args, lock=True, ctx=None): + ''' + Return a synchronization wrapper for a Value + ''' + obj = RawValue(typecode_or_type, *args) + if lock is False: + return obj + if lock in (True, None): + ctx = ctx or get_context() + lock = ctx.RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("%r has no method 'acquire'" % lock) + return synchronized(obj, lock, ctx=ctx) + +def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): + ''' + 
Return a synchronization wrapper for a RawArray + ''' + obj = RawArray(typecode_or_type, size_or_initializer) + if lock is False: + return obj + if lock in (True, None): + ctx = ctx or get_context() + lock = ctx.RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("%r has no method 'acquire'" % lock) + return synchronized(obj, lock, ctx=ctx) + +def copy(obj): + new_obj = _new_value(type(obj)) + ctypes.pointer(new_obj)[0] = obj + return new_obj + +def synchronized(obj, lock=None, ctx=None): + assert not isinstance(obj, SynchronizedBase), 'object already synchronized' + ctx = ctx or get_context() + + if isinstance(obj, ctypes._SimpleCData): + return Synchronized(obj, lock, ctx) + elif isinstance(obj, ctypes.Array): + if obj._type_ is ctypes.c_char: + return SynchronizedString(obj, lock, ctx) + return SynchronizedArray(obj, lock, ctx) + else: + cls = type(obj) + try: + scls = class_cache[cls] + except KeyError: + names = [field[0] for field in cls._fields_] + d = {name: make_property(name) for name in names} + classname = 'Synchronized' + cls.__name__ + scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) + return scls(obj, lock, ctx) + +# +# Functions for pickling/unpickling +# + +def reduce_ctype(obj): + assert_spawning(obj) + if isinstance(obj, ctypes.Array): + return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) + else: + return rebuild_ctype, (type(obj), obj._wrapper, None) + +def rebuild_ctype(type_, wrapper, length): + if length is not None: + type_ = type_ * length + _ForkingPickler.register(type_, reduce_ctype) + buf = wrapper.create_memoryview() + obj = type_.from_buffer(buf) + obj._wrapper = wrapper + return obj + +# +# Function to create properties +# + +def make_property(name): + try: + return prop_cache[name] + except KeyError: + d = {} + exec(template % ((name,)*7), d) + prop_cache[name] = d[name] + return d[name] + +template = ''' +def get%s(self): + self.acquire() + try: + return self._obj.%s + finally: + self.release() +def set%s(self, value): + self.acquire() + try: + self._obj.%s = value + finally: + self.release() +%s = property(get%s, set%s) +''' + +prop_cache = {} +class_cache = weakref.WeakKeyDictionary() + +# +# Synchronized wrappers +# + +class SynchronizedBase(object): + + def __init__(self, obj, lock=None, ctx=None): + self._obj = obj + if lock: + self._lock = lock + else: + ctx = ctx or get_context(force=True) + self._lock = ctx.RLock() + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def __reduce__(self): + assert_spawning(self) + return synchronized, (self._obj, self._lock) + + def get_obj(self): + return self._obj + + def get_lock(self): + return self._lock + + def __repr__(self): + return '<%s wrapper for %s>' % (type(self).__name__, self._obj) + + +class Synchronized(SynchronizedBase): + value = make_property('value') + + +class SynchronizedArray(SynchronizedBase): + + def __len__(self): + return len(self._obj) + + def __getitem__(self, i): + with self: + return self._obj[i] + + def __setitem__(self, i, value): + with self: + self._obj[i] = value + + def __getslice__(self, start, stop): + with self: + return self._obj[start:stop] + + def __setslice__(self, start, stop, values): + with self: + self._obj[start:stop] = values + + +class SynchronizedString(SynchronizedArray): + value = make_property('value') + raw = make_property('raw') diff --git 
a/env-llmeval/lib/python3.10/site-packages/multiprocess/spawn.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..2c0283d80c7799a09e83b363e3f956ebbb665e42 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/spawn.py @@ -0,0 +1,297 @@ +# +# Code used to start processes when using the spawn or forkserver +# start methods. +# +# multiprocessing/spawn.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import sys +import runpy +import types + +from . import get_start_method, set_start_method +from . import process +from .context import reduction +from . import util + +__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', + 'get_preparation_data', 'get_command_line', 'import_main_path'] + +# +# _python_exe is the assumed path to the python executable. +# People embedding Python want to modify it. +# + +if sys.platform != 'win32': + WINEXE = False + WINSERVICE = False +else: + WINEXE = getattr(sys, 'frozen', False) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, 'python.exe') +else: + _python_exe = sys.executable + +def set_executable(exe): + global _python_exe + _python_exe = exe + +def get_executable(): + return _python_exe + +# +# +# + +def is_forking(argv): + ''' + Return whether commandline indicates we are forking + ''' + if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': + return True + else: + return False + + +def freeze_support(): + ''' + Run code for process object if this in not the main process + ''' + if is_forking(sys.argv): + kwds = {} + for arg in sys.argv[2:]: + name, value = arg.split('=') + if value == 'None': + kwds[name] = None + else: + kwds[name] = int(value) + spawn_main(**kwds) + sys.exit() + + +def get_command_line(**kwds): + ''' + Returns prefix of command line used for spawning a child process + ''' + if getattr(sys, 'frozen', False): + return ([sys.executable, '--multiprocessing-fork'] + + ['%s=%r' % item for item in kwds.items()]) + else: + prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' + prog %= ', '.join('%s=%r' % item for item in kwds.items()) + opts = util._args_from_interpreter_flags() + return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] + + +def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): + ''' + Run code specified by data received over pipe + ''' + assert is_forking(sys.argv), "Not forking" + if sys.platform == 'win32': + import msvcrt + import _winapi + + if parent_pid is not None: + source_process = _winapi.OpenProcess( + _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, + False, parent_pid) + else: + source_process = None + new_handle = reduction.duplicate(pipe_handle, + source_process=source_process) + fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) + parent_sentinel = source_process + else: + from . 
import resource_tracker + resource_tracker._resource_tracker._fd = tracker_fd + fd = pipe_handle + parent_sentinel = os.dup(pipe_handle) + exitcode = _main(fd, parent_sentinel) + sys.exit(exitcode) + + +def _main(fd, parent_sentinel): + with os.fdopen(fd, 'rb', closefd=True) as from_parent: + process.current_process()._inheriting = True + try: + preparation_data = reduction.pickle.load(from_parent) + prepare(preparation_data) + self = reduction.pickle.load(from_parent) + finally: + del process.current_process()._inheriting + return self._bootstrap(parent_sentinel) + + +def _check_not_importing_main(): + if getattr(process.current_process(), '_inheriting', False): + raise RuntimeError(''' + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... + + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.''') + + +def get_preparation_data(name): + ''' + Return info about parent needed by child to unpickle process object + ''' + _check_not_importing_main() + d = dict( + log_to_stderr=util._log_to_stderr, + authkey=process.current_process().authkey, + ) + + if util._logger is not None: + d['log_level'] = util._logger.getEffectiveLevel() + + sys_path=sys.path.copy() + try: + i = sys_path.index('') + except ValueError: + pass + else: + sys_path[i] = process.ORIGINAL_DIR + + d.update( + name=name, + sys_path=sys_path, + sys_argv=sys.argv, + orig_dir=process.ORIGINAL_DIR, + dir=os.getcwd(), + start_method=get_start_method(), + ) + + # Figure out whether to initialise main in the subprocess as a module + # or through direct execution (or to leave it alone entirely) + main_module = sys.modules['__main__'] + main_mod_name = getattr(main_module.__spec__, "name", None) + if main_mod_name is not None: + d['init_main_from_name'] = main_mod_name + elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): + main_path = getattr(main_module, '__file__', None) + if main_path is not None: + if (not os.path.isabs(main_path) and + process.ORIGINAL_DIR is not None): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d['init_main_from_path'] = os.path.normpath(main_path) + + return d + +# +# Prepare current process +# + +old_main_modules = [] + +def prepare(data): + ''' + Try to get current process ready to unpickle process object + ''' + if 'name' in data: + process.current_process().name = data['name'] + + if 'authkey' in data: + process.current_process().authkey = data['authkey'] + + if 'log_to_stderr' in data and data['log_to_stderr']: + util.log_to_stderr() + + if 'log_level' in data: + util.get_logger().setLevel(data['log_level']) + + if 'sys_path' in data: + sys.path = data['sys_path'] + + if 'sys_argv' in data: + sys.argv = data['sys_argv'] + + if 'dir' in data: + os.chdir(data['dir']) + + if 'orig_dir' in data: + process.ORIGINAL_DIR = data['orig_dir'] + + if 'start_method' in data: + set_start_method(data['start_method'], force=True) + + if 'init_main_from_name' in data: + _fixup_main_from_name(data['init_main_from_name']) + elif 'init_main_from_path' in data: + _fixup_main_from_path(data['init_main_from_path']) + +# Multiprocessing module helpers to fix up the main module in +# spawned subprocesses +def _fixup_main_from_name(mod_name): + # __main__.py files for 
packages, directories, zip archives, etc, run + # their "main only" code unconditionally, so we don't even try to + # populate anything in __main__, nor do we make any changes to + # __main__ attributes + current_main = sys.modules['__main__'] + if mod_name == "__main__" or mod_name.endswith(".__main__"): + return + + # If this process was forked, __main__ may already be populated + if getattr(current_main.__spec__, "name", None) == mod_name: + return + + # Otherwise, __main__ may contain some non-main code where we need to + # support unpickling it properly. We rerun it as __mp_main__ and make + # the normal __main__ an alias to that + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_module(mod_name, + run_name="__mp_main__", + alter_sys=True) + main_module.__dict__.update(main_content) + sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module + + +def _fixup_main_from_path(main_path): + # If this process was forked, __main__ may already be populated + current_main = sys.modules['__main__'] + + # Unfortunately, the main ipython launch script historically had no + # "if __name__ == '__main__'" guard, so we work around that + # by treating it like a __main__.py file + # See https://github.com/ipython/ipython/issues/4698 + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == 'ipython': + return + + # Otherwise, if __file__ already has the setting we expect, + # there's nothing more to do + if getattr(current_main, '__file__', None) == main_path: + return + + # If the parent process has sent a path through rather than a module + # name we assume it is an executable script that may contain + # non-main code that needs to be executed + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_path(main_path, + run_name="__mp_main__") + main_module.__dict__.update(main_content) + sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module + + +def import_main_path(main_path): + ''' + Set sys.modules['__main__'] to module at main_path + ''' + _fixup_main_from_path(main_path) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/synchronize.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/synchronize.py new file mode 100644 index 0000000000000000000000000000000000000000..3d1c6c580ef46fb91954b6c9c15137ae0a2caa1a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/synchronize.py @@ -0,0 +1,400 @@ +# +# Module implementing synchronization primitives +# +# multiprocessing/synchronize.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' + ] + +import threading +import sys +import tempfile +try: + import _multiprocess as _multiprocessing +except ImportError: + import _multiprocessing +import time + +from . import context +from . import process +from . import util + +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. 
+# See issue 3770 +try: + from _multiprocess import SemLock, sem_unlink +except ImportError: + try: + from _multiprocessing import SemLock, sem_unlink + except (ImportError): + raise ImportError("This platform lacks a functioning sem_open" + + " implementation, therefore, the required" + + " synchronization primitives needed will not" + + " function, see issue 3770.") + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + +class SemLock(object): + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, *, ctx): + if ctx is None: + ctx = context._default_context.get_context() + name = ctx.get_start_method() + unlink_now = sys.platform == 'win32' or name == 'fork' + for i in range(100): + try: + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), + unlink_now) + except FileExistsError: + pass + else: + break + else: + raise FileExistsError('cannot find name for semaphore') + + util.debug('created semlock with handle %s' % sl.handle) + self._make_methods() + + if sys.platform != 'win32': + def _after_fork(obj): + obj._semlock._after_fork() + util.register_after_fork(self, _after_fork) + + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. When the object is garbage collected or the + # process shuts down we unlink the semaphore name + from .resource_tracker import register + register(self._semlock.name, "semaphore") + util.Finalize(self, SemLock._cleanup, (self._semlock.name,), + exitpriority=0) + + @staticmethod + def _cleanup(name): + from .resource_tracker import unregister + sem_unlink(name) + unregister(name, "semaphore") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.__enter__() + + def __exit__(self, *args): + return self._semlock.__exit__(*args) + + def __getstate__(self): + context.assert_spawning(self) + sl = self._semlock + if sys.platform == 'win32': + h = context.get_spawning_popen().duplicate_for_child(sl.handle) + else: + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _multiprocessing.SemLock._rebuild(*state) + util.debug('recreated blocker with handle %r' % state[0]) + self._make_methods() + + @staticmethod + def _make_name(): + return '%s-%s' % (process.current_process()._config['semprefix'], + next(SemLock._rand)) + +# +# Semaphore +# + +class Semaphore(SemLock): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) + + def get_value(self): + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s)>' % (self.__class__.__name__, value) + +# +# Bounded semaphore +# + +class BoundedSemaphore(Semaphore): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s, maxvalue=%s)>' % \ + (self.__class__.__name__, value, self._semlock.maxvalue) + +# +# Non-recursive lock +# + +class Lock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if 
self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + elif self._semlock._get_value() == 1: + name = 'None' + elif self._semlock._count() > 0: + name = 'SomeOtherThread' + else: + name = 'SomeOtherProcess' + except Exception: + name = 'unknown' + return '<%s(owner=%s)>' % (self.__class__.__name__, name) + +# +# Recursive lock +# + +class RLock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = 'None', 0 + elif self._semlock._count() > 0: + name, count = 'SomeOtherThread', 'nonzero' + else: + name, count = 'SomeOtherProcess', 'nonzero' + except Exception: + name, count = 'unknown', 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) + +# +# Condition variable +# + +class Condition(object): + + def __init__(self, lock=None, *, ctx): + self._lock = lock or ctx.RLock() + self._sleeping_count = ctx.Semaphore(0) + self._woken_count = ctx.Semaphore(0) + self._wait_semaphore = ctx.Semaphore(0) + self._make_methods() + + def __getstate__(self): + context.assert_spawning(self) + return (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) + + def __setstate__(self, state): + (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = (self._sleeping_count._semlock._get_value() - + self._woken_count._semlock._get_value()) + except Exception: + num_waiters = 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) + + def wait(self, timeout=None): + assert self._lock._semlock._is_mine(), \ + 'must acquire() condition before using wait()' + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for i in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for i in range(count): + self._lock.acquire() + + def notify(self, n=1): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert not self._wait_semaphore.acquire( + False), ('notify: Should not have been able to acquire ' + + '_wait_semaphore') + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res, ('notify: Bug in sleeping_count.acquire' + + '- res should not be False') + + sleepers = 0 + while sleepers < n and self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for i in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some 
timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def notify_all(self): + self.notify(n=sys.maxsize) + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = getattr(time,'monotonic',time.time)() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - getattr(time,'monotonic',time.time)() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + +# +# Event +# + +class Event(object): + + def __init__(self, *, ctx): + self._cond = ctx.Condition(ctx.Lock()) + self._flag = ctx.Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False + +# +# Barrier +# + +class Barrier(threading.Barrier): + + def __init__(self, parties, action=None, timeout=None, *, ctx): + import struct + from .heap import BufferWrapper + wrapper = BufferWrapper(struct.calcsize('i') * 2) + cond = ctx.Condition() + self.__setstate__((parties, action, timeout, cond, wrapper)) + self._state = 0 + self._count = 0 + + def __setstate__(self, state): + (self._parties, self._action, self._timeout, + self._cond, self._wrapper) = state + self._array = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._parties, self._action, self._timeout, + self._cond, self._wrapper) + + @property + def _state(self): + return self._array[0] + + @_state.setter + def _state(self, value): + self._array[0] = value + + @property + def _count(self): + return self._array[1] + + @_count.setter + def _count(self, value): + self._array[1] = value diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fee074ed217520894521edd6501e0192f57e8cb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__init__.py @@ -0,0 +1,6000 @@ +# +# Unit tests for the multiprocessing package +# + +import unittest +import unittest.mock +import queue as pyqueue +import textwrap +import time +import io +import itertools +import sys +import os +import gc +import errno +import signal +import array +import socket +import random +import logging +import subprocess +import struct +import operator +import pickle #XXX: use dill? +import weakref +import warnings +import test.support +import test.support.script_helper +from test import support +from test.support import hashlib_helper +from test.support import import_helper +from test.support import os_helper +from test.support import socket_helper +from test.support import threading_helper +from test.support import warnings_helper + + +# Skip tests if _multiprocessing wasn't built. +_multiprocessing = import_helper.import_module('_multiprocessing') +# Skip tests if sem_open implementation is broken. 
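+# (importing multiprocess.synchronize raises ImportError on platforms without
+# a usable sem_open(); import_helper.import_module() turns that into a
+# SkipTest so the suite is skipped instead of reported as an error)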
+import_helper.import_module('multiprocess.synchronize') +import threading + +import multiprocess as multiprocessing +import multiprocess.connection +import multiprocess.dummy +import multiprocess.heap +import multiprocess.managers +import multiprocess.pool +import multiprocess.queues + +from multiprocess import util + +try: + from multiprocess import reduction + HAS_REDUCTION = reduction.HAVE_SEND_HANDLE +except ImportError: + HAS_REDUCTION = False + +try: + from multiprocess.sharedctypes import Value, copy + HAS_SHAREDCTYPES = True +except ImportError: + HAS_SHAREDCTYPES = False + +try: + from multiprocess import shared_memory + HAS_SHMEM = True +except ImportError: + HAS_SHMEM = False + +try: + import msvcrt +except ImportError: + msvcrt = None + + +if hasattr(support,'check_sanitizer') and support.check_sanitizer(address=True): + # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to + # work around a libasan race condition: dead lock in pthread_create(). + raise unittest.SkipTest("libasan has a pthread_create() dead lock") + +# Don't ignore user's installed packages +ENV = dict(__cleanenv = False, __isolated = False) + +# Timeout to wait until a process completes #XXX: travis-ci +TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds + +def latin(s): + return s.encode('latin') + + +def close_queue(queue): + if isinstance(queue, multiprocessing.queues.Queue): + queue.close() + queue.join_thread() + + +def join_process(process): + # Since multiprocessing.Process has the same API than threading.Thread + # (join() and is_alive(), the support function can be reused + threading_helper.join_thread(process, timeout=TIMEOUT) + + +if os.name == "posix": + from multiprocess import resource_tracker + + def _resource_unlink(name, rtype): + resource_tracker._CLEANUP_FUNCS[rtype](name) + + +# +# Constants +# + +LOG_LEVEL = util.SUBWARNING +#LOG_LEVEL = logging.DEBUG + +DELTA = 0.1 +CHECK_TIMINGS = False # making true makes tests take a lot longer + # and can sometimes cause some non-serious + # failures because some calls block a bit + # longer than expected +if CHECK_TIMINGS: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 +else: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 + +HAVE_GETVALUE = not getattr(_multiprocessing, + 'HAVE_BROKEN_SEM_GETVALUE', False) + +WIN32 = (sys.platform == "win32") + +from multiprocess.connection import wait + +def wait_for_handle(handle, timeout): + if timeout is not None and timeout < 0.0: + timeout = None + return wait([handle], timeout) + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except: + MAXFD = 256 + +# To speed up tests when using the forkserver, we can preload these: +PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] + +# +# Some tests require ctypes +# + +try: + from ctypes import Structure, c_int, c_double, c_longlong +except ImportError: + Structure = object + c_int = c_double = c_longlong = None + + +def check_enough_semaphores(): + """Check that the system supports enough semaphores to run the test.""" + # minimum number of semaphores available according to POSIX + nsems_min = 256 + try: + nsems = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems == -1 or nsems >= nsems_min: + return + raise unittest.SkipTest("The OS doesn't support enough semaphores " + "to run the test (required: %d)." 
% nsems_min) + + +# +# Creates a wrapper for a function which records the time it takes to finish +# + +class TimingWrapper(object): + + def __init__(self, func): + self.func = func + self.elapsed = None + + def __call__(self, *args, **kwds): + t = time.monotonic() + try: + return self.func(*args, **kwds) + finally: + self.elapsed = time.monotonic() - t + +# +# Base class for test cases +# + +class BaseTestCase(object): + + ALLOWED_TYPES = ('processes', 'manager', 'threads') + + def assertTimingAlmostEqual(self, a, b): + if CHECK_TIMINGS: + self.assertAlmostEqual(a, b, 1) + + def assertReturnsIfImplemented(self, value, func, *args): + try: + res = func(*args) + except NotImplementedError: + pass + else: + return self.assertEqual(value, res) + + # For the sanity of Windows users, rather than crashing or freezing in + # multiple ways. + def __reduce__(self, *args): + raise NotImplementedError("shouldn't try to pickle a test case") + + __reduce_ex__ = __reduce__ + +# +# Return the value of a semaphore +# + +def get_value(self): + try: + return self.get_value() + except AttributeError: + try: + return self._Semaphore__value + except AttributeError: + try: + return self._value + except AttributeError: + raise NotImplementedError + +# +# Testcases +# + +class DummyCallable: + def __call__(self, q, c): + assert isinstance(c, DummyCallable) + q.put(5) + + +class _TestProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_current(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + current = self.current_process() + authkey = current.authkey + + self.assertTrue(current.is_alive()) + self.assertTrue(not current.daemon) + self.assertIsInstance(authkey, bytes) + self.assertTrue(len(authkey) > 0) + self.assertEqual(current.ident, os.getpid()) + self.assertEqual(current.exitcode, None) + + def test_daemon_argument(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # By default uses the current process's daemon flag. + proc0 = self.Process(target=self._test) + self.assertEqual(proc0.daemon, self.current_process().daemon) + proc1 = self.Process(target=self._test, daemon=True) + self.assertTrue(proc1.daemon) + proc2 = self.Process(target=self._test, daemon=False) + self.assertFalse(proc2.daemon) + + @classmethod + def _test(cls, q, *args, **kwds): + current = cls.current_process() + q.put(args) + q.put(kwds) + q.put(current.name) + if cls.TYPE != 'threads': + q.put(bytes(current.authkey)) + q.put(current.pid) + + def test_parent_process_attributes(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + self.assertIsNone(self.parent_process()) + + rconn, wconn = self.Pipe(duplex=False) + p = self.Process(target=self._test_send_parent_process, args=(wconn,)) + p.start() + p.join() + parent_pid, parent_name = rconn.recv() + self.assertEqual(parent_pid, self.current_process().pid) + self.assertEqual(parent_pid, os.getpid()) + self.assertEqual(parent_name, self.current_process().name) + + @classmethod + def _test_send_parent_process(cls, wconn): + from multiprocess.process import parent_process + wconn.send([parent_process().pid, parent_process().name]) + + def _test_parent_process(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # Launch a child process. Make it launch a grandchild process. 
Kill the + # child process and make sure that the grandchild notices the death of + # its parent (a.k.a the child process). + rconn, wconn = self.Pipe(duplex=False) + p = self.Process( + target=self._test_create_grandchild_process, args=(wconn, )) + p.start() + + if not rconn.poll(timeout=support.LONG_TIMEOUT): + raise AssertionError("Could not communicate with child process") + parent_process_status = rconn.recv() + self.assertEqual(parent_process_status, "alive") + + p.terminate() + p.join() + + if not rconn.poll(timeout=support.LONG_TIMEOUT): + raise AssertionError("Could not communicate with child process") + parent_process_status = rconn.recv() + self.assertEqual(parent_process_status, "not alive") + + @classmethod + def _test_create_grandchild_process(cls, wconn): + p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) + p.start() + time.sleep(300) + + @classmethod + def _test_report_parent_status(cls, wconn): + from multiprocess.process import parent_process + wconn.send("alive" if parent_process().is_alive() else "not alive") + parent_process().join(timeout=support.SHORT_TIMEOUT) + wconn.send("alive" if parent_process().is_alive() else "not alive") + + def test_process(self): + q = self.Queue(1) + e = self.Event() + args = (q, 1, 2) + kwargs = {'hello':23, 'bye':2.54} + name = 'SomeProcess' + p = self.Process( + target=self._test, args=args, kwargs=kwargs, name=name + ) + p.daemon = True + current = self.current_process() + + if self.TYPE != 'threads': + self.assertEqual(p.authkey, current.authkey) + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.daemon, True) + self.assertNotIn(p, self.active_children()) + self.assertTrue(type(self.active_children()) is list) + self.assertEqual(p.exitcode, None) + + p.start() + + self.assertEqual(p.exitcode, None) + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + + self.assertEqual(q.get(), args[1:]) + self.assertEqual(q.get(), kwargs) + self.assertEqual(q.get(), p.name) + if self.TYPE != 'threads': + self.assertEqual(q.get(), current.authkey) + self.assertEqual(q.get(), p.pid) + + p.join() + + self.assertEqual(p.exitcode, 0) + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + close_queue(q) + + @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") + def test_process_mainthread_native_id(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + current_mainthread_native_id = threading.main_thread().native_id + + q = self.Queue(1) + p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) + p.start() + + child_mainthread_native_id = q.get() + p.join() + close_queue(q) + + self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) + + @classmethod + def _test_process_mainthread_native_id(cls, q): + mainthread_native_id = threading.main_thread().native_id + q.put(mainthread_native_id) + + @classmethod + def _sleep_some(cls): + time.sleep(100) + + @classmethod + def _test_sleep(cls, delay): + time.sleep(delay) + + def _kill_process(self, meth): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + p = self.Process(target=self._sleep_some) + p.daemon = True + p.start() + + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + self.assertEqual(p.exitcode, None) + + join = TimingWrapper(p.join) + + self.assertEqual(join(0), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + 
self.assertEqual(p.is_alive(), True) + + self.assertEqual(join(-1), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + self.assertEqual(p.is_alive(), True) + + # XXX maybe terminating too soon causes the problems on Gentoo... + time.sleep(1) + + meth(p) + + if hasattr(signal, 'alarm'): + # On the Gentoo buildbot waitpid() often seems to block forever. + # We use alarm() to interrupt it if it blocks for too long. + def handler(*args): + raise RuntimeError('join took too long: %s' % p) + old_handler = signal.signal(signal.SIGALRM, handler) + try: + signal.alarm(10) + self.assertEqual(join(), None) + finally: + signal.alarm(0) + signal.signal(signal.SIGALRM, old_handler) + else: + self.assertEqual(join(), None) + + self.assertTimingAlmostEqual(join.elapsed, 0.0) + + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + + p.join() + + return p.exitcode + + def test_terminate(self): + exitcode = self._kill_process(multiprocessing.Process.terminate) + if os.name != 'nt': + self.assertEqual(exitcode, -signal.SIGTERM) + + def test_kill(self): + exitcode = self._kill_process(multiprocessing.Process.kill) + if os.name != 'nt': + self.assertEqual(exitcode, -signal.SIGKILL) + + def test_cpu_count(self): + try: + cpus = multiprocessing.cpu_count() + except NotImplementedError: + cpus = 1 + self.assertTrue(type(cpus) is int) + self.assertTrue(cpus >= 1) + + def test_active_children(self): + self.assertEqual(type(self.active_children()), list) + + p = self.Process(target=time.sleep, args=(DELTA,)) + self.assertNotIn(p, self.active_children()) + + p.daemon = True + p.start() + self.assertIn(p, self.active_children()) + + p.join() + self.assertNotIn(p, self.active_children()) + + @classmethod + def _test_recursion(cls, wconn, id): + wconn.send(id) + if len(id) < 2: + for i in range(2): + p = cls.Process( + target=cls._test_recursion, args=(wconn, id+[i]) + ) + p.start() + p.join() + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_recursion(self): + rconn, wconn = self.Pipe(duplex=False) + self._test_recursion(wconn, []) + + time.sleep(DELTA) + result = [] + while rconn.poll(): + result.append(rconn.recv()) + + expected = [ + [], + [0], + [0, 0], + [0, 1], + [1], + [1, 0], + [1, 1] + ] + self.assertEqual(result, expected) + + @classmethod + def _test_sentinel(cls, event): + event.wait(10.0) + + def test_sentinel(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + event = self.Event() + p = self.Process(target=self._test_sentinel, args=(event,)) + with self.assertRaises(ValueError): + p.sentinel + p.start() + self.addCleanup(p.join) + sentinel = p.sentinel + self.assertIsInstance(sentinel, int) + self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) + event.set() + p.join() + self.assertTrue(wait_for_handle(sentinel, timeout=1)) + + @classmethod + def _test_close(cls, rc=0, q=None): + if q is not None: + q.get() + sys.exit(rc) + + def test_close(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + q = self.Queue() + p = self.Process(target=self._test_close, kwargs={'q': q}) + p.daemon = True + p.start() + self.assertEqual(p.is_alive(), True) + # Child is still alive, cannot close + with self.assertRaises(ValueError): + p.close() + + q.put(None) + p.join() + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.exitcode, 0) + p.close() + with self.assertRaises(ValueError): + p.is_alive() + with self.assertRaises(ValueError): + p.join() + 
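+        # After close() the Process object has released its resources:
+        # is_alive(), join() and terminate() all raise ValueError (checked
+        # here), while calling close() a second time is a harmless no-op.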
with self.assertRaises(ValueError): + p.terminate() + p.close() + + wr = weakref.ref(p) + del p + gc.collect() + self.assertIs(wr(), None) + + close_queue(q) + + def test_many_processes(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sm = multiprocessing.get_start_method() + travis = os.environ.get('COVERAGE') #XXX: travis-ci + N = (1 if travis else 5) if sm == 'spawn' else 100 + + # Try to overwhelm the forkserver loop with events + procs = [self.Process(target=self._test_sleep, args=(0.01,)) + for i in range(N)] + for p in procs: + p.start() + for p in procs: + join_process(p) + for p in procs: + self.assertEqual(p.exitcode, 0) + + procs = [self.Process(target=self._sleep_some) + for i in range(N)] + for p in procs: + p.start() + time.sleep(0.001) # let the children start... + for p in procs: + p.terminate() + for p in procs: + join_process(p) + if os.name != 'nt': + exitcodes = [-signal.SIGTERM] + if sys.platform == 'darwin': + # bpo-31510: On macOS, killing a freshly started process with + # SIGTERM sometimes kills the process with SIGKILL. + exitcodes.append(-signal.SIGKILL) + for p in procs: + self.assertIn(p.exitcode, exitcodes) + + def test_lose_target_ref(self): + c = DummyCallable() + wr = weakref.ref(c) + q = self.Queue() + p = self.Process(target=c, args=(q, c)) + del c + p.start() + p.join() + gc.collect() # For PyPy or other GCs. + self.assertIs(wr(), None) + self.assertEqual(q.get(), 5) + close_queue(q) + + @classmethod + def _test_child_fd_inflation(self, evt, q): + q.put(os_helper.fd_count()) + evt.wait() + + def test_child_fd_inflation(self): + # Number of fds in child processes should not grow with the + # number of running children. + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sm = multiprocessing.get_start_method() + if sm == 'fork': + # The fork method by design inherits all fds from the parent, + # trying to go against it is a lost battle + self.skipTest('test not appropriate for {}'.format(sm)) + + N = 5 + evt = self.Event() + q = self.Queue() + + procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) + for i in range(N)] + for p in procs: + p.start() + + try: + fd_counts = [q.get() for i in range(N)] + self.assertEqual(len(set(fd_counts)), 1, fd_counts) + + finally: + evt.set() + for p in procs: + p.join() + close_queue(q) + + @classmethod + def _test_wait_for_threads(self, evt): + def func1(): + time.sleep(0.5) + evt.set() + + def func2(): + time.sleep(20) + evt.clear() + + threading.Thread(target=func1).start() + threading.Thread(target=func2, daemon=True).start() + + def test_wait_for_threads(self): + # A child process should wait for non-daemonic threads to end + # before exiting + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + evt = self.Event() + proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + + @classmethod + def _test_error_on_stdio_flush(self, evt, break_std_streams={}): + for stream_name, action in break_std_streams.items(): + if action == 'close': + stream = io.StringIO() + stream.close() + else: + assert action == 'remove' + stream = None + setattr(sys, stream_name, None) + evt.set() + + def test_error_on_stdio_flush_1(self): + # Check that Process works with broken standard streams + streams = [io.StringIO(), None] + streams[0].close() + for stream_name in ('stdout', 'stderr'): + for stream in 
streams: + old_stream = getattr(sys, stream_name) + setattr(sys, stream_name, stream) + try: + evt = self.Event() + proc = self.Process(target=self._test_error_on_stdio_flush, + args=(evt,)) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + self.assertEqual(proc.exitcode, 0) + finally: + setattr(sys, stream_name, old_stream) + + def test_error_on_stdio_flush_2(self): + # Same as test_error_on_stdio_flush_1(), but standard streams are + # broken by the child process + for stream_name in ('stdout', 'stderr'): + for action in ('close', 'remove'): + old_stream = getattr(sys, stream_name) + try: + evt = self.Event() + proc = self.Process(target=self._test_error_on_stdio_flush, + args=(evt, {stream_name: action})) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + self.assertEqual(proc.exitcode, 0) + finally: + setattr(sys, stream_name, old_stream) + + @classmethod + def _sleep_and_set_event(self, evt, delay=0.0): + time.sleep(delay) + evt.set() + + def check_forkserver_death(self, signum): + # bpo-31308: if the forkserver process has died, we should still + # be able to create and run new Process instances (the forkserver + # is implicitly restarted). + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + sm = multiprocessing.get_start_method() + if sm != 'forkserver': + # The fork method by design inherits all fds from the parent, + # trying to go against it is a lost battle + self.skipTest('test not appropriate for {}'.format(sm)) + + from multiprocess.forkserver import _forkserver + _forkserver.ensure_running() + + # First process sleeps 500 ms + delay = 0.5 + + evt = self.Event() + proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) + proc.start() + + pid = _forkserver._forkserver_pid + os.kill(pid, signum) + # give time to the fork server to die and time to proc to complete + time.sleep(delay * 2.0) + + evt2 = self.Event() + proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) + proc2.start() + proc2.join() + self.assertTrue(evt2.is_set()) + self.assertEqual(proc2.exitcode, 0) + + proc.join() + self.assertTrue(evt.is_set()) + self.assertIn(proc.exitcode, (0, 255)) + + def test_forkserver_sigint(self): + # Catchable signal + self.check_forkserver_death(signal.SIGINT) + + def test_forkserver_sigkill(self): + # Uncatchable signal + if os.name != 'nt': + self.check_forkserver_death(signal.SIGKILL) + + +# +# +# + +class _UpperCaser(multiprocessing.Process): + + def __init__(self): + multiprocessing.Process.__init__(self) + self.child_conn, self.parent_conn = multiprocessing.Pipe() + + def run(self): + self.parent_conn.close() + for s in iter(self.child_conn.recv, None): + self.child_conn.send(s.upper()) + self.child_conn.close() + + def submit(self, s): + assert type(s) is str + self.parent_conn.send(s) + return self.parent_conn.recv() + + def stop(self): + self.parent_conn.send(None) + self.parent_conn.close() + self.child_conn.close() + +class _TestSubclassingProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_subclassing(self): + uppercaser = _UpperCaser() + uppercaser.daemon = True + uppercaser.start() + self.assertEqual(uppercaser.submit('hello'), 'HELLO') + self.assertEqual(uppercaser.submit('world'), 'WORLD') + uppercaser.stop() + uppercaser.join() + + def test_stderr_flush(self): + # sys.stderr is flushed at process shutdown (issue #13812) + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + testfn = os_helper.TESTFN + 
self.addCleanup(os_helper.unlink, testfn) + proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) + proc.start() + proc.join() + with open(testfn, encoding="utf-8") as f: + err = f.read() + # The whole traceback was printed + self.assertIn("ZeroDivisionError", err) + self.assertIn("__init__.py", err) + #self.assertIn("1/0 # MARKER", err) #FIXME + + @classmethod + def _test_stderr_flush(cls, testfn): + fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) + sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) + 1/0 # MARKER + + + @classmethod + def _test_sys_exit(cls, reason, testfn): + fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) + sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) + sys.exit(reason) + + def test_sys_exit(self): + # See Issue 13854 + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + testfn = os_helper.TESTFN + self.addCleanup(os_helper.unlink, testfn) + + for reason in ( + [1, 2, 3], + 'ignore this', + ): + p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) + p.daemon = True + p.start() + join_process(p) + self.assertEqual(p.exitcode, 1) + + with open(testfn, encoding="utf-8") as f: + content = f.read() + self.assertEqual(content.rstrip(), str(reason)) + + os.unlink(testfn) + + cases = [ + ((True,), 1), + ((False,), 0), + ((8,), 8), + ((None,), 0), + ((), 0), + ] + + for args, expected in cases: + with self.subTest(args=args): + p = self.Process(target=sys.exit, args=args) + p.daemon = True + p.start() + join_process(p) + self.assertEqual(p.exitcode, expected) + +# +# +# + +def queue_empty(q): + if hasattr(q, 'empty'): + return q.empty() + else: + return q.qsize() == 0 + +def queue_full(q, maxsize): + if hasattr(q, 'full'): + return q.full() + else: + return q.qsize() == maxsize + + +class _TestQueue(BaseTestCase): + + + @classmethod + def _test_put(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + for i in range(6): + queue.get() + parent_can_continue.set() + + def test_put(self): + MAXSIZE = 6 + queue = self.Queue(maxsize=MAXSIZE) + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_put, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + queue.put(1) + queue.put(2, True) + queue.put(3, True, None) + queue.put(4, False) + queue.put(5, False, None) + queue.put_nowait(6) + + # the values may be in buffer but not yet in pipe so sleep a bit + time.sleep(DELTA) + + self.assertEqual(queue_empty(queue), False) + self.assertEqual(queue_full(queue, MAXSIZE), True) + + put = TimingWrapper(queue.put) + put_nowait = TimingWrapper(queue.put_nowait) + + self.assertRaises(pyqueue.Full, put, 7, False) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, False, None) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put_nowait, 7) + self.assertTimingAlmostEqual(put_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) + + child_can_start.set() + parent_can_continue.wait() + 
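+        # the child has now drained all six items, so the queue should
+        # report empty (and no longer full) once more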
+ self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + proc.join() + close_queue(queue) + + @classmethod + def _test_get(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + #queue.put(1) + queue.put(2) + queue.put(3) + queue.put(4) + queue.put(5) + parent_can_continue.set() + + def test_get(self): + queue = self.Queue() + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_get, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + + child_can_start.set() + parent_can_continue.wait() + + time.sleep(DELTA) + self.assertEqual(queue_empty(queue), False) + + # Hangs unexpectedly, remove for now + #self.assertEqual(queue.get(), 1) + self.assertEqual(queue.get(True, None), 2) + self.assertEqual(queue.get(True), 3) + self.assertEqual(queue.get(timeout=1), 4) + self.assertEqual(queue.get_nowait(), 5) + + self.assertEqual(queue_empty(queue), True) + + get = TimingWrapper(queue.get) + get_nowait = TimingWrapper(queue.get_nowait) + + self.assertRaises(pyqueue.Empty, get, False) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, False, None) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get_nowait) + self.assertTimingAlmostEqual(get_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) + + proc.join() + close_queue(queue) + + @classmethod + def _test_fork(cls, queue): + for i in range(10, 20): + queue.put(i) + # note that at this point the items may only be buffered, so the + # process cannot shutdown until the feeder thread has finished + # pushing items onto the pipe. + + def test_fork(self): + # Old versions of Queue would fail to create a new feeder + # thread for a forked process if the original process had its + # own feeder thread. This test checks that this no longer + # happens. 
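+        # (put() only hands items to a lazily started feeder thread, which is
+        # what actually writes them into the underlying pipe; the old bug was
+        # that a forked child kept the parent's feeder state and never started
+        # a feeder of its own on its first put())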
+ + queue = self.Queue() + + # put items on queue so that main process starts a feeder thread + for i in range(10): + queue.put(i) + + # wait to make sure thread starts before we fork a new process + time.sleep(DELTA) + + # fork process + p = self.Process(target=self._test_fork, args=(queue,)) + p.daemon = True + p.start() + + # check that all expected items are in the queue + for i in range(20): + self.assertEqual(queue.get(), i) + self.assertRaises(pyqueue.Empty, queue.get, False) + + p.join() + close_queue(queue) + + def test_qsize(self): + q = self.Queue() + try: + self.assertEqual(q.qsize(), 0) + except NotImplementedError: + self.skipTest('qsize method not implemented') + q.put(1) + self.assertEqual(q.qsize(), 1) + q.put(5) + self.assertEqual(q.qsize(), 2) + q.get() + self.assertEqual(q.qsize(), 1) + q.get() + self.assertEqual(q.qsize(), 0) + close_queue(q) + + @classmethod + def _test_task_done(cls, q): + for obj in iter(q.get, None): + time.sleep(DELTA) + q.task_done() + + def test_task_done(self): + queue = self.JoinableQueue() + + workers = [self.Process(target=self._test_task_done, args=(queue,)) + for i in range(4)] + + for p in workers: + p.daemon = True + p.start() + + for i in range(10): + queue.put(i) + + queue.join() + + for p in workers: + queue.put(None) + + for p in workers: + p.join() + close_queue(queue) + + def test_no_import_lock_contention(self): + with os_helper.temp_cwd(): + module_name = 'imported_by_an_imported_module' + with open(module_name + '.py', 'w', encoding="utf-8") as f: + f.write("""if 1: + import multiprocess as multiprocessing + + q = multiprocessing.Queue() + q.put('knock knock') + q.get(timeout=3) + q.close() + del q + """) + + with import_helper.DirsOnSysPath(os.getcwd()): + try: + __import__(module_name) + except pyqueue.Empty: + self.fail("Probable regression on import lock contention;" + " see Issue #22853") + + def test_timeout(self): + q = multiprocessing.Queue() + start = time.monotonic() + self.assertRaises(pyqueue.Empty, q.get, True, 0.200) + delta = time.monotonic() - start + # bpo-30317: Tolerate a delta of 100 ms because of the bad clock + # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once + # failed because the delta was only 135.8 ms. + self.assertGreaterEqual(delta, 0.100) + close_queue(q) + + def test_queue_feeder_donot_stop_onexc(self): + # bpo-30414: verify feeder handles exceptions correctly + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + class NotSerializable(object): + def __reduce__(self): + raise AttributeError + with test.support.captured_stderr(): + q = self.Queue() + q.put(NotSerializable()) + q.put(True) + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + close_queue(q) + + with test.support.captured_stderr(): + # bpo-33078: verify that the queue size is correctly handled + # on errors. + q = self.Queue(maxsize=1) + q.put(NotSerializable()) + q.put(True) + try: + self.assertEqual(q.qsize(), 1) + except NotImplementedError: + # qsize is not available on all platform as it + # relies on sem_getvalue + pass + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + # Check that the size of the queue is correct + self.assertTrue(q.empty()) + close_queue(q) + + def test_queue_feeder_on_queue_feeder_error(self): + # bpo-30006: verify feeder handles exceptions using the + # _on_queue_feeder_error hook. 
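+        # _on_queue_feeder_error(e, obj) is a staticmethod on
+        # multiprocess.queues.Queue that the feeder thread calls when
+        # serializing obj raises e; the default implementation just reports
+        # the error, and subclasses such as SafeQueue below may override it.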
+ if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + class NotSerializable(object): + """Mock unserializable object""" + def __init__(self): + self.reduce_was_called = False + self.on_queue_feeder_error_was_called = False + + def __reduce__(self): + self.reduce_was_called = True + raise AttributeError + + class SafeQueue(multiprocessing.queues.Queue): + """Queue with overloaded _on_queue_feeder_error hook""" + @staticmethod + def _on_queue_feeder_error(e, obj): + if (isinstance(e, AttributeError) and + isinstance(obj, NotSerializable)): + obj.on_queue_feeder_error_was_called = True + + not_serializable_obj = NotSerializable() + # The captured_stderr reduces the noise in the test report + with test.support.captured_stderr(): + q = SafeQueue(ctx=multiprocessing.get_context()) + q.put(not_serializable_obj) + + # Verify that q is still functioning correctly + q.put(True) + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + + # Assert that the serialization and the hook have been called correctly + self.assertTrue(not_serializable_obj.reduce_was_called) + self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) + + def test_closed_queue_put_get_exceptions(self): + for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): + q.close() + with self.assertRaisesRegex(ValueError, 'is closed'): + q.put('foo') + with self.assertRaisesRegex(ValueError, 'is closed'): + q.get() +# +# +# + +class _TestLock(BaseTestCase): + + def test_lock(self): + lock = self.Lock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(False), False) + self.assertEqual(lock.release(), None) + self.assertRaises((ValueError, threading.ThreadError), lock.release) + + def test_rlock(self): + lock = self.RLock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertRaises((AssertionError, RuntimeError), lock.release) + + def test_lock_context(self): + with self.Lock(): + pass + + +class _TestSemaphore(BaseTestCase): + + def _test_semaphore(self, sem): + self.assertReturnsIfImplemented(2, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.acquire(False), False) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(2, get_value, sem) + + def test_semaphore(self): + sem = self.Semaphore(2) + self._test_semaphore(sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(3, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(4, get_value, sem) + + def test_bounded_semaphore(self): + sem = self.BoundedSemaphore(2) + self._test_semaphore(sem) + # Currently fails on OS/X + #if HAVE_GETVALUE: + # self.assertRaises(ValueError, sem.release) + # self.assertReturnsIfImplemented(2, get_value, sem) + + def test_timeout(self): + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sem = self.Semaphore(0) + acquire = TimingWrapper(sem.acquire) + + self.assertEqual(acquire(False), False) + 
self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, None), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, TIMEOUT1), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0) + + self.assertEqual(acquire(True, TIMEOUT2), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) + + self.assertEqual(acquire(timeout=TIMEOUT3), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) + + +class _TestCondition(BaseTestCase): + + @classmethod + def f(cls, cond, sleeping, woken, timeout=None): + cond.acquire() + sleeping.release() + cond.wait(timeout) + woken.release() + cond.release() + + def assertReachesEventually(self, func, value): + for i in range(10): + try: + if func() == value: + break + except NotImplementedError: + break + time.sleep(DELTA) + time.sleep(DELTA) + self.assertReturnsIfImplemented(value, func) + + def check_invariant(self, cond): + # this is only supposed to succeed when there are no sleepers + if self.TYPE == 'processes': + try: + sleepers = (cond._sleeping_count.get_value() - + cond._woken_count.get_value()) + self.assertEqual(sleepers, 0) + self.assertEqual(cond._wait_semaphore.get_value(), 0) + except NotImplementedError: + pass + + def test_notify(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + # wait for both children to start sleeping + sleeping.acquire() + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake up one process/thread + cond.acquire() + cond.notify() + cond.release() + + # check one process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(1, get_value, woken) + + # wake up another + cond.acquire() + cond.notify() + cond.release() + + # check other has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(2, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + p.join() + + def test_notify_all(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes which will timeout + for i in range(3): + p = self.Process(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them all to sleep + for i in range(6): + sleeping.acquire() + + # check they have all timed out + for i in range(6): + woken.acquire() + self.assertReturnsIfImplemented(0, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + # start some more threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them to all sleep + for i in range(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake them all up + 
cond.acquire() + cond.notify_all() + cond.release() + + # check they have all woken + self.assertReachesEventually(lambda: get_value(woken), 6) + + # check state is not mucked up + self.check_invariant(cond) + + def test_notify_n(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them to all sleep + for i in range(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake some of them up + cond.acquire() + cond.notify(n=2) + cond.release() + + # check 2 have woken + self.assertReachesEventually(lambda: get_value(woken), 2) + + # wake the rest of them + cond.acquire() + cond.notify(n=4) + cond.release() + + self.assertReachesEventually(lambda: get_value(woken), 6) + + # doesn't do anything more + cond.acquire() + cond.notify(n=3) + cond.release() + + self.assertReturnsIfImplemented(6, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + def test_timeout(self): + cond = self.Condition() + wait = TimingWrapper(cond.wait) + cond.acquire() + res = wait(TIMEOUT1) + cond.release() + self.assertEqual(res, False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + @classmethod + def _test_waitfor_f(cls, cond, state): + with cond: + state.value = 0 + cond.notify() + result = cond.wait_for(lambda : state.value==4) + if not result or state.value != 4: + sys.exit(1) + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', -1) + + p = self.Process(target=self._test_waitfor_f, args=(cond, state)) + p.daemon = True + p.start() + + with cond: + result = cond.wait_for(lambda : state.value==0) + self.assertTrue(result) + self.assertEqual(state.value, 0) + + for i in range(4): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + join_process(p) + self.assertEqual(p.exitcode, 0) + + @classmethod + def _test_waitfor_timeout_f(cls, cond, state, success, sem): + sem.release() + with cond: + expected = 0.1 + dt = time.monotonic() + result = cond.wait_for(lambda : state.value==4, timeout=expected) + dt = time.monotonic() - dt + # borrow logic in assertTimeout() from test/lock_tests.py + if not result and expected * 0.6 < dt < expected * 10.0: + success.value = True + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor_timeout(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', 0) + success = self.Value('i', False) + sem = self.Semaphore(0) + + p = self.Process(target=self._test_waitfor_timeout_f, + args=(cond, state, success, sem)) + p.daemon = True + p.start() + self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) + + # Only increment 3 times, so state == 4 is never reached. 
+ for i in range(3): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + join_process(p) + self.assertTrue(success.value) + + @classmethod + def _test_wait_result(cls, c, pid): + with c: + c.notify() + time.sleep(1) + if pid is not None: + os.kill(pid, signal.SIGINT) + + def test_wait_result(self): + if isinstance(self, ProcessesMixin) and sys.platform != 'win32': + pid = os.getpid() + else: + pid = None + + c = self.Condition() + with c: + self.assertFalse(c.wait(0)) + self.assertFalse(c.wait(0.1)) + + p = self.Process(target=self._test_wait_result, args=(c, pid)) + p.start() + + self.assertTrue(c.wait(60)) + if pid is not None: + self.assertRaises(KeyboardInterrupt, c.wait, 60) + + p.join() + + +class _TestEvent(BaseTestCase): + + @classmethod + def _test_event(cls, event): + time.sleep(TIMEOUT2) + event.set() + + def test_event(self): + event = self.Event() + wait = TimingWrapper(event.wait) + + # Removed temporarily, due to API shear, this does not + # work with threading._Event objects. is_set == isSet + self.assertEqual(event.is_set(), False) + + # Removed, threading.Event.wait() will return the value of the __flag + # instead of None. API Shear with the semaphore backed mp.Event + self.assertEqual(wait(0.0), False) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + event.set() + + # See note above on the API differences + self.assertEqual(event.is_set(), True) + self.assertEqual(wait(), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + # self.assertEqual(event.is_set(), True) + + event.clear() + + #self.assertEqual(event.is_set(), False) + + p = self.Process(target=self._test_event, args=(event,)) + p.daemon = True + p.start() + self.assertEqual(wait(), True) + p.join() + +# +# Tests for Barrier - adapted from tests in test/lock_tests.py +# + +# Many of the tests for threading.Barrier use a list as an atomic +# counter: a value is appended to increment the counter, and the +# length of the list gives the value. We use the class DummyList +# for the same purpose. + +class _DummyList(object): + + def __init__(self): + wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) + lock = multiprocessing.Lock() + self.__setstate__((wrapper, lock)) + self._lengthbuf[0] = 0 + + def __setstate__(self, state): + (self._wrapper, self._lock) = state + self._lengthbuf = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._wrapper, self._lock) + + def append(self, _): + with self._lock: + self._lengthbuf[0] += 1 + + def __len__(self): + with self._lock: + return self._lengthbuf[0] + +def _wait(): + # A crude wait/yield function not relying on synchronization primitives. + time.sleep(0.01) + + +class Bunch(object): + """ + A bunch of threads. + """ + def __init__(self, namespace, f, args, n, wait_before_exit=False): + """ + Construct a bunch of `n` threads running the same function `f`. + If `wait_before_exit` is True, the threads won't terminate until + do_finish() is called. 
+ """ + self.f = f + self.args = args + self.n = n + self.started = namespace.DummyList() + self.finished = namespace.DummyList() + self._can_exit = namespace.Event() + if not wait_before_exit: + self._can_exit.set() + + threads = [] + for i in range(n): + p = namespace.Process(target=self.task) + p.daemon = True + p.start() + threads.append(p) + + def finalize(threads): + for p in threads: + p.join() + + self._finalizer = weakref.finalize(self, finalize, threads) + + def task(self): + pid = os.getpid() + self.started.append(pid) + try: + self.f(*self.args) + finally: + self.finished.append(pid) + self._can_exit.wait(30) + assert self._can_exit.is_set() + + def wait_for_started(self): + while len(self.started) < self.n: + _wait() + + def wait_for_finished(self): + while len(self.finished) < self.n: + _wait() + + def do_finish(self): + self._can_exit.set() + + def close(self): + self._finalizer() + + +class AppendTrue(object): + def __init__(self, obj): + self.obj = obj + def __call__(self): + self.obj.append(True) + + +class _TestBarrier(BaseTestCase): + """ + Tests for Barrier objects. + """ + N = 5 + defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout + + def setUp(self): + self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) + + def tearDown(self): + self.barrier.abort() + self.barrier = None + + def DummyList(self): + if self.TYPE == 'threads': + return [] + elif self.TYPE == 'manager': + return self.manager.list() + else: + return _DummyList() + + def run_threads(self, f, args): + b = Bunch(self, f, args, self.N-1) + try: + f(*args) + b.wait_for_finished() + finally: + b.close() + + @classmethod + def multipass(cls, barrier, results, n): + m = barrier.parties + assert m == cls.N + for i in range(n): + results[0].append(True) + assert len(results[1]) == i * m + barrier.wait() + results[1].append(True) + assert len(results[0]) == (i + 1) * m + barrier.wait() + try: + assert barrier.n_waiting == 0 + except NotImplementedError: + pass + assert not barrier.broken + + def test_barrier(self, passes=1): + """ + Test that a barrier is passed in lockstep + """ + results = [self.DummyList(), self.DummyList()] + self.run_threads(self.multipass, (self.barrier, results, passes)) + + def test_barrier_10(self): + """ + Test that a barrier works for 10 consecutive runs + """ + return self.test_barrier(10) + + @classmethod + def _test_wait_return_f(cls, barrier, queue): + res = barrier.wait() + queue.put(res) + + def test_wait_return(self): + """ + test the return value from barrier.wait + """ + queue = self.Queue() + self.run_threads(self._test_wait_return_f, (self.barrier, queue)) + results = [queue.get() for i in range(self.N)] + self.assertEqual(results.count(0), 1) + close_queue(queue) + + @classmethod + def _test_action_f(cls, barrier, results): + barrier.wait() + if len(results) != 1: + raise RuntimeError + + def test_action(self): + """ + Test the 'action' callback + """ + results = self.DummyList() + barrier = self.Barrier(self.N, action=AppendTrue(results)) + self.run_threads(self._test_action_f, (barrier, results)) + self.assertEqual(len(results), 1) + + @classmethod + def _test_abort_f(cls, barrier, results1, results2): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + + def test_abort(self): + """ + Test that an abort will put the barrier in a broken state + """ + results1 = self.DummyList() + 
results2 = self.DummyList() + self.run_threads(self._test_abort_f, + (self.barrier, results1, results2)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertTrue(self.barrier.broken) + + @classmethod + def _test_reset_f(cls, barrier, results1, results2, results3): + i = barrier.wait() + if i == cls.N//2: + # Wait until the other threads are all in the barrier. + while barrier.n_waiting < cls.N-1: + time.sleep(0.001) + barrier.reset() + else: + try: + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + # Now, pass the barrier again + barrier.wait() + results3.append(True) + + def test_reset(self): + """ + Test that a 'reset' on a barrier frees the waiting threads + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + self.run_threads(self._test_reset_f, + (self.barrier, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_abort_and_reset_f(cls, barrier, barrier2, + results1, results2, results3): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + # Synchronize and reset the barrier. Must synchronize first so + # that everyone has left it when we reset, and after so that no + # one enters it before the reset. + if barrier2.wait() == cls.N//2: + barrier.reset() + barrier2.wait() + barrier.wait() + results3.append(True) + + def test_abort_and_reset(self): + """ + Test that a barrier can be reset after being broken. + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + barrier2 = self.Barrier(self.N) + + self.run_threads(self._test_abort_and_reset_f, + (self.barrier, barrier2, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_timeout_f(cls, barrier, results): + i = barrier.wait() + if i == cls.N//2: + # One thread is late! 
+ time.sleep(1.0) + try: + barrier.wait(0.5) + except threading.BrokenBarrierError: + results.append(True) + + def test_timeout(self): + """ + Test wait(timeout) + """ + results = self.DummyList() + self.run_threads(self._test_timeout_f, (self.barrier, results)) + self.assertEqual(len(results), self.barrier.parties) + + @classmethod + def _test_default_timeout_f(cls, barrier, results): + i = barrier.wait(cls.defaultTimeout) + if i == cls.N//2: + # One thread is later than the default timeout + time.sleep(1.0) + try: + barrier.wait() + except threading.BrokenBarrierError: + results.append(True) + + def test_default_timeout(self): + """ + Test the barrier's default timeout + """ + barrier = self.Barrier(self.N, timeout=0.5) + results = self.DummyList() + self.run_threads(self._test_default_timeout_f, (barrier, results)) + self.assertEqual(len(results), barrier.parties) + + def test_single_thread(self): + b = self.Barrier(1) + b.wait() + b.wait() + + @classmethod + def _test_thousand_f(cls, barrier, passes, conn, lock): + for i in range(passes): + barrier.wait() + with lock: + conn.send(i) + + def test_thousand(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + passes = 1000 + lock = self.Lock() + conn, child_conn = self.Pipe(False) + for j in range(self.N): + p = self.Process(target=self._test_thousand_f, + args=(self.barrier, passes, child_conn, lock)) + p.start() + self.addCleanup(p.join) + + for i in range(passes): + for j in range(self.N): + self.assertEqual(conn.recv(), i) + +# +# +# + +class _TestValue(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + codes_values = [ + ('i', 4343, 24234), + ('d', 3.625, -4.25), + ('h', -232, 234), + ('q', 2 ** 33, 2 ** 34), + ('c', latin('x'), latin('y')) + ] + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocess.sharedctypes") + + @classmethod + def _test(cls, values): + for sv, cv in zip(values, cls.codes_values): + sv.value = cv[2] + + + def test_value(self, raw=False): + if raw: + values = [self.RawValue(code, value) + for code, value, _ in self.codes_values] + else: + values = [self.Value(code, value) + for code, value, _ in self.codes_values] + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[1]) + + proc = self.Process(target=self._test, args=(values,)) + proc.daemon = True + proc.start() + proc.join() + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[2]) + + def test_rawvalue(self): + self.test_value(raw=True) + + def test_getobj_getlock(self): + val1 = self.Value('i', 5) + lock1 = val1.get_lock() + obj1 = val1.get_obj() + + val2 = self.Value('i', 5, lock=None) + lock2 = val2.get_lock() + obj2 = val2.get_obj() + + lock = self.Lock() + val3 = self.Value('i', 5, lock=lock) + lock3 = val3.get_lock() + obj3 = val3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Value('i', 5, lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + + self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') + + arr5 = self.RawValue('i', 5) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + + +class _TestArray(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def f(cls, seq): + for i in range(1, len(seq)): + seq[i] += seq[i-1] + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array(self, raw=False): + seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] + if raw: + arr 
= self.RawArray('i', seq) + else: + arr = self.Array('i', seq) + + self.assertEqual(len(arr), len(seq)) + self.assertEqual(arr[3], seq[3]) + self.assertEqual(list(arr[2:7]), list(seq[2:7])) + + arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) + + self.assertEqual(list(arr[:]), seq) + + self.f(seq) + + p = self.Process(target=self.f, args=(arr,)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(list(arr[:]), seq) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array_from_size(self): + size = 10 + # Test for zeroing (see issue #11675). + # The repetition below strengthens the test by increasing the chances + # of previously allocated non-zero memory being used for the new array + # on the 2nd and 3rd loops. + for _ in range(3): + arr = self.Array('i', size) + self.assertEqual(len(arr), size) + self.assertEqual(list(arr), [0] * size) + arr[:] = range(10) + self.assertEqual(list(arr), list(range(10))) + del arr + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_rawarray(self): + self.test_array(raw=True) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_getobj_getlock_obj(self): + arr1 = self.Array('i', list(range(10))) + lock1 = arr1.get_lock() + obj1 = arr1.get_obj() + + arr2 = self.Array('i', list(range(10)), lock=None) + lock2 = arr2.get_lock() + obj2 = arr2.get_obj() + + lock = self.Lock() + arr3 = self.Array('i', list(range(10)), lock=lock) + lock3 = arr3.get_lock() + obj3 = arr3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Array('i', range(10), lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + self.assertRaises(AttributeError, + self.Array, 'i', range(10), lock='notalock') + + arr5 = self.RawArray('i', range(10)) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + +# +# +# + +class _TestContainers(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_list(self): + a = self.list(list(range(10))) + self.assertEqual(a[:], list(range(10))) + + b = self.list() + self.assertEqual(b[:], []) + + b.extend(list(range(5))) + self.assertEqual(b[:], list(range(5))) + + self.assertEqual(b[2], 2) + self.assertEqual(b[2:10], [2,3,4]) + + b *= 2 + self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) + + self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) + + self.assertEqual(a[:], list(range(10))) + + d = [a, b] + e = self.list(d) + self.assertEqual( + [element[:] for element in e], + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] + ) + + f = self.list([a]) + a.append('hello') + self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) + + def test_list_iter(self): + a = self.list(list(range(10))) + it = iter(a) + self.assertEqual(list(it), list(range(10))) + self.assertEqual(list(it), []) # exhausted + # list modified during iteration + it = iter(a) + a[0] = 100 + self.assertEqual(next(it), 100) + + def test_list_proxy_in_list(self): + a = self.list([self.list(range(3)) for _i in range(3)]) + self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) + + a[0][-1] = 55 + self.assertEqual(a[0][:], [0, 1, 55]) + for i in range(1, 3): + self.assertEqual(a[i][:], [0, 1, 2]) + + self.assertEqual(a[1].pop(), 2) + self.assertEqual(len(a[1]), 2) + for i in range(0, 3, 2): + self.assertEqual(len(a[i]), 3) + + del a + + b = self.list() + b.append(b) + del b + + def test_dict(self): + d = self.dict() + indices = list(range(65, 70)) + for i in indices: + d[i] = chr(i) + 
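
The _TestValue and _TestArray cases above exercise the shared ctypes wrappers (Value, RawValue, Array, RawArray). As a minimal sketch of the same round trip, written against the stock multiprocessing module (the multiprocess fork under test mirrors this API; the double helper name is illustrative):

    from multiprocessing import Process, Value, Array

    def double(val, arr):
        # Both objects live in shared memory, so the parent sees the writes.
        with val.get_lock():              # Value/Array carry a lock unless lock=False
            val.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    if __name__ == "__main__":
        val = Value("i", 7)                   # shared C int
        arr = Array("d", [0.0, 1.0, 2.0])     # shared C double array
        p = Process(target=double, args=(val, arr))
        p.start()
        p.join()
        print(val.value, list(arr))           # 14 [0.0, 2.0, 4.0]
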
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) + self.assertEqual(sorted(d.keys()), indices) + self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) + self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) + + def test_dict_iter(self): + d = self.dict() + indices = list(range(65, 70)) + for i in indices: + d[i] = chr(i) + it = iter(d) + self.assertEqual(list(it), indices) + self.assertEqual(list(it), []) # exhausted + # dictionary changed size during iteration + it = iter(d) + d.clear() + self.assertRaises(RuntimeError, next, it) + + def test_dict_proxy_nested(self): + pets = self.dict(ferrets=2, hamsters=4) + supplies = self.dict(water=10, feed=3) + d = self.dict(pets=pets, supplies=supplies) + + self.assertEqual(supplies['water'], 10) + self.assertEqual(d['supplies']['water'], 10) + + d['supplies']['blankets'] = 5 + self.assertEqual(supplies['blankets'], 5) + self.assertEqual(d['supplies']['blankets'], 5) + + d['supplies']['water'] = 7 + self.assertEqual(supplies['water'], 7) + self.assertEqual(d['supplies']['water'], 7) + + del pets + del supplies + self.assertEqual(d['pets']['ferrets'], 2) + d['supplies']['blankets'] = 11 + self.assertEqual(d['supplies']['blankets'], 11) + + pets = d['pets'] + supplies = d['supplies'] + supplies['water'] = 7 + self.assertEqual(supplies['water'], 7) + self.assertEqual(d['supplies']['water'], 7) + + d.clear() + self.assertEqual(len(d), 0) + self.assertEqual(supplies['water'], 7) + self.assertEqual(pets['hamsters'], 4) + + l = self.list([pets, supplies]) + l[0]['marmots'] = 1 + self.assertEqual(pets['marmots'], 1) + self.assertEqual(l[0]['marmots'], 1) + + del pets + del supplies + self.assertEqual(l[0]['marmots'], 1) + + outer = self.list([[88, 99], l]) + self.assertIsInstance(outer[0], list) # Not a ListProxy + self.assertEqual(outer[-1][-1]['feed'], 3) + + def test_nested_queue(self): + a = self.list() # Test queue inside list + a.append(self.Queue()) + a[0].put(123) + self.assertEqual(a[0].get(), 123) + b = self.dict() # Test queue inside dict + b[0] = self.Queue() + b[0].put(456) + self.assertEqual(b[0].get(), 456) + + def test_namespace(self): + n = self.Namespace() + n.name = 'Bob' + n.job = 'Builder' + n._hidden = 'hidden' + self.assertEqual((n.name, n.job), ('Bob', 'Builder')) + del n.job + self.assertEqual(str(n), "Namespace(name='Bob')") + self.assertTrue(hasattr(n, 'name')) + self.assertTrue(not hasattr(n, 'job')) + +# +# +# + +def sqr(x, wait=0.0): + time.sleep(wait) + return x*x + +def mul(x, y): + return x*y + +def raise_large_valuerror(wait): + time.sleep(wait) + raise ValueError("x" * 1024**2) + +def identity(x): + return x + +class CountedObject(object): + n_instances = 0 + + def __new__(cls): + cls.n_instances += 1 + return object.__new__(cls) + + def __del__(self): + type(self).n_instances -= 1 + +class SayWhenError(ValueError): pass + +def exception_throwing_generator(total, when): + if when == -1: + raise SayWhenError("Somebody said when") + for i in range(total): + if i == when: + raise SayWhenError("Somebody said when") + yield i + + +class _TestPool(BaseTestCase): + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.pool = cls.Pool(4) + + @classmethod + def tearDownClass(cls): + cls.pool.terminate() + cls.pool.join() + cls.pool = None + super().tearDownClass() + + def test_apply(self): + papply = self.pool.apply + self.assertEqual(papply(sqr, (5,)), sqr(5)) + self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) + + def test_map(self): + pmap = self.pool.map + 
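
_TestContainers drives the list/dict/Namespace proxies served by a manager process. A minimal sketch of a manager-backed dict shared with worker processes, again assuming the stock multiprocessing API (the record helper is illustrative):

    from multiprocessing import Manager, Process

    def record(shared, key, value):
        # The proxy forwards this mutation to the manager's server process.
        shared[key] = value

    if __name__ == "__main__":
        with Manager() as manager:
            shared = manager.dict()
            procs = [Process(target=record, args=(shared, i, chr(65 + i)))
                     for i in range(5)]
            for p in procs:
                p.start()
            for p in procs:
                p.join()
            print(dict(shared))   # {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E'}
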
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) + self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), + list(map(sqr, list(range(100))))) + + def test_starmap(self): + psmap = self.pool.starmap + tuples = list(zip(range(10), range(9,-1, -1))) + self.assertEqual(psmap(mul, tuples), + list(itertools.starmap(mul, tuples))) + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(psmap(mul, tuples, chunksize=20), + list(itertools.starmap(mul, tuples))) + + def test_starmap_async(self): + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(self.pool.starmap_async(mul, tuples).get(), + list(itertools.starmap(mul, tuples))) + + def test_map_async(self): + self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), + list(map(sqr, list(range(10))))) + + def test_map_async_callbacks(self): + call_args = self.manager.list() if self.TYPE == 'manager' else [] + self.pool.map_async(int, ['1'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(1, len(call_args)) + self.assertEqual([1], call_args[0]) + self.pool.map_async(int, ['a'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(2, len(call_args)) + self.assertIsInstance(call_args[1], ValueError) + + def test_map_unplicklable(self): + # Issue #19425 -- failure to pickle should not cause a hang + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + class A(object): + def __reduce__(self): + raise RuntimeError('cannot pickle') + with self.assertRaises(RuntimeError): + self.pool.map(sqr, [A()]*10) + + def test_map_chunksize(self): + try: + self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) + except multiprocessing.TimeoutError: + self.fail("pool.map_async with chunksize stalled on null list") + + def test_map_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(1, -1), 1) + # again, make sure it's reentrant + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(1, -1), 1) + + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(10, 3), 1) + + class SpecialIterable: + def __iter__(self): + return self + def __next__(self): + raise SayWhenError + def __len__(self): + return 1 + with self.assertRaises(SayWhenError): + self.pool.map(sqr, SpecialIterable(), 1) + with self.assertRaises(SayWhenError): + self.pool.map(sqr, SpecialIterable(), 1) + + def test_async(self): + res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) + get = TimingWrapper(res.get) + self.assertEqual(get(), 49) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + def test_async_timeout(self): + res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) + get = TimingWrapper(res.get) + self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) + + def test_imap(self): + it = self.pool.imap(sqr, list(range(10))) + self.assertEqual(list(it), list(map(sqr, list(range(10))))) + + it = self.pool.imap(sqr, list(range(10))) + for i in range(10): + self.assertEqual(next(it), i*i) + self.assertRaises(StopIteration, it.__next__) + + it = self.pool.imap(sqr, list(range(1000)), chunksize=100) + for i in range(1000): + self.assertEqual(next(it), i*i) + 
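
_TestPool pushes the module-level sqr and mul helpers through apply, map, starmap, map_async and the imap variants. A compact sketch of those entry points, with simplified versions of the same helpers:

    from multiprocessing import Pool

    def sqr(x):
        return x * x

    def mul(x, y):
        return x * y

    if __name__ == "__main__":
        with Pool(4) as pool:
            print(pool.apply(sqr, (5,)))                         # 25
            print(pool.map(sqr, range(10)))                      # ordered results
            print(sorted(pool.imap_unordered(sqr, range(10))))   # order not guaranteed
            print(pool.starmap(mul, zip(range(5), range(5))))    # [0, 1, 4, 9, 16]
            async_result = pool.map_async(sqr, range(10))
            print(async_result.get(timeout=10))                  # same as map()
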
self.assertRaises(StopIteration, it.__next__) + + def test_imap_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) + self.assertRaises(SayWhenError, it.__next__) + # again, make sure it's reentrant + it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) + self.assertRaises(SayWhenError, it.__next__) + + it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) + for i in range(3): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + + # SayWhenError seen at start of problematic chunk's results + it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) + for i in range(6): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) + for i in range(4): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + + def test_imap_unordered(self): + it = self.pool.imap_unordered(sqr, list(range(10))) + self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) + + it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) + self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) + + def test_imap_unordered_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(1, -1), + 1) + self.assertRaises(SayWhenError, it.__next__) + # again, make sure it's reentrant + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(1, -1), + 1) + self.assertRaises(SayWhenError, it.__next__) + + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(10, 3), + 1) + expected_values = list(map(sqr, list(range(10)))) + with self.assertRaises(SayWhenError): + # imap_unordered makes it difficult to anticipate the SayWhenError + for i in range(10): + value = next(it) + self.assertIn(value, expected_values) + expected_values.remove(value) + + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(20, 7), + 2) + expected_values = list(map(sqr, list(range(20)))) + with self.assertRaises(SayWhenError): + for i in range(20): + value = next(it) + self.assertIn(value, expected_values) + expected_values.remove(value) + + def test_make_pool(self): + expected_error = (RemoteError if self.TYPE == 'manager' + else ValueError) + + self.assertRaises(expected_error, self.Pool, -1) + self.assertRaises(expected_error, self.Pool, 0) + + if self.TYPE != 'manager': + p = self.Pool(3) + try: + self.assertEqual(3, len(p._pool)) + finally: + p.close() + p.join() + + def test_terminate(self): + result = self.pool.map_async( + time.sleep, [0.1 for i in range(10000)], chunksize=1 + ) + self.pool.terminate() + join = TimingWrapper(self.pool.join) + join() + # Sanity check the pool didn't wait for all tasks to finish + self.assertLess(join.elapsed, 2.0) + + def test_empty_iterable(self): + # See Issue 12157 + p = self.Pool(1) + + self.assertEqual(p.map(sqr, []), []) + self.assertEqual(list(p.imap(sqr, [])), []) + self.assertEqual(list(p.imap_unordered(sqr, [])), []) + self.assertEqual(p.map_async(sqr, []).get(), []) + + p.close() + p.join() + + def test_context(self): + if self.TYPE == 'processes': + L = list(range(10)) + expected = [sqr(i) 
for i in L] + with self.Pool(2) as p: + r = p.map_async(sqr, L) + self.assertEqual(r.get(), expected) + p.join() + self.assertRaises(ValueError, p.map_async, sqr, L) + + @classmethod + def _test_traceback(cls): + raise RuntimeError(123) # some comment + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_traceback(self): + # We want ensure that the traceback from the child process is + # contained in the traceback raised in the main process. + if self.TYPE == 'processes': + with self.Pool(1) as p: + try: + p.apply(self._test_traceback) + except Exception as e: + exc = e + else: + self.fail('expected RuntimeError') + p.join() + self.assertIs(type(exc), RuntimeError) + self.assertEqual(exc.args, (123,)) + cause = exc.__cause__ + self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) + self.assertIn('raise RuntimeError(123) # some comment', cause.tb) + + with test.support.captured_stderr() as f1: + try: + raise exc + except RuntimeError: + sys.excepthook(*sys.exc_info()) + self.assertIn('raise RuntimeError(123) # some comment', + f1.getvalue()) + # _helper_reraises_exception should not make the error + # a remote exception + with self.Pool(1) as p: + try: + p.map(sqr, exception_throwing_generator(1, -1), 1) + except Exception as e: + exc = e + else: + self.fail('expected SayWhenError') + self.assertIs(type(exc), SayWhenError) + self.assertIs(exc.__cause__, None) + p.join() + + @classmethod + def _test_wrapped_exception(cls): + raise RuntimeError('foo') + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_wrapped_exception(self): + # Issue #20980: Should not wrap exception when using thread pool + with self.Pool(1) as p: + with self.assertRaises(RuntimeError): + p.apply(self._test_wrapped_exception) + p.join() + + def test_map_no_failfast(self): + # Issue #23992: the fail-fast behaviour when an exception is raised + # during map() would make Pool.join() deadlock, because a worker + # process would fill the result queue (after the result handler thread + # terminated, hence not draining it anymore). + + t_start = time.monotonic() + + with self.assertRaises(ValueError): + with self.Pool(2) as p: + try: + p.map(raise_large_valuerror, [0, 1]) + finally: + time.sleep(0.5) + p.close() + p.join() + + # check that we indeed waited for all jobs + self.assertGreater(time.monotonic() - t_start, 0.9) + + def test_release_task_refs(self): + # Issue #29861: task arguments and results should not be kept + # alive after we are done with them. + objs = [CountedObject() for i in range(10)] + refs = [weakref.ref(o) for o in objs] + self.pool.map(identity, objs) + + del objs + gc.collect() # For PyPy or other GCs. + time.sleep(DELTA) # let threaded cleanup code run + self.assertEqual(set(wr() for wr in refs), {None}) + # With a process pool, copies of the objects are returned, check + # they were released too. 
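
test_traceback above asserts that an exception raised in a worker is re-raised in the parent with the child's formatted traceback attached as __cause__. A sketch of what that looks like from user code, assuming the stock multiprocessing module (boom is an illustrative helper):

    import multiprocessing
    import multiprocessing.pool

    def boom():
        raise RuntimeError(123)

    if __name__ == "__main__":
        with multiprocessing.Pool(1) as pool:
            try:
                pool.apply(boom)
            except RuntimeError as exc:
                # The original RuntimeError surfaces in the parent; the child's
                # formatted traceback travels along as a RemoteTraceback cause.
                print(type(exc).__name__, exc.args)                      # RuntimeError (123,)
                print(isinstance(exc.__cause__,
                                 multiprocessing.pool.RemoteTraceback))  # True
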
+ self.assertEqual(CountedObject.n_instances, 0) + + def test_enter(self): + if self.TYPE == 'manager': + self.skipTest("test not applicable to manager") + + pool = self.Pool(1) + with pool: + pass + # call pool.terminate() + # pool is no longer running + + with self.assertRaises(ValueError): + # bpo-35477: pool.__enter__() fails if the pool is not running + with pool: + pass + pool.join() + + def test_resource_warning(self): + if self.TYPE == 'manager': + self.skipTest("test not applicable to manager") + + pool = self.Pool(1) + pool.terminate() + pool.join() + + # force state to RUN to emit ResourceWarning in __del__() + pool._state = multiprocessing.pool.RUN + + with warnings_helper.check_warnings( + ('unclosed running multiprocessing pool', ResourceWarning)): + pool = None + support.gc_collect() + +def raising(): + raise KeyError("key") + +def unpickleable_result(): + return lambda: 42 + +class _TestPoolWorkerErrors(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_async_error_callback(self): + p = multiprocessing.Pool(2) + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(raising, error_callback=errback) + self.assertRaises(KeyError, res.get) + self.assertTrue(scratchpad[0]) + self.assertIsInstance(scratchpad[0], KeyError) + + p.close() + p.join() + + def _test_unpickleable_result(self): + from multiprocess.pool import MaybeEncodingError + p = multiprocessing.Pool(2) + + # Make sure we don't lose pool processes because of encoding errors. + for iteration in range(20): + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(unpickleable_result, error_callback=errback) + self.assertRaises(MaybeEncodingError, res.get) + wrapped = scratchpad[0] + self.assertTrue(wrapped) + self.assertIsInstance(scratchpad[0], MaybeEncodingError) + self.assertIsNotNone(wrapped.exc) + self.assertIsNotNone(wrapped.value) + + p.close() + p.join() + +class _TestPoolWorkerLifetime(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_pool_worker_lifetime(self): + p = multiprocessing.Pool(3, maxtasksperchild=10) + self.assertEqual(3, len(p._pool)) + origworkerpids = [w.pid for w in p._pool] + # Run many tasks so each worker gets replaced (hopefully) + results = [] + for i in range(100): + results.append(p.apply_async(sqr, (i, ))) + # Fetch the results and verify we got the right answers, + # also ensuring all the tasks have completed. + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + # Refill the pool + p._repopulate_pool() + # Wait until all workers are alive + # (countdown * DELTA = 5 seconds max startup process time) + countdown = 50 + while countdown and not all(w.is_alive() for w in p._pool): + countdown -= 1 + time.sleep(DELTA) + finalworkerpids = [w.pid for w in p._pool] + # All pids should be assigned. See issue #7805. + self.assertNotIn(None, origworkerpids) + self.assertNotIn(None, finalworkerpids) + # Finally, check that the worker pids have changed + self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) + p.close() + p.join() + + def test_pool_worker_lifetime_early_close(self): + # Issue #10332: closing a pool whose workers have limited lifetimes + # before all the tasks completed would make join() hang. 
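
_TestPoolWorkerErrors checks that error_callback receives the exception raised by the worker and that get() re-raises it in the parent. A minimal sketch of that flow:

    from multiprocessing import Pool

    def raising():
        raise KeyError("key")

    if __name__ == "__main__":
        seen = []
        with Pool(2) as pool:
            result = pool.apply_async(raising, error_callback=seen.append)
            try:
                result.get(timeout=10)
            except KeyError as exc:
                print("get() re-raised:", repr(exc))
        # The callback runs in the parent's result-handler thread before get() returns.
        print("error_callback saw:", seen)   # [KeyError('key')]
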
+ p = multiprocessing.Pool(3, maxtasksperchild=1) + results = [] + for i in range(6): + results.append(p.apply_async(sqr, (i, 0.3))) + p.close() + p.join() + # check the results + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + + def test_pool_maxtasksperchild_invalid(self): + for value in [0, -1, 0.5, "12"]: + with self.assertRaises(ValueError): + multiprocessing.Pool(3, maxtasksperchild=value) + + def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): + # tests cases against bpo-38744 and bpo-39360 + cmd = '''if 1: + from multiprocess import Pool + problem = None + class A: + def __init__(self): + self.pool = Pool(processes=1) + def test(): + global problem + problem = A() + problem.pool.map(float, tuple(range(10))) + if __name__ == "__main__": + test() + ''' + rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) + self.assertEqual(rc, 0) + +# +# Test of creating a customized manager class +# + +from multiprocess.managers import BaseManager, BaseProxy, RemoteError + +class FooBar(object): + def f(self): + return 'f()' + def g(self): + raise ValueError + def _h(self): + return '_h()' + +def baz(): + for i in range(10): + yield i*i + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__',) + def __iter__(self): + return self + def __next__(self): + return self._callmethod('__next__') + +class MyManager(BaseManager): + pass + +MyManager.register('Foo', callable=FooBar) +MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) +MyManager.register('baz', callable=baz, proxytype=IteratorProxy) + + +class _TestMyManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_mymanager(self): + manager = MyManager() + manager.start() + self.common(manager) + manager.shutdown() + + # bpo-30356: BaseManager._finalize_manager() sends SIGTERM + # to the manager process if it takes longer than 1 second to stop, + # which happens on slow buildbots. + self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) + + def test_mymanager_context(self): + with MyManager() as manager: + self.common(manager) + # bpo-30356: BaseManager._finalize_manager() sends SIGTERM + # to the manager process if it takes longer than 1 second to stop, + # which happens on slow buildbots. 
+ self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) + + def test_mymanager_context_prestarted(self): + manager = MyManager() + manager.start() + with manager: + self.common(manager) + self.assertEqual(manager._process.exitcode, 0) + + def common(self, manager): + foo = manager.Foo() + bar = manager.Bar() + baz = manager.baz() + + foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] + bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] + + self.assertEqual(foo_methods, ['f', 'g']) + self.assertEqual(bar_methods, ['f', '_h']) + + self.assertEqual(foo.f(), 'f()') + self.assertRaises(ValueError, foo.g) + self.assertEqual(foo._callmethod('f'), 'f()') + self.assertRaises(RemoteError, foo._callmethod, '_h') + + self.assertEqual(bar.f(), 'f()') + self.assertEqual(bar._h(), '_h()') + self.assertEqual(bar._callmethod('f'), 'f()') + self.assertEqual(bar._callmethod('_h'), '_h()') + + self.assertEqual(list(baz), [i*i for i in range(10)]) + + +# +# Test of connecting to a remote server and using xmlrpclib for serialization +# + +_queue = pyqueue.Queue() +def get_queue(): + return _queue + +class QueueManager(BaseManager): + '''manager class used by server process''' +QueueManager.register('get_queue', callable=get_queue) + +class QueueManager2(BaseManager): + '''manager class which specifies the same interface as QueueManager''' +QueueManager2.register('get_queue') + + +SERIALIZER = 'xmlrpclib' + +class _TestRemoteManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + values = ['hello world', None, True, 2.25, + 'hall\xe5 v\xe4rlden', + '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', + b'hall\xe5 v\xe4rlden', + ] + result = values[:] + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager2( + address=address, authkey=authkey, serializer=SERIALIZER + ) + manager.connect() + queue = manager.get_queue() + # Note that xmlrpclib will deserialize object as a list not a tuple + queue.put(tuple(cls.values)) + + def test_remote(self): + authkey = os.urandom(32) + + manager = QueueManager( + address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER + ) + manager.start() + self.addCleanup(manager.shutdown) + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.daemon = True + p.start() + + manager2 = QueueManager2( + address=manager.address, authkey=authkey, serializer=SERIALIZER + ) + manager2.connect() + queue = manager2.get_queue() + + self.assertEqual(queue.get(), self.result) + + # Because we are using xmlrpclib for serialization instead of + # pickle this will cause a serialization error. + self.assertRaises(Exception, queue.put, time.sleep) + + # Make queue finalizer run before the server is stopped + del queue + + +@hashlib_helper.requires_hashdigest('md5') +class _TestManagerRestart(BaseTestCase): + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager( + address=address, authkey=authkey, serializer=SERIALIZER) + manager.connect() + queue = manager.get_queue() + queue.put('hello world') + + def test_rapid_restart(self): + authkey = os.urandom(32) + manager = QueueManager( + address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) + try: + srvr = manager.get_server() + addr = srvr.address + # Close the connection.Listener socket which gets opened as a part + # of manager.get_server(). It's not needed for the test. 
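
_TestMyManager registers custom types on a BaseManager subclass and restricts what the resulting proxies expose. A stripped-down sketch of the same registration pattern; the Counter class and CounterManager name are illustrative:

    from multiprocessing.managers import BaseManager

    class Counter:
        def __init__(self):
            self._n = 0
        def increment(self):
            self._n += 1
            return self._n
        def value(self):
            return self._n

    class CounterManager(BaseManager):
        pass

    # Only the listed methods are reachable through the proxy.
    CounterManager.register("Counter", callable=Counter,
                            exposed=("increment", "value"))

    if __name__ == "__main__":
        with CounterManager() as manager:
            counter = manager.Counter()   # proxy to an object in the server process
            counter.increment()
            counter.increment()
            print(counter.value())        # 2
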
+ srvr.listener.close() + manager.start() + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.start() + p.join() + queue = manager.get_queue() + self.assertEqual(queue.get(), 'hello world') + del queue + finally: + if hasattr(manager, "shutdown"): + manager.shutdown() + + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + try: + manager.start() + self.addCleanup(manager.shutdown) + except OSError as e: + if e.errno != errno.EADDRINUSE: + raise + # Retry after some time, in case the old socket was lingering + # (sporadic failure on buildbots) + time.sleep(1.0) + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + if hasattr(manager, "shutdown"): + self.addCleanup(manager.shutdown) + +# +# +# + +SENTINEL = latin('') + +class _TestConnection(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _echo(cls, conn): + for msg in iter(conn.recv_bytes, SENTINEL): + conn.send_bytes(msg) + conn.close() + + def test_connection(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + + seq = [1, 2.25, None] + msg = latin('hello world') + longmsg = msg * 10 + arr = array.array('i', list(range(4))) + + if self.TYPE == 'processes': + self.assertEqual(type(conn.fileno()), int) + + self.assertEqual(conn.send(seq), None) + self.assertEqual(conn.recv(), seq) + + self.assertEqual(conn.send_bytes(msg), None) + self.assertEqual(conn.recv_bytes(), msg) + + if self.TYPE == 'processes': + buffer = array.array('i', [0]*10) + expected = list(arr) + [0] * (10 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = array.array('i', [0]*10) + expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = bytearray(latin(' ' * 40)) + self.assertEqual(conn.send_bytes(longmsg), None) + try: + res = conn.recv_bytes_into(buffer) + except multiprocessing.BufferTooShort as e: + self.assertEqual(e.args, (longmsg,)) + else: + self.fail('expected BufferTooShort, got %s' % res) + + poll = TimingWrapper(conn.poll) + + self.assertEqual(poll(), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(-1), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(TIMEOUT1), False) + self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) + + conn.send(None) + time.sleep(.1) + + self.assertEqual(poll(TIMEOUT1), True) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(conn.recv(), None) + + really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb + conn.send_bytes(really_big_msg) + self.assertEqual(conn.recv_bytes(), really_big_msg) + + conn.send_bytes(SENTINEL) # tell child to quit + child_conn.close() + + if self.TYPE == 'processes': + self.assertEqual(conn.readable, True) + self.assertEqual(conn.writable, True) + self.assertRaises(EOFError, conn.recv) + self.assertRaises(EOFError, conn.recv_bytes) + + p.join() + + def test_duplex_false(self): + reader, writer = self.Pipe(duplex=False) + self.assertEqual(writer.send(1), None) + self.assertEqual(reader.recv(), 1) + if self.TYPE == 'processes': + self.assertEqual(reader.readable, True) + 
self.assertEqual(reader.writable, False) + self.assertEqual(writer.readable, False) + self.assertEqual(writer.writable, True) + self.assertRaises(OSError, reader.send, 2) + self.assertRaises(OSError, writer.recv) + self.assertRaises(OSError, writer.poll) + + def test_spawn_close(self): + # We test that a pipe connection can be closed by parent + # process immediately after child is spawned. On Windows this + # would have sometimes failed on old versions because + # child_conn would be closed before the child got a chance to + # duplicate it. + conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() # this might complete before child initializes + + msg = latin('hello') + conn.send_bytes(msg) + self.assertEqual(conn.recv_bytes(), msg) + + conn.send_bytes(SENTINEL) + conn.close() + p.join() + + def test_sendbytes(self): + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + msg = latin('abcdefghijklmnopqrstuvwxyz') + a, b = self.Pipe() + + a.send_bytes(msg) + self.assertEqual(b.recv_bytes(), msg) + + a.send_bytes(msg, 5) + self.assertEqual(b.recv_bytes(), msg[5:]) + + a.send_bytes(msg, 7, 8) + self.assertEqual(b.recv_bytes(), msg[7:7+8]) + + a.send_bytes(msg, 26) + self.assertEqual(b.recv_bytes(), latin('')) + + a.send_bytes(msg, 26, 0) + self.assertEqual(b.recv_bytes(), latin('')) + + self.assertRaises(ValueError, a.send_bytes, msg, 27) + + self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) + + self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) + + self.assertRaises(ValueError, a.send_bytes, msg, -1) + + self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) + + @classmethod + def _is_fd_assigned(cls, fd): + try: + os.fstat(fd) + except OSError as e: + if e.errno == errno.EBADF: + return False + raise + else: + return True + + @classmethod + def _writefd(cls, conn, data, create_dummy_fds=False): + if create_dummy_fds: + for i in range(0, 256): + if not cls._is_fd_assigned(i): + os.dup2(conn.fileno(), i) + fd = reduction.recv_handle(conn) + if msvcrt: + fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) + os.write(fd, data) + os.close(fd) + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + def test_fd_transfer(self): + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"foo")) + p.daemon = True + p.start() + self.addCleanup(os_helper.unlink, os_helper.TESTFN) + with open(os_helper.TESTFN, "wb") as f: + fd = f.fileno() + if msvcrt: + fd = msvcrt.get_osfhandle(fd) + reduction.send_handle(conn, fd, p.pid) + p.join() + with open(os_helper.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"foo") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", + "test semantics don't make sense on Windows") + @unittest.skipIf(MAXFD <= 256, + "largest assignable fd number is too small") + @unittest.skipUnless(hasattr(os, "dup2"), + "test needs os.dup2()") + def test_large_fd_transfer(self): + # With fd > 256 (issue #11657) + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) + p.daemon = True + p.start() + self.addCleanup(os_helper.unlink, os_helper.TESTFN) + with open(os_helper.TESTFN, "wb") as f: + 
fd = f.fileno() + for newfd in range(256, MAXFD): + if not self._is_fd_assigned(newfd): + break + else: + self.fail("could not find an unassigned large file descriptor") + os.dup2(fd, newfd) + try: + reduction.send_handle(conn, newfd, p.pid) + finally: + os.close(newfd) + p.join() + with open(os_helper.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"bar") + + @classmethod + def _send_data_without_fd(self, conn): + os.write(conn.fileno(), b"\0") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") + def test_missing_fd_transfer(self): + # Check that exception is raised when received data is not + # accompanied by a file descriptor in ancillary data. + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) + p.daemon = True + p.start() + self.assertRaises(RuntimeError, reduction.recv_handle, conn) + p.join() + + def test_context(self): + a, b = self.Pipe() + + with a, b: + a.send(1729) + self.assertEqual(b.recv(), 1729) + if self.TYPE == 'processes': + self.assertFalse(a.closed) + self.assertFalse(b.closed) + + if self.TYPE == 'processes': + self.assertTrue(a.closed) + self.assertTrue(b.closed) + self.assertRaises(OSError, a.recv) + self.assertRaises(OSError, b.recv) + +class _TestListener(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_multiple_bind(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + self.addCleanup(l.close) + self.assertRaises(OSError, self.connection.Listener, + l.address, family) + + def test_context(self): + with self.connection.Listener() as l: + with self.connection.Client(l.address) as c: + with l.accept() as d: + c.send(1729) + self.assertEqual(d.recv(), 1729) + + if self.TYPE == 'processes': + self.assertRaises(OSError, l.accept) + + @unittest.skipUnless(util.abstract_sockets_supported, + "test needs abstract socket support") + def test_abstract_socket(self): + with self.connection.Listener("\0something") as listener: + with self.connection.Client(listener.address) as client: + with listener.accept() as d: + client.send(1729) + self.assertEqual(d.recv(), 1729) + + if self.TYPE == 'processes': + self.assertRaises(OSError, listener.accept) + + +class _TestListenerClient(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _test(cls, address): + conn = cls.connection.Client(address) + conn.send('hello') + conn.close() + + def test_listener_client(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + p.join() + l.close() + + def test_issue14725(self): + l = self.connection.Listener() + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + time.sleep(1) + # On Windows the client process should by now have connected, + # written data and closed the pipe handle by now. This causes + # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue + # 14725. 
+ conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + conn.close() + p.join() + l.close() + + def test_issue16955(self): + for fam in self.connection.families: + l = self.connection.Listener(family=fam) + c = self.connection.Client(l.address) + a = l.accept() + a.send_bytes(b"hello") + self.assertTrue(c.poll(1)) + a.close() + c.close() + l.close() + +class _TestPoll(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_empty_string(self): + a, b = self.Pipe() + self.assertEqual(a.poll(), False) + b.send_bytes(b'') + self.assertEqual(a.poll(), True) + self.assertEqual(a.poll(), True) + + @classmethod + def _child_strings(cls, conn, strings): + for s in strings: + time.sleep(0.1) + conn.send_bytes(s) + conn.close() + + def test_strings(self): + strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') + a, b = self.Pipe() + p = self.Process(target=self._child_strings, args=(b, strings)) + p.start() + + for s in strings: + for i in range(200): + if a.poll(0.01): + break + x = a.recv_bytes() + self.assertEqual(s, x) + + p.join() + + @classmethod + def _child_boundaries(cls, r): + # Polling may "pull" a message in to the child process, but we + # don't want it to pull only part of a message, as that would + # corrupt the pipe for any other processes which might later + # read from it. + r.poll(5) + + def test_boundaries(self): + r, w = self.Pipe(False) + p = self.Process(target=self._child_boundaries, args=(r,)) + p.start() + time.sleep(2) + L = [b"first", b"second"] + for obj in L: + w.send_bytes(obj) + w.close() + p.join() + self.assertIn(r.recv_bytes(), L) + + @classmethod + def _child_dont_merge(cls, b): + b.send_bytes(b'a') + b.send_bytes(b'b') + b.send_bytes(b'cd') + + def test_dont_merge(self): + a, b = self.Pipe() + self.assertEqual(a.poll(0.0), False) + self.assertEqual(a.poll(0.1), False) + + p = self.Process(target=self._child_dont_merge, args=(b,)) + p.start() + + self.assertEqual(a.recv_bytes(), b'a') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.recv_bytes(), b'b') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(0.0), True) + self.assertEqual(a.recv_bytes(), b'cd') + + p.join() + +# +# Test of sending connection and socket objects between processes +# + +@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") +@hashlib_helper.requires_hashdigest('md5') +class _TestPicklingConnections(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def tearDownClass(cls): + from multiprocess import resource_sharer + resource_sharer.stop(timeout=support.LONG_TIMEOUT) + + @classmethod + def _listener(cls, conn, families): + for fam in families: + l = cls.connection.Listener(family=fam) + conn.send(l.address) + new_conn = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + l = socket.create_server((socket_helper.HOST, 0)) + conn.send(l.getsockname()) + new_conn, addr = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + conn.recv() + + @classmethod + def _remote(cls, conn): + for (address, msg) in iter(conn.recv, None): + client = cls.connection.Client(address) + client.send(msg.upper()) + client.close() + + address, msg = conn.recv() + client = socket.socket() + client.connect(address) + client.sendall(msg.upper()) + client.close() + + conn.close() + + def test_pickling(self): + families = self.connection.families + + lconn, lconn0 = self.Pipe() + lp = self.Process(target=self._listener, 
args=(lconn0, families)) + lp.daemon = True + lp.start() + lconn0.close() + + rconn, rconn0 = self.Pipe() + rp = self.Process(target=self._remote, args=(rconn0,)) + rp.daemon = True + rp.start() + rconn0.close() + + for fam in families: + msg = ('This connection uses family %s' % fam).encode('ascii') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + self.assertEqual(new_conn.recv(), msg.upper()) + + rconn.send(None) + + msg = latin('This connection uses a normal socket') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + buf = [] + while True: + s = new_conn.recv(100) + if not s: + break + buf.append(s) + buf = b''.join(buf) + self.assertEqual(buf, msg.upper()) + new_conn.close() + + lconn.send(None) + + rconn.close() + lconn.close() + + lp.join() + rp.join() + + @classmethod + def child_access(cls, conn): + w = conn.recv() + w.send('all is well') + w.close() + + r = conn.recv() + msg = r.recv() + conn.send(msg*2) + + conn.close() + + def test_access(self): + # On Windows, if we do not specify a destination pid when + # using DupHandle then we need to be careful to use the + # correct access flags for DuplicateHandle(), or else + # DupHandle.detach() will raise PermissionError. For example, + # for a read only pipe handle we should use + # access=FILE_GENERIC_READ. (Unfortunately + # DUPLICATE_SAME_ACCESS does not work.) + conn, child_conn = self.Pipe() + p = self.Process(target=self.child_access, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + + r, w = self.Pipe(duplex=False) + conn.send(w) + w.close() + self.assertEqual(r.recv(), 'all is well') + r.close() + + r, w = self.Pipe(duplex=False) + conn.send(r) + r.close() + w.send('foobar') + w.close() + self.assertEqual(conn.recv(), 'foobar'*2) + + p.join() + +# +# +# + +class _TestHeap(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + super().setUp() + # Make pristine heap for these tests + self.old_heap = multiprocessing.heap.BufferWrapper._heap + multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() + + def tearDown(self): + multiprocessing.heap.BufferWrapper._heap = self.old_heap + super().tearDown() + + def test_heap(self): + iterations = 5000 + maxblocks = 50 + blocks = [] + + # get the heap object + heap = multiprocessing.heap.BufferWrapper._heap + heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 + + # create and destroy lots of blocks of different sizes + for i in range(iterations): + size = int(random.lognormvariate(0, 1) * 1000) + b = multiprocessing.heap.BufferWrapper(size) + blocks.append(b) + if len(blocks) > maxblocks: + i = random.randrange(maxblocks) + del blocks[i] + del b + + # verify the state of the heap + with heap._lock: + all = [] + free = 0 + occupied = 0 + for L in list(heap._len_to_seq.values()): + # count all free blocks in arenas + for arena, start, stop in L: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'free')) + free += (stop-start) + for arena, arena_blocks in heap._allocated_blocks.items(): + # count all allocated blocks in arenas + for start, stop in arena_blocks: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'occupied')) + occupied += (stop-start) + + self.assertEqual(free + occupied, + sum(arena.size for arena in heap._arenas)) + + all.sort() + + for i in range(len(all)-1): + (arena, start, stop) = all[i][:3] + (narena, nstart, nstop) = all[i+1][:3] + if arena != narena: + # Two different arenas + self.assertEqual(stop, 
heap._arenas[arena].size) # last block + self.assertEqual(nstart, 0) # first block + else: + # Same arena: two adjacent blocks + self.assertEqual(stop, nstart) + + # test free'ing all blocks + random.shuffle(blocks) + while blocks: + blocks.pop() + + self.assertEqual(heap._n_frees, heap._n_mallocs) + self.assertEqual(len(heap._pending_free_blocks), 0) + self.assertEqual(len(heap._arenas), 0) + self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) + self.assertEqual(len(heap._len_to_seq), 0) + + def test_free_from_gc(self): + # Check that freeing of blocks by the garbage collector doesn't deadlock + # (issue #12352). + # Make sure the GC is enabled, and set lower collection thresholds to + # make collections more frequent (and increase the probability of + # deadlock). + if not gc.isenabled(): + gc.enable() + self.addCleanup(gc.disable) + thresholds = gc.get_threshold() + self.addCleanup(gc.set_threshold, *thresholds) + gc.set_threshold(10) + + # perform numerous block allocations, with cyclic references to make + # sure objects are collected asynchronously by the gc + for i in range(5000): + a = multiprocessing.heap.BufferWrapper(1) + b = multiprocessing.heap.BufferWrapper(1) + # circular references + a.buddy = b + b.buddy = a + +# +# +# + +class _Foo(Structure): + _fields_ = [ + ('x', c_int), + ('y', c_double), + ('z', c_longlong,) + ] + +class _TestSharedCTypes(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocess.sharedctypes") + + @classmethod + def _double(cls, x, y, z, foo, arr, string): + x.value *= 2 + y.value *= 2 + z.value *= 2 + foo.x *= 2 + foo.y *= 2 + string.value *= 2 + for i in range(len(arr)): + arr[i] *= 2 + + def test_sharedctypes(self, lock=False): + x = Value('i', 7, lock=lock) + y = Value(c_double, 1.0/3.0, lock=lock) + z = Value(c_longlong, 2 ** 33, lock=lock) + foo = Value(_Foo, 3, 2, lock=lock) + arr = self.Array('d', list(range(10)), lock=lock) + string = self.Array('c', 20, lock=lock) + string.value = latin('hello') + + p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(x.value, 14) + self.assertAlmostEqual(y.value, 2.0/3.0) + self.assertEqual(z.value, 2 ** 34) + self.assertEqual(foo.x, 6) + self.assertAlmostEqual(foo.y, 4.0) + for i in range(10): + self.assertAlmostEqual(arr[i], i*2) + self.assertEqual(string.value, latin('hellohello')) + + def test_synchronize(self): + self.test_sharedctypes(lock=True) + + def test_copy(self): + foo = _Foo(2, 5.0, 2 ** 33) + bar = copy(foo) + foo.x = 0 + foo.y = 0 + foo.z = 0 + self.assertEqual(bar.x, 2) + self.assertAlmostEqual(bar.y, 5.0) + self.assertEqual(bar.z, 2 ** 33) + + +@unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") +@hashlib_helper.requires_hashdigest('md5') +class _TestSharedMemory(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @staticmethod + def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): + if isinstance(shmem_name_or_obj, str): + local_sms = shared_memory.SharedMemory(shmem_name_or_obj) + else: + local_sms = shmem_name_or_obj + local_sms.buf[:len(binary_data)] = binary_data + local_sms.close() + + def _new_shm_name(self, prefix): + # Add a PID to the name of a POSIX shared memory object to allow + # running multiprocessing tests (test_multiprocessing_fork, + # test_multiprocessing_spawn, etc) in parallel. 
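
_TestSharedCTypes doubles the fields of a ctypes Structure from a child process. A cut-down sketch of sharedctypes.Value wrapping a Structure; the Point type and scale helper are illustrative:

    from ctypes import Structure, c_double, c_int
    from multiprocessing import Process
    from multiprocessing.sharedctypes import Value

    class Point(Structure):
        _fields_ = [("x", c_int), ("y", c_double)]

    def scale(point, factor):
        # The Structure lives in shared memory; field writes are visible to the parent.
        point.x *= factor
        point.y *= factor

    if __name__ == "__main__":
        point = Value(Point, 3, 2.5)       # lock-wrapped shared Point(3, 2.5)
        p = Process(target=scale, args=(point, 2))
        p.start()
        p.join()
        print(point.x, point.y)            # 6 5.0
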
+ return prefix + str(os.getpid()) + + def test_shared_memory_basics(self): + name_tsmb = self._new_shm_name('test01_tsmb') + sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) + self.addCleanup(sms.unlink) + + # Verify attributes are readable. + self.assertEqual(sms.name, name_tsmb) + self.assertGreaterEqual(sms.size, 512) + self.assertGreaterEqual(len(sms.buf), sms.size) + + # Verify __repr__ + self.assertIn(sms.name, str(sms)) + self.assertIn(str(sms.size), str(sms)) + + # Modify contents of shared memory segment through memoryview. + sms.buf[0] = 42 + self.assertEqual(sms.buf[0], 42) + + # Attach to existing shared memory segment. + also_sms = shared_memory.SharedMemory(name_tsmb) + self.assertEqual(also_sms.buf[0], 42) + also_sms.close() + + # Attach to existing shared memory segment but specify a new size. + same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) + self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. + same_sms.close() + + # Creating Shared Memory Segment with -ve size + with self.assertRaises(ValueError): + shared_memory.SharedMemory(create=True, size=-2) + + # Attaching Shared Memory Segment without a name + with self.assertRaises(ValueError): + shared_memory.SharedMemory(create=False) + + # Test if shared memory segment is created properly, + # when _make_filename returns an existing shared memory segment name + with unittest.mock.patch( + 'multiprocess.shared_memory._make_filename') as mock_make_filename: + + NAME_PREFIX = shared_memory._SHM_NAME_PREFIX + names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] + # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary + # because some POSIX compliant systems require name to start with / + names = [NAME_PREFIX + name for name in names] + + mock_make_filename.side_effect = names + shm1 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm1.unlink) + self.assertEqual(shm1._name, names[0]) + + mock_make_filename.side_effect = names + shm2 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm2.unlink) + self.assertEqual(shm2._name, names[1]) + + if shared_memory._USE_POSIX: + # Posix Shared Memory can only be unlinked once. Here we + # test an implementation detail that is not observed across + # all supported platforms (since WindowsNamedSharedMemory + # manages unlinking on its own and unlink() does nothing). + # True release of shared memory segment does not necessarily + # happen until process exits, depending on the OS platform. + name_dblunlink = self._new_shm_name('test01_dblunlink') + sms_uno = shared_memory.SharedMemory( + name_dblunlink, + create=True, + size=5000 + ) + with self.assertRaises(FileNotFoundError): + try: + self.assertGreaterEqual(sms_uno.size, 5000) + + sms_duo = shared_memory.SharedMemory(name_dblunlink) + sms_duo.unlink() # First shm_unlink() call. + sms_duo.close() + sms_uno.close() + + finally: + sms_uno.unlink() # A second shm_unlink() call is bad. + + with self.assertRaises(FileExistsError): + # Attempting to create a new shared memory segment with a + # name that is already in use triggers an exception. + there_can_only_be_one_sms = shared_memory.SharedMemory( + name_tsmb, + create=True, + size=512 + ) + + if shared_memory._USE_POSIX: + # Requesting creation of a shared memory segment with the option + # to attach to an existing segment, if that name is currently in + # use, should not trigger an exception. 
+ # Note: Using a smaller size could possibly cause truncation of + # the existing segment but is OS platform dependent. In the + # case of MacOS/darwin, requesting a smaller size is disallowed. + class OptionalAttachSharedMemory(shared_memory.SharedMemory): + _flags = os.O_CREAT | os.O_RDWR + ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) + self.assertEqual(ok_if_exists_sms.size, sms.size) + ok_if_exists_sms.close() + + # Attempting to attach to an existing shared memory segment when + # no segment exists with the supplied name triggers an exception. + with self.assertRaises(FileNotFoundError): + nonexisting_sms = shared_memory.SharedMemory('test01_notthere') + nonexisting_sms.unlink() # Error should occur on prior line. + + sms.close() + + @unittest.skipIf(True, "fails with dill >= 0.3.5") + def test_shared_memory_recreate(self): + # Test if shared memory segment is created properly, + # when _make_filename returns an existing shared memory segment name + with unittest.mock.patch( + 'multiprocess.shared_memory._make_filename') as mock_make_filename: + + NAME_PREFIX = shared_memory._SHM_NAME_PREFIX + names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] + # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary + # because some POSIX compliant systems require name to start with / + names = [NAME_PREFIX + name for name in names] + + mock_make_filename.side_effect = names + shm1 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm1.unlink) + self.assertEqual(shm1._name, names[0]) + + mock_make_filename.side_effect = names + shm2 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm2.unlink) + self.assertEqual(shm2._name, names[1]) + + def test_invalid_shared_memory_cration(self): + # Test creating a shared memory segment with negative size + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True, size=-1) + + # Test creating a shared memory segment with size 0 + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True, size=0) + + # Test creating a shared memory segment without size argument + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True) + + def test_shared_memory_pickle_unpickle(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sms = shared_memory.SharedMemory(create=True, size=512) + self.addCleanup(sms.unlink) + sms.buf[0:6] = b'pickle' + + # Test pickling + pickled_sms = pickle.dumps(sms, protocol=proto) + + # Test unpickling + sms2 = pickle.loads(pickled_sms) + self.assertIsInstance(sms2, shared_memory.SharedMemory) + self.assertEqual(sms.name, sms2.name) + self.assertEqual(bytes(sms.buf[0:6]), b'pickle') + self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') + + # Test that unpickled version is still the same SharedMemory + sms.buf[0:6] = b'newval' + self.assertEqual(bytes(sms.buf[0:6]), b'newval') + self.assertEqual(bytes(sms2.buf[0:6]), b'newval') + + sms2.buf[0:6] = b'oldval' + self.assertEqual(bytes(sms.buf[0:6]), b'oldval') + self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') + + def test_shared_memory_pickle_unpickle_dead_object(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sms = shared_memory.SharedMemory(create=True, size=512) + sms.buf[0:6] = b'pickle' + pickled_sms = pickle.dumps(sms, protocol=proto) + + # Now, we are going to kill the original object. 
+ # So, unpickled one won't be able to attach to it. + sms.close() + sms.unlink() + + with self.assertRaises(FileNotFoundError): + pickle.loads(pickled_sms) + + def test_shared_memory_across_processes(self): + # bpo-40135: don't define shared memory block's name in case of + # the failure when we run multiprocessing tests in parallel. + sms = shared_memory.SharedMemory(create=True, size=512) + self.addCleanup(sms.unlink) + + # Verify remote attachment to existing block by name is working. + p = self.Process( + target=self._attach_existing_shmem_then_write, + args=(sms.name, b'howdy') + ) + p.daemon = True + p.start() + p.join() + self.assertEqual(bytes(sms.buf[:5]), b'howdy') + + # Verify pickling of SharedMemory instance also works. + p = self.Process( + target=self._attach_existing_shmem_then_write, + args=(sms, b'HELLO') + ) + p.daemon = True + p.start() + p.join() + self.assertEqual(bytes(sms.buf[:5]), b'HELLO') + + sms.close() + + @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") + def test_shared_memory_SharedMemoryServer_ignores_sigint(self): + # bpo-36368: protect SharedMemoryManager server process from + # KeyboardInterrupt signals. + smm = multiprocessing.managers.SharedMemoryManager() + smm.start() + + # make sure the manager works properly at the beginning + sl = smm.ShareableList(range(10)) + + # the manager's server should ignore KeyboardInterrupt signals, and + # maintain its connection with the current process, and success when + # asked to deliver memory segments. + os.kill(smm._process.pid, signal.SIGINT) + + sl2 = smm.ShareableList(range(10)) + + # test that the custom signal handler registered in the Manager does + # not affect signal handling in the parent process. + with self.assertRaises(KeyboardInterrupt): + os.kill(os.getpid(), signal.SIGINT) + + smm.shutdown() + + @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") + def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): + # bpo-36867: test that a SharedMemoryManager uses the + # same resource_tracker process as its parent. + cmd = '''if 1: + from multiprocessing.managers import SharedMemoryManager + + + smm = SharedMemoryManager() + smm.start() + sl = smm.ShareableList(range(10)) + smm.shutdown() + ''' #XXX: ensure correct resource_tracker + rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) + + # Before bpo-36867 was fixed, a SharedMemoryManager not using the same + # resource_tracker process as its parent would make the parent's + # tracker complain about sl being leaked even though smm.shutdown() + # properly released sl. + self.assertFalse(err) + + def test_shared_memory_SharedMemoryManager_basics(self): + smm1 = multiprocessing.managers.SharedMemoryManager() + with self.assertRaises(ValueError): + smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started + smm1.start() + lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] + lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] + doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) + self.assertEqual(len(doppleganger_list0), 5) + doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) + self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) + held_name = lom[0].name + smm1.shutdown() + if sys.platform != "win32": + # Calls to unlink() have no effect on Windows platform; shared + # memory will only be released once final process exits. 
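
The shared-memory tests above attach to a block by name from a second process and observe its writes. The essential flow, assuming the stock multiprocessing.shared_memory module (write_greeting is an illustrative helper):

    from multiprocessing import Process, shared_memory

    def write_greeting(name):
        # Attach to the existing block by name and write into its buffer.
        block = shared_memory.SharedMemory(name)
        block.buf[:5] = b"howdy"
        block.close()

    if __name__ == "__main__":
        shm = shared_memory.SharedMemory(create=True, size=32)
        try:
            p = Process(target=write_greeting, args=(shm.name,))
            p.start()
            p.join()
            print(bytes(shm.buf[:5]))   # b'howdy'
        finally:
            shm.close()
            shm.unlink()                # release the segment once, from one process
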
+ with self.assertRaises(FileNotFoundError): + # No longer there to be attached to again. + absent_shm = shared_memory.SharedMemory(name=held_name) + + with multiprocessing.managers.SharedMemoryManager() as smm2: + sl = smm2.ShareableList("howdy") + shm = smm2.SharedMemory(size=128) + held_name = sl.shm.name + if sys.platform != "win32": + with self.assertRaises(FileNotFoundError): + # No longer there to be attached to again. + absent_sl = shared_memory.ShareableList(name=held_name) + + + def test_shared_memory_ShareableList_basics(self): + sl = shared_memory.ShareableList( + ['howdy', b'HoWdY', -273.154, 100, None, True, 42] + ) + self.addCleanup(sl.shm.unlink) + + # Verify __repr__ + self.assertIn(sl.shm.name, str(sl)) + self.assertIn(str(list(sl)), str(sl)) + + # Index Out of Range (get) + with self.assertRaises(IndexError): + sl[7] + + # Index Out of Range (set) + with self.assertRaises(IndexError): + sl[7] = 2 + + # Assign value without format change (str -> str) + current_format = sl._get_packing_format(0) + sl[0] = 'howdy' + self.assertEqual(current_format, sl._get_packing_format(0)) + + # Verify attributes are readable. + self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') + + # Exercise len(). + self.assertEqual(len(sl), 7) + + # Exercise index(). + with warnings.catch_warnings(): + # Suppress BytesWarning when comparing against b'HoWdY'. + warnings.simplefilter('ignore') + with self.assertRaises(ValueError): + sl.index('100') + self.assertEqual(sl.index(100), 3) + + # Exercise retrieving individual values. + self.assertEqual(sl[0], 'howdy') + self.assertEqual(sl[-2], True) + + # Exercise iterability. + self.assertEqual( + tuple(sl), + ('howdy', b'HoWdY', -273.154, 100, None, True, 42) + ) + + # Exercise modifying individual values. + sl[3] = 42 + self.assertEqual(sl[3], 42) + sl[4] = 'some' # Change type at a given position. + self.assertEqual(sl[4], 'some') + self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[4] = 'far too many' + self.assertEqual(sl[4], 'some') + sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data + self.assertEqual(sl[0], 'encodés') + self.assertEqual(sl[1], b'HoWdY') # no spillage + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data + self.assertEqual(sl[1], b'HoWdY') + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[1] = b'123456789' + self.assertEqual(sl[1], b'HoWdY') + + # Exercise count(). + with warnings.catch_warnings(): + # Suppress BytesWarning when comparing against b'HoWdY'. + warnings.simplefilter('ignore') + self.assertEqual(sl.count(42), 2) + self.assertEqual(sl.count(b'HoWdY'), 1) + self.assertEqual(sl.count(b'adios'), 0) + + # Exercise creating a duplicate. + name_duplicate = self._new_shm_name('test03_duplicate') + sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) + try: + self.assertNotEqual(sl.shm.name, sl_copy.shm.name) + self.assertEqual(name_duplicate, sl_copy.shm.name) + self.assertEqual(list(sl), list(sl_copy)) + self.assertEqual(sl.format, sl_copy.format) + sl_copy[-1] = 77 + self.assertEqual(sl_copy[-1], 77) + self.assertNotEqual(sl[-1], 77) + sl_copy.shm.close() + finally: + sl_copy.shm.unlink() + + # Obtain a second handle on the same ShareableList. 
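+ # Attaching by name maps the same backing memory, so a write through either handle is visible through the other.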
+ sl_tethered = shared_memory.ShareableList(name=sl.shm.name) + self.assertEqual(sl.shm.name, sl_tethered.shm.name) + sl_tethered[-1] = 880 + self.assertEqual(sl[-1], 880) + sl_tethered.shm.close() + + sl.shm.close() + + # Exercise creating an empty ShareableList. + empty_sl = shared_memory.ShareableList() + try: + self.assertEqual(len(empty_sl), 0) + self.assertEqual(empty_sl.format, '') + self.assertEqual(empty_sl.count('any'), 0) + with self.assertRaises(ValueError): + empty_sl.index(None) + empty_sl.shm.close() + finally: + empty_sl.shm.unlink() + + def test_shared_memory_ShareableList_pickling(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sl = shared_memory.ShareableList(range(10)) + self.addCleanup(sl.shm.unlink) + + serialized_sl = pickle.dumps(sl, protocol=proto) + deserialized_sl = pickle.loads(serialized_sl) + self.assertIsInstance( + deserialized_sl, shared_memory.ShareableList) + self.assertEqual(deserialized_sl[-1], 9) + self.assertIsNot(sl, deserialized_sl) + + deserialized_sl[4] = "changed" + self.assertEqual(sl[4], "changed") + sl[3] = "newvalue" + self.assertEqual(deserialized_sl[3], "newvalue") + + larger_sl = shared_memory.ShareableList(range(400)) + self.addCleanup(larger_sl.shm.unlink) + serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) + self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) + larger_sl.shm.close() + + deserialized_sl.shm.close() + sl.shm.close() + + def test_shared_memory_ShareableList_pickling_dead_object(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sl = shared_memory.ShareableList(range(10)) + serialized_sl = pickle.dumps(sl, protocol=proto) + + # Now, we are going to kill the original object. + # So, unpickled one won't be able to attach to it. + sl.shm.close() + sl.shm.unlink() + + with self.assertRaises(FileNotFoundError): + pickle.loads(serialized_sl) + + def test_shared_memory_cleaned_after_process_termination(self): + cmd = '''if 1: + import os, time, sys + from multiprocessing import shared_memory + + # Create a shared_memory segment, and send the segment name + sm = shared_memory.SharedMemory(create=True, size=10) + sys.stdout.write(sm.name + '\\n') + sys.stdout.flush() + time.sleep(100) + ''' + with subprocess.Popen([sys.executable, '-E', '-c', cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as p: + name = p.stdout.readline().strip().decode() + + # killing abruptly processes holding reference to a shared memory + # segment should not leak the given memory segment. + p.terminate() + p.wait() + + deadline = time.monotonic() + support.LONG_TIMEOUT + t = 0.1 + while time.monotonic() < deadline: + time.sleep(t) + t = min(t*2, 5) + try: + smm = shared_memory.SharedMemory(name, create=False) + except FileNotFoundError: + break + else: + raise AssertionError("A SharedMemory segment was leaked after" + " a process was abruptly terminated.") + + if os.name == 'posix': + # Without this line it was raising warnings like: + # UserWarning: resource_tracker: + # There appear to be 1 leaked shared_memory + # objects to clean up at shutdown + # See: https://bugs.python.org/issue45209 + resource_tracker.unregister(f"/{name}", "shared_memory") + + # A warning was emitted by the subprocess' own + # resource_tracker (on Windows, shared memory segments + # are released automatically by the OS). 
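+ # The abruptly terminated child never cleaned up, so its own resource_tracker is expected to have reported exactly one leaked shared_memory object on stderr.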
+ err = p.stderr.read().decode() + self.assertIn( + "resource_tracker: There appear to be 1 leaked " + "shared_memory objects to clean up at shutdown", err) + +# +# Test to verify that `Finalize` works. +# + +class _TestFinalize(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + self.registry_backup = util._finalizer_registry.copy() + util._finalizer_registry.clear() + + def tearDown(self): + gc.collect() # For PyPy or other GCs. + self.assertFalse(util._finalizer_registry) + util._finalizer_registry.update(self.registry_backup) + + @classmethod + def _test_finalize(cls, conn): + class Foo(object): + pass + + a = Foo() + util.Finalize(a, conn.send, args=('a',)) + del a # triggers callback for a + gc.collect() # For PyPy or other GCs. + + b = Foo() + close_b = util.Finalize(b, conn.send, args=('b',)) + close_b() # triggers callback for b + close_b() # does nothing because callback has already been called + del b # does nothing because callback has already been called + gc.collect() # For PyPy or other GCs. + + c = Foo() + util.Finalize(c, conn.send, args=('c',)) + + d10 = Foo() + util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) + + d01 = Foo() + util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) + d02 = Foo() + util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) + d03 = Foo() + util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) + + util.Finalize(None, conn.send, args=('e',), exitpriority=-10) + + util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) + + # call multiprocessing's cleanup function then exit process without + # garbage collecting locals + util._exit_function() + conn.close() + os._exit(0) + + def test_finalize(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._test_finalize, args=(child_conn,)) + p.daemon = True + p.start() + p.join() + + result = [obj for obj in iter(conn.recv, 'STOP')] + self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) + + def test_thread_safety(self): + # bpo-24484: _run_finalizers() should be thread-safe + def cb(): + pass + + class Foo(object): + def __init__(self): + self.ref = self # create reference cycle + # insert finalizer at random key + util.Finalize(self, cb, exitpriority=random.randint(1, 100)) + + finish = False + exc = None + + def run_finalizers(): + nonlocal exc + while not finish: + time.sleep(random.random() * 1e-1) + try: + # A GC run will eventually happen during this, + # collecting stale Foo's and mutating the registry + util._run_finalizers() + except Exception as e: + exc = e + + def make_finalizers(): + nonlocal exc + d = {} + while not finish: + try: + # Old Foo's get gradually replaced and later + # collected by the GC (because of the cyclic ref) + d[random.getrandbits(5)] = {Foo() for i in range(10)} + except Exception as e: + exc = e + d.clear() + + old_interval = sys.getswitchinterval() + old_threshold = gc.get_threshold() + try: + sys.setswitchinterval(1e-6) + gc.set_threshold(5, 5, 5) + threads = [threading.Thread(target=run_finalizers), + threading.Thread(target=make_finalizers)] + with threading_helper.start_threads(threads): + time.sleep(4.0) # Wait a bit to trigger race condition + finish = True + if exc is not None: + raise exc + finally: + sys.setswitchinterval(old_interval) + gc.set_threshold(*old_threshold) + gc.collect() # Collect remaining Foo's + + +# +# Test that from ... 
import * works for each module +# + +class _TestImportStar(unittest.TestCase): + + def get_module_names(self): + import glob + folder = os.path.dirname(multiprocessing.__file__) + pattern = os.path.join(glob.escape(folder), '*.py') + files = glob.glob(pattern) + modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] + modules = ['multiprocess.' + m for m in modules] + modules.remove('multiprocess.__init__') + modules.append('multiprocess') + return modules + + def test_import(self): + modules = self.get_module_names() + if sys.platform == 'win32': + modules.remove('multiprocess.popen_fork') + modules.remove('multiprocess.popen_forkserver') + modules.remove('multiprocess.popen_spawn_posix') + else: + modules.remove('multiprocess.popen_spawn_win32') + if not HAS_REDUCTION: + modules.remove('multiprocess.popen_forkserver') + + if c_int is None: + # This module requires _ctypes + modules.remove('multiprocess.sharedctypes') + + for name in modules: + __import__(name) + mod = sys.modules[name] + self.assertTrue(hasattr(mod, '__all__'), name) + + for attr in mod.__all__: + self.assertTrue( + hasattr(mod, attr), + '%r does not have attribute %r' % (mod, attr) + ) + +# +# Quick test that logging works -- does not test logging output +# + +class _TestLogging(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_enable_logging(self): + logger = multiprocessing.get_logger() + logger.setLevel(util.SUBWARNING) + self.assertTrue(logger is not None) + logger.debug('this will not be printed') + logger.info('nor will this') + logger.setLevel(LOG_LEVEL) + + @classmethod + def _test_level(cls, conn): + logger = multiprocessing.get_logger() + conn.send(logger.getEffectiveLevel()) + + def test_level(self): + LEVEL1 = 32 + LEVEL2 = 37 + + logger = multiprocessing.get_logger() + root_logger = logging.getLogger() + root_level = root_logger.level + + reader, writer = multiprocessing.Pipe(duplex=False) + + logger.setLevel(LEVEL1) + p = self.Process(target=self._test_level, args=(writer,)) + p.start() + self.assertEqual(LEVEL1, reader.recv()) + p.join() + p.close() + + logger.setLevel(logging.NOTSET) + root_logger.setLevel(LEVEL2) + p = self.Process(target=self._test_level, args=(writer,)) + p.start() + self.assertEqual(LEVEL2, reader.recv()) + p.join() + p.close() + + root_logger.setLevel(root_level) + logger.setLevel(level=LOG_LEVEL) + + +# class _TestLoggingProcessName(BaseTestCase): +# +# def handle(self, record): +# assert record.processName == multiprocessing.current_process().name +# self.__handled = True +# +# def test_logging(self): +# handler = logging.Handler() +# handler.handle = self.handle +# self.__handled = False +# # Bypass getLogger() and side-effects +# logger = logging.getLoggerClass()( +# 'multiprocessing.test.TestLoggingProcessName') +# logger.addHandler(handler) +# logger.propagate = False +# +# logger.warn('foo') +# assert self.__handled + +# +# Check that Process.join() retries if os.waitpid() fails with EINTR +# + +class _TestPollEintr(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def _killer(cls, pid): + time.sleep(0.1) + os.kill(pid, signal.SIGUSR1) + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_poll_eintr(self): + got_signal = [False] + def record(*args): + got_signal[0] = True + pid = os.getpid() + oldhandler = signal.signal(signal.SIGUSR1, record) + try: + killer = self.Process(target=self._killer, args=(pid,)) + killer.start() + try: + p = self.Process(target=time.sleep, args=(2,)) + p.start() + p.join() 
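+ # join() is expected to retry the underlying wait when it is interrupted by SIGUSR1 (EINTR) and still reap the child normally.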
+ finally: + killer.join() + self.assertTrue(got_signal[0]) + self.assertEqual(p.exitcode, 0) + finally: + signal.signal(signal.SIGUSR1, oldhandler) + +# +# Test to verify handle verification, see issue 3321 +# + +class TestInvalidHandle(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_handles(self): + conn = multiprocessing.connection.Connection(44977608) + # check that poll() doesn't crash + try: + conn.poll() + except (ValueError, OSError): + pass + finally: + # Hack private attribute _handle to avoid printing an error + # in conn.__del__ + conn._handle = None + self.assertRaises((ValueError, OSError), + multiprocessing.connection.Connection, -1) + + + +@hashlib_helper.requires_hashdigest('md5') +class OtherTest(unittest.TestCase): + # TODO: add more tests for deliver/answer challenge. + def test_deliver_challenge_auth_failure(self): + class _FakeConnection(object): + def recv_bytes(self, size): + return b'something bogus' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.deliver_challenge, + _FakeConnection(), b'abc') + + def test_answer_challenge_auth_failure(self): + class _FakeConnection(object): + def __init__(self): + self.count = 0 + def recv_bytes(self, size): + self.count += 1 + if self.count == 1: + return multiprocessing.connection.CHALLENGE + elif self.count == 2: + return b'something bogus' + return b'' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.answer_challenge, + _FakeConnection(), b'abc') + +# +# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 +# + +def initializer(ns): + ns.test += 1 + +@hashlib_helper.requires_hashdigest('md5') +class TestInitializers(unittest.TestCase): + def setUp(self): + self.mgr = multiprocessing.Manager() + self.ns = self.mgr.Namespace() + self.ns.test = 0 + + def tearDown(self): + self.mgr.shutdown() + self.mgr.join() + + def test_manager_initializer(self): + m = multiprocessing.managers.SyncManager() + self.assertRaises(TypeError, m.start, 1) + m.start(initializer, (self.ns,)) + self.assertEqual(self.ns.test, 1) + m.shutdown() + m.join() + + def test_pool_initializer(self): + self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) + p = multiprocessing.Pool(1, initializer, (self.ns,)) + p.close() + p.join() + self.assertEqual(self.ns.test, 1) + +# +# Issue 5155, 5313, 5331: Test process in processes +# Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior +# + +def _this_sub_process(q): + try: + item = q.get(block=False) + except pyqueue.Empty: + pass + +def _test_process(): + queue = multiprocessing.Queue() + subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) + subProc.daemon = True + subProc.start() + subProc.join() + +def _afunc(x): + return x*x + +def pool_in_process(): + pool = multiprocessing.Pool(processes=4) + x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) + pool.close() + pool.join() + +class _file_like(object): + def __init__(self, delegate): + self._delegate = delegate + self._pid = None + + @property + def cache(self): + pid = os.getpid() + # There are no race conditions since fork keeps only the running thread + if pid != self._pid: + self._pid = pid + self._cache = [] + return self._cache + + def write(self, data): + self.cache.append(data) + + def flush(self): + self._delegate.write(''.join(self.cache)) + self._cache = [] + +class TestStdinBadfiledescriptor(unittest.TestCase): + + def test_queue_in_process(self): + proc = multiprocessing.Process(target=_test_process) + proc.start() + proc.join() + + def test_pool_in_process(self): + p = multiprocessing.Process(target=pool_in_process) + p.start() + p.join() + + def test_flushing(self): + sio = io.StringIO() + flike = _file_like(sio) + flike.write('foo') + proc = multiprocessing.Process(target=lambda: flike.flush()) + flike.flush() + assert sio.getvalue() == 'foo' + + +class TestWait(unittest.TestCase): + + @classmethod + def _child_test_wait(cls, w, slow): + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + w.send((i, os.getpid())) + w.close() + + def test_wait(self, slow=False): + from multiprocess.connection import wait + readers = [] + procs = [] + messages = [] + + for i in range(4): + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) + p.daemon = True + p.start() + w.close() + readers.append(r) + procs.append(p) + self.addCleanup(p.join) + + while readers: + for r in wait(readers): + try: + msg = r.recv() + except EOFError: + readers.remove(r) + r.close() + else: + messages.append(msg) + + messages.sort() + expected = sorted((i, p.pid) for i in range(10) for p in procs) + self.assertEqual(messages, expected) + + @classmethod + def _child_test_wait_socket(cls, address, slow): + s = socket.socket() + s.connect(address) + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + s.sendall(('%s\n' % i).encode('ascii')) + s.close() + + def test_wait_socket(self, slow=False): + from multiprocess.connection import wait + l = socket.create_server((socket_helper.HOST, 0)) + addr = l.getsockname() + readers = [] + procs = [] + dic = {} + + for i in range(4): + p = multiprocessing.Process(target=self._child_test_wait_socket, + args=(addr, slow)) + p.daemon = True + p.start() + procs.append(p) + self.addCleanup(p.join) + + for i in range(4): + r, _ = l.accept() + readers.append(r) + dic[r] = [] + l.close() + + while readers: + for r in wait(readers): + msg = r.recv(32) + if not msg: + readers.remove(r) + r.close() + else: + dic[r].append(msg) + + expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') + for v in dic.values(): + self.assertEqual(b''.join(v), expected) + + def test_wait_slow(self): + self.test_wait(True) + + def test_wait_socket_slow(self): + self.test_wait_socket(True) + + def test_wait_timeout(self): + from multiprocess.connection import wait + + expected = 5 + a, b = multiprocessing.Pipe() + + start = 
time.monotonic() + res = wait([a, b], expected) + delta = time.monotonic() - start + + self.assertEqual(res, []) + self.assertLess(delta, expected * 2) + self.assertGreater(delta, expected * 0.5) + + b.send(None) + + start = time.monotonic() + res = wait([a, b], 20) + delta = time.monotonic() - start + + self.assertEqual(res, [a]) + self.assertLess(delta, 0.4) + + @classmethod + def signal_and_sleep(cls, sem, period): + sem.release() + time.sleep(period) + + def test_wait_integer(self): + from multiprocess.connection import wait + + expected = 3 + sorted_ = lambda l: sorted(l, key=lambda x: id(x)) + sem = multiprocessing.Semaphore(0) + a, b = multiprocessing.Pipe() + p = multiprocessing.Process(target=self.signal_and_sleep, + args=(sem, expected)) + + p.start() + self.assertIsInstance(p.sentinel, int) + self.assertTrue(sem.acquire(timeout=20)) + + start = time.monotonic() + res = wait([a, p.sentinel, b], expected + 20) + delta = time.monotonic() - start + + self.assertEqual(res, [p.sentinel]) + self.assertLess(delta, expected + 2) + self.assertGreater(delta, expected - 2) + + a.send(None) + + start = time.monotonic() + res = wait([a, p.sentinel, b], 20) + delta = time.monotonic() - start + + self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) + self.assertLess(delta, 0.4) + + b.send(None) + + start = time.monotonic() + res = wait([a, p.sentinel, b], 20) + delta = time.monotonic() - start + + self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) + self.assertLess(delta, 0.4) + + p.terminate() + p.join() + + def test_neg_timeout(self): + from multiprocess.connection import wait + a, b = multiprocessing.Pipe() + t = time.monotonic() + res = wait([a], timeout=-1) + t = time.monotonic() - t + self.assertEqual(res, []) + self.assertLess(t, 1) + a.close() + b.close() + +# +# Issue 14151: Test invalid family on invalid environment +# + +class TestInvalidFamily(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_family(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener(r'\\.\test') + + @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") + def test_invalid_family_win32(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener('/var/test.pipe') + +# +# Issue 12098: check sys.flags of child matches that for parent +# + +class TestFlags(unittest.TestCase): + @classmethod + def run_in_grandchild(cls, conn): + conn.send(tuple(sys.flags)) + + @classmethod + def run_in_child(cls): + import json + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) + p.start() + grandchild_flags = r.recv() + p.join() + r.close() + w.close() + flags = (tuple(sys.flags), grandchild_flags) + print(json.dumps(flags)) + + def _test_flags(self): + import json + # start child process using unusual flags + prog = ('from multiprocess.tests import TestFlags; ' + + 'TestFlags.run_in_child()') + data = subprocess.check_output( + [sys.executable, '-E', '-S', '-O', '-c', prog]) + child_flags, grandchild_flags = json.loads(data.decode('ascii')) + self.assertEqual(child_flags, grandchild_flags) + +# +# Test interaction with socket timeouts - see Issue #6056 +# + +class TestTimeouts(unittest.TestCase): + @classmethod + def _test_timeout(cls, child, address): + time.sleep(1) + child.send(123) + child.close() + conn = multiprocessing.connection.Client(address) + conn.send(456) + conn.close() + + def test_timeout(self): + old_timeout = socket.getdefaulttimeout() + try: 
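+ # Install a very short global socket timeout; Listener/Client connection setup should still succeed despite it (see issue #6056).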
+ socket.setdefaulttimeout(0.1) + parent, child = multiprocessing.Pipe(duplex=True) + l = multiprocessing.connection.Listener(family='AF_INET') + p = multiprocessing.Process(target=self._test_timeout, + args=(child, l.address)) + p.start() + child.close() + self.assertEqual(parent.recv(), 123) + parent.close() + conn = l.accept() + self.assertEqual(conn.recv(), 456) + conn.close() + l.close() + join_process(p) + finally: + socket.setdefaulttimeout(old_timeout) + +# +# Test what happens with no "if __name__ == '__main__'" +# + +class TestNoForkBomb(unittest.TestCase): + def test_noforkbomb(self): + sm = multiprocessing.get_start_method() + name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') + if sm != 'fork': + rc, out, err = test.support.script_helper.assert_python_failure(name, sm) + self.assertEqual(out, b'') + self.assertIn(b'RuntimeError', err) + else: + rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) + self.assertEqual(out.rstrip(), b'123') + self.assertEqual(err, b'') + +# +# Issue #17555: ForkAwareThreadLock +# + +class TestForkAwareThreadLock(unittest.TestCase): + # We recursively start processes. Issue #17555 meant that the + # after fork registry would get duplicate entries for the same + # lock. The size of the registry at generation n was ~2**n. + + @classmethod + def child(cls, n, conn): + if n > 1: + p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) + p.start() + conn.close() + join_process(p) + else: + conn.send(len(util._afterfork_registry)) + conn.close() + + def test_lock(self): + r, w = multiprocessing.Pipe(False) + l = util.ForkAwareThreadLock() + old_size = len(util._afterfork_registry) + p = multiprocessing.Process(target=self.child, args=(5, w)) + p.start() + w.close() + new_size = r.recv() + join_process(p) + self.assertLessEqual(new_size, old_size) + +# +# Check that non-forked child processes do not inherit unneeded fds/handles +# + +class TestCloseFds(unittest.TestCase): + + def get_high_socket_fd(self): + if WIN32: + # The child process will not have any socket handles, so + # calling socket.fromfd() should produce WSAENOTSOCK even + # if there is a handle of the same number. + return socket.socket().detach() + else: + # We want to produce a socket with an fd high enough that a + # freshly created child process will not have any fds as high. 
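+ # dup() the descriptor until its number reaches at least 50, then close the intermediate copies so only the high fd remains open.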
+ fd = socket.socket().detach() + to_close = [] + while fd < 50: + to_close.append(fd) + fd = os.dup(fd) + for x in to_close: + os.close(x) + return fd + + def close(self, fd): + if WIN32: + socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() + else: + os.close(fd) + + @classmethod + def _test_closefds(cls, conn, fd): + try: + s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) + except Exception as e: + conn.send(e) + else: + s.close() + conn.send(None) + + def test_closefd(self): + if not HAS_REDUCTION: + raise unittest.SkipTest('requires fd pickling') + + reader, writer = multiprocessing.Pipe() + fd = self.get_high_socket_fd() + try: + p = multiprocessing.Process(target=self._test_closefds, + args=(writer, fd)) + p.start() + writer.close() + e = reader.recv() + join_process(p) + finally: + self.close(fd) + writer.close() + reader.close() + + if multiprocessing.get_start_method() == 'fork': + self.assertIs(e, None) + else: + WSAENOTSOCK = 10038 + self.assertIsInstance(e, OSError) + self.assertTrue(e.errno == errno.EBADF or + e.winerror == WSAENOTSOCK, e) + +# +# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc +# + +class TestIgnoreEINTR(unittest.TestCase): + + # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block + CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) + + @classmethod + def _test_ignore(cls, conn): + def handler(signum, frame): + pass + signal.signal(signal.SIGUSR1, handler) + conn.send('ready') + x = conn.recv() + conn.send(x) + conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_ignore(self): + conn, child_conn = multiprocessing.Pipe() + try: + p = multiprocessing.Process(target=self._test_ignore, + args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + self.assertEqual(conn.recv(), 'ready') + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + time.sleep(0.1) + conn.send(1234) + self.assertEqual(conn.recv(), 1234) + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) + time.sleep(0.1) + p.join() + finally: + conn.close() + + @classmethod + def _test_ignore_listener(cls, conn): + def handler(signum, frame): + pass + signal.signal(signal.SIGUSR1, handler) + with multiprocessing.connection.Listener() as l: + conn.send(l.address) + a = l.accept() + a.send('welcome') + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_ignore_listener(self): + conn, child_conn = multiprocessing.Pipe() + try: + p = multiprocessing.Process(target=self._test_ignore_listener, + args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + address = conn.recv() + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + time.sleep(0.1) + client = multiprocessing.connection.Client(address) + self.assertEqual(client.recv(), 'welcome') + p.join() + finally: + conn.close() + +class TestStartMethod(unittest.TestCase): + @classmethod + def _check_context(cls, conn): + conn.send(multiprocessing.get_start_method()) + + def check_context(self, ctx): + r, w = ctx.Pipe(duplex=False) + p = ctx.Process(target=self._check_context, args=(w,)) + p.start() + w.close() + child_method = r.recv() + r.close() + p.join() + self.assertEqual(child_method, ctx.get_start_method()) + + def test_context(self): + for method in ('fork', 'spawn', 'forkserver'): + try: + ctx = multiprocessing.get_context(method) + except ValueError: + continue + 
self.assertEqual(ctx.get_start_method(), method) + self.assertIs(ctx.get_context(), ctx) + self.assertRaises(ValueError, ctx.set_start_method, 'spawn') + self.assertRaises(ValueError, ctx.set_start_method, None) + self.check_context(ctx) + + def test_set_get(self): + multiprocessing.set_forkserver_preload(PRELOAD) + count = 0 + old_method = multiprocessing.get_start_method() + try: + for method in ('fork', 'spawn', 'forkserver'): + try: + multiprocessing.set_start_method(method, force=True) + except ValueError: + continue + self.assertEqual(multiprocessing.get_start_method(), method) + ctx = multiprocessing.get_context() + self.assertEqual(ctx.get_start_method(), method) + self.assertTrue(type(ctx).__name__.lower().startswith(method)) + self.assertTrue( + ctx.Process.__name__.lower().startswith(method)) + self.check_context(multiprocessing) + count += 1 + finally: + multiprocessing.set_start_method(old_method, force=True) + self.assertGreaterEqual(count, 1) + + def test_get_all(self): + methods = multiprocessing.get_all_start_methods() + if sys.platform == 'win32': + self.assertEqual(methods, ['spawn']) + else: + self.assertTrue(methods == ['fork', 'spawn'] or + methods == ['spawn', 'fork'] or + methods == ['fork', 'spawn', 'forkserver'] or + methods == ['spawn', 'fork', 'forkserver']) + + def test_preload_resources(self): + if multiprocessing.get_start_method() != 'forkserver': + self.skipTest("test only relevant for 'forkserver' method") + name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') + rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) + out = out.decode() + err = err.decode() + if out.rstrip() != 'ok' or err != '': + print(out) + print(err) + self.fail("failed spawning forkserver or grandchild") + + +@unittest.skipIf(sys.platform == "win32", + "test semantics don't make sense on Windows") +class TestResourceTracker(unittest.TestCase): + + def _test_resource_tracker(self): + # + # Check that killing process does not leak named semaphores + # + cmd = '''if 1: + import time, os, tempfile + import multiprocess as mp + from multiprocess import resource_tracker + from multiprocess.shared_memory import SharedMemory + + mp.set_start_method("spawn") + rand = tempfile._RandomNameSequence() + + + def create_and_register_resource(rtype): + if rtype == "semaphore": + lock = mp.Lock() + return lock, lock._semlock.name + elif rtype == "shared_memory": + sm = SharedMemory(create=True, size=10) + return sm, sm._name + else: + raise ValueError( + "Resource type {{}} not understood".format(rtype)) + + + resource1, rname1 = create_and_register_resource("{rtype}") + resource2, rname2 = create_and_register_resource("{rtype}") + + os.write({w}, rname1.encode("ascii") + b"\\n") + os.write({w}, rname2.encode("ascii") + b"\\n") + + time.sleep(10) + ''' + for rtype in resource_tracker._CLEANUP_FUNCS: + with self.subTest(rtype=rtype): + if rtype == "noop": + # Artefact resource type used by the resource_tracker + continue + r, w = os.pipe() + p = subprocess.Popen([sys.executable, + '-E', '-c', cmd.format(w=w, rtype=rtype)], + pass_fds=[w], + stderr=subprocess.PIPE) + os.close(w) + with open(r, 'rb', closefd=True) as f: + name1 = f.readline().rstrip().decode('ascii') + name2 = f.readline().rstrip().decode('ascii') + _resource_unlink(name1, rtype) + p.terminate() + p.wait() + + deadline = time.monotonic() + support.LONG_TIMEOUT + while time.monotonic() < deadline: + time.sleep(.5) + try: + _resource_unlink(name2, rtype) + except OSError as e: + # docs say it should be ENOENT, 
but OSX seems to give + # EINVAL + self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) + break + else: + raise AssertionError( + f"A {rtype} resource was leaked after a process was " + f"abruptly terminated.") + err = p.stderr.read().decode('utf-8') + p.stderr.close() + expected = ('resource_tracker: There appear to be 2 leaked {} ' + 'objects'.format( + rtype)) + self.assertRegex(err, expected) + self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) + + def check_resource_tracker_death(self, signum, should_die): + # bpo-31310: if the semaphore tracker process has died, it should + # be restarted implicitly. + from multiprocess.resource_tracker import _resource_tracker + pid = _resource_tracker._pid + if pid is not None: + os.kill(pid, signal.SIGKILL) + support.wait_process(pid, exitcode=-signal.SIGKILL) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + _resource_tracker.ensure_running() + pid = _resource_tracker._pid + + os.kill(pid, signum) + time.sleep(1.0) # give it time to die + + ctx = multiprocessing.get_context("spawn") + with warnings.catch_warnings(record=True) as all_warn: + warnings.simplefilter("always") + sem = ctx.Semaphore() + sem.acquire() + sem.release() + wr = weakref.ref(sem) + # ensure `sem` gets collected, which triggers communication with + # the semaphore tracker + del sem + gc.collect() + self.assertIsNone(wr()) + if should_die: + self.assertEqual(len(all_warn), 1) + the_warn = all_warn[0] + self.assertTrue(issubclass(the_warn.category, UserWarning)) + self.assertTrue("resource_tracker: process died" + in str(the_warn.message)) + else: + self.assertEqual(len(all_warn), 0) + + def test_resource_tracker_sigint(self): + # Catchable signal (ignored by semaphore tracker) + self.check_resource_tracker_death(signal.SIGINT, False) + + def test_resource_tracker_sigterm(self): + # Catchable signal (ignored by semaphore tracker) + self.check_resource_tracker_death(signal.SIGTERM, False) + + def test_resource_tracker_sigkill(self): + # Uncatchable signal. + self.check_resource_tracker_death(signal.SIGKILL, True) + + @staticmethod + def _is_resource_tracker_reused(conn, pid): + from multiprocess.resource_tracker import _resource_tracker + _resource_tracker.ensure_running() + # The pid should be None in the child process, expect for the fork + # context. It should not be a new value. 
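+ # That is: the child either has no tracker pid recorded yet (spawn/forkserver) or inherited the parent's pid (fork), and the tracker process must still be alive.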
+ reused = _resource_tracker._pid in (None, pid) + reused &= _resource_tracker._check_alive() + conn.send(reused) + + def test_resource_tracker_reused(self): + from multiprocess.resource_tracker import _resource_tracker + _resource_tracker.ensure_running() + pid = _resource_tracker._pid + + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=self._is_resource_tracker_reused, + args=(w, pid)) + p.start() + is_resource_tracker_reused = r.recv() + + # Clean up + p.join() + w.close() + r.close() + + self.assertTrue(is_resource_tracker_reused) + + def test_too_long_name_resource(self): + # gh-96819: Resource names that will make the length of a write to a pipe + # greater than PIPE_BUF are not allowed + rtype = "shared_memory" + too_long_name_resource = "a" * (512 - len(rtype)) + with self.assertRaises(ValueError): + resource_tracker.register(too_long_name_resource, rtype) + + +class TestSimpleQueue(unittest.TestCase): + + @classmethod + def _test_empty(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + # issue 30301, could fail under spawn and forkserver + try: + queue.put(queue.empty()) + queue.put(queue.empty()) + finally: + parent_can_continue.set() + + def test_empty(self): + queue = multiprocessing.SimpleQueue() + child_can_start = multiprocessing.Event() + parent_can_continue = multiprocessing.Event() + + proc = multiprocessing.Process( + target=self._test_empty, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertTrue(queue.empty()) + + child_can_start.set() + parent_can_continue.wait() + + self.assertFalse(queue.empty()) + self.assertEqual(queue.get(), True) + self.assertEqual(queue.get(), False) + self.assertTrue(queue.empty()) + + proc.join() + + def test_close(self): + queue = multiprocessing.SimpleQueue() + queue.close() + # closing a queue twice should not fail + queue.close() + + # Test specific to CPython since it tests private attributes + @test.support.cpython_only + def test_closed(self): + queue = multiprocessing.SimpleQueue() + queue.close() + self.assertTrue(queue._reader.closed) + self.assertTrue(queue._writer.closed) + + +class TestPoolNotLeakOnFailure(unittest.TestCase): + + def test_release_unused_processes(self): + # Issue #19675: During pool creation, if we can't create a process, + # don't leak already created ones. + will_fail_in = 3 + forked_processes = [] + + class FailingForkProcess: + def __init__(self, **kwargs): + self.name = 'Fake Process' + self.exitcode = None + self.state = None + forked_processes.append(self) + + def start(self): + nonlocal will_fail_in + if will_fail_in <= 0: + raise OSError("Manually induced OSError") + will_fail_in -= 1 + self.state = 'started' + + def terminate(self): + self.state = 'stopping' + + def join(self): + if self.state == 'stopping': + self.state = 'stopped' + + def is_alive(self): + return self.state == 'started' or self.state == 'stopping' + + with self.assertRaisesRegex(OSError, 'Manually induced OSError'): + p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( + Process=FailingForkProcess)) + p.close() + p.join() + self.assertFalse( + any(process.is_alive() for process in forked_processes)) + + +@hashlib_helper.requires_hashdigest('md5') +class TestSyncManagerTypes(unittest.TestCase): + """Test all the types which can be shared between a parent and a + child process by using a manager which acts as an intermediary + between them. 
+ + In the following unit-tests the base type is created in the parent + process, the @classmethod represents the worker process and the + shared object is readable and editable between the two. + + # The child. + @classmethod + def _test_list(cls, obj): + assert obj[0] == 5 + assert obj.append(6) + + # The parent. + def test_list(self): + o = self.manager.list() + o.append(5) + self.run_worker(self._test_list, o) + assert o[1] == 6 + """ + manager_class = multiprocessing.managers.SyncManager + + def setUp(self): + self.manager = self.manager_class() + self.manager.start() + self.proc = None + + def tearDown(self): + if self.proc is not None and self.proc.is_alive(): + self.proc.terminate() + self.proc.join() + self.manager.shutdown() + self.manager = None + self.proc = None + + @classmethod + def setUpClass(cls): + support.reap_children() + + tearDownClass = setUpClass + + def wait_proc_exit(self): + # Only the manager process should be returned by active_children() + # but this can take a bit on slow machines, so wait a few seconds + # if there are other children too (see #17395). + join_process(self.proc) + start_time = time.monotonic() + t = 0.01 + while len(multiprocessing.active_children()) > 1: + time.sleep(t) + t *= 2 + dt = time.monotonic() - start_time + if dt >= 5.0: + test.support.environment_altered = True + support.print_warning(f"multiprocess.Manager still has " + f"{multiprocessing.active_children()} " + f"active children after {dt} seconds") + break + + def run_worker(self, worker, obj): + self.proc = multiprocessing.Process(target=worker, args=(obj, )) + self.proc.daemon = True + self.proc.start() + self.wait_proc_exit() + self.assertEqual(self.proc.exitcode, 0) + + @classmethod + def _test_event(cls, obj): + assert obj.is_set() + obj.wait() + obj.clear() + obj.wait(0.001) + + def test_event(self): + o = self.manager.Event() + o.set() + self.run_worker(self._test_event, o) + assert not o.is_set() + o.wait(0.001) + + @classmethod + def _test_lock(cls, obj): + obj.acquire() + + def test_lock(self, lname="Lock"): + o = getattr(self.manager, lname)() + self.run_worker(self._test_lock, o) + o.release() + self.assertRaises(RuntimeError, o.release) # already released + + @classmethod + def _test_rlock(cls, obj): + obj.acquire() + obj.release() + + def test_rlock(self, lname="Lock"): + o = getattr(self.manager, lname)() + self.run_worker(self._test_rlock, o) + + @classmethod + def _test_semaphore(cls, obj): + obj.acquire() + + def test_semaphore(self, sname="Semaphore"): + o = getattr(self.manager, sname)() + self.run_worker(self._test_semaphore, o) + o.release() + + def test_bounded_semaphore(self): + self.test_semaphore(sname="BoundedSemaphore") + + @classmethod + def _test_condition(cls, obj): + obj.acquire() + obj.release() + + def test_condition(self): + o = self.manager.Condition() + self.run_worker(self._test_condition, o) + + @classmethod + def _test_barrier(cls, obj): + assert obj.parties == 5 + obj.reset() + + def test_barrier(self): + o = self.manager.Barrier(5) + self.run_worker(self._test_barrier, o) + + @classmethod + def _test_pool(cls, obj): + # TODO: fix https://bugs.python.org/issue35919 + with obj: + pass + + def test_pool(self): + o = self.manager.Pool(processes=4) + self.run_worker(self._test_pool, o) + + @classmethod + def _test_queue(cls, obj): + assert obj.qsize() == 2 + assert obj.full() + assert not obj.empty() + assert obj.get() == 5 + assert not obj.empty() + assert obj.get() == 6 + assert obj.empty() + + def test_queue(self, qname="Queue"): + o = 
getattr(self.manager, qname)(2) + o.put(5) + o.put(6) + self.run_worker(self._test_queue, o) + assert o.empty() + assert not o.full() + + def test_joinable_queue(self): + self.test_queue("JoinableQueue") + + @classmethod + def _test_list(cls, obj): + assert obj[0] == 5 + assert obj.count(5) == 1 + assert obj.index(5) == 0 + obj.sort() + obj.reverse() + for x in obj: + pass + assert len(obj) == 1 + assert obj.pop(0) == 5 + + def test_list(self): + o = self.manager.list() + o.append(5) + self.run_worker(self._test_list, o) + assert not o + self.assertEqual(len(o), 0) + + @classmethod + def _test_dict(cls, obj): + assert len(obj) == 1 + assert obj['foo'] == 5 + assert obj.get('foo') == 5 + assert list(obj.items()) == [('foo', 5)] + assert list(obj.keys()) == ['foo'] + assert list(obj.values()) == [5] + assert obj.copy() == {'foo': 5} + assert obj.popitem() == ('foo', 5) + + def test_dict(self): + o = self.manager.dict() + o['foo'] = 5 + self.run_worker(self._test_dict, o) + assert not o + self.assertEqual(len(o), 0) + + @classmethod + def _test_value(cls, obj): + assert obj.value == 1 + assert obj.get() == 1 + obj.set(2) + + def test_value(self): + o = self.manager.Value('i', 1) + self.run_worker(self._test_value, o) + self.assertEqual(o.value, 2) + self.assertEqual(o.get(), 2) + + @classmethod + def _test_array(cls, obj): + assert obj[0] == 0 + assert obj[1] == 1 + assert len(obj) == 2 + assert list(obj) == [0, 1] + + def test_array(self): + o = self.manager.Array('i', [0, 1]) + self.run_worker(self._test_array, o) + + @classmethod + def _test_namespace(cls, obj): + assert obj.x == 0 + assert obj.y == 1 + + def test_namespace(self): + o = self.manager.Namespace() + o.x = 0 + o.y = 1 + self.run_worker(self._test_namespace, o) + + +class TestNamedResource(unittest.TestCase): + @unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") + def test_global_named_resource_spawn(self): + # + # gh-90549: Check that global named resources in main module + # will not leak by a subprocess, in spawn context. + # + testfn = os_helper.TESTFN + self.addCleanup(os_helper.unlink, testfn) + with open(testfn, 'w', encoding='utf-8') as f: + f.write(textwrap.dedent('''\ + import multiprocess as mp + + ctx = mp.get_context('spawn') + + global_resource = ctx.Semaphore() + + def submain(): pass + + if __name__ == '__main__': + p = ctx.Process(target=submain) + p.start() + p.join() + ''')) + rc, out, err = test.support.script_helper.assert_python_ok(testfn, **ENV) + # on error, err = 'UserWarning: resource_tracker: There appear to + # be 1 leaked semaphore objects to clean up at shutdown' + self.assertEqual(err, b'') + + +class MiscTestCase(unittest.TestCase): + def test__all__(self): + # Just make sure names in not_exported are excluded + support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, + not_exported=['SUBDEBUG', 'SUBWARNING', + 'license', 'citation']) + + +# +# Mixins +# + +class BaseMixin(object): + @classmethod + def setUpClass(cls): + cls.dangling = (multiprocessing.process._dangling.copy(), + threading._dangling.copy()) + + @classmethod + def tearDownClass(cls): + # bpo-26762: Some multiprocessing objects like Pool create reference + # cycles. Trigger a garbage collection to break these cycles. 
+ test.support.gc_collect() + + processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) + if processes: + test.support.environment_altered = True + support.print_warning(f'Dangling processes: {processes}') + processes = None + + threads = set(threading._dangling) - set(cls.dangling[1]) + if threads: + test.support.environment_altered = True + support.print_warning(f'Dangling threads: {threads}') + threads = None + + +class ProcessesMixin(BaseMixin): + TYPE = 'processes' + Process = multiprocessing.Process + connection = multiprocessing.connection + current_process = staticmethod(multiprocessing.current_process) + parent_process = staticmethod(multiprocessing.parent_process) + active_children = staticmethod(multiprocessing.active_children) + Pool = staticmethod(multiprocessing.Pool) + Pipe = staticmethod(multiprocessing.Pipe) + Queue = staticmethod(multiprocessing.Queue) + JoinableQueue = staticmethod(multiprocessing.JoinableQueue) + Lock = staticmethod(multiprocessing.Lock) + RLock = staticmethod(multiprocessing.RLock) + Semaphore = staticmethod(multiprocessing.Semaphore) + BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) + Condition = staticmethod(multiprocessing.Condition) + Event = staticmethod(multiprocessing.Event) + Barrier = staticmethod(multiprocessing.Barrier) + Value = staticmethod(multiprocessing.Value) + Array = staticmethod(multiprocessing.Array) + RawValue = staticmethod(multiprocessing.RawValue) + RawArray = staticmethod(multiprocessing.RawArray) + + +class ManagerMixin(BaseMixin): + TYPE = 'manager' + Process = multiprocessing.Process + Queue = property(operator.attrgetter('manager.Queue')) + JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) + Lock = property(operator.attrgetter('manager.Lock')) + RLock = property(operator.attrgetter('manager.RLock')) + Semaphore = property(operator.attrgetter('manager.Semaphore')) + BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) + Condition = property(operator.attrgetter('manager.Condition')) + Event = property(operator.attrgetter('manager.Event')) + Barrier = property(operator.attrgetter('manager.Barrier')) + Value = property(operator.attrgetter('manager.Value')) + Array = property(operator.attrgetter('manager.Array')) + list = property(operator.attrgetter('manager.list')) + dict = property(operator.attrgetter('manager.dict')) + Namespace = property(operator.attrgetter('manager.Namespace')) + + @classmethod + def Pool(cls, *args, **kwds): + return cls.manager.Pool(*args, **kwds) + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.manager = multiprocessing.Manager() + + @classmethod + def tearDownClass(cls): + # only the manager process should be returned by active_children() + # but this can take a bit on slow machines, so wait a few seconds + # if there are other children too (see #17395) + start_time = time.monotonic() + t = 0.01 + while len(multiprocessing.active_children()) > 1: + time.sleep(t) + t *= 2 + dt = time.monotonic() - start_time + if dt >= 5.0: + test.support.environment_altered = True + support.print_warning(f"multiprocess.Manager still has " + f"{multiprocessing.active_children()} " + f"active children after {dt} seconds") + break + + gc.collect() # do garbage collection + if cls.manager._number_of_objects() != 0: + # This is not really an error since some tests do not + # ensure that all processes which hold a reference to a + # managed object have been joined. 
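+ # Record that the environment was altered and list the surviving proxies before shutting the manager down.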
+ test.support.environment_altered = True + support.print_warning('Shared objects which still exist ' + 'at manager shutdown:') + support.print_warning(cls.manager._debug_info()) + cls.manager.shutdown() + cls.manager.join() + cls.manager = None + + super().tearDownClass() + + +class ThreadsMixin(BaseMixin): + TYPE = 'threads' + Process = multiprocessing.dummy.Process + connection = multiprocessing.dummy.connection + current_process = staticmethod(multiprocessing.dummy.current_process) + active_children = staticmethod(multiprocessing.dummy.active_children) + Pool = staticmethod(multiprocessing.dummy.Pool) + Pipe = staticmethod(multiprocessing.dummy.Pipe) + Queue = staticmethod(multiprocessing.dummy.Queue) + JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) + Lock = staticmethod(multiprocessing.dummy.Lock) + RLock = staticmethod(multiprocessing.dummy.RLock) + Semaphore = staticmethod(multiprocessing.dummy.Semaphore) + BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) + Condition = staticmethod(multiprocessing.dummy.Condition) + Event = staticmethod(multiprocessing.dummy.Event) + Barrier = staticmethod(multiprocessing.dummy.Barrier) + Value = staticmethod(multiprocessing.dummy.Value) + Array = staticmethod(multiprocessing.dummy.Array) + +# +# Functions used to create test cases from the base ones in this module +# + +def install_tests_in_module_dict(remote_globs, start_method): + __module__ = remote_globs['__name__'] + local_globs = globals() + ALL_TYPES = {'processes', 'threads', 'manager'} + + for name, base in local_globs.items(): + if not isinstance(base, type): + continue + if issubclass(base, BaseTestCase): + if base is BaseTestCase: + continue + assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES + for type_ in base.ALLOWED_TYPES: + newname = 'With' + type_.capitalize() + name[1:] + Mixin = local_globs[type_.capitalize() + 'Mixin'] + class Temp(base, Mixin, unittest.TestCase): + pass + if type_ == 'manager': + Temp = hashlib_helper.requires_hashdigest('md5')(Temp) + Temp.__name__ = Temp.__qualname__ = newname + Temp.__module__ = __module__ + remote_globs[newname] = Temp + elif issubclass(base, unittest.TestCase): + class Temp(base, object): + pass + Temp.__name__ = Temp.__qualname__ = name + Temp.__module__ = __module__ + remote_globs[name] = Temp + + dangling = [None, None] + old_start_method = [None] + + def setUpModule(): + multiprocessing.set_forkserver_preload(PRELOAD) + multiprocessing.process._cleanup() + dangling[0] = multiprocessing.process._dangling.copy() + dangling[1] = threading._dangling.copy() + old_start_method[0] = multiprocessing.get_start_method(allow_none=True) + try: + multiprocessing.set_start_method(start_method, force=True) + except ValueError: + raise unittest.SkipTest(start_method + + ' start method not supported') + + if sys.platform.startswith("linux"): + try: + lock = multiprocessing.RLock() + except OSError: + raise unittest.SkipTest("OSError raises on RLock creation, " + "see issue 3111!") + check_enough_semaphores() + util.get_temp_dir() # creates temp directory + multiprocessing.get_logger().setLevel(LOG_LEVEL) + + def tearDownModule(): + need_sleep = False + + # bpo-26762: Some multiprocessing objects like Pool create reference + # cycles. Trigger a garbage collection to break these cycles. 
+ test.support.gc_collect() + + multiprocessing.set_start_method(old_start_method[0], force=True) + # pause a bit so we don't get warning about dangling threads/processes + processes = set(multiprocessing.process._dangling) - set(dangling[0]) + if processes: + need_sleep = True + test.support.environment_altered = True + support.print_warning(f'Dangling processes: {processes}') + processes = None + + threads = set(threading._dangling) - set(dangling[1]) + if threads: + need_sleep = True + test.support.environment_altered = True + support.print_warning(f'Dangling threads: {threads}') + threads = None + + # Sleep 500 ms to give time to child processes to complete. + if need_sleep: + time.sleep(0.5) + + multiprocessing.util._cleanup_tests() + + remote_globs['setUpModule'] = setUpModule + remote_globs['tearDownModule'] = tearDownModule + + +@unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') +@unittest.skipIf(sys.platform != "linux", "Linux only") +@unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") +class SemLockTests(unittest.TestCase): + + def test_semlock_subclass(self): + class SemLock(_multiprocessing.SemLock): + pass + name = f'test_semlock_subclass-{os.getpid()}' + s = SemLock(1, 0, 10, name, False) + _multiprocessing.sem_unlink(name) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__main__.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0de127e06a38f895225e79aa06678dc8e58e731 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/__main__.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE + +import glob +import os +import sys +import subprocess as sp +python = sys.executable +try: + import pox + python = pox.which_python(version=True) or python +except ImportError: + pass +shell = sys.platform[:3] == 'win' + +suite = os.path.dirname(__file__) or os.path.curdir +tests = glob.glob(suite + os.path.sep + 'test_*.py') +tests = glob.glob(suite + os.path.sep + '__init__.py') + \ + [i for i in tests if 'main' not in i] + + +if __name__ == '__main__': + + failed = 0 + for test in tests: + p = sp.Popen([python, test], shell=shell).wait() + if p: + failed = 1 + print('') + exit(failed) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py new file mode 100644 index 0000000000000000000000000000000000000000..017e010ba0e6fd4372356e7c2bef5b0f23717c1a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py @@ -0,0 +1,18 @@ +import multiprocessing, sys + +def foo(): + print("123") + +# Because "if __name__ == '__main__'" is missing this will not work +# correctly on Windows. However, we should get a RuntimeError rather +# than the Windows equivalent of a fork bomb. 
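+# The start method may be given as the first command-line argument; otherwise 'spawn' is forced so the missing guard is exercised on every platform.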
+ +if len(sys.argv) > 1: + multiprocessing.set_start_method(sys.argv[1]) +else: + multiprocessing.set_start_method('spawn') + +p = multiprocessing.Process(target=foo) +p.start() +p.join() +sys.exit(p.exitcode) diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py new file mode 100644 index 0000000000000000000000000000000000000000..a5d10606e41616d9e18c4a474f674566b39199a0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py @@ -0,0 +1,18 @@ +import multiprocessing + +multiprocessing.Lock() + + +def f(): + print("ok") + + +if __name__ == "__main__": + ctx = multiprocessing.get_context("forkserver") + modname = "multiprocess.tests.mp_preload" + # Make sure it's importable + __import__(modname) + ctx.set_forkserver_preload([modname]) + proc = ctx.Process(target=f) + proc.start() + proc.join() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..b3251208c2060ee3ccc3fc30f4904534e98058f2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py @@ -0,0 +1,16 @@ +import unittest +from multiprocess.tests import install_tests_in_module_dict + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("forkserver is not available on Windows") + +install_tests_in_module_dict(globals(), 'forkserver') + +if __name__ == '__main__': + unittest.main() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..ab88d2261b3c1fa6c2b7ae7065cbc270ac2be676 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py @@ -0,0 +1,303 @@ +# tests __main__ module handling in multiprocessing +from test import support +from test.support import import_helper +# Skip tests if _multiprocessing wasn't built. +import_helper.import_module('_multiprocessing') + +import importlib +import importlib.machinery +import unittest +import sys +import os +import os.path +import py_compile + +from test.support import os_helper +from test.support.script_helper import ( + make_pkg, make_script, make_zip_pkg, make_zip_script, + assert_python_ok) + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +# Look up which start methods are available to test +import multiprocess as multiprocessing +AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) + +# Issue #22332: Skip tests if sem_open implementation is broken. +import_helper.import_module('multiprocess.synchronize') + +verbose = support.verbose + +test_source = """\ +# multiprocessing includes all sorts of shenanigans to make __main__ +# attributes accessible in the subprocess in a pickle compatible way. 
+ +# We run the "doesn't work in the interactive interpreter" example from +# the docs to make sure it *does* work from an executed __main__, +# regardless of the invocation mechanism + +import sys +import time +sys.path.extend({0}) +from multiprocess import Pool, set_start_method + +# We use this __main__ defined function in the map call below in order to +# check that multiprocessing in correctly running the unguarded +# code in child processes and then making it available as __main__ +def f(x): + return x*x + +# Check explicit relative imports +if "check_sibling" in __file__: + # We're inside a package and not in a __main__.py file + # so make sure explicit relative imports work correctly + from . import sibling + +if __name__ == '__main__': + start_method = sys.argv[1] + set_start_method(start_method) + results = [] + with Pool(5) as pool: + pool.map_async(f, [1, 2, 3], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + + results.sort() + print(start_method, "->", results) + + pool.join() +""".format(sys.path) + +test_source_main_skipped_in_children = """\ +# __main__.py files have an implied "if __name__ == '__main__'" so +# multiprocessing should always skip running them in child processes + +# This means we can't use __main__ defined functions in child processes, +# so we just use "int" as a passthrough operation below + +if __name__ != "__main__": + raise RuntimeError("Should only be called as __main__!") + +import sys +import time +sys.path.extend({0}) +from multiprocess import Pool, set_start_method + +start_method = sys.argv[1] +set_start_method(start_method) +results = [] +with Pool(5) as pool: + pool.map_async(int, [1, 4, 9], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + +results.sort() +print(start_method, "->", results) + +pool.join() +""".format(sys.path) + +# These helpers were copied from test_cmd_line_script & tweaked a bit... + +def _make_test_script(script_dir, script_basename, + source=test_source, omit_suffix=False): + to_return = make_script(script_dir, script_basename, + source, omit_suffix) + # Hack to check explicit relative imports + if script_basename == "check_sibling": + make_script(script_dir, "sibling", "") + importlib.invalidate_caches() + return to_return + +def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source=test_source, depth=1): + to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source, depth) + importlib.invalidate_caches() + return to_return + +# There's no easy way to pass the script directory in to get +# -m to work (avoiding that is the whole point of making +# directories and zipfiles executable!) 
+# So we fake it for testing purposes with a custom launch script +launch_source = """\ +import sys, os.path, runpy +sys.path.insert(0, %s) +runpy._run_module_as_main(%r) +""" + +def _make_launch_script(script_dir, script_basename, module_name, path=None): + if path is None: + path = "os.path.dirname(__file__)" + else: + path = repr(path) + source = launch_source % (path, module_name) + to_return = make_script(script_dir, script_basename, source) + importlib.invalidate_caches() + return to_return + +class MultiProcessingCmdLineMixin(): + maxDiff = None # Show full tracebacks on subprocess failure + + def setUp(self): + if self.start_method not in AVAILABLE_START_METHODS: + self.skipTest("%r start method not available" % self.start_method) + + def _check_output(self, script_name, exit_code, out, err): + if verbose > 1: + print("Output from test script %r:" % script_name) + print(repr(out)) + self.assertEqual(exit_code, 0) + self.assertEqual(err.decode('utf-8'), '') + expected_results = "%s -> [1, 4, 9]" % self.start_method + self.assertEqual(out.decode('utf-8').strip(), expected_results) + + def _check_script(self, script_name, *cmd_line_switches): + if not __debug__: + cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) + run_args = cmd_line_switches + (script_name, self.start_method) + rc, out, err = assert_python_ok(*run_args, __isolated=False) + self._check_output(script_name, rc, out, err) + + def test_basic_script(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + self._check_script(script_name) + + def test_basic_script_no_suffix(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script', + omit_suffix=True) + self._check_script(script_name) + + def test_ipython_workaround(self): + # Some versions of the IPython launch script are missing the + # __name__ = "__main__" guard, and multiprocessing has long had + # a workaround for that case + # See https://github.com/ipython/ipython/issues/4698 + source = test_source_main_skipped_in_children + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'ipython', + source=source) + self._check_script(script_name) + script_no_suffix = _make_test_script(script_dir, 'ipython', + source=source, + omit_suffix=True) + self._check_script(script_no_suffix) + + def test_script_compiled(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + self._check_script(pyc_file) + + def test_directory(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + self._check_script(script_dir) + + def test_directory_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + self._check_script(script_dir) + + def test_zipfile(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) + self._check_script(zip_name) + + def 
test_zipfile_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) + self._check_script(zip_name) + + def test_module_in_package(self): + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, 'check_sibling') + launch_name = _make_launch_script(script_dir, 'launch', + 'test_pkg.check_sibling') + self._check_script(launch_name) + + def test_module_in_package_in_zipfile(self): + with os_helper.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_module_in_subpackage_in_zipfile(self): + with os_helper.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_package(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + self._check_script(launch_name) + + def test_package_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + self._check_script(launch_name) + +# Test all supported start methods (setupClass skips as appropriate) + +class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'spawn' + main_in_children_source = test_source_main_skipped_in_children + +class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'fork' + main_in_children_source = test_source + +class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'forkserver' + main_in_children_source = test_source_main_skipped_in_children + +def tearDownModule(): + support.reap_children() + +if __name__ == '__main__': + unittest.main() diff --git a/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..e5236c789d4b4828b618e7c5dda80778ba0504de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py @@ -0,0 +1,12 @@ +import unittest +from multiprocess.tests import install_tests_in_module_dict + +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +install_tests_in_module_dict(globals(), 'spawn') + +if __name__ == '__main__': + unittest.main() diff --git 
a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..272357764667008f525a7954cef467898c4bad74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f76e5070d5ea130776fc687bb89cfa4096592e71 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2acf3340092c6fdec228d397f4f0b8ab4999dec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cce71d5211a70a74af255572581c652c7a65ad7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ead3aaf138b8b633905e8d6adfbd407de628563 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61b65dffe8e2aa25761deb9e0518547791e4b0b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa140de5cbcec6f348ddec8fb5bc0284cefe53f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3a0d98360334697c8630beb415d9b241e075f75 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44b0f6f137b941ab8a50933006c81c9426f0daec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20a08f210b6961cd875bcceb9165df00b284bcc0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ef730294a4834e71d8b0d0cfc94db04333813b7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a45a19d4c7ef5ee324e8b62c711215d06c0d0232 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/filesystem.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/filesystem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68e1a28a7e1ba9aa06530a69066c15216e729d7e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/filesystem.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bc7d3d4e75eb40e0b467ee986997512dfaebc42 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5251cd3fa9ae2e4de1ccef7fb658e4075f837b26 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/hdfs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/hdfs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcced5778e403af9d646a468f20ba17d2a8931b7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/hdfs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23e3736078587f6e0e1c5bbdafdbb0bf077f0174 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d71f5067536321b25d932074dcad838713aeee3a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47a19d9149d760203fb0a487ae789e29a19a1ca7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b80998174ef89c628fd99e6f795d52937ec6365 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..664f27a4b82a2c2f1abe5ff08b90f921f9566c05 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a96e1b6a403c77af4965ef94cad9d22fbb8f80b5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..242bc197ad39651257bea9f500fa1b9dabd56969 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14761aff9db89cf51088427a92b7578ba8f42790 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_scalars.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_scalars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19b545a6fcec41d2b117857e44a2eb2138f702c4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_scalars.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_util.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e7aac3be557835eca25d7527125a6e2b07a573e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/feather/v0.17.0.version.2-compression.lz4.feather b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/feather/v0.17.0.version.2-compression.lz4.feather new file mode 100644 index 0000000000000000000000000000000000000000..562b0b2c53d8684fcb2b0417bad36877d92af53e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/feather/v0.17.0.version.2-compression.lz4.feather differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/README.md b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c548225155331252e380ef9317af43f105c509d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/README.md @@ -0,0 +1,22 @@ + + +The ORC and JSON files come from the `examples` directory in the Apache ORC +source tree: +https://github.com/apache/orc/tree/main/examples diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.orc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.orc new file mode 100644 index 0000000000000000000000000000000000000000..ecdadcbff134615d7eefcb740d55fe710cee059b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.orc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.orc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.orc new file mode 100644 index 0000000000000000000000000000000000000000..4fb0beff868971efb653739fe6ae47a37e4a1c66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.orc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.orc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.orc new file mode 100644 index 0000000000000000000000000000000000000000..f51ffdbd03a43fadbedce302ffa8e5967a30ad59 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.orc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.orc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.orc new file mode 100644 index 0000000000000000000000000000000000000000..cb0f7b9d767a37159c5509da1a47877d1c8b411e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.orc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__init__.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d08d67d2860f480a17d3b2508d4cad70ea8b27b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__init__.py @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not parquet' +pytestmark = [ + pytest.mark.parquet, +] diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4131f1dd6367287537b7385e630c0abdd53ef2de Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bff4782ca4bcb699a26bbe7b8ef74383d9523ea9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_compliant_nested_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_compliant_nested_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..371363dd2d5ac73d199ed812a24deb1e69b1127f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_compliant_nested_type.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_dataset.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91b643e4fdf109af8028504f6d823fe98ecd814c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_dataset.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_datetime.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_datetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fafd9947b5875f665882b0570ac28cfcfb8201d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_datetime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_metadata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_metadata.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3505da92f79819807a42135ded2181e7cb00715c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_metadata.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_pandas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_pandas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22174fbde5e43ff1ae676138dadb722cd348a0d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_pandas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/common.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/common.py new file mode 100644 index 0000000000000000000000000000000000000000..8365ed5b28543552c4fd3930f0fcda67c0341912 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/common.py @@ -0,0 +1,165 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
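+
+# Shared helpers for the Parquet test modules in this directory:
+# round-trip read/write helpers and sample table/DataFrame generators.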
+ +import io + +import numpy as np + +import pyarrow as pa +from pyarrow.tests import util + + +def _write_table(table, path, **kwargs): + # So we see the ImportError somewhere + import pyarrow.parquet as pq + from pyarrow.pandas_compat import _pandas_api + + if _pandas_api.is_data_frame(table): + table = pa.Table.from_pandas(table) + + pq.write_table(table, path, **kwargs) + return table + + +def _read_table(*args, **kwargs): + import pyarrow.parquet as pq + + table = pq.read_table(*args, **kwargs) + table.validate(full=True) + return table + + +def _roundtrip_table(table, read_table_kwargs=None, + write_table_kwargs=None): + read_table_kwargs = read_table_kwargs or {} + write_table_kwargs = write_table_kwargs or {} + + writer = pa.BufferOutputStream() + _write_table(table, writer, **write_table_kwargs) + reader = pa.BufferReader(writer.getvalue()) + return _read_table(reader, **read_table_kwargs) + + +def _check_roundtrip(table, expected=None, read_table_kwargs=None, + **write_table_kwargs): + if expected is None: + expected = table + + read_table_kwargs = read_table_kwargs or {} + + # intentionally check twice + result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs, + write_table_kwargs=write_table_kwargs) + assert result.equals(expected) + result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs, + write_table_kwargs=write_table_kwargs) + assert result.equals(expected) + + +def _roundtrip_pandas_dataframe(df, write_kwargs): + table = pa.Table.from_pandas(df) + result = _roundtrip_table( + table, write_table_kwargs=write_kwargs) + return result.to_pandas() + + +def _random_integers(size, dtype): + # We do not generate integers outside the int64 range + platform_int_info = np.iinfo('int_') + iinfo = np.iinfo(dtype) + return np.random.randint(max(iinfo.min, platform_int_info.min), + min(iinfo.max, platform_int_info.max), + size=size).astype(dtype) + + +def _range_integers(size, dtype): + return pa.array(np.arange(size, dtype=dtype)) + + +def _test_dataframe(size=10000, seed=0): + import pandas as pd + + np.random.seed(seed) + df = pd.DataFrame({ + 'uint8': _random_integers(size, np.uint8), + 'uint16': _random_integers(size, np.uint16), + 'uint32': _random_integers(size, np.uint32), + 'uint64': _random_integers(size, np.uint64), + 'int8': _random_integers(size, np.int8), + 'int16': _random_integers(size, np.int16), + 'int32': _random_integers(size, np.int32), + 'int64': _random_integers(size, np.int64), + 'float32': np.random.randn(size).astype(np.float32), + 'float64': np.arange(size, dtype=np.float64), + 'bool': np.random.randn(size) > 0, + 'strings': [util.rands(10) for i in range(size)], + 'all_none': [None] * size, + 'all_none_category': [None] * size + }) + + # TODO(PARQUET-1015) + # df['all_none_category'] = df['all_none_category'].astype('category') + return df + + +def make_sample_file(table_or_df): + import pyarrow.parquet as pq + + if isinstance(table_or_df, pa.Table): + a_table = table_or_df + else: + a_table = pa.Table.from_pandas(table_or_df) + + buf = io.BytesIO() + _write_table(a_table, buf, compression='SNAPPY', version='2.6') + + buf.seek(0) + return pq.ParquetFile(buf) + + +def alltypes_sample(size=10000, seed=0, categorical=False): + import pandas as pd + + np.random.seed(seed) + arrays = { + 'uint8': np.arange(size, dtype=np.uint8), + 'uint16': np.arange(size, dtype=np.uint16), + 'uint32': np.arange(size, dtype=np.uint32), + 'uint64': np.arange(size, dtype=np.uint64), + 'int8': np.arange(size, dtype=np.int16), + 'int16': np.arange(size, 
dtype=np.int16), + 'int32': np.arange(size, dtype=np.int32), + 'int64': np.arange(size, dtype=np.int64), + 'float32': np.arange(size, dtype=np.float32), + 'float64': np.arange(size, dtype=np.float64), + 'bool': np.random.randn(size) > 0, + 'datetime_ms': np.arange("2016-01-01T00:00:00.001", size, + dtype='datetime64[ms]'), + 'datetime_us': np.arange("2016-01-01T00:00:00.000001", size, + dtype='datetime64[us]'), + 'datetime_ns': np.arange("2016-01-01T00:00:00.000000001", size, + dtype='datetime64[ns]'), + 'timedelta': np.arange(0, size, dtype="timedelta64[s]"), + 'str': pd.Series([str(x) for x in range(size)]), + 'empty_str': [''] * size, + 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], + 'null': [None] * size, + 'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)], + } + if categorical: + arrays['str_category'] = arrays['str'].astype('category') + return pd.DataFrame(arrays) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/encryption.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/encryption.py new file mode 100644 index 0000000000000000000000000000000000000000..d07f8ae2735207f5f6c4e6463e123ff04c539e47 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/encryption.py @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import base64 + +import pyarrow.parquet.encryption as pe + + +class InMemoryKmsClient(pe.KmsClient): + """This is a mock class implementation of KmsClient, built for testing + only. + """ + + def __init__(self, config): + """Create an InMemoryKmsClient instance.""" + pe.KmsClient.__init__(self) + self.master_keys_map = config.custom_kms_conf + + def wrap_key(self, key_bytes, master_key_identifier): + """Not a secure cipher - the wrapped key + is just the master key concatenated with key bytes""" + master_key_bytes = self.master_keys_map[master_key_identifier].encode( + 'utf-8') + wrapped_key = b"".join([master_key_bytes, key_bytes]) + result = base64.b64encode(wrapped_key) + return result + + def unwrap_key(self, wrapped_key, master_key_identifier): + """Not a secure cipher - just extract the key from + the wrapped key""" + expected_master_key = self.master_keys_map[master_key_identifier] + decoded_wrapped_key = base64.b64decode(wrapped_key) + master_key_bytes = decoded_wrapped_key[:16] + decrypted_key = decoded_wrapped_key[16:] + if (expected_master_key == master_key_bytes.decode('utf-8')): + return decrypted_key + raise ValueError("Incorrect master key used", + master_key_bytes, decrypted_key) + + +def verify_file_encrypted(path): + """Verify that the file is encrypted by looking at its first 4 bytes. 
+ If it's the magic string PARE + then this is a parquet with encrypted footer.""" + with open(path, "rb") as file: + magic_str = file.read(4) + # Verify magic string for parquet with encrypted footer is PARE + assert magic_str == b'PARE' diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_basic.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..3c867776ac052f08b6a7feba89a90353c8d6acd2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_basic.py @@ -0,0 +1,930 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections import OrderedDict +import io +import warnings +from shutil import copytree + +import numpy as np +import pytest + +import pyarrow as pa +from pyarrow import fs +from pyarrow.filesystem import LocalFileSystem, FileSystem +from pyarrow.tests import util +from pyarrow.tests.parquet.common import (_check_roundtrip, _roundtrip_table, + _test_dataframe) + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import _read_table, _write_table +except ImportError: + pq = None + + +try: + import pandas as pd + import pandas.testing as tm + + from pyarrow.tests.pandas_examples import dataframe_with_lists + from pyarrow.tests.parquet.common import alltypes_sample +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... 
-m 'not parquet' +pytestmark = pytest.mark.parquet + + +def test_parquet_invalid_version(tempdir): + table = pa.table({'a': [1, 2, 3]}) + with pytest.raises(ValueError, match="Unsupported Parquet format version"): + _write_table(table, tempdir / 'test_version.parquet', version="2.2") + with pytest.raises(ValueError, match="Unsupported Parquet data page " + + "version"): + _write_table(table, tempdir / 'test_version.parquet', + data_page_version="2.2") + + +def test_set_data_page_size(): + arr = pa.array([1, 2, 3] * 100000) + t = pa.Table.from_arrays([arr], names=['f0']) + + # 128K, 512K + page_sizes = [2 << 16, 2 << 18] + for target_page_size in page_sizes: + _check_roundtrip(t, data_page_size=target_page_size) + + +@pytest.mark.pandas +def test_set_write_batch_size(): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + + _check_roundtrip( + table, data_page_size=10, write_batch_size=1, version='2.4' + ) + + +@pytest.mark.pandas +def test_set_dictionary_pagesize_limit(): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + + _check_roundtrip(table, dictionary_pagesize_limit=1, + data_page_size=10, version='2.4') + + with pytest.raises(TypeError): + _check_roundtrip(table, dictionary_pagesize_limit="a", + data_page_size=10, version='2.4') + + +@pytest.mark.pandas +def test_chunked_table_write(): + # ARROW-232 + tables = [] + batch = pa.RecordBatch.from_pandas(alltypes_sample(size=10)) + tables.append(pa.Table.from_batches([batch] * 3)) + df, _ = dataframe_with_lists() + batch = pa.RecordBatch.from_pandas(df) + tables.append(pa.Table.from_batches([batch] * 3)) + + for data_page_version in ['1.0', '2.0']: + for use_dictionary in [True, False]: + for table in tables: + _check_roundtrip( + table, version='2.6', + data_page_version=data_page_version, + use_dictionary=use_dictionary) + + +@pytest.mark.pandas +def test_memory_map(tempdir): + df = alltypes_sample(size=10) + + table = pa.Table.from_pandas(df) + _check_roundtrip(table, read_table_kwargs={'memory_map': True}, + version='2.6') + + filename = str(tempdir / 'tmp_file') + with open(filename, 'wb') as f: + _write_table(table, f, version='2.6') + table_read = pq.read_pandas(filename, memory_map=True) + assert table_read.equals(table) + + +@pytest.mark.pandas +def test_enable_buffered_stream(tempdir): + df = alltypes_sample(size=10) + + table = pa.Table.from_pandas(df) + _check_roundtrip(table, read_table_kwargs={'buffer_size': 1025}, + version='2.6') + + filename = str(tempdir / 'tmp_file') + with open(filename, 'wb') as f: + _write_table(table, f, version='2.6') + table_read = pq.read_pandas(filename, buffer_size=4096) + assert table_read.equals(table) + + +def test_special_chars_filename(tempdir): + table = pa.Table.from_arrays([pa.array([42])], ["ints"]) + filename = "foo # bar" + path = tempdir / filename + assert not path.exists() + _write_table(table, str(path)) + assert path.exists() + table_read = _read_table(str(path)) + assert table_read.equals(table) + + +def test_invalid_source(): + # Test that we provide an helpful error message pointing out + # that None wasn't expected when trying to open a Parquet None file. 
+ with pytest.raises(TypeError, match="None"): + pq.read_table(None) + + with pytest.raises(TypeError, match="None"): + pq.ParquetFile(None) + + +@pytest.mark.slow +def test_file_with_over_int16_max_row_groups(): + # PARQUET-1857: Parquet encryption support introduced a INT16_MAX upper + # limit on the number of row groups, but this limit only impacts files with + # encrypted row group metadata because of the int16 row group ordinal used + # in the Parquet Thrift metadata. Unencrypted files are not impacted, so + # this test checks that it works (even if it isn't a good idea) + t = pa.table([list(range(40000))], names=['f0']) + _check_roundtrip(t, row_group_size=1) + + +@pytest.mark.pandas +def test_empty_table_roundtrip(): + df = alltypes_sample(size=10) + + # Create a non-empty table to infer the types correctly, then slice to 0 + table = pa.Table.from_pandas(df) + table = pa.Table.from_arrays( + [col.chunk(0)[:0] for col in table.itercolumns()], + names=table.schema.names) + + assert table.schema.field('null').type == pa.null() + assert table.schema.field('null_list').type == pa.list_(pa.null()) + _check_roundtrip( + table, version='2.6') + + +@pytest.mark.pandas +def test_empty_table_no_columns(): + df = pd.DataFrame() + empty = pa.Table.from_pandas(df, preserve_index=False) + _check_roundtrip(empty) + + +def test_write_nested_zero_length_array_chunk_failure(): + # Bug report in ARROW-3792 + cols = OrderedDict( + int32=pa.int32(), + list_string=pa.list_(pa.string()) + ) + data = [[], [OrderedDict(int32=1, list_string=('G',)), ]] + + # This produces a table with a column like + # )> + # [ + # [], + # [ + # [ + # "G" + # ] + # ] + # ] + # + # Each column is a ChunkedArray with 2 elements + my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten() + for batch in data] + my_batches = [pa.RecordBatch.from_arrays(batch, schema=pa.schema(cols)) + for batch in my_arrays] + tbl = pa.Table.from_batches(my_batches, pa.schema(cols)) + _check_roundtrip(tbl) + + +@pytest.mark.pandas +def test_multiple_path_types(tempdir): + # Test compatibility with PEP 519 path-like objects + path = tempdir / 'zzz.parquet' + df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)}) + _write_table(df, path) + table_read = _read_table(path) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + # Test compatibility with plain string paths + path = str(tempdir) + 'zzz.parquet' + df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)}) + _write_table(df, path) + table_read = _read_table(path) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +def test_fspath(tempdir): + # ARROW-12472 support __fspath__ objects without using str() + path = tempdir / "test.parquet" + table = pa.table({"a": [1, 2, 3]}) + _write_table(table, path) + + fs_protocol_obj = util.FSProtocolClass(path) + + result = _read_table(fs_protocol_obj) + assert result.equals(table) + + # combined with non-local filesystem raises + with pytest.raises(TypeError): + _read_table(fs_protocol_obj, filesystem=FileSystem()) + + +@pytest.mark.parametrize("filesystem", [ + None, fs.LocalFileSystem(), LocalFileSystem._get_instance() +]) +@pytest.mark.parametrize("name", ("data.parquet", "例.parquet")) +def test_relative_paths(tempdir, filesystem, name): + # reading and writing from relative paths + table = pa.table({"a": [1, 2, 3]}) + path = tempdir / name + + # reading + pq.write_table(table, str(path)) + with util.change_cwd(tempdir): + result = pq.read_table(name, filesystem=filesystem) + assert result.equals(table) + 
+ path.unlink() + assert not path.exists() + + # writing + with util.change_cwd(tempdir): + pq.write_table(table, name, filesystem=filesystem) + result = pq.read_table(path) + assert result.equals(table) + + +def test_read_non_existing_file(): + # ensure we have a proper error message + with pytest.raises(FileNotFoundError): + pq.read_table('i-am-not-existing.parquet') + + +def test_file_error_python_exception(): + class BogusFile(io.BytesIO): + def read(self, *args): + raise ZeroDivisionError("zorglub") + + def seek(self, *args): + raise ZeroDivisionError("zorglub") + + # ensure the Python exception is restored + with pytest.raises(ZeroDivisionError, match="zorglub"): + pq.read_table(BogusFile(b"")) + + +def test_parquet_read_from_buffer(tempdir): + # reading from a buffer from python's open() + table = pa.table({"a": [1, 2, 3]}) + pq.write_table(table, str(tempdir / "data.parquet")) + + with open(str(tempdir / "data.parquet"), "rb") as f: + result = pq.read_table(f) + assert result.equals(table) + + with open(str(tempdir / "data.parquet"), "rb") as f: + result = pq.read_table(pa.PythonFile(f)) + assert result.equals(table) + + +def test_byte_stream_split(): + # This is only a smoke test. + arr_float = pa.array(list(map(float, range(100)))) + arr_int = pa.array(list(map(int, range(100)))) + data_float = [arr_float, arr_float] + table = pa.Table.from_arrays(data_float, names=['a', 'b']) + + # Check with byte_stream_split for both columns. + _check_roundtrip(table, expected=table, compression="gzip", + use_dictionary=False, use_byte_stream_split=True) + + # Check with byte_stream_split for column 'b' and dictionary + # for column 'a'. + _check_roundtrip(table, expected=table, compression="gzip", + use_dictionary=['a'], + use_byte_stream_split=['b']) + + # Check with a collision for both columns. + _check_roundtrip(table, expected=table, compression="gzip", + use_dictionary=['a', 'b'], + use_byte_stream_split=['a', 'b']) + + # Check with mixed column types. + mixed_table = pa.Table.from_arrays([arr_float, arr_int], + names=['a', 'b']) + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=['b'], + use_byte_stream_split=['a']) + + # Try to use the wrong data type with the byte_stream_split encoding. + # This should throw an exception. + table = pa.Table.from_arrays([arr_int], names=['tmp']) + with pytest.raises(IOError): + _check_roundtrip(table, expected=table, use_byte_stream_split=True, + use_dictionary=False) + + +def test_column_encoding(): + arr_float = pa.array(list(map(float, range(100)))) + arr_int = pa.array(list(map(int, range(100)))) + arr_bin = pa.array([str(x) for x in range(100)], type=pa.binary()) + arr_flba = pa.array( + [str(x).zfill(10) for x in range(100)], type=pa.binary(10)) + arr_bool = pa.array([False, True, False, False] * 25) + mixed_table = pa.Table.from_arrays( + [arr_float, arr_int, arr_bin, arr_flba, arr_bool], + names=['a', 'b', 'c', 'd', 'e']) + + # Check "BYTE_STREAM_SPLIT" for column 'a' and "PLAIN" column_encoding for + # column 'b' and 'c'. + _check_roundtrip(mixed_table, expected=mixed_table, use_dictionary=False, + column_encoding={'a': "BYTE_STREAM_SPLIT", + 'b': "PLAIN", + 'c': "PLAIN"}) + + # Check "PLAIN" for all columns. + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding="PLAIN") + + # Check "DELTA_BINARY_PACKED" for integer columns. 
+ _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding={'a': "PLAIN", + 'b': "DELTA_BINARY_PACKED", + 'c': "PLAIN"}) + + # Check "DELTA_LENGTH_BYTE_ARRAY" for byte columns. + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding={'a': "PLAIN", + 'b': "DELTA_BINARY_PACKED", + 'c': "DELTA_LENGTH_BYTE_ARRAY"}) + + # Check "DELTA_BYTE_ARRAY" for byte columns. + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding={'a': "PLAIN", + 'b': "DELTA_BINARY_PACKED", + 'c': "DELTA_BYTE_ARRAY", + 'd': "DELTA_BYTE_ARRAY"}) + + # Check "RLE" for boolean columns. + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding={'e': "RLE"}) + + # Try to pass "BYTE_STREAM_SPLIT" column encoding for integer column 'b'. + # This should throw an error as it is only supports FLOAT and DOUBLE. + with pytest.raises(IOError, + match="BYTE_STREAM_SPLIT only supports FLOAT and" + " DOUBLE"): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding={'a': "PLAIN", + 'b': "BYTE_STREAM_SPLIT", + 'c': "PLAIN"}) + + # Try to pass use "DELTA_BINARY_PACKED" encoding on float column. + # This should throw an error as only integers are supported. + with pytest.raises(OSError): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding={'a': "DELTA_BINARY_PACKED", + 'b': "PLAIN", + 'c': "PLAIN"}) + + # Try to pass "RLE_DICTIONARY". + # This should throw an error as dictionary encoding is already used by + # default and not supported to be specified as "fallback" encoding + with pytest.raises(ValueError): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding="RLE_DICTIONARY") + + # Try to pass unsupported encoding. + with pytest.raises(ValueError): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding={'a': "MADE_UP_ENCODING"}) + + # Try to pass column_encoding and use_dictionary. + # This should throw an error. + with pytest.raises(ValueError): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=['b'], + column_encoding={'b': "PLAIN"}) + + # Try to pass column_encoding and use_dictionary=True (default value). + # This should throw an error. + with pytest.raises(ValueError): + _check_roundtrip(mixed_table, expected=mixed_table, + column_encoding={'b': "PLAIN"}) + + # Try to pass column_encoding and use_byte_stream_split on same column. + # This should throw an error. + with pytest.raises(ValueError): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + use_byte_stream_split=['a'], + column_encoding={'a': "RLE", + 'b': "BYTE_STREAM_SPLIT", + 'c': "PLAIN"}) + + # Try to pass column_encoding and use_byte_stream_split=True. + # This should throw an error. + with pytest.raises(ValueError): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + use_byte_stream_split=True, + column_encoding={'a': "RLE", + 'b': "BYTE_STREAM_SPLIT", + 'c': "PLAIN"}) + + # Try to pass column_encoding=True. + # This should throw an error. + with pytest.raises(TypeError): + _check_roundtrip(mixed_table, expected=mixed_table, + use_dictionary=False, + column_encoding=True) + + +def test_compression_level(): + arr = pa.array(list(map(int, range(1000)))) + data = [arr, arr] + table = pa.Table.from_arrays(data, names=['a', 'b']) + + # Check one compression level. 
+ _check_roundtrip(table, expected=table, compression="gzip", + compression_level=1) + + # Check another one to make sure that compression_level=1 does not + # coincide with the default one in Arrow. + _check_roundtrip(table, expected=table, compression="gzip", + compression_level=5) + + # Check that the user can provide a compression per column + _check_roundtrip(table, expected=table, + compression={'a': "gzip", 'b': "snappy"}) + + # Check that the user can provide a compression level per column + _check_roundtrip(table, expected=table, compression="gzip", + compression_level={'a': 2, 'b': 3}) + + # Check if both LZ4 compressors are working + # (level < 3 -> fast, level >= 3 -> HC) + _check_roundtrip(table, expected=table, compression="lz4", + compression_level=1) + + _check_roundtrip(table, expected=table, compression="lz4", + compression_level=9) + + # Check that specifying a compression level for a codec which does allow + # specifying one, results into an error. + # Uncompressed, snappy and lzo do not support specifying a compression + # level. + # GZIP (zlib) allows for specifying a compression level but as of up + # to version 1.2.11 the valid range is [-1, 9]. + invalid_combinations = [("snappy", 4), ("gzip", -1337), + ("None", 444), ("lzo", 14)] + buf = io.BytesIO() + for (codec, level) in invalid_combinations: + with pytest.raises((ValueError, OSError)): + _write_table(table, buf, compression=codec, + compression_level=level) + + +def test_sanitized_spark_field_names(): + a0 = pa.array([0, 1, 2, 3, 4]) + name = 'prohib; ,\t{}' + table = pa.Table.from_arrays([a0], [name]) + + result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'}) + + expected_name = 'prohib______' + assert result.schema[0].name == expected_name + + +@pytest.mark.pandas +def test_multithreaded_read(): + df = alltypes_sample(size=10000) + + table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(table, buf, compression='SNAPPY', version='2.6') + + buf.seek(0) + table1 = _read_table(buf, use_threads=True) + + buf.seek(0) + table2 = _read_table(buf, use_threads=False) + + assert table1.equals(table2) + + +@pytest.mark.pandas +def test_min_chunksize(): + data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D']) + table = pa.Table.from_pandas(data.reset_index()) + + buf = io.BytesIO() + _write_table(table, buf, chunk_size=-1) + + buf.seek(0) + result = _read_table(buf) + + assert result.equals(table) + + with pytest.raises(ValueError): + _write_table(table, buf, chunk_size=0) + + +@pytest.mark.pandas +def test_write_error_deletes_incomplete_file(tempdir): + # ARROW-1285 + df = pd.DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0, dtype='float64'), + 'e': [True, False, True], + 'f': pd.Categorical(list('abc')), + 'g': pd.date_range('20130101', periods=3), + 'h': pd.date_range('20130101', periods=3, + tz='US/Eastern'), + 'i': pd.date_range('20130101', periods=3, freq='ns')}) + + pdf = pa.Table.from_pandas(df) + + filename = tempdir / 'tmp_file' + try: + # Test relies on writing nanoseconds to raise an error + # true for Parquet 2.4 + _write_table(pdf, filename, version="2.4") + except pa.ArrowException: + pass + + assert not filename.exists() + + +def test_read_non_existent_file(tempdir): + path = 'nonexistent-file.parquet' + try: + pq.read_table(path) + except Exception as e: + assert path in e.args[0] + + +def test_read_table_doesnt_warn(datadir): + with warnings.catch_warnings(): + 
warnings.simplefilter(action="error") + pq.read_table(datadir / 'v0.7.1.parquet') + + +@pytest.mark.pandas +def test_zlib_compression_bug(): + # ARROW-3514: "zlib deflate failed, output buffer too small" + table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col']) + f = io.BytesIO() + pq.write_table(table, f, compression='gzip') + + f.seek(0) + roundtrip = pq.read_table(f) + tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas()) + + +def test_parquet_file_too_small(tempdir): + path = str(tempdir / "test.parquet") + # TODO(dataset) with datasets API it raises OSError instead + with pytest.raises((pa.ArrowInvalid, OSError), + match='size is 0 bytes'): + with open(path, 'wb') as f: + pass + pq.read_table(path) + + with pytest.raises((pa.ArrowInvalid, OSError), + match='size is 4 bytes'): + with open(path, 'wb') as f: + f.write(b'ffff') + pq.read_table(path) + + +@pytest.mark.pandas +@pytest.mark.fastparquet +@pytest.mark.filterwarnings("ignore:RangeIndex:FutureWarning") +@pytest.mark.filterwarnings("ignore:tostring:DeprecationWarning:fastparquet") +def test_fastparquet_cross_compatibility(tempdir): + fp = pytest.importorskip('fastparquet') + + df = pd.DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(4.0, 7.0, dtype="float64"), + "d": [True, False, True], + "e": pd.date_range("20130101", periods=3), + "f": pd.Categorical(["a", "b", "a"]), + # fastparquet writes list as BYTE_ARRAY JSON, so no roundtrip + # "g": [[1, 2], None, [1, 2, 3]], + } + ) + table = pa.table(df) + + # Arrow -> fastparquet + file_arrow = str(tempdir / "cross_compat_arrow.parquet") + pq.write_table(table, file_arrow, compression=None) + + fp_file = fp.ParquetFile(file_arrow) + df_fp = fp_file.to_pandas() + tm.assert_frame_equal(df, df_fp) + + # Fastparquet -> arrow + file_fastparquet = str(tempdir / "cross_compat_fastparquet.parquet") + fp.write(file_fastparquet, df) + + table_fp = pq.read_pandas(file_fastparquet) + # for fastparquet written file, categoricals comes back as strings + # (no arrow schema in parquet metadata) + df['f'] = df['f'].astype(object) + tm.assert_frame_equal(table_fp.to_pandas(), df) + + +@pytest.mark.parametrize('array_factory', [ + lambda: pa.array([0, None] * 10), + lambda: pa.array([0, None] * 10).dictionary_encode(), + lambda: pa.array(["", None] * 10), + lambda: pa.array(["", None] * 10).dictionary_encode(), +]) +@pytest.mark.parametrize('read_dictionary', [False, True]) +def test_buffer_contents( + array_factory, read_dictionary +): + # Test that null values are deterministically initialized to zero + # after a roundtrip through Parquet. + # See ARROW-8006 and ARROW-8011. + orig_table = pa.Table.from_pydict({"col": array_factory()}) + bio = io.BytesIO() + pq.write_table(orig_table, bio, use_dictionary=True) + bio.seek(0) + read_dictionary = ['col'] if read_dictionary else None + table = pq.read_table(bio, use_threads=False, + read_dictionary=read_dictionary) + + for col in table.columns: + [chunk] = col.chunks + buf = chunk.buffers()[1] + assert buf.to_pybytes() == buf.size * b"\0" + + +def test_parquet_compression_roundtrip(tempdir): + # ARROW-10480: ensure even with nonstandard Parquet file naming + # conventions, writing and then reading a file works. 
In + # particular, ensure that we don't automatically double-compress + # the stream due to auto-detecting the extension in the filename + table = pa.table([pa.array(range(4))], names=["ints"]) + path = tempdir / "arrow-10480.pyarrow.gz" + pq.write_table(table, path, compression="GZIP") + result = pq.read_table(path) + assert result.equals(table) + + +def test_empty_row_groups(tempdir): + # ARROW-3020 + table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0']) + + path = tempdir / 'empty_row_groups.parquet' + + num_groups = 3 + with pq.ParquetWriter(path, table.schema) as writer: + for i in range(num_groups): + writer.write_table(table) + + reader = pq.ParquetFile(path) + assert reader.metadata.num_row_groups == num_groups + + for i in range(num_groups): + assert reader.read_row_group(i).equals(table) + + +def test_reads_over_batch(tempdir): + data = [None] * (1 << 20) + data.append([1]) + # Large list with mostly nones and one final + # value. This should force batched reads when + # reading back. + table = pa.Table.from_arrays([data], ['column']) + + path = tempdir / 'arrow-11607.parquet' + pq.write_table(table, path) + table2 = pq.read_table(path) + assert table == table2 + + +def test_permutation_of_column_order(tempdir): + # ARROW-2366 + case = tempdir / "dataset_column_order_permutation" + case.mkdir(exist_ok=True) + + data1 = pa.table([[1, 2, 3], [.1, .2, .3]], names=['a', 'b']) + pq.write_table(data1, case / "data1.parquet") + + data2 = pa.table([[.4, .5, .6], [4, 5, 6]], names=['b', 'a']) + pq.write_table(data2, case / "data2.parquet") + + table = pq.read_table(str(case)) + table2 = pa.table([[1, 2, 3, 4, 5, 6], + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]], + names=['a', 'b']) + + assert table == table2 + + +def test_thrift_size_limits(tempdir): + path = tempdir / 'largethrift.parquet' + + array = pa.array(list(range(10))) + num_cols = 1000 + table = pa.table( + [array] * num_cols, + names=[f'some_long_column_name_{i}' for i in range(num_cols)]) + pq.write_table(table, path) + + with pytest.raises( + OSError, + match="Couldn't deserialize thrift:.*Exceeded size limit"): + pq.read_table(path, thrift_string_size_limit=50 * num_cols) + with pytest.raises( + OSError, + match="Couldn't deserialize thrift:.*Exceeded size limit"): + pq.read_table(path, thrift_container_size_limit=num_cols) + + got = pq.read_table(path, thrift_string_size_limit=100 * num_cols) + assert got == table + got = pq.read_table(path, thrift_container_size_limit=2 * num_cols) + assert got == table + got = pq.read_table(path) + assert got == table + + +def test_page_checksum_verification_write_table(tempdir): + """Check that checksum verification works for datasets created with + pq.write_table()""" + + # Write some sample data into a parquet file with page checksum enabled + original_path = tempdir / 'correct.parquet' + table_orig = pa.table({'a': [1, 2, 3, 4]}) + pq.write_table(table_orig, original_path, write_page_checksum=True) + + # Read file and verify that the data is correct + table_check = pq.read_table(original_path, page_checksum_verification=True) + assert table_orig == table_check + + # Read the original file as binary and swap the 31-th and 36-th bytes. This + # should be equivalent to storing the following data: + # pa.table({'a': [1, 3, 2, 4]}) + bin_data = bytearray(original_path.read_bytes()) + + # Swap two bytes to emulate corruption. 
Also, check that the two bytes are + # different, otherwise no corruption occurs + assert bin_data[31] != bin_data[36] + bin_data[31], bin_data[36] = bin_data[36], bin_data[31] + + # Write the corrupted data to another parquet file + corrupted_path = tempdir / 'corrupted.parquet' + corrupted_path.write_bytes(bin_data) + + # Case 1: Reading the corrupted file with read_table() and without page + # checksum verification succeeds but yields corrupted data + table_corrupt = pq.read_table(corrupted_path, + page_checksum_verification=False) + # The read should complete without error, but the table has different + # content than the original file! + assert table_corrupt != table_orig + assert table_corrupt == pa.table({'a': [1, 3, 2, 4]}) + + # Case 2: Reading the corrupted file with read_table() and with page + # checksum verification enabled raises an exception + with pytest.raises(OSError, match="CRC checksum verification"): + _ = pq.read_table(corrupted_path, page_checksum_verification=True) + + # Case 3: Reading the corrupted file with ParquetFile.read() and without + # page checksum verification succeeds but yields corrupted data + corrupted_pq_file = pq.ParquetFile(corrupted_path, + page_checksum_verification=False) + table_corrupt2 = corrupted_pq_file.read() + assert table_corrupt2 != table_orig + assert table_corrupt2 == pa.table({'a': [1, 3, 2, 4]}) + + # Case 4: Reading the corrupted file with ParquetFile.read() and with page + # checksum verification enabled raises an exception + corrupted_pq_file = pq.ParquetFile(corrupted_path, + page_checksum_verification=True) + # Accessing the data should result in an error + with pytest.raises(OSError, match="CRC checksum verification"): + _ = corrupted_pq_file.read() + + +@pytest.mark.dataset +def test_checksum_write_to_dataset(tempdir): + """Check that checksum verification works for datasets created with + pq.write_to_dataset""" + + table_orig = pa.table({'a': [1, 2, 3, 4]}) + + # Write a sample dataset with page checksum enabled + original_dir_path = tempdir / 'correct_dir' + pq.write_to_dataset(table_orig, + original_dir_path, + write_page_checksum=True) + + # Read file and verify that the data is correct + original_file_path_list = list(original_dir_path.iterdir()) + assert len(original_file_path_list) == 1 + original_path = original_file_path_list[0] + table_check = pq.read_table(original_path, page_checksum_verification=True) + assert table_orig == table_check + + # Read the original file as binary and swap the 31-th and 36-th bytes. This + # should be equivalent to storing the following data: + # pa.table({'a': [1, 3, 2, 4]}) + bin_data = bytearray(original_path.read_bytes()) + + # Swap two bytes to emulate corruption. 
Also, check that the two bytes are + # different, otherwise no corruption occurs + assert bin_data[31] != bin_data[36] + bin_data[31], bin_data[36] = bin_data[36], bin_data[31] + + # Write the corrupted data to another parquet dataset + # Copy dataset dir (which should be just one file) + corrupted_dir_path = tempdir / 'corrupted_dir' + copytree(original_dir_path, corrupted_dir_path) + # Corrupt just the one file with the dataset + corrupted_file_path = corrupted_dir_path / original_path.name + corrupted_file_path.write_bytes(bin_data) + + # Case 1: Reading the corrupted file with read_table() and without page + # checksum verification succeeds but yields corrupted data + table_corrupt = pq.read_table(corrupted_file_path, + page_checksum_verification=False) + # The read should complete without error, but the table has different + # content than the original file! + assert table_corrupt != table_orig + assert table_corrupt == pa.table({'a': [1, 3, 2, 4]}) + + # Case 2: Reading the corrupted file with read_table() and with page + # checksum verification enabled raises an exception + with pytest.raises(OSError, match="CRC checksum verification"): + _ = pq.read_table(corrupted_file_path, page_checksum_verification=True) + + +@pytest.mark.dataset +def test_deprecated_use_legacy_dataset(tempdir): + # Test that specifying use_legacy_dataset in ParquetDataset, write_to_dataset + # and read_table doesn't raise an error but gives a warning. + table = pa.table({"a": [1, 2, 3]}) + path = tempdir / "deprecate_legacy" + + msg = "Passing 'use_legacy_dataset'" + with pytest.warns(FutureWarning, match=msg): + pq.write_to_dataset(table, path, use_legacy_dataset=False) + + pq.write_to_dataset(table, path) + + with pytest.warns(FutureWarning, match=msg): + pq.read_table(path, use_legacy_dataset=False) + + with pytest.warns(FutureWarning, match=msg): + pq.ParquetDataset(path, use_legacy_dataset=False) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_data_types.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_data_types.py new file mode 100644 index 0000000000000000000000000000000000000000..e6b66b00428fbfc3553cf99bf97d7bc4ac94426e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_data_types.py @@ -0,0 +1,507 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
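+
+# Most of the tests below use the same write/read roundtrip pattern, via the
+# _check_roundtrip/_write_table/_read_table helpers imported further down. A
+# minimal, illustrative sketch of that pattern (not the actual helpers), using
+# only public pyarrow APIs:
+#
+#     import io
+#     import pyarrow as pa
+#     import pyarrow.parquet as pq
+#
+#     def roundtrip(table, **write_kwargs):
+#         buf = io.BytesIO()
+#         pq.write_table(table, buf, **write_kwargs)
+#         buf.seek(0)
+#         return pq.read_table(buf)
+#
+#     table = pa.table({"a": [1, 2, 3]})
+#     assert roundtrip(table, compression="gzip").equals(table)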
+ +import decimal +import io + +import numpy as np +import pytest + +import pyarrow as pa +from pyarrow.tests import util +from pyarrow.tests.parquet.common import _check_roundtrip + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import _read_table, _write_table +except ImportError: + pq = None + + +try: + import pandas as pd + import pandas.testing as tm + + from pyarrow.tests.pandas_examples import (dataframe_with_arrays, + dataframe_with_lists) + from pyarrow.tests.parquet.common import alltypes_sample +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not parquet' +pytestmark = pytest.mark.parquet + + +# General roundtrip of data types +# ----------------------------------------------------------------------------- + + +@pytest.mark.pandas +@pytest.mark.parametrize('chunk_size', [None, 1000]) +def test_parquet_2_0_roundtrip(tempdir, chunk_size): + df = alltypes_sample(size=10000, categorical=True) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df) + assert arrow_table.schema.pandas_metadata is not None + + _write_table(arrow_table, filename, version='2.6', + chunk_size=chunk_size) + table_read = pq.read_pandas(filename) + assert table_read.schema.pandas_metadata is not None + + read_metadata = table_read.schema.metadata + assert arrow_table.schema.metadata == read_metadata + + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_parquet_1_0_roundtrip(tempdir): + size = 10000 + np.random.seed(0) + df = pd.DataFrame({ + 'uint8': np.arange(size, dtype=np.uint8), + 'uint16': np.arange(size, dtype=np.uint16), + 'uint32': np.arange(size, dtype=np.uint32), + 'uint64': np.arange(size, dtype=np.uint64), + 'int8': np.arange(size, dtype=np.int16), + 'int16': np.arange(size, dtype=np.int16), + 'int32': np.arange(size, dtype=np.int32), + 'int64': np.arange(size, dtype=np.int64), + 'float32': np.arange(size, dtype=np.float32), + 'float64': np.arange(size, dtype=np.float64), + 'bool': np.random.randn(size) > 0, + 'str': [str(x) for x in range(size)], + 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], + 'empty_str': [''] * size + }) + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df) + _write_table(arrow_table, filename, version='1.0') + table_read = _read_table(filename) + df_read = table_read.to_pandas() + + # We pass uint32_t as int64_t if we write Parquet version 1.0 + df['uint32'] = df['uint32'].values.astype(np.int64) + + tm.assert_frame_equal(df, df_read) + + +# Dictionary +# ----------------------------------------------------------------------------- + + +def _simple_table_write_read(table): + bio = pa.BufferOutputStream() + pq.write_table(table, bio) + contents = bio.getvalue() + return pq.read_table( + pa.BufferReader(contents) + ) + + +@pytest.mark.pandas +def test_direct_read_dictionary(): + # ARROW-3325 + repeats = 10 + nunique = 5 + + data = [ + [util.rands(10) for i in range(nunique)] * repeats, + + ] + table = pa.table(data, names=['f0']) + + bio = pa.BufferOutputStream() + pq.write_table(table, bio) + contents = bio.getvalue() + + result = pq.read_table(pa.BufferReader(contents), + read_dictionary=['f0']) + + # Compute dictionary-encoded subfield + expected = pa.table([table[0].dictionary_encode()], names=['f0']) + assert result.equals(expected) + + +@pytest.mark.pandas +def test_direct_read_dictionary_subfield(): + repeats = 10 + nunique 
= 5 + + data = [ + [[util.rands(10)] for i in range(nunique)] * repeats, + ] + table = pa.table(data, names=['f0']) + + bio = pa.BufferOutputStream() + pq.write_table(table, bio) + contents = bio.getvalue() + result = pq.read_table(pa.BufferReader(contents), + read_dictionary=['f0.list.element']) + + arr = pa.array(data[0]) + values_as_dict = arr.values.dictionary_encode() + + inner_indices = values_as_dict.indices.cast('int32') + new_values = pa.DictionaryArray.from_arrays(inner_indices, + values_as_dict.dictionary) + + offsets = pa.array(range(51), type='int32') + expected_arr = pa.ListArray.from_arrays(offsets, new_values) + expected = pa.table([expected_arr], names=['f0']) + + assert result.equals(expected) + assert result[0].num_chunks == 1 + + +def test_dictionary_array_automatically_read(): + # ARROW-3246 + + # Make a large dictionary, a little over 4MB of data + dict_length = 4000 + dict_values = pa.array([('x' * 1000 + '_{}'.format(i)) + for i in range(dict_length)]) + + num_chunks = 10 + chunk_size = 100 + chunks = [] + for i in range(num_chunks): + indices = np.random.randint(0, dict_length, + size=chunk_size).astype(np.int32) + chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices), + dict_values)) + + table = pa.table([pa.chunked_array(chunks)], names=['f0']) + result = _simple_table_write_read(table) + + assert result.equals(table) + + # The only key in the metadata was the Arrow schema key + assert result.schema.metadata is None + + +# Decimal +# ----------------------------------------------------------------------------- + + +@pytest.mark.pandas +def test_decimal_roundtrip(tempdir): + num_values = 10 + + columns = {} + for precision in range(1, 39): + for scale in range(0, precision + 1): + with util.random_seed(0): + random_decimal_values = [ + util.randdecimal(precision, scale) + for _ in range(num_values) + ] + column_name = ('dec_precision_{:d}_scale_{:d}' + .format(precision, scale)) + columns[column_name] = random_decimal_values + + expected = pd.DataFrame(columns) + filename = tempdir / 'decimals.parquet' + string_filename = str(filename) + table = pa.Table.from_pandas(expected) + _write_table(table, string_filename) + result_table = _read_table(string_filename) + result = result_table.to_pandas() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.pandas +@pytest.mark.xfail( + raises=OSError, reason='Parquet does not support negative scale' +) +def test_decimal_roundtrip_negative_scale(tempdir): + expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]}) + filename = tempdir / 'decimals.parquet' + string_filename = str(filename) + t = pa.Table.from_pandas(expected) + _write_table(t, string_filename) + result_table = _read_table(string_filename) + result = result_table.to_pandas() + tm.assert_frame_equal(result, expected) + + +# List types +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize('dtype', [int, float]) +def test_single_pylist_column_roundtrip(tempdir, dtype,): + filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__) + data = [pa.array(list(map(dtype, range(5))))] + table = pa.Table.from_arrays(data, names=['a']) + _write_table(table, filename) + table_read = _read_table(filename) + for i in range(table.num_columns): + col_written = table[i] + col_read = table_read[i] + assert table.field(i).name == table_read.field(i).name + assert col_read.num_chunks == 1 + data_written = col_written.chunk(0) + data_read = col_read.chunk(0) + assert 
data_written.equals(data_read) + + +def test_empty_lists_table_roundtrip(): + # ARROW-2744: Shouldn't crash when writing an array of empty lists + arr = pa.array([[], []], type=pa.list_(pa.int32())) + table = pa.Table.from_arrays([arr], ["A"]) + _check_roundtrip(table) + + +def test_nested_list_nonnullable_roundtrip_bug(): + # Reproduce failure in ARROW-5630 + typ = pa.list_(pa.field("item", pa.float32(), False)) + num_rows = 10000 + t = pa.table([ + pa.array(([[0] * ((i + 5) % 10) for i in range(0, 10)] * + (num_rows // 10)), type=typ) + ], ['a']) + _check_roundtrip( + t, data_page_size=4096) + + +def test_nested_list_struct_multiple_batches_roundtrip(tempdir): + # Reproduce failure in ARROW-11024 + data = [[{'x': 'abc', 'y': 'abc'}]]*100 + [[{'x': 'abc', 'y': 'gcb'}]]*100 + table = pa.table([pa.array(data)], names=['column']) + _check_roundtrip( + table, row_group_size=20) + + # Reproduce failure in ARROW-11069 (plain non-nested structs with strings) + data = pa.array( + [{'a': '1', 'b': '2'}, {'a': '3', 'b': '4'}, {'a': '5', 'b': '6'}]*10 + ) + table = pa.table({'column': data}) + _check_roundtrip(table, row_group_size=10) + + +def test_writing_empty_lists(): + # ARROW-2591: [Python] Segmentation fault issue in pq.write_table + arr1 = pa.array([[], []], pa.list_(pa.int32())) + table = pa.Table.from_arrays([arr1], ['list(int32)']) + _check_roundtrip(table) + + +@pytest.mark.pandas +def test_column_of_arrays(tempdir): + df, schema = dataframe_with_arrays() + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df, schema=schema) + _write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms') + table_read = _read_table(filename) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_column_of_lists(tempdir): + df, schema = dataframe_with_lists(parquet_compatible=True) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df, schema=schema) + _write_table(arrow_table, filename, version='2.6') + table_read = _read_table(filename) + df_read = table_read.to_pandas() + + tm.assert_frame_equal(df, df_read) + + +def test_large_list_records(): + # This was fixed in PARQUET-1100 + + list_lengths = np.random.randint(0, 500, size=50) + list_lengths[::10] = 0 + + list_values = [list(map(int, np.random.randint(0, 100, size=x))) + if i % 8 else None + for i, x in enumerate(list_lengths)] + + a1 = pa.array(list_values) + + table = pa.Table.from_arrays([a1], ['int_lists']) + _check_roundtrip(table) + + +@pytest.mark.pandas +def test_parquet_nested_convenience(tempdir): + # ARROW-1684 + df = pd.DataFrame({ + 'a': [[1, 2, 3], None, [4, 5], []], + 'b': [[1.], None, None, [6., 7.]], + }) + + path = str(tempdir / 'nested_convenience.parquet') + + table = pa.Table.from_pandas(df, preserve_index=False) + _write_table(table, path) + + read = pq.read_table( + path, columns=['a']) + tm.assert_frame_equal(read.to_pandas(), df[['a']]) + + read = pq.read_table( + path, columns=['a', 'b']) + tm.assert_frame_equal(read.to_pandas(), df) + + +# Binary +# ----------------------------------------------------------------------------- + + +def test_fixed_size_binary(): + t0 = pa.binary(10) + data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo'] + a0 = pa.array(data, type=t0) + + table = pa.Table.from_arrays([a0], + ['binary[10]']) + _check_roundtrip(table) + + +# Large types +# ----------------------------------------------------------------------------- + + +@pytest.mark.slow 
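+# NOTE: the test below writes a column with more than 2**31 - 1 values (roughly
+# 2 GiB of uint8 data) to exercise the int32-overflow path; like the other
+# large_memory tests it is typically deselected unless the slow/large_memory
+# markers are explicitly enabled for the run.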
+@pytest.mark.large_memory +def test_large_table_int32_overflow(): + size = np.iinfo('int32').max + 1 + + arr = np.ones(size, dtype='uint8') + + parr = pa.array(arr, type=pa.uint8()) + + table = pa.Table.from_arrays([parr], names=['one']) + f = io.BytesIO() + _write_table(table, f) + + +def _simple_table_roundtrip(table, **write_kwargs): + stream = pa.BufferOutputStream() + _write_table(table, stream, **write_kwargs) + buf = stream.getvalue() + return _read_table(buf) + + +@pytest.mark.slow +@pytest.mark.large_memory +def test_byte_array_exactly_2gb(): + # Test edge case reported in ARROW-3762 + val = b'x' * (1 << 10) + + base = pa.array([val] * ((1 << 21) - 1)) + cases = [ + [b'x' * 1023], # 2^31 - 1 + [b'x' * 1024], # 2^31 + [b'x' * 1025] # 2^31 + 1 + ] + for case in cases: + values = pa.chunked_array([base, pa.array(case)]) + t = pa.table([values], names=['f0']) + result = _simple_table_roundtrip( + t, use_dictionary=False) + assert t.equals(result) + + +@pytest.mark.slow +@pytest.mark.pandas +@pytest.mark.large_memory +def test_binary_array_overflow_to_chunked(): + # ARROW-3762 + + # 2^31 + 1 bytes + values = [b'x'] + [ + b'x' * (1 << 20) + ] * 2 * (1 << 10) + df = pd.DataFrame({'byte_col': values}) + + tbl = pa.Table.from_pandas(df, preserve_index=False) + read_tbl = _simple_table_roundtrip(tbl) + + col0_data = read_tbl[0] + assert isinstance(col0_data, pa.ChunkedArray) + + # Split up into 2GB chunks + assert col0_data.num_chunks == 2 + + assert tbl.equals(read_tbl) + + +@pytest.mark.slow +@pytest.mark.pandas +@pytest.mark.large_memory +def test_list_of_binary_large_cell(): + # ARROW-4688 + data = [] + + # TODO(wesm): handle chunked children + # 2^31 - 1 bytes in a single cell + # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)]) + + # A little under 2GB in cell each containing approximately 10MB each + data.extend([[b'x' * 1000000] * 10] * 214) + + arr = pa.array(data) + table = pa.Table.from_arrays([arr], ['chunky_cells']) + read_table = _simple_table_roundtrip(table) + assert table.equals(read_table) + + +def test_large_binary(): + data = [b'foo', b'bar'] * 50 + for type in [pa.large_binary(), pa.large_string()]: + arr = pa.array(data, type=type) + table = pa.Table.from_arrays([arr], names=['strs']) + for use_dictionary in [False, True]: + _check_roundtrip(table, use_dictionary=use_dictionary) + + +@pytest.mark.slow +@pytest.mark.large_memory +def test_large_binary_huge(): + s = b'xy' * 997 + data = [s] * ((1 << 33) // len(s)) + for type in [pa.large_binary(), pa.large_string()]: + arr = pa.array(data, type=type) + table = pa.Table.from_arrays([arr], names=['strs']) + for use_dictionary in [False, True]: + _check_roundtrip(table, use_dictionary=use_dictionary) + del arr, table + + +@pytest.mark.large_memory +def test_large_binary_overflow(): + s = b'x' * (1 << 31) + arr = pa.array([s], type=pa.large_binary()) + table = pa.Table.from_arrays([arr], names=['strs']) + for use_dictionary in [False, True]: + writer = pa.BufferOutputStream() + with pytest.raises( + pa.ArrowInvalid, + match="Parquet cannot store strings with size 2GB or more"): + _write_table(table, writer, use_dictionary=use_dictionary) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_dataset.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e351bdef9a78b7c9f85d6ae6cacd7bd6cefb19 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_dataset.py @@ -0,0 +1,1340 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import datetime +import inspect +import os +import pathlib + +import numpy as np +import pytest +import unittest.mock as mock + +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow import fs +from pyarrow.filesystem import LocalFileSystem +from pyarrow.tests import util +from pyarrow.util import guid +from pyarrow.vendored.version import Version + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import ( + _read_table, _test_dataframe, _write_table) +except ImportError: + pq = None + + +try: + import pandas as pd + import pandas.testing as tm + +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not parquet' +pytestmark = [pytest.mark.parquet, pytest.mark.dataset] + + +def test_filesystem_uri(tempdir): + table = pa.table({"a": [1, 2, 3]}) + + directory = tempdir / "data_dir" + directory.mkdir() + path = directory / "data.parquet" + pq.write_table(table, str(path)) + + # filesystem object + result = pq.read_table( + path, filesystem=fs.LocalFileSystem()) + assert result.equals(table) + + # filesystem URI + result = pq.read_table( + "data_dir/data.parquet", filesystem=util._filesystem_uri(tempdir)) + assert result.equals(table) + + +@pytest.mark.pandas +def test_read_partitioned_directory(tempdir): + fs = LocalFileSystem._get_instance() + _partition_test_for_filesystem(fs, tempdir) + + +@pytest.mark.pandas +def test_read_partitioned_columns_selection(tempdir): + # ARROW-3861 - do not include partition columns in resulting table when + # `columns` keyword was passed without those columns + fs = LocalFileSystem._get_instance() + base_path = tempdir + _partition_test_for_filesystem(fs, base_path) + + dataset = pq.ParquetDataset(base_path) + result = dataset.read(columns=["values"]) + assert result.column_names == ["values"] + + +@pytest.mark.pandas +def test_filters_equivalency(tempdir): + fs = LocalFileSystem._get_instance() + base_path = tempdir + + integer_keys = [0, 1] + string_keys = ['a', 'b', 'c'] + boolean_keys = [True, False] + partition_spec = [ + ['integer', integer_keys], + ['string', string_keys], + ['boolean', boolean_keys] + ] + + df = pd.DataFrame({ + 'integer': np.array(integer_keys, dtype='i4').repeat(15), + 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2), + 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5), + 3), + }, columns=['integer', 'string', 'boolean']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + # Old filters syntax: + # integer == 1 AND string != b AND boolean == True + dataset = pq.ParquetDataset( + base_path, 
filesystem=fs, + filters=[('integer', '=', 1), ('string', '!=', 'b'), + ('boolean', '==', 'True')], + ) + table = dataset.read() + result_df = (table.to_pandas().reset_index(drop=True)) + + assert 0 not in result_df['integer'].values + assert 'b' not in result_df['string'].values + assert False not in result_df['boolean'].values + + # filters in disjunctive normal form: + # (integer == 1 AND string != b AND boolean == True) OR + # (integer == 2 AND boolean == False) + # TODO(ARROW-3388): boolean columns are reconstructed as string + filters = [ + [ + ('integer', '=', 1), + ('string', '!=', 'b'), + ('boolean', '==', 'True') + ], + [('integer', '=', 0), ('boolean', '==', 'False')] + ] + dataset = pq.ParquetDataset( + base_path, filesystem=fs, filters=filters) + table = dataset.read() + result_df = table.to_pandas().reset_index(drop=True) + + # Check that all rows in the DF fulfill the filter + df_filter_1 = (result_df['integer'] == 1) \ + & (result_df['string'] != 'b') \ + & (result_df['boolean'] == 'True') + df_filter_2 = (np.array(result_df['integer']) == 0) \ + & (result_df['boolean'] == 'False') + assert df_filter_1.sum() > 0 + assert df_filter_2.sum() > 0 + assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum()) + + for filters in [[[('string', '==', b'1\0a')]], + [[('string', '==', '1\0a')]]]: + dataset = pq.ParquetDataset( + base_path, filesystem=fs, filters=filters) + assert dataset.read().num_rows == 0 + + +@pytest.mark.pandas +def test_filters_cutoff_exclusive_integer(tempdir): + fs = LocalFileSystem._get_instance() + base_path = tempdir + + integer_keys = [0, 1, 2, 3, 4] + partition_spec = [ + ['integers', integer_keys], + ] + N = 5 + + df = pd.DataFrame({ + 'index': np.arange(N), + 'integers': np.array(integer_keys, dtype='i4'), + }, columns=['index', 'integers']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + dataset = pq.ParquetDataset( + base_path, filesystem=fs, + filters=[ + ('integers', '<', 4), + ('integers', '>', 1), + ], + ) + table = dataset.read() + result_df = (table.to_pandas() + .sort_values(by='index') + .reset_index(drop=True)) + + result_list = [x for x in map(int, result_df['integers'].values)] + assert result_list == [2, 3] + + +@pytest.mark.xfail( + # different error with use_legacy_datasets because result_df is no longer + # categorical + raises=(TypeError, AssertionError), + reason='Loss of type information in creation of categoricals.' 
+) +@pytest.mark.pandas +def test_filters_cutoff_exclusive_datetime(tempdir): + fs = LocalFileSystem._get_instance() + base_path = tempdir + + date_keys = [ + datetime.date(2018, 4, 9), + datetime.date(2018, 4, 10), + datetime.date(2018, 4, 11), + datetime.date(2018, 4, 12), + datetime.date(2018, 4, 13) + ] + partition_spec = [ + ['dates', date_keys] + ] + N = 5 + + df = pd.DataFrame({ + 'index': np.arange(N), + 'dates': np.array(date_keys, dtype='datetime64'), + }, columns=['index', 'dates']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + dataset = pq.ParquetDataset( + base_path, filesystem=fs, + filters=[ + ('dates', '<', "2018-04-12"), + ('dates', '>', "2018-04-10") + ], + ) + table = dataset.read() + result_df = (table.to_pandas() + .sort_values(by='index') + .reset_index(drop=True)) + + expected = pd.Categorical( + np.array([datetime.date(2018, 4, 11)], dtype='datetime64'), + categories=np.array(date_keys, dtype='datetime64')) + + assert result_df['dates'].values == expected + + +@pytest.mark.pandas +def test_filters_inclusive_datetime(tempdir): + # ARROW-11480 + path = tempdir / 'timestamps.parquet' + + pd.DataFrame({ + "dates": pd.date_range("2020-01-01", periods=10, freq="D"), + "id": range(10) + }).to_parquet(path, use_deprecated_int96_timestamps=True) + + table = pq.read_table(path, filters=[ + ("dates", "<=", datetime.datetime(2020, 1, 5)) + ]) + + assert table.column('id').to_pylist() == [0, 1, 2, 3, 4] + + +@pytest.mark.pandas +def test_filters_inclusive_integer(tempdir): + fs = LocalFileSystem._get_instance() + base_path = tempdir + + integer_keys = [0, 1, 2, 3, 4] + partition_spec = [ + ['integers', integer_keys], + ] + N = 5 + + df = pd.DataFrame({ + 'index': np.arange(N), + 'integers': np.array(integer_keys, dtype='i4'), + }, columns=['index', 'integers']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + dataset = pq.ParquetDataset( + base_path, filesystem=fs, + filters=[ + ('integers', '<=', 3), + ('integers', '>=', 2), + ], + ) + table = dataset.read() + result_df = (table.to_pandas() + .sort_values(by='index') + .reset_index(drop=True)) + + result_list = [int(x) for x in map(int, result_df['integers'].values)] + assert result_list == [2, 3] + + +@pytest.mark.pandas +def test_filters_inclusive_set(tempdir): + fs = LocalFileSystem._get_instance() + base_path = tempdir + + integer_keys = [0, 1] + string_keys = ['a', 'b', 'c'] + boolean_keys = [True, False] + partition_spec = [ + ['integer', integer_keys], + ['string', string_keys], + ['boolean', boolean_keys] + ] + + df = pd.DataFrame({ + 'integer': np.array(integer_keys, dtype='i4').repeat(15), + 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2), + 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5), + 3), + }, columns=['integer', 'string', 'boolean']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + dataset = pq.ParquetDataset( + base_path, filesystem=fs, + filters=[('string', 'in', 'ab')], + ) + table = dataset.read() + result_df = (table.to_pandas().reset_index(drop=True)) + + assert 'a' in result_df['string'].values + assert 'b' in result_df['string'].values + assert 'c' not in result_df['string'].values + + dataset = pq.ParquetDataset( + base_path, filesystem=fs, + filters=[('integer', 'in', [1]), ('string', 'in', ('a', 'b')), + ('boolean', 'not in', {'False'})], + ) + table = dataset.read() + result_df = (table.to_pandas().reset_index(drop=True)) + + assert 0 not in result_df['integer'].values + assert 
'c' not in result_df['string'].values + assert False not in result_df['boolean'].values + + +@pytest.mark.pandas +def test_filters_invalid_pred_op(tempdir): + fs = LocalFileSystem._get_instance() + base_path = tempdir + + integer_keys = [0, 1, 2, 3, 4] + partition_spec = [ + ['integers', integer_keys], + ] + N = 5 + + df = pd.DataFrame({ + 'index': np.arange(N), + 'integers': np.array(integer_keys, dtype='i4'), + }, columns=['index', 'integers']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + with pytest.raises(TypeError): + pq.ParquetDataset(base_path, + filesystem=fs, + filters=[('integers', 'in', 3), ]) + + with pytest.raises(ValueError): + pq.ParquetDataset(base_path, + filesystem=fs, + filters=[('integers', '=<', 3), ]) + + # Dataset API returns empty table + dataset = pq.ParquetDataset(base_path, + filesystem=fs, + filters=[('integers', 'in', set()), ]) + assert dataset.read().num_rows == 0 + + dataset = pq.ParquetDataset(base_path, + filesystem=fs, + filters=[('integers', '!=', {3})]) + with pytest.raises(NotImplementedError): + assert dataset.read().num_rows == 0 + + +@pytest.mark.pandas +def test_filters_invalid_column(tempdir): + # ARROW-5572 - raise error on invalid name in filter specification + # works with new dataset + fs = LocalFileSystem._get_instance() + base_path = tempdir + + integer_keys = [0, 1, 2, 3, 4] + partition_spec = [['integers', integer_keys]] + N = 5 + + df = pd.DataFrame({ + 'index': np.arange(N), + 'integers': np.array(integer_keys, dtype='i4'), + }, columns=['index', 'integers']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + msg = r"No match for FieldRef.Name\(non_existent_column\)" + with pytest.raises(ValueError, match=msg): + pq.ParquetDataset(base_path, filesystem=fs, + filters=[('non_existent_column', '<', 3), ]).read() + + +@pytest.mark.pandas +@pytest.mark.parametrize("filters", + ([('integers', '<', 3)], + [[('integers', '<', 3)]], + pc.field('integers') < 3, + pc.field('nested', 'a') < 3, + pc.field('nested', 'b').cast(pa.int64()) < 3)) +@pytest.mark.parametrize("read_method", ("read_table", "read_pandas")) +def test_filters_read_table(tempdir, filters, read_method): + read = getattr(pq, read_method) + # test that filters keyword is passed through in read_table + fs = LocalFileSystem._get_instance() + base_path = tempdir + + integer_keys = [0, 1, 2, 3, 4] + partition_spec = [ + ['integers', integer_keys], + ] + N = len(integer_keys) + + df = pd.DataFrame({ + 'index': np.arange(N), + 'integers': np.array(integer_keys, dtype='i4'), + 'nested': np.array([{'a': i, 'b': str(i)} for i in range(N)]) + }) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + kwargs = dict(filesystem=fs, filters=filters) + + table = read(base_path, **kwargs) + assert table.num_rows == 3 + + +@pytest.mark.pandas +def test_partition_keys_with_underscores(tempdir): + # ARROW-5666 - partition field values with underscores preserve underscores + fs = LocalFileSystem._get_instance() + base_path = tempdir + + string_keys = ["2019_2", "2019_3"] + partition_spec = [ + ['year_week', string_keys], + ] + N = 2 + + df = pd.DataFrame({ + 'index': np.arange(N), + 'year_week': np.array(string_keys, dtype='object'), + }, columns=['index', 'year_week']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + dataset = pq.ParquetDataset(base_path) + result = dataset.read() + assert result.column("year_week").to_pylist() == string_keys + + +@pytest.mark.s3 +def test_read_s3fs(s3_example_s3fs, ): + 
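+    # s3_example_s3fs is assumed to be a pytest fixture yielding an s3fs-style
+    # filesystem object together with a writable bucket path, unpacked below.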
fs, path = s3_example_s3fs + path = path + "/test.parquet" + table = pa.table({"a": [1, 2, 3]}) + _write_table(table, path, filesystem=fs) + + result = _read_table(path, filesystem=fs) + assert result.equals(table) + + +@pytest.mark.s3 +def test_read_directory_s3fs(s3_example_s3fs): + fs, directory = s3_example_s3fs + path = directory + "/test.parquet" + table = pa.table({"a": [1, 2, 3]}) + _write_table(table, path, filesystem=fs) + + result = _read_table(directory, filesystem=fs) + assert result.equals(table) + + +@pytest.mark.pandas +def test_read_single_file_list(tempdir): + data_path = str(tempdir / 'data.parquet') + + table = pa.table({"a": [1, 2, 3]}) + _write_table(table, data_path) + + result = pq.ParquetDataset([data_path]).read() + assert result.equals(table) + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_read_partitioned_directory_s3fs_wrapper(s3_example_s3fs): + import s3fs + + from pyarrow.filesystem import S3FSWrapper + + if Version(s3fs.__version__) >= Version("0.5"): + pytest.skip("S3FSWrapper no longer working for s3fs 0.5+") + + fs, path = s3_example_s3fs + with pytest.warns(FutureWarning): + wrapper = S3FSWrapper(fs) + _partition_test_for_filesystem(wrapper, path) + + # Check that we can auto-wrap + dataset = pq.ParquetDataset(path, filesystem=fs) + dataset.read() + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_read_partitioned_directory_s3fs(s3_example_s3fs): + fs, path = s3_example_s3fs + _partition_test_for_filesystem(fs, path) + + +def _partition_test_for_filesystem(fs, base_path): + foo_keys = [0, 1] + bar_keys = ['a', 'b', 'c'] + partition_spec = [ + ['foo', foo_keys], + ['bar', bar_keys] + ] + N = 30 + + df = pd.DataFrame({ + 'index': np.arange(N), + 'foo': np.array(foo_keys, dtype='i4').repeat(15), + 'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2), + 'values': np.random.randn(N) + }, columns=['index', 'foo', 'bar', 'values']) + + _generate_partition_directories(fs, base_path, partition_spec, df) + + dataset = pq.ParquetDataset(base_path, filesystem=fs) + table = dataset.read() + result_df = (table.to_pandas() + .sort_values(by='index') + .reset_index(drop=True)) + + expected_df = (df.sort_values(by='index') + .reset_index(drop=True) + .reindex(columns=result_df.columns)) + + # With pandas 2.0.0 Index can store all numeric dtypes (not just + # int64/uint64/float64). Using astype() to create a categorical + # column preserves original dtype (int32) + expected_df['foo'] = expected_df['foo'].astype("category") + expected_df['bar'] = expected_df['bar'].astype("category") + + assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all() + + tm.assert_frame_equal(result_df, expected_df) + + +def _generate_partition_directories(fs, base_dir, partition_spec, df): + # partition_spec : list of lists, e.g. 
[['foo', [0, 1, 2], + # ['bar', ['a', 'b', 'c']] + # part_table : a pyarrow.Table to write to each partition + DEPTH = len(partition_spec) + + pathsep = getattr(fs, "pathsep", getattr(fs, "sep", "/")) + + def _visit_level(base_dir, level, part_keys): + name, values = partition_spec[level] + for value in values: + this_part_keys = part_keys + [(name, value)] + + level_dir = pathsep.join([ + str(base_dir), + '{}={}'.format(name, value) + ]) + fs.mkdir(level_dir) + + if level == DEPTH - 1: + # Generate example data + file_path = pathsep.join([level_dir, guid()]) + filtered_df = _filter_partition(df, this_part_keys) + part_table = pa.Table.from_pandas(filtered_df) + with fs.open(file_path, 'wb') as f: + _write_table(part_table, f) + assert fs.exists(file_path) + + file_success = pathsep.join([level_dir, '_SUCCESS']) + with fs.open(file_success, 'wb') as f: + pass + else: + _visit_level(level_dir, level + 1, this_part_keys) + file_success = pathsep.join([level_dir, '_SUCCESS']) + with fs.open(file_success, 'wb') as f: + pass + + _visit_level(base_dir, 0, []) + + +def _filter_partition(df, part_keys): + predicate = np.ones(len(df), dtype=bool) + + to_drop = [] + for name, value in part_keys: + to_drop.append(name) + + # to avoid pandas warning + if isinstance(value, (datetime.date, datetime.datetime)): + value = pd.Timestamp(value) + + predicate &= df[name] == value + + return df[predicate].drop(to_drop, axis=1) + + +@pytest.mark.pandas +def test_filter_before_validate_schema(tempdir): + # ARROW-4076 apply filter before schema validation + # to avoid checking unneeded schemas + + # create partitioned dataset with mismatching schemas which would + # otherwise raise if first validation all schemas + dir1 = tempdir / 'A=0' + dir1.mkdir() + table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]})) + pq.write_table(table1, dir1 / 'data.parquet') + + dir2 = tempdir / 'A=1' + dir2.mkdir() + table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']})) + pq.write_table(table2, dir2 / 'data.parquet') + + # read single file using filter + table = pq.read_table(tempdir, filters=[[('A', '==', 0)]]) + assert table.column('B').equals(pa.chunked_array([[1, 2, 3]])) + + +@pytest.mark.pandas +def test_read_multiple_files(tempdir): + nfiles = 10 + size = 5 + + dirpath = tempdir / guid() + dirpath.mkdir() + + test_data = [] + paths = [] + for i in range(nfiles): + df = _test_dataframe(size, seed=i) + + # Hack so that we don't have a dtype cast in v1 files + df['uint32'] = df['uint32'].astype(np.int64) + + path = dirpath / '{}.parquet'.format(i) + + table = pa.Table.from_pandas(df) + _write_table(table, path) + + test_data.append(table) + paths.append(path) + + # Write a _SUCCESS.crc file + (dirpath / '_SUCCESS.crc').touch() + + def read_multiple_files(paths, columns=None, use_threads=True, **kwargs): + dataset = pq.ParquetDataset(paths, **kwargs) + return dataset.read(columns=columns, use_threads=use_threads) + + result = read_multiple_files(paths) + expected = pa.concat_tables(test_data) + + assert result.equals(expected) + + # Read column subset + to_read = [0, 2, 6, result.num_columns - 1] + + col_names = [result.field(i).name for i in to_read] + out = pq.read_table(dirpath, columns=col_names) + expected = pa.Table.from_arrays([result.column(i) for i in to_read], + names=col_names, + metadata=result.schema.metadata) + assert out.equals(expected) + + # Read with multiple threads + pq.read_table(dirpath, use_threads=True) + + # Test failure modes with non-uniform metadata + bad_apple = 
_test_dataframe(size, seed=i).iloc[:, :4] + bad_apple_path = tempdir / '{}.parquet'.format(guid()) + + t = pa.Table.from_pandas(bad_apple) + _write_table(t, bad_apple_path) + + # TODO(dataset) Dataset API skips bad files + + # bad_meta = pq.read_metadata(bad_apple_path) + + # with pytest.raises(ValueError): + # read_multiple_files(paths + [bad_apple_path]) + + # with pytest.raises(ValueError): + # read_multiple_files(paths, metadata=bad_meta) + + # mixed_paths = [bad_apple_path, paths[0]] + + # with pytest.raises(ValueError): + # read_multiple_files(mixed_paths) + + +@pytest.mark.pandas +def test_dataset_read_pandas(tempdir): + nfiles = 5 + size = 5 + + dirpath = tempdir / guid() + dirpath.mkdir() + + test_data = [] + frames = [] + paths = [] + for i in range(nfiles): + df = _test_dataframe(size, seed=i) + df.index = np.arange(i * size, (i + 1) * size) + df.index.name = 'index' + + path = dirpath / '{}.parquet'.format(i) + + table = pa.Table.from_pandas(df) + _write_table(table, path) + test_data.append(table) + frames.append(df) + paths.append(path) + + dataset = pq.ParquetDataset(dirpath) + columns = ['uint8', 'strings'] + result = dataset.read_pandas(columns=columns).to_pandas() + expected = pd.concat([x[columns] for x in frames]) + + tm.assert_frame_equal(result, expected) + + # also be able to pass the columns as a set (ARROW-12314) + result = dataset.read_pandas(columns=set(columns)).to_pandas() + assert result.shape == expected.shape + # column order can be different because of using a set + tm.assert_frame_equal(result.reindex(columns=expected.columns), expected) + + +@pytest.mark.pandas +def test_dataset_memory_map(tempdir): + # ARROW-2627: Check that we can use ParquetDataset with memory-mapping + dirpath = tempdir / guid() + dirpath.mkdir() + + df = _test_dataframe(10, seed=0) + path = dirpath / '{}.parquet'.format(0) + table = pa.Table.from_pandas(df) + _write_table(table, path, version='2.6') + + dataset = pq.ParquetDataset( + dirpath, memory_map=True) + assert dataset.read().equals(table) + + +@pytest.mark.pandas +def test_dataset_enable_buffered_stream(tempdir): + dirpath = tempdir / guid() + dirpath.mkdir() + + df = _test_dataframe(10, seed=0) + path = dirpath / '{}.parquet'.format(0) + table = pa.Table.from_pandas(df) + _write_table(table, path, version='2.6') + + with pytest.raises(ValueError): + pq.ParquetDataset( + dirpath, buffer_size=-64) + + for buffer_size in [128, 1024]: + dataset = pq.ParquetDataset( + dirpath, buffer_size=buffer_size) + assert dataset.read().equals(table) + + +@pytest.mark.pandas +def test_dataset_enable_pre_buffer(tempdir): + dirpath = tempdir / guid() + dirpath.mkdir() + + df = _test_dataframe(10, seed=0) + path = dirpath / '{}.parquet'.format(0) + table = pa.Table.from_pandas(df) + _write_table(table, path, version='2.6') + + for pre_buffer in (True, False): + dataset = pq.ParquetDataset( + dirpath, pre_buffer=pre_buffer) + assert dataset.read().equals(table) + actual = pq.read_table(dirpath, pre_buffer=pre_buffer) + assert actual.equals(table) + + +def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5): + test_data = [] + paths = [] + for i in range(nfiles): + df = _test_dataframe(file_nrows, seed=i) + path = base_path / '{}.parquet'.format(i) + + test_data.append(_write_table(df, path)) + paths.append(path) + return paths + + +def _assert_dataset_paths(dataset, paths): + paths = [str(path.as_posix()) for path in paths] + assert set(paths) == set(dataset.files) + + +@pytest.mark.pandas +@pytest.mark.parametrize('dir_prefix', 
['_', '.']) +def test_ignore_private_directories(tempdir, dir_prefix): + dirpath = tempdir / guid() + dirpath.mkdir() + + paths = _make_example_multifile_dataset(dirpath, nfiles=10, + file_nrows=5) + + # private directory + (dirpath / '{}staging'.format(dir_prefix)).mkdir() + + dataset = pq.ParquetDataset(dirpath) + + _assert_dataset_paths(dataset, paths) + + +@pytest.mark.pandas +def test_ignore_hidden_files_dot(tempdir): + dirpath = tempdir / guid() + dirpath.mkdir() + + paths = _make_example_multifile_dataset(dirpath, nfiles=10, + file_nrows=5) + + with (dirpath / '.DS_Store').open('wb') as f: + f.write(b'gibberish') + + with (dirpath / '.private').open('wb') as f: + f.write(b'gibberish') + + dataset = pq.ParquetDataset(dirpath) + + _assert_dataset_paths(dataset, paths) + + +@pytest.mark.pandas +def test_ignore_hidden_files_underscore(tempdir): + dirpath = tempdir / guid() + dirpath.mkdir() + + paths = _make_example_multifile_dataset(dirpath, nfiles=10, + file_nrows=5) + + with (dirpath / '_committed_123').open('wb') as f: + f.write(b'abcd') + + with (dirpath / '_started_321').open('wb') as f: + f.write(b'abcd') + + dataset = pq.ParquetDataset(dirpath) + + _assert_dataset_paths(dataset, paths) + + +@pytest.mark.pandas +@pytest.mark.parametrize('dir_prefix', ['_', '.']) +def test_ignore_no_private_directories_in_base_path(tempdir, dir_prefix): + # ARROW-8427 - don't ignore explicitly listed files if parent directory + # is a private directory + dirpath = tempdir / "{0}data".format(dir_prefix) / guid() + dirpath.mkdir(parents=True) + + paths = _make_example_multifile_dataset(dirpath, nfiles=10, + file_nrows=5) + + dataset = pq.ParquetDataset(paths) + _assert_dataset_paths(dataset, paths) + + # ARROW-9644 - don't ignore full directory with underscore in base path + dataset = pq.ParquetDataset(dirpath) + _assert_dataset_paths(dataset, paths) + + +def test_ignore_custom_prefixes(tempdir): + # ARROW-9573 - allow override of default ignore_prefixes + part = ["xxx"] * 3 + ["yyy"] * 3 + table = pa.table([ + pa.array(range(len(part))), + pa.array(part).dictionary_encode(), + ], names=['index', '_part']) + + pq.write_to_dataset(table, str(tempdir), partition_cols=['_part']) + + private_duplicate = tempdir / '_private_duplicate' + private_duplicate.mkdir() + pq.write_to_dataset(table, str(private_duplicate), + partition_cols=['_part']) + + read = pq.read_table( + tempdir, ignore_prefixes=['_private']) + + assert read.equals(table) + + +def test_empty_directory(tempdir): + # ARROW-5310 + empty_dir = tempdir / 'dataset' + empty_dir.mkdir() + + dataset = pq.ParquetDataset(empty_dir) + result = dataset.read() + assert result.num_rows == 0 + assert result.num_columns == 0 + + +def _test_write_to_dataset_with_partitions(base_path, + filesystem=None, + schema=None, + index_name=None): + import pandas as pd + import pandas.testing as tm + + import pyarrow.parquet as pq + + # ARROW-1400 + output_df = pd.DataFrame({ + 'group1': list('aaabbbbccc'), + 'group2': list('eefeffgeee'), + 'num': list(range(10)), + 'nan': [np.nan] * 10, + 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]').astype( + 'datetime64[ns]') + }) + cols = output_df.columns.tolist() + partition_by = ['group1', 'group2'] + output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False, + preserve_index=False) + pq.write_to_dataset(output_table, base_path, partition_by, + filesystem=filesystem) + + metadata_path = os.path.join(str(base_path), '_common_metadata') + + if filesystem is not None: + with 
filesystem.open(metadata_path, 'wb') as f: + pq.write_metadata(output_table.schema, f) + else: + pq.write_metadata(output_table.schema, metadata_path) + + dataset = pq.ParquetDataset(base_path, + filesystem=filesystem) + # ARROW-2209: Ensure the dataset schema also includes the partition columns + # NB schema property is an arrow and not parquet schema + dataset_cols = set(dataset.schema.names) + + assert dataset_cols == set(output_table.schema.names) + + input_table = dataset.read() + input_df = input_table.to_pandas() + + # Read data back in and compare with original DataFrame + # Partitioned columns added to the end of the DataFrame when read + input_df_cols = input_df.columns.tolist() + assert partition_by == input_df_cols[-1 * len(partition_by):] + + input_df = input_df[cols] + # Partitioned columns become 'categorical' dtypes + for col in partition_by: + output_df[col] = output_df[col].astype('category') + + if schema: + expected_date_type = schema.field('date').type.to_pandas_dtype() + output_df["date"] = output_df["date"].astype(expected_date_type) + + tm.assert_frame_equal(output_df, input_df) + + +def _test_write_to_dataset_no_partitions(base_path, + filesystem=None): + import pandas as pd + + import pyarrow.parquet as pq + + # ARROW-1400 + output_df = pd.DataFrame({ + 'group1': list('aaabbbbccc'), + 'group2': list('eefeffgeee'), + 'num': list(range(10)), + 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]').astype( + 'datetime64[ns]') + }) + cols = output_df.columns.tolist() + output_table = pa.Table.from_pandas(output_df) + + if filesystem is None: + filesystem = LocalFileSystem._get_instance() + + # Without partitions, append files to root_path + n = 5 + for i in range(n): + pq.write_to_dataset(output_table, base_path, + filesystem=filesystem) + output_files = [file for file in filesystem.ls(str(base_path)) + if file.endswith(".parquet")] + assert len(output_files) == n + + # Deduplicated incoming DataFrame should match + # original outgoing Dataframe + input_table = pq.ParquetDataset( + base_path, filesystem=filesystem + ).read() + input_df = input_table.to_pandas() + input_df = input_df.drop_duplicates() + input_df = input_df[cols] + tm.assert_frame_equal(output_df, input_df) + + +@pytest.mark.pandas +def test_write_to_dataset_with_partitions(tempdir): + _test_write_to_dataset_with_partitions(str(tempdir)) + + +@pytest.mark.pandas +def test_write_to_dataset_with_partitions_and_schema(tempdir): + schema = pa.schema([pa.field('group1', type=pa.string()), + pa.field('group2', type=pa.string()), + pa.field('num', type=pa.int64()), + pa.field('nan', type=pa.int32()), + pa.field('date', type=pa.timestamp(unit='us'))]) + _test_write_to_dataset_with_partitions( + str(tempdir), schema=schema) + + +@pytest.mark.pandas +def test_write_to_dataset_with_partitions_and_index_name(tempdir): + _test_write_to_dataset_with_partitions( + str(tempdir), index_name='index_name') + + +@pytest.mark.pandas +def test_write_to_dataset_no_partitions(tempdir): + _test_write_to_dataset_no_partitions(str(tempdir)) + + +@pytest.mark.pandas +def test_write_to_dataset_pathlib(tempdir): + _test_write_to_dataset_with_partitions(tempdir / "test1") + _test_write_to_dataset_no_partitions(tempdir / "test2") + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_write_to_dataset_pathlib_nonlocal(tempdir, s3_example_s3fs): + # pathlib paths are only accepted for local files + fs, _ = s3_example_s3fs + + with pytest.raises(TypeError, match="path-like objects are only allowed"): + 
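+        # Combining a pathlib.Path with a remote (S3) filesystem must raise:
+        # path-like objects are only accepted for local files, so a plain
+        # string path would be needed here instead.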
_test_write_to_dataset_with_partitions( + tempdir / "test1", filesystem=fs) + + with pytest.raises(TypeError, match="path-like objects are only allowed"): + _test_write_to_dataset_no_partitions( + tempdir / "test2", filesystem=fs) + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_write_to_dataset_with_partitions_s3fs(s3_example_s3fs): + fs, path = s3_example_s3fs + + _test_write_to_dataset_with_partitions( + path, filesystem=fs) + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_write_to_dataset_no_partitions_s3fs(s3_example_s3fs): + fs, path = s3_example_s3fs + + _test_write_to_dataset_no_partitions( + path, filesystem=fs) + + +@pytest.mark.pandas +def test_write_to_dataset_filesystem(tempdir): + df = pd.DataFrame({'A': [1, 2, 3]}) + table = pa.Table.from_pandas(df) + path = str(tempdir) + + pq.write_to_dataset(table, path, filesystem=fs.LocalFileSystem()) + result = pq.read_table(path) + assert result.equals(table) + + +def _make_dataset_for_pickling(tempdir, N=100): + path = tempdir / 'data.parquet' + fs = LocalFileSystem._get_instance() + + df = pd.DataFrame({ + 'index': np.arange(N), + 'values': np.random.randn(N) + }, columns=['index', 'values']) + table = pa.Table.from_pandas(df) + + num_groups = 3 + with pq.ParquetWriter(path, table.schema) as writer: + for i in range(num_groups): + writer.write_table(table) + + reader = pq.ParquetFile(path) + assert reader.metadata.num_row_groups == num_groups + + metadata_path = tempdir / '_metadata' + with fs.open(metadata_path, 'wb') as f: + pq.write_metadata(table.schema, f) + + dataset = pq.ParquetDataset( + tempdir, filesystem=fs) + + return dataset + + +@pytest.mark.pandas +def test_pickle_dataset(tempdir, pickle_module): + def is_pickleable(obj): + return obj == pickle_module.loads(pickle_module.dumps(obj)) + + dataset = _make_dataset_for_pickling(tempdir) + assert is_pickleable(dataset) + + +@pytest.mark.pandas +def test_partitioned_dataset(tempdir): + # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset + # to a Parquet file + path = tempdir / "ARROW-3208" + df = pd.DataFrame({ + 'one': [-1, 10, 2.5, 100, 1000, 1, 29.2], + 'two': [-1, 10, 2, 100, 1000, 1, 11], + 'three': [0, 0, 0, 0, 0, 0, 0] + }) + table = pa.Table.from_pandas(df) + pq.write_to_dataset(table, root_path=str(path), + partition_cols=['one', 'two']) + table = pq.ParquetDataset(path).read() + pq.write_table(table, path / "output.parquet") + + +def test_dataset_read_dictionary(tempdir): + path = tempdir / "ARROW-3325-dataset" + t1 = pa.table([[util.rands(10) for i in range(5)] * 10], names=['f0']) + t2 = pa.table([[util.rands(10) for i in range(5)] * 10], names=['f0']) + pq.write_to_dataset(t1, root_path=str(path)) + pq.write_to_dataset(t2, root_path=str(path)) + + result = pq.ParquetDataset( + path, read_dictionary=['f0']).read() + + # The order of the chunks is non-deterministic + ex_chunks = [t1[0].chunk(0).dictionary_encode(), + t2[0].chunk(0).dictionary_encode()] + + assert result[0].num_chunks == 2 + c0, c1 = result[0].chunk(0), result[0].chunk(1) + if c0.equals(ex_chunks[0]): + assert c1.equals(ex_chunks[1]) + else: + assert c0.equals(ex_chunks[1]) + assert c1.equals(ex_chunks[0]) + + +def test_read_table_schema(tempdir): + # test that schema keyword is passed through in read_table + table = pa.table({'a': pa.array([1, 2, 3], pa.int32())}) + pq.write_table(table, tempdir / "data1.parquet") + pq.write_table(table, tempdir / "data2.parquet") + + schema = pa.schema([('a', 'int64')]) + + # reading single file (which is special cased in the code) + 
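+    # The file was written with an int32 'a' column, so passing
+    # schema=pa.schema([('a', 'int64')]) asks the reader to cast it to int64.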
result = pq.read_table(tempdir / "data1.parquet", schema=schema) + expected = pa.table({'a': [1, 2, 3]}, schema=schema) + assert result.equals(expected) + + # reading multiple fields + result = pq.read_table(tempdir, schema=schema) + expected = pa.table({'a': [1, 2, 3, 1, 2, 3]}, schema=schema) + assert result.equals(expected) + + result = pq.ParquetDataset(tempdir, schema=schema) + expected = pa.table({'a': [1, 2, 3, 1, 2, 3]}, schema=schema) + assert result.read().equals(expected) + + +def test_read_table_duplicate_column_selection(tempdir): + # test that duplicate column selection gives duplicate columns + table = pa.table({'a': pa.array([1, 2, 3], pa.int32()), + 'b': pa.array([1, 2, 3], pa.uint8())}) + pq.write_table(table, tempdir / "data.parquet") + + result = pq.read_table(tempdir / "data.parquet", columns=['a', 'a']) + expected_schema = pa.schema([('a', 'int32'), ('a', 'int32')]) + + assert result.column_names == ['a', 'a'] + assert result.schema == expected_schema + + +def test_dataset_partitioning(tempdir): + import pyarrow.dataset as ds + + # create small dataset with directory partitioning + root_path = tempdir / "test_partitioning" + (root_path / "2012" / "10" / "01").mkdir(parents=True) + + table = pa.table({'a': [1, 2, 3]}) + pq.write_table( + table, str(root_path / "2012" / "10" / "01" / "data.parquet")) + + # This works with new dataset API + + # read_table + part = ds.partitioning(field_names=["year", "month", "day"]) + result = pq.read_table( + str(root_path), partitioning=part) + assert result.column_names == ["a", "year", "month", "day"] + + result = pq.ParquetDataset( + str(root_path), partitioning=part).read() + assert result.column_names == ["a", "year", "month", "day"] + + +def test_parquet_dataset_new_filesystem(tempdir): + # Ensure we can pass new FileSystem object to ParquetDataset + table = pa.table({'a': [1, 2, 3]}) + pq.write_table(table, tempdir / 'data.parquet') + filesystem = fs.SubTreeFileSystem(str(tempdir), fs.LocalFileSystem()) + dataset = pq.ParquetDataset('.', filesystem=filesystem) + result = dataset.read() + assert result.equals(table) + + +def test_parquet_dataset_partitions_piece_path_with_fsspec(tempdir): + # ARROW-10462 ensure that on Windows we properly use posix-style paths + # as used by fsspec + fsspec = pytest.importorskip("fsspec") + filesystem = fsspec.filesystem('file') + table = pa.table({'a': [1, 2, 3]}) + pq.write_table(table, tempdir / 'data.parquet') + + # pass a posix-style path (using "/" also on Windows) + path = str(tempdir).replace("\\", "/") + dataset = pq.ParquetDataset( + path, filesystem=filesystem) + # ensure the piece path is also posix-style + expected = path + "/data.parquet" + assert dataset.fragments[0].path == expected + + +def test_parquet_write_to_dataset_exposed_keywords(tempdir): + table = pa.table({'a': [1, 2, 3]}) + path = tempdir / 'partitioning' + + paths_written = [] + + def file_visitor(written_file): + paths_written.append(written_file.path) + + basename_template = 'part-{i}.parquet' + + pq.write_to_dataset(table, path, partitioning=["a"], + file_visitor=file_visitor, + basename_template=basename_template) + + expected_paths = { + path / '1' / 'part-0.parquet', + path / '2' / 'part-0.parquet', + path / '3' / 'part-0.parquet' + } + paths_written_set = set(map(pathlib.Path, paths_written)) + assert paths_written_set == expected_paths + + +@pytest.mark.parametrize("write_dataset_kwarg", ( + ("create_dir", True), + ("create_dir", False), +)) +def test_write_to_dataset_kwargs_passed(tempdir, 
write_dataset_kwarg): + """Verify kwargs in pq.write_to_dataset are passed onto ds.write_dataset""" + import pyarrow.dataset as ds + + table = pa.table({"a": [1, 2, 3]}) + path = tempdir / 'out.parquet' + + signature = inspect.signature(ds.write_dataset) + key, arg = write_dataset_kwarg + + # kwarg not in pq.write_to_dataset, but will be passed to ds.write_dataset + assert key not in inspect.signature(pq.write_to_dataset).parameters + assert key in signature.parameters + + with mock.patch.object(ds, "write_dataset", autospec=True)\ + as mock_write_dataset: + pq.write_to_dataset(table, path, **{key: arg}) + _name, _args, kwargs = mock_write_dataset.mock_calls[0] + assert kwargs[key] == arg + + +@pytest.mark.pandas +def test_write_to_dataset_category_observed(tempdir): + # if we partition on a categorical variable with "unobserved" categories + # (values present in the dictionary, but not in the actual data) + # ensure those are not creating empty files/directories + df = pd.DataFrame({ + "cat": pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"]), + "col": [1, 2, 3] + }) + table = pa.table(df) + path = tempdir / "dataset" + pq.write_to_dataset( + table, tempdir / "dataset", partition_cols=["cat"] + ) + subdirs = [f.name for f in path.iterdir() if f.is_dir()] + assert len(subdirs) == 2 + assert "cat=c" not in subdirs diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_encryption.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_encryption.py new file mode 100644 index 0000000000000000000000000000000000000000..db9bc52e4698b8cd84ab116d890686778b59a3a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_encryption.py @@ -0,0 +1,532 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import pytest +from datetime import timedelta + +import pyarrow as pa +try: + import pyarrow.parquet as pq + import pyarrow.parquet.encryption as pe +except ImportError: + pq = None + pe = None +else: + from pyarrow.tests.parquet.encryption import ( + InMemoryKmsClient, verify_file_encrypted) + + +PARQUET_NAME = 'encrypted_table.in_mem.parquet' +FOOTER_KEY = b"0123456789112345" +FOOTER_KEY_NAME = "footer_key" +COL_KEY = b"1234567890123450" +COL_KEY_NAME = "col_key" + + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not parquet_encryption' +# Ignore these with pytest ... 
-m 'not parquet' +pytestmark = [ + pytest.mark.parquet_encryption, + pytest.mark.parquet +] + + +@pytest.fixture(scope='module') +def data_table(): + data_table = pa.Table.from_pydict({ + 'a': pa.array([1, 2, 3]), + 'b': pa.array(['a', 'b', 'c']), + 'c': pa.array(['x', 'y', 'z']) + }) + return data_table + + +@pytest.fixture(scope='module') +def basic_encryption_config(): + basic_encryption_config = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + column_keys={ + COL_KEY_NAME: ["a", "b"], + }) + return basic_encryption_config + + +def test_encrypted_parquet_write_read(tempdir, data_table): + """Write an encrypted parquet, verify it's encrypted, and then read it.""" + path = tempdir / PARQUET_NAME + + # Encrypt the footer with the footer key, + # encrypt column `a` and column `b` with another key, + # keep `c` plaintext + encryption_config = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + column_keys={ + COL_KEY_NAME: ["a", "b"], + }, + encryption_algorithm="AES_GCM_V1", + cache_lifetime=timedelta(minutes=5.0), + data_key_length_bits=256) + + kms_connection_config = pe.KmsConnectionConfig( + custom_kms_conf={ + FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"), + COL_KEY_NAME: COL_KEY.decode("UTF-8"), + } + ) + + def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + crypto_factory = pe.CryptoFactory(kms_factory) + # Write with encryption properties + write_encrypted_parquet(path, data_table, encryption_config, + kms_connection_config, crypto_factory) + verify_file_encrypted(path) + + # Read with decryption properties + decryption_config = pe.DecryptionConfiguration( + cache_lifetime=timedelta(minutes=5.0)) + result_table = read_encrypted_parquet( + path, decryption_config, kms_connection_config, crypto_factory) + assert data_table.equals(result_table) + + +def write_encrypted_parquet(path, table, encryption_config, + kms_connection_config, crypto_factory): + file_encryption_properties = crypto_factory.file_encryption_properties( + kms_connection_config, encryption_config) + assert file_encryption_properties is not None + with pq.ParquetWriter( + path, table.schema, + encryption_properties=file_encryption_properties) as writer: + writer.write_table(table) + + +def read_encrypted_parquet(path, decryption_config, + kms_connection_config, crypto_factory): + file_decryption_properties = crypto_factory.file_decryption_properties( + kms_connection_config, decryption_config) + assert file_decryption_properties is not None + meta = pq.read_metadata( + path, decryption_properties=file_decryption_properties) + assert meta.num_columns == 3 + schema = pq.read_schema( + path, decryption_properties=file_decryption_properties) + assert len(schema.names) == 3 + + result = pq.ParquetFile( + path, decryption_properties=file_decryption_properties) + return result.read(use_threads=True) + + +def test_encrypted_parquet_write_read_wrong_key(tempdir, data_table): + """Write an encrypted parquet, verify it's encrypted, + and then read it using wrong keys.""" + path = tempdir / PARQUET_NAME + + # Encrypt the footer with the footer key, + # encrypt column `a` and column `b` with another key, + # keep `c` plaintext + encryption_config = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + column_keys={ + COL_KEY_NAME: ["a", "b"], + }, + encryption_algorithm="AES_GCM_V1", + cache_lifetime=timedelta(minutes=5.0), + data_key_length_bits=256) + + kms_connection_config = pe.KmsConnectionConfig( + custom_kms_conf={ + FOOTER_KEY_NAME: 
FOOTER_KEY.decode("UTF-8"), + COL_KEY_NAME: COL_KEY.decode("UTF-8"), + } + ) + + def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + crypto_factory = pe.CryptoFactory(kms_factory) + # Write with encryption properties + write_encrypted_parquet(path, data_table, encryption_config, + kms_connection_config, crypto_factory) + verify_file_encrypted(path) + + # Read with decryption properties + wrong_kms_connection_config = pe.KmsConnectionConfig( + custom_kms_conf={ + # Wrong keys - mixup in names + FOOTER_KEY_NAME: COL_KEY.decode("UTF-8"), + COL_KEY_NAME: FOOTER_KEY.decode("UTF-8"), + } + ) + decryption_config = pe.DecryptionConfiguration( + cache_lifetime=timedelta(minutes=5.0)) + with pytest.raises(ValueError, match=r"Incorrect master key used"): + read_encrypted_parquet( + path, decryption_config, wrong_kms_connection_config, + crypto_factory) + + +def test_encrypted_parquet_read_no_decryption_config(tempdir, data_table): + """Write an encrypted parquet, verify it's encrypted, + but then try to read it without decryption properties.""" + test_encrypted_parquet_write_read(tempdir, data_table) + # Read without decryption properties + with pytest.raises(IOError, match=r"no decryption"): + pq.ParquetFile(tempdir / PARQUET_NAME).read() + + +def test_encrypted_parquet_read_metadata_no_decryption_config( + tempdir, data_table): + """Write an encrypted parquet, verify it's encrypted, + but then try to read its metadata without decryption properties.""" + test_encrypted_parquet_write_read(tempdir, data_table) + # Read metadata without decryption properties + with pytest.raises(IOError, match=r"no decryption"): + pq.read_metadata(tempdir / PARQUET_NAME) + + +def test_encrypted_parquet_read_schema_no_decryption_config( + tempdir, data_table): + """Write an encrypted parquet, verify it's encrypted, + but then try to read its schema without decryption properties.""" + test_encrypted_parquet_write_read(tempdir, data_table) + with pytest.raises(IOError, match=r"no decryption"): + pq.read_schema(tempdir / PARQUET_NAME) + + +def test_encrypted_parquet_write_no_col_key(tempdir, data_table): + """Write an encrypted parquet, but give only footer key, + without column key.""" + path = tempdir / 'encrypted_table_no_col_key.in_mem.parquet' + + # Encrypt the footer with the footer key + encryption_config = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME) + + kms_connection_config = pe.KmsConnectionConfig( + custom_kms_conf={ + FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"), + COL_KEY_NAME: COL_KEY.decode("UTF-8"), + } + ) + + def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + crypto_factory = pe.CryptoFactory(kms_factory) + with pytest.raises(OSError, + match="Either column_keys or uniform_encryption " + "must be set"): + # Write with encryption properties + write_encrypted_parquet(path, data_table, encryption_config, + kms_connection_config, crypto_factory) + + +def test_encrypted_parquet_write_kms_error(tempdir, data_table, + basic_encryption_config): + """Write an encrypted parquet, but raise KeyError in KmsClient.""" + path = tempdir / 'encrypted_table_kms_error.in_mem.parquet' + encryption_config = basic_encryption_config + + # Empty master_keys_map + kms_connection_config = pe.KmsConnectionConfig() + + def kms_factory(kms_connection_configuration): + # Empty master keys map will cause KeyError to be raised + # on wrap/unwrap calls + return InMemoryKmsClient(kms_connection_configuration) + + 
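+    # Note: constructing the CryptoFactory below does not itself contact the
+    # KMS; the expected KeyError only surfaces once write_encrypted_parquet
+    # builds the file encryption properties against the empty key map.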
+    crypto_factory = pe.CryptoFactory(kms_factory)
+    with pytest.raises(KeyError, match="footer_key"):
+        # Write with encryption properties
+        write_encrypted_parquet(path, data_table, encryption_config,
+                                kms_connection_config, crypto_factory)
+
+
+def test_encrypted_parquet_write_kms_specific_error(tempdir, data_table,
+                                                    basic_encryption_config):
+    """Write an encrypted parquet, but raise ValueError in KmsClient."""
+    path = tempdir / 'encrypted_table_kms_error.in_mem.parquet'
+    encryption_config = basic_encryption_config
+
+    # Empty master_keys_map
+    kms_connection_config = pe.KmsConnectionConfig()
+
+    class ThrowingKmsClient(pe.KmsClient):
+        """A KmsClient implementation that raises an exception in
+        wrap/unwrap calls
+        """
+
+        def __init__(self, config):
+            """Create a ThrowingKmsClient instance."""
+            pe.KmsClient.__init__(self)
+            self.config = config
+
+        def wrap_key(self, key_bytes, master_key_identifier):
+            raise ValueError("Cannot Wrap Key")
+
+        def unwrap_key(self, wrapped_key, master_key_identifier):
+            raise ValueError("Cannot Unwrap Key")
+
+    def kms_factory(kms_connection_configuration):
+        # Exception thrown in wrap/unwrap calls
+        return ThrowingKmsClient(kms_connection_configuration)
+
+    crypto_factory = pe.CryptoFactory(kms_factory)
+    with pytest.raises(ValueError, match="Cannot Wrap Key"):
+        # Write with encryption properties
+        write_encrypted_parquet(path, data_table, encryption_config,
+                                kms_connection_config, crypto_factory)
+
+
+def test_encrypted_parquet_write_kms_factory_error(tempdir, data_table,
+                                                   basic_encryption_config):
+    """Write an encrypted parquet, but raise ValueError in kms_factory."""
+    path = tempdir / 'encrypted_table_kms_factory_error.in_mem.parquet'
+    encryption_config = basic_encryption_config
+
+    # Empty master_keys_map
+    kms_connection_config = pe.KmsConnectionConfig()
+
+    def kms_factory(kms_connection_configuration):
+        raise ValueError('Cannot create KmsClient')
+
+    crypto_factory = pe.CryptoFactory(kms_factory)
+    with pytest.raises(ValueError,
+                       match="Cannot create KmsClient"):
+        # Write with encryption properties
+        write_encrypted_parquet(path, data_table, encryption_config,
+                                kms_connection_config, crypto_factory)
+
+
+def test_encrypted_parquet_write_kms_factory_type_error(
+        tempdir, data_table, basic_encryption_config):
+    """Write an encrypted parquet, but use wrong KMS client type
+    that doesn't implement KmsClient."""
+    path = tempdir / 'encrypted_table_kms_factory_error.in_mem.parquet'
+    encryption_config = basic_encryption_config
+
+    # Empty master_keys_map
+    kms_connection_config = pe.KmsConnectionConfig()
+
+    class WrongTypeKmsClient():
+        """This is not an implementation of KmsClient.
+ """ + + def __init__(self, config): + self.master_keys_map = config.custom_kms_conf + + def wrap_key(self, key_bytes, master_key_identifier): + return None + + def unwrap_key(self, wrapped_key, master_key_identifier): + return None + + def kms_factory(kms_connection_configuration): + return WrongTypeKmsClient(kms_connection_configuration) + + crypto_factory = pe.CryptoFactory(kms_factory) + with pytest.raises(TypeError): + # Write with encryption properties + write_encrypted_parquet(path, data_table, encryption_config, + kms_connection_config, crypto_factory) + + +def test_encrypted_parquet_encryption_configuration(): + def validate_encryption_configuration(encryption_config): + assert FOOTER_KEY_NAME == encryption_config.footer_key + assert ["a", "b"] == encryption_config.column_keys[COL_KEY_NAME] + assert "AES_GCM_CTR_V1" == encryption_config.encryption_algorithm + assert encryption_config.plaintext_footer + assert not encryption_config.double_wrapping + assert timedelta(minutes=10.0) == encryption_config.cache_lifetime + assert not encryption_config.internal_key_material + assert 192 == encryption_config.data_key_length_bits + + encryption_config = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + column_keys={COL_KEY_NAME: ["a", "b"], }, + encryption_algorithm="AES_GCM_CTR_V1", + plaintext_footer=True, + double_wrapping=False, + cache_lifetime=timedelta(minutes=10.0), + internal_key_material=False, + data_key_length_bits=192, + ) + validate_encryption_configuration(encryption_config) + + encryption_config_1 = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME) + encryption_config_1.column_keys = {COL_KEY_NAME: ["a", "b"], } + encryption_config_1.encryption_algorithm = "AES_GCM_CTR_V1" + encryption_config_1.plaintext_footer = True + encryption_config_1.double_wrapping = False + encryption_config_1.cache_lifetime = timedelta(minutes=10.0) + encryption_config_1.internal_key_material = False + encryption_config_1.data_key_length_bits = 192 + validate_encryption_configuration(encryption_config_1) + + +def test_encrypted_parquet_decryption_configuration(): + decryption_config = pe.DecryptionConfiguration( + cache_lifetime=timedelta(minutes=10.0)) + assert timedelta(minutes=10.0) == decryption_config.cache_lifetime + + decryption_config_1 = pe.DecryptionConfiguration() + decryption_config_1.cache_lifetime = timedelta(minutes=10.0) + assert timedelta(minutes=10.0) == decryption_config_1.cache_lifetime + + +def test_encrypted_parquet_kms_configuration(): + def validate_kms_connection_config(kms_connection_config): + assert "Instance1" == kms_connection_config.kms_instance_id + assert "URL1" == kms_connection_config.kms_instance_url + assert "MyToken" == kms_connection_config.key_access_token + assert ({"key1": "key_material_1", "key2": "key_material_2"} == + kms_connection_config.custom_kms_conf) + + kms_connection_config = pe.KmsConnectionConfig( + kms_instance_id="Instance1", + kms_instance_url="URL1", + key_access_token="MyToken", + custom_kms_conf={ + "key1": "key_material_1", + "key2": "key_material_2", + }) + validate_kms_connection_config(kms_connection_config) + + kms_connection_config_1 = pe.KmsConnectionConfig() + kms_connection_config_1.kms_instance_id = "Instance1" + kms_connection_config_1.kms_instance_url = "URL1" + kms_connection_config_1.key_access_token = "MyToken" + kms_connection_config_1.custom_kms_conf = { + "key1": "key_material_1", + "key2": "key_material_2", + } + validate_kms_connection_config(kms_connection_config_1) + + 
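+# For reference, a KmsClient test double along the lines of the
+# InMemoryKmsClient imported at the top of this module could look roughly
+# like the sketch below. It is only an illustration of the pe.KmsClient
+# interface exercised by these tests (wrap_key/unwrap_key); the real helper
+# lives in pyarrow/tests/parquet/encryption.py and may differ in detail.
+# The "wrapping" scheme here -- prefix the data key with the master key and
+# base64-encode it -- is obviously not secure, but it is enough to drive the
+# encryption code paths and to produce an "Incorrect master key used" error
+# like the one checked in test_encrypted_parquet_write_read_wrong_key.
+# The guard mirrors this module's optional import of parquet encryption.
+if pe is not None:
+    class _IllustrativeInMemoryKmsClient(pe.KmsClient):
+        def __init__(self, kms_connection_config):
+            pe.KmsClient.__init__(self)
+            self.master_keys_map = kms_connection_config.custom_kms_conf
+
+        def wrap_key(self, key_bytes, master_key_identifier):
+            # Local import keeps the illustrative sketch self-contained.
+            import base64
+            master_key = self.master_keys_map[master_key_identifier]
+            wrapped = base64.b64encode(
+                master_key.encode("utf-8") + key_bytes)
+            return wrapped.decode("utf-8")
+
+        def unwrap_key(self, wrapped_key, master_key_identifier):
+            import base64
+            master_key = self.master_keys_map[
+                master_key_identifier].encode("utf-8")
+            decoded = base64.b64decode(wrapped_key)
+            if not decoded.startswith(master_key):
+                raise ValueError("Incorrect master key used")
+            return decoded[len(master_key):]
+
+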
+@pytest.mark.xfail(reason="Plaintext footer - reading plaintext column subset" + " reads encrypted columns too") +def test_encrypted_parquet_write_read_plain_footer_single_wrapping( + tempdir, data_table): + """Write an encrypted parquet, with plaintext footer + and with single wrapping, + verify it's encrypted, and then read plaintext columns.""" + path = tempdir / PARQUET_NAME + + # Encrypt the footer with the footer key, + # encrypt column `a` and column `b` with another key, + # keep `c` plaintext + encryption_config = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + column_keys={ + COL_KEY_NAME: ["a", "b"], + }, + plaintext_footer=True, + double_wrapping=False) + + kms_connection_config = pe.KmsConnectionConfig( + custom_kms_conf={ + FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"), + COL_KEY_NAME: COL_KEY.decode("UTF-8"), + } + ) + + def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + crypto_factory = pe.CryptoFactory(kms_factory) + # Write with encryption properties + write_encrypted_parquet(path, data_table, encryption_config, + kms_connection_config, crypto_factory) + + # # Read without decryption properties only the plaintext column + # result = pq.ParquetFile(path) + # result_table = result.read(columns='c', use_threads=False) + # assert table.num_rows == result_table.num_rows + + +@pytest.mark.xfail(reason="External key material not supported yet") +def test_encrypted_parquet_write_external(tempdir, data_table): + """Write an encrypted parquet, with external key + material. + Currently it's not implemented, so should throw + an exception""" + path = tempdir / PARQUET_NAME + + # Encrypt the file with the footer key + encryption_config = pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + column_keys={}, + internal_key_material=False) + + kms_connection_config = pe.KmsConnectionConfig( + custom_kms_conf={FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8")} + ) + + def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + crypto_factory = pe.CryptoFactory(kms_factory) + # Write with encryption properties + write_encrypted_parquet(path, data_table, encryption_config, + kms_connection_config, crypto_factory) + + +def test_encrypted_parquet_loop(tempdir, data_table, basic_encryption_config): + """Write an encrypted parquet, verify it's encrypted, + and then read it multithreaded in a loop.""" + path = tempdir / PARQUET_NAME + + # Encrypt the footer with the footer key, + # encrypt column `a` and column `b` with another key, + # keep `c` plaintext + encryption_config = basic_encryption_config + + kms_connection_config = pe.KmsConnectionConfig( + custom_kms_conf={ + FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"), + COL_KEY_NAME: COL_KEY.decode("UTF-8"), + } + ) + + def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + crypto_factory = pe.CryptoFactory(kms_factory) + + # Write with encryption properties + write_encrypted_parquet(path, data_table, encryption_config, + kms_connection_config, crypto_factory) + verify_file_encrypted(path) + + decryption_config = pe.DecryptionConfiguration( + cache_lifetime=timedelta(minutes=5.0)) + + for i in range(50): + # Read with decryption properties + file_decryption_properties = crypto_factory.file_decryption_properties( + kms_connection_config, decryption_config) + assert file_decryption_properties is not None + + result = pq.ParquetFile( + path, 
decryption_properties=file_decryption_properties) + result_table = result.read(use_threads=True) + assert data_table.equals(result_table) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_metadata.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..73284d2e53b9ecc7c669ef5677d9f79c03722d87 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_metadata.py @@ -0,0 +1,748 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import datetime +import decimal +from collections import OrderedDict +import io + +import numpy as np +import pytest + +import pyarrow as pa +from pyarrow.tests.parquet.common import _check_roundtrip, make_sample_file +from pyarrow.fs import LocalFileSystem +from pyarrow.tests import util + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import _write_table +except ImportError: + pq = None + + +try: + import pandas as pd + import pandas.testing as tm + + from pyarrow.tests.parquet.common import alltypes_sample +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... 
-m 'not parquet' +pytestmark = pytest.mark.parquet + + +@pytest.mark.pandas +def test_parquet_metadata_api(): + df = alltypes_sample(size=10000) + df = df.reindex(columns=sorted(df.columns)) + df.index = np.random.randint(0, 1000000, size=len(df)) + + fileh = make_sample_file(df) + ncols = len(df.columns) + + # Series of sniff tests + meta = fileh.metadata + repr(meta) + assert meta.num_rows == len(df) + assert meta.num_columns == ncols + 1 # +1 for index + assert meta.num_row_groups == 1 + assert meta.format_version == '2.6' + assert 'parquet-cpp' in meta.created_by + assert isinstance(meta.serialized_size, int) + assert isinstance(meta.metadata, dict) + + # Schema + schema = fileh.schema + assert meta.schema is schema + assert len(schema) == ncols + 1 # +1 for index + repr(schema) + + col = schema[0] + repr(col) + assert col.name == df.columns[0] + assert col.max_definition_level == 1 + assert col.max_repetition_level == 0 + assert col.max_repetition_level == 0 + + assert col.physical_type == 'BOOLEAN' + assert col.converted_type == 'NONE' + + with pytest.raises(IndexError): + schema[ncols + 1] # +1 for index + + with pytest.raises(IndexError): + schema[-1] + + # Row group + for rg in range(meta.num_row_groups): + rg_meta = meta.row_group(rg) + assert isinstance(rg_meta, pq.RowGroupMetaData) + repr(rg_meta) + + for col in range(rg_meta.num_columns): + col_meta = rg_meta.column(col) + assert isinstance(col_meta, pq.ColumnChunkMetaData) + repr(col_meta) + + with pytest.raises(IndexError): + meta.row_group(-1) + + with pytest.raises(IndexError): + meta.row_group(meta.num_row_groups + 1) + + rg_meta = meta.row_group(0) + assert rg_meta.num_rows == len(df) + assert rg_meta.num_columns == ncols + 1 # +1 for index + assert rg_meta.total_byte_size > 0 + + with pytest.raises(IndexError): + col_meta = rg_meta.column(-1) + + with pytest.raises(IndexError): + col_meta = rg_meta.column(ncols + 2) + + col_meta = rg_meta.column(0) + assert col_meta.file_offset > 0 + assert col_meta.file_path == '' # created from BytesIO + assert col_meta.physical_type == 'BOOLEAN' + assert col_meta.num_values == 10000 + assert col_meta.path_in_schema == 'bool' + assert col_meta.is_stats_set is True + assert isinstance(col_meta.statistics, pq.Statistics) + assert col_meta.compression == 'SNAPPY' + assert set(col_meta.encodings) == {'PLAIN', 'RLE'} + assert col_meta.has_dictionary_page is False + assert col_meta.dictionary_page_offset is None + assert col_meta.data_page_offset > 0 + assert col_meta.total_compressed_size > 0 + assert col_meta.total_uncompressed_size > 0 + with pytest.raises(NotImplementedError): + col_meta.has_index_page + with pytest.raises(NotImplementedError): + col_meta.index_page_offset + + +def test_parquet_metadata_lifetime(tempdir): + # ARROW-6642 - ensure that chained access keeps parent objects alive + table = pa.table({'a': [1, 2, 3]}) + pq.write_table(table, tempdir / 'test_metadata_segfault.parquet') + parquet_file = pq.ParquetFile(tempdir / 'test_metadata_segfault.parquet') + parquet_file.metadata.row_group(0).column(0).statistics + + +@pytest.mark.pandas +@pytest.mark.parametrize( + ( + 'data', + 'type', + 'physical_type', + 'min_value', + 'max_value', + 'null_count', + 'num_values', + 'distinct_count' + ), + [ + ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, None), + ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, None), + ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, None), + ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, None), + ([-1, 2, 2, None, 4], 
pa.int8(), 'INT32', -1, 4, 1, 4, None), + ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, None), + ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, None), + ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, None), + ( + [-1.1, 2.2, 2.3, None, 4.4], pa.float32(), + 'FLOAT', -1.1, 4.4, 1, 4, None + ), + ( + [-1.1, 2.2, 2.3, None, 4.4], pa.float64(), + 'DOUBLE', -1.1, 4.4, 1, 4, None + ), + ( + ['', 'b', chr(1000), None, 'aaa'], pa.binary(), + 'BYTE_ARRAY', b'', chr(1000).encode('utf-8'), 1, 4, None + ), + ( + [True, False, False, True, True], pa.bool_(), + 'BOOLEAN', False, True, 0, 5, None + ), + ( + [b'\x00', b'b', b'12', None, b'aaa'], pa.binary(), + 'BYTE_ARRAY', b'\x00', b'b', 1, 4, None + ), + ] +) +def test_parquet_column_statistics_api(data, type, physical_type, min_value, + max_value, null_count, num_values, + distinct_count): + df = pd.DataFrame({'data': data}) + schema = pa.schema([pa.field('data', type)]) + table = pa.Table.from_pandas(df, schema=schema, safe=False) + fileh = make_sample_file(table) + + meta = fileh.metadata + + rg_meta = meta.row_group(0) + col_meta = rg_meta.column(0) + + stat = col_meta.statistics + assert stat.has_min_max + assert _close(type, stat.min, min_value) + assert _close(type, stat.max, max_value) + assert stat.null_count == null_count + assert stat.num_values == num_values + # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount + # method, missing distinct_count is represented as zero instead of None + assert stat.distinct_count == distinct_count + assert stat.physical_type == physical_type + + +def _close(type, left, right): + if type == pa.float32(): + return abs(left - right) < 1E-7 + elif type == pa.float64(): + return abs(left - right) < 1E-13 + else: + return left == right + + +# ARROW-6339 +@pytest.mark.pandas +def test_parquet_raise_on_unset_statistics(): + df = pd.DataFrame({"t": pd.Series([pd.NaT], dtype="datetime64[ns]")}) + meta = make_sample_file(pa.Table.from_pandas(df)).metadata + + assert not meta.row_group(0).column(0).statistics.has_min_max + assert meta.row_group(0).column(0).statistics.max is None + + +def test_statistics_convert_logical_types(tempdir): + # ARROW-5166, ARROW-4139 + + # (min, max, type) + cases = [(10, 11164359321221007157, pa.uint64()), + (10, 4294967295, pa.uint32()), + ("ähnlich", "öffentlich", pa.utf8()), + (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000), + pa.time32('ms')), + (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000), + pa.time64('us')), + (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000), + datetime.datetime(2019, 6, 25, 0, 0, 0, 1000), + pa.timestamp('ms')), + (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000), + datetime.datetime(2019, 6, 25, 0, 0, 0, 1000), + pa.timestamp('us')), + (datetime.date(2019, 6, 24), + datetime.date(2019, 6, 25), + pa.date32()), + (decimal.Decimal("20.123"), + decimal.Decimal("20.124"), + pa.decimal128(12, 5))] + + for i, (min_val, max_val, typ) in enumerate(cases): + t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)], + ['col']) + path = str(tempdir / ('example{}.parquet'.format(i))) + pq.write_table(t, path, version='2.6') + pf = pq.ParquetFile(path) + stats = pf.metadata.row_group(0).column(0).statistics + assert stats.min == min_val + assert stats.max == max_val + + +def test_parquet_write_disable_statistics(tempdir): + table = pa.Table.from_pydict( + OrderedDict([ + ('a', pa.array([1, 2, 3])), + ('b', pa.array(['a', 'b', 'c'])) + ]) + ) + _write_table(table, tempdir / 'data.parquet') + meta = 
pq.read_metadata(tempdir / 'data.parquet') + for col in [0, 1]: + cc = meta.row_group(0).column(col) + assert cc.is_stats_set is True + assert cc.statistics is not None + + _write_table(table, tempdir / 'data2.parquet', write_statistics=False) + meta = pq.read_metadata(tempdir / 'data2.parquet') + for col in [0, 1]: + cc = meta.row_group(0).column(col) + assert cc.is_stats_set is False + assert cc.statistics is None + + _write_table(table, tempdir / 'data3.parquet', write_statistics=['a']) + meta = pq.read_metadata(tempdir / 'data3.parquet') + cc_a = meta.row_group(0).column(0) + cc_b = meta.row_group(0).column(1) + assert cc_a.is_stats_set is True + assert cc_b.is_stats_set is False + assert cc_a.statistics is not None + assert cc_b.statistics is None + + +def test_parquet_sorting_column(): + sorting_col = pq.SortingColumn(10) + assert sorting_col.column_index == 10 + assert sorting_col.descending is False + assert sorting_col.nulls_first is False + + sorting_col = pq.SortingColumn(0, descending=True, nulls_first=True) + assert sorting_col.column_index == 0 + assert sorting_col.descending is True + assert sorting_col.nulls_first is True + + schema = pa.schema([('a', pa.int64()), ('b', pa.int64())]) + sorting_cols = ( + pq.SortingColumn(1, descending=True), + pq.SortingColumn(0, descending=False), + ) + sort_order, null_placement = pq.SortingColumn.to_ordering(schema, sorting_cols) + assert sort_order == (('b', "descending"), ('a', "ascending")) + assert null_placement == "at_end" + + sorting_cols_roundtripped = pq.SortingColumn.from_ordering( + schema, sort_order, null_placement) + assert sorting_cols_roundtripped == sorting_cols + + sorting_cols = pq.SortingColumn.from_ordering( + schema, ('a', ('b', "descending")), null_placement="at_start") + expected = ( + pq.SortingColumn(0, descending=False, nulls_first=True), + pq.SortingColumn(1, descending=True, nulls_first=True), + ) + assert sorting_cols == expected + + # Conversions handle empty tuples + empty_sorting_cols = pq.SortingColumn.from_ordering(schema, ()) + assert empty_sorting_cols == () + + assert pq.SortingColumn.to_ordering(schema, ()) == ((), "at_end") + + with pytest.raises(ValueError): + pq.SortingColumn.from_ordering(schema, (("a", "not a valid sort order"))) + + with pytest.raises(ValueError, match="inconsistent null placement"): + sorting_cols = ( + pq.SortingColumn(1, nulls_first=True), + pq.SortingColumn(0, nulls_first=False), + ) + pq.SortingColumn.to_ordering(schema, sorting_cols) + + +def test_parquet_sorting_column_nested(): + schema = pa.schema({ + 'a': pa.struct([('x', pa.int64()), ('y', pa.int64())]), + 'b': pa.int64() + }) + + sorting_columns = [ + pq.SortingColumn(0, descending=True), # a.x + pq.SortingColumn(2, descending=False) # b + ] + + sort_order, null_placement = pq.SortingColumn.to_ordering(schema, sorting_columns) + assert null_placement == "at_end" + assert len(sort_order) == 2 + assert sort_order[0] == ("a.x", "descending") + assert sort_order[1] == ("b", "ascending") + + +def test_parquet_file_sorting_columns(): + table = pa.table({'a': [1, 2, 3], 'b': ['a', 'b', 'c']}) + + sorting_columns = ( + pq.SortingColumn(column_index=0, descending=True, nulls_first=True), + pq.SortingColumn(column_index=1, descending=False), + ) + writer = pa.BufferOutputStream() + _write_table(table, writer, sorting_columns=sorting_columns) + reader = pa.BufferReader(writer.getvalue()) + + # Can retrieve sorting columns from metadata + metadata = pq.read_metadata(reader) + assert metadata.num_row_groups == 1 + assert 
sorting_columns == metadata.row_group(0).sorting_columns + + +def test_field_id_metadata(): + # ARROW-7080 + field_id = b'PARQUET:field_id' + inner = pa.field('inner', pa.int32(), metadata={field_id: b'100'}) + middle = pa.field('middle', pa.struct( + [inner]), metadata={field_id: b'101'}) + fields = [ + pa.field('basic', pa.int32(), metadata={ + b'other': b'abc', field_id: b'1'}), + pa.field( + 'list', + pa.list_(pa.field('list-inner', pa.int32(), + metadata={field_id: b'10'})), + metadata={field_id: b'11'}), + pa.field('struct', pa.struct([middle]), metadata={field_id: b'102'}), + pa.field('no-metadata', pa.int32()), + pa.field('non-integral-field-id', pa.int32(), + metadata={field_id: b'xyz'}), + pa.field('negative-field-id', pa.int32(), + metadata={field_id: b'-1000'}) + ] + arrs = [[] for _ in fields] + table = pa.table(arrs, schema=pa.schema(fields)) + + bio = pa.BufferOutputStream() + pq.write_table(table, bio) + contents = bio.getvalue() + + pf = pq.ParquetFile(pa.BufferReader(contents)) + schema = pf.schema_arrow + + assert schema[0].metadata[field_id] == b'1' + assert schema[0].metadata[b'other'] == b'abc' + + list_field = schema[1] + assert list_field.metadata[field_id] == b'11' + + list_item_field = list_field.type.value_field + assert list_item_field.metadata[field_id] == b'10' + + struct_field = schema[2] + assert struct_field.metadata[field_id] == b'102' + + struct_middle_field = struct_field.type[0] + assert struct_middle_field.metadata[field_id] == b'101' + + struct_inner_field = struct_middle_field.type[0] + assert struct_inner_field.metadata[field_id] == b'100' + + assert schema[3].metadata is None + # Invalid input is passed through (ok) but does not + # have field_id in parquet (not tested) + assert schema[4].metadata[field_id] == b'xyz' + assert schema[5].metadata[field_id] == b'-1000' + + +def test_parquet_file_page_index(): + for write_page_index in (False, True): + table = pa.table({'a': [1, 2, 3]}) + + writer = pa.BufferOutputStream() + _write_table(table, writer, write_page_index=write_page_index) + reader = pa.BufferReader(writer.getvalue()) + + # Can retrieve sorting columns from metadata + metadata = pq.read_metadata(reader) + cc = metadata.row_group(0).column(0) + assert cc.has_offset_index is write_page_index + assert cc.has_column_index is write_page_index + + +@pytest.mark.pandas +def test_multi_dataset_metadata(tempdir): + filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"] + metapath = str(tempdir / "_metadata") + + # create a test dataset + df = pd.DataFrame({ + 'one': [1, 2, 3], + 'two': [-1, -2, -3], + 'three': [[1, 2], [2, 3], [3, 4]], + }) + table = pa.Table.from_pandas(df) + + # write dataset twice and collect/merge metadata + _meta = None + for filename in filenames: + meta = [] + pq.write_table(table, str(tempdir / filename), + metadata_collector=meta) + meta[0].set_file_path(filename) + if _meta is None: + _meta = meta[0] + else: + _meta.append_row_groups(meta[0]) + + # Write merged metadata-only file + with open(metapath, "wb") as f: + _meta.write_metadata_file(f) + + # Read back the metadata + meta = pq.read_metadata(metapath) + md = meta.to_dict() + _md = _meta.to_dict() + for key in _md: + if key != 'serialized_size': + assert _md[key] == md[key] + assert _md['num_columns'] == 3 + assert _md['num_rows'] == 6 + assert _md['num_row_groups'] == 2 + assert _md['serialized_size'] == 0 + assert md['serialized_size'] > 0 + + +@pytest.mark.filterwarnings("ignore:Parquet format:FutureWarning") +def test_write_metadata(tempdir): + path = 
str(tempdir / "metadata") + schema = pa.schema([("a", "int64"), ("b", "float64")]) + + # write a pyarrow schema + pq.write_metadata(schema, path) + parquet_meta = pq.read_metadata(path) + schema_as_arrow = parquet_meta.schema.to_arrow_schema() + assert schema_as_arrow.equals(schema) + + # ARROW-8980: Check that the ARROW:schema metadata key was removed + if schema_as_arrow.metadata: + assert b'ARROW:schema' not in schema_as_arrow.metadata + + # pass through writer keyword arguments + for version in ["1.0", "2.0", "2.4", "2.6"]: + pq.write_metadata(schema, path, version=version) + parquet_meta = pq.read_metadata(path) + # The version is stored as a single integer in the Parquet metadata, + # so it cannot correctly express dotted format versions + expected_version = "1.0" if version == "1.0" else "2.6" + assert parquet_meta.format_version == expected_version + + # metadata_collector: list of FileMetaData objects + table = pa.table({'a': [1, 2], 'b': [.1, .2]}, schema=schema) + pq.write_table(table, tempdir / "data.parquet") + parquet_meta = pq.read_metadata(str(tempdir / "data.parquet")) + pq.write_metadata( + schema, path, metadata_collector=[parquet_meta, parquet_meta] + ) + parquet_meta_mult = pq.read_metadata(path) + assert parquet_meta_mult.num_row_groups == 2 + + # append metadata with different schema raises an error + msg = ("AppendRowGroups requires equal schemas.\n" + "The two columns with index 0 differ.") + with pytest.raises(RuntimeError, match=msg): + pq.write_metadata( + pa.schema([("a", "int32"), ("b", "null")]), + path, metadata_collector=[parquet_meta, parquet_meta] + ) + + +def test_table_large_metadata(): + # ARROW-8694 + my_schema = pa.schema([pa.field('f0', 'double')], + metadata={'large': 'x' * 10000000}) + + table = pa.table([np.arange(10)], schema=my_schema) + _check_roundtrip(table) + + +@pytest.mark.pandas +def test_compare_schemas(): + df = alltypes_sample(size=10000) + + fileh = make_sample_file(df) + fileh2 = make_sample_file(df) + fileh3 = make_sample_file(df[df.columns[::2]]) + + # ParquetSchema + assert isinstance(fileh.schema, pq.ParquetSchema) + assert fileh.schema.equals(fileh.schema) + assert fileh.schema == fileh.schema + assert fileh.schema.equals(fileh2.schema) + assert fileh.schema == fileh2.schema + assert fileh.schema != 'arbitrary object' + assert not fileh.schema.equals(fileh3.schema) + assert fileh.schema != fileh3.schema + + # ColumnSchema + assert isinstance(fileh.schema[0], pq.ColumnSchema) + assert fileh.schema[0].equals(fileh.schema[0]) + assert fileh.schema[0] == fileh.schema[0] + assert not fileh.schema[0].equals(fileh.schema[1]) + assert fileh.schema[0] != fileh.schema[1] + assert fileh.schema[0] != 'arbitrary object' + + +@pytest.mark.pandas +def test_read_schema(tempdir): + N = 100 + df = pd.DataFrame({ + 'index': np.arange(N), + 'values': np.random.randn(N) + }, columns=['index', 'values']) + + data_path = tempdir / 'test.parquet' + + table = pa.Table.from_pandas(df) + _write_table(table, data_path) + + read1 = pq.read_schema(data_path) + read2 = pq.read_schema(data_path, memory_map=True) + assert table.schema.equals(read1) + assert table.schema.equals(read2) + + assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas'] + + +def test_parquet_metadata_empty_to_dict(tempdir): + # https://issues.apache.org/jira/browse/ARROW-10146 + table = pa.table({"a": pa.array([], type="int64")}) + pq.write_table(table, tempdir / "data.parquet") + metadata = pq.read_metadata(tempdir / "data.parquet") + # ensure this doesn't error / 
statistics set to None + metadata_dict = metadata.to_dict() + assert len(metadata_dict["row_groups"]) == 1 + assert len(metadata_dict["row_groups"][0]["columns"]) == 1 + assert metadata_dict["row_groups"][0]["columns"][0]["statistics"] is None + + +@pytest.mark.slow +@pytest.mark.large_memory +def test_metadata_exceeds_message_size(): + # ARROW-13655: Thrift may enable a default message size that limits + # the size of Parquet metadata that can be written. + NCOLS = 1000 + NREPEATS = 4000 + + table = pa.table({str(i): np.random.randn(10) for i in range(NCOLS)}) + + with pa.BufferOutputStream() as out: + pq.write_table(table, out) + buf = out.getvalue() + + original_metadata = pq.read_metadata(pa.BufferReader(buf)) + metadata = pq.read_metadata(pa.BufferReader(buf)) + for i in range(NREPEATS): + metadata.append_row_groups(original_metadata) + + with pa.BufferOutputStream() as out: + metadata.write_metadata_file(out) + buf = out.getvalue() + + metadata = pq.read_metadata(pa.BufferReader(buf)) + + +def test_metadata_schema_filesystem(tempdir): + table = pa.table({"a": [1, 2, 3]}) + + # URI writing to local file. + fname = "data.parquet" + file_path = str(tempdir / fname) + file_uri = 'file:///' + file_path + + pq.write_table(table, file_path) + + # Get expected `metadata` from path. + metadata = pq.read_metadata(tempdir / fname) + schema = table.schema + + assert pq.read_metadata(file_uri).equals(metadata) + assert pq.read_metadata( + file_path, filesystem=LocalFileSystem()).equals(metadata) + assert pq.read_metadata( + fname, filesystem=f'file:///{tempdir}').equals(metadata) + + assert pq.read_schema(file_uri).equals(schema) + assert pq.read_schema( + file_path, filesystem=LocalFileSystem()).equals(schema) + assert pq.read_schema( + fname, filesystem=f'file:///{tempdir}').equals(schema) + + with util.change_cwd(tempdir): + # Pass `filesystem` arg + assert pq.read_metadata( + fname, filesystem=LocalFileSystem()).equals(metadata) + + assert pq.read_schema( + fname, filesystem=LocalFileSystem()).equals(schema) + + +def test_metadata_equals(): + table = pa.table({"a": [1, 2, 3]}) + with pa.BufferOutputStream() as out: + pq.write_table(table, out) + buf = out.getvalue() + + original_metadata = pq.read_metadata(pa.BufferReader(buf)) + match = "Argument 'other' has incorrect type" + with pytest.raises(TypeError, match=match): + original_metadata.equals(None) + + +@pytest.mark.parametrize("t1,t2,expected_error", ( + ({'col1': range(10)}, {'col1': range(10)}, None), + ({'col1': range(10)}, {'col2': range(10)}, + "The two columns with index 0 differ."), + ({'col1': range(10), 'col2': range(10)}, {'col3': range(10)}, + "This schema has 2 columns, other has 1") +)) +def test_metadata_append_row_groups_diff(t1, t2, expected_error): + table1 = pa.table(t1) + table2 = pa.table(t2) + + buf1 = io.BytesIO() + buf2 = io.BytesIO() + pq.write_table(table1, buf1) + pq.write_table(table2, buf2) + buf1.seek(0) + buf2.seek(0) + + meta1 = pq.ParquetFile(buf1).metadata + meta2 = pq.ParquetFile(buf2).metadata + + if expected_error: + # Error clearly defines it's happening at append row groups call + prefix = "AppendRowGroups requires equal schemas.\n" + with pytest.raises(RuntimeError, match=prefix + expected_error): + meta1.append_row_groups(meta2) + else: + meta1.append_row_groups(meta2) + + +@pytest.mark.s3 +def test_write_metadata_fs_file_combinations(tempdir, s3_example_s3fs): + s3_fs, s3_path = s3_example_s3fs + + meta1 = tempdir / "meta1" + meta2 = tempdir / "meta2" + meta3 = tempdir / "meta3" + meta4 = tempdir 
/ "meta4" + meta5 = f"{s3_path}/meta5" + + table = pa.table({"col": range(5)}) + + # plain local path + pq.write_metadata(table.schema, meta1, []) + + # Used the localfilesystem to resolve opening an output stream + pq.write_metadata(table.schema, meta2, [], filesystem=LocalFileSystem()) + + # Can resolve local file URI + pq.write_metadata(table.schema, meta3.as_uri(), []) + + # Take a file-like obj all the way thru? + with meta4.open('wb+') as meta4_stream: + pq.write_metadata(table.schema, meta4_stream, []) + + # S3FileSystem + pq.write_metadata(table.schema, meta5, [], filesystem=s3_fs) + + assert meta1.read_bytes() == meta2.read_bytes() \ + == meta3.read_bytes() == meta4.read_bytes() \ + == s3_fs.open(meta5).read() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_pandas.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..b5913bf5c6b6e7a4dc61041e9a2cde093be03dc6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_pandas.py @@ -0,0 +1,654 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import io +import json + +import numpy as np +import pytest + +import pyarrow as pa +from pyarrow.fs import LocalFileSystem, SubTreeFileSystem +from pyarrow.util import guid +from pyarrow.vendored.version import Version + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import (_read_table, _test_dataframe, + _write_table) +except ImportError: + pq = None + + +try: + import pandas as pd + import pandas.testing as tm + + from pyarrow.tests.parquet.common import (_roundtrip_pandas_dataframe, + alltypes_sample) +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... 
-m 'not parquet' +pytestmark = pytest.mark.parquet + + +@pytest.mark.pandas +def test_pandas_parquet_custom_metadata(tempdir): + df = alltypes_sample(size=10000) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df) + assert b'pandas' in arrow_table.schema.metadata + + _write_table(arrow_table, filename) + + metadata = pq.read_metadata(filename).metadata + assert b'pandas' in metadata + + js = json.loads(metadata[b'pandas'].decode('utf8')) + assert js['index_columns'] == [{'kind': 'range', + 'name': None, + 'start': 0, 'stop': 10000, + 'step': 1}] + + +@pytest.mark.pandas +def test_merging_parquet_tables_with_different_pandas_metadata(tempdir): + # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch + schema = pa.schema([ + pa.field('int', pa.int16()), + pa.field('float', pa.float32()), + pa.field('string', pa.string()) + ]) + df1 = pd.DataFrame({ + 'int': np.arange(3, dtype=np.uint8), + 'float': np.arange(3, dtype=np.float32), + 'string': ['ABBA', 'EDDA', 'ACDC'] + }) + df2 = pd.DataFrame({ + 'int': [4, 5], + 'float': [1.1, None], + 'string': [None, None] + }) + table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False) + table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False) + + assert not table1.schema.equals(table2.schema, check_metadata=True) + assert table1.schema.equals(table2.schema) + + writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) + writer.write_table(table1) + writer.write_table(table2) + + +@pytest.mark.pandas +def test_pandas_parquet_column_multiindex(tempdir): + df = alltypes_sample(size=10) + df.columns = pd.MultiIndex.from_tuples( + list(zip(df.columns, df.columns[::-1])), + names=['level_1', 'level_2'] + ) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df) + assert arrow_table.schema.pandas_metadata is not None + + _write_table(arrow_table, filename) + + table_read = pq.read_pandas(filename) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir): + df = alltypes_sample(size=10000) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df, preserve_index=False) + js = arrow_table.schema.pandas_metadata + assert not js['index_columns'] + # ARROW-2170 + # While index_columns should be empty, columns needs to be filled still. 
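+    # For illustration, js here is the decoded b'pandas' metadata dict and
+    # has roughly this shape (abridged; exact fields vary by pandas version):
+    #   {'index_columns': [],
+    #    'columns': [{'name': 'bool', 'pandas_type': 'bool', ...}, ...],
+    #    'pandas_version': '...'}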
+ assert js['columns'] + + _write_table(arrow_table, filename) + table_read = pq.read_pandas(filename) + + js = table_read.schema.pandas_metadata + assert not js['index_columns'] + + read_metadata = table_read.schema.metadata + assert arrow_table.schema.metadata == read_metadata + + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_pandas_parquet_native_file_roundtrip(): + df = _test_dataframe(10000) + arrow_table = pa.Table.from_pandas(df) + imos = pa.BufferOutputStream() + _write_table(arrow_table, imos, version='2.6') + buf = imos.getvalue() + reader = pa.BufferReader(buf) + df_read = _read_table(reader).to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_read_pandas_column_subset(): + df = _test_dataframe(10000) + arrow_table = pa.Table.from_pandas(df) + imos = pa.BufferOutputStream() + _write_table(arrow_table, imos, version='2.6') + buf = imos.getvalue() + reader = pa.BufferReader(buf) + df_read = pq.read_pandas( + reader, columns=['strings', 'uint8'], + ).to_pandas() + tm.assert_frame_equal(df[['strings', 'uint8']], df_read) + + +@pytest.mark.pandas +def test_pandas_parquet_empty_roundtrip(): + df = _test_dataframe(0) + arrow_table = pa.Table.from_pandas(df) + imos = pa.BufferOutputStream() + _write_table(arrow_table, imos, version='2.6') + buf = imos.getvalue() + reader = pa.BufferReader(buf) + df_read = _read_table(reader).to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_pandas_can_write_nested_data(): + data = { + "agg_col": [ + {"page_type": 1}, + {"record_type": 1}, + {"non_consecutive_home": 0}, + ], + "uid_first": "1001" + } + df = pd.DataFrame(data=data) + arrow_table = pa.Table.from_pandas(df) + imos = pa.BufferOutputStream() + # This succeeds under V2 + _write_table(arrow_table, imos) + + +@pytest.mark.pandas +def test_pandas_parquet_pyfile_roundtrip(tempdir): + filename = tempdir / 'pandas_pyfile_roundtrip.parquet' + size = 5 + df = pd.DataFrame({ + 'int64': np.arange(size, dtype=np.int64), + 'float32': np.arange(size, dtype=np.float32), + 'float64': np.arange(size, dtype=np.float64), + 'bool': np.random.randn(size) > 0, + 'strings': ['foo', 'bar', None, 'baz', 'qux'] + }) + + arrow_table = pa.Table.from_pandas(df) + + with filename.open('wb') as f: + _write_table(arrow_table, f, version="2.6") + + data = io.BytesIO(filename.read_bytes()) + + table_read = _read_table(data) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_pandas_parquet_configuration_options(tempdir): + size = 10000 + np.random.seed(0) + df = pd.DataFrame({ + 'uint8': np.arange(size, dtype=np.uint8), + 'uint16': np.arange(size, dtype=np.uint16), + 'uint32': np.arange(size, dtype=np.uint32), + 'uint64': np.arange(size, dtype=np.uint64), + 'int8': np.arange(size, dtype=np.int16), + 'int16': np.arange(size, dtype=np.int16), + 'int32': np.arange(size, dtype=np.int32), + 'int64': np.arange(size, dtype=np.int64), + 'float32': np.arange(size, dtype=np.float32), + 'float64': np.arange(size, dtype=np.float64), + 'bool': np.random.randn(size) > 0 + }) + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df) + + for use_dictionary in [True, False]: + _write_table(arrow_table, filename, version='2.6', + use_dictionary=use_dictionary) + table_read = _read_table(filename) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + for write_statistics in [True, False]: + 
_write_table(arrow_table, filename, version='2.6', + write_statistics=write_statistics) + table_read = _read_table(filename) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']: + if (compression != 'NONE' and + not pa.lib.Codec.is_available(compression)): + continue + _write_table(arrow_table, filename, version='2.6', + compression=compression) + table_read = _read_table(filename) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +@pytest.mark.filterwarnings("ignore:Parquet format '2.0':FutureWarning") +def test_spark_flavor_preserves_pandas_metadata(): + df = _test_dataframe(size=100) + df.index = np.arange(0, 10 * len(df), 10) + df.index.name = 'foo' + + result = _roundtrip_pandas_dataframe(df, {'version': '2.0', + 'flavor': 'spark'}) + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +def test_index_column_name_duplicate(tempdir): + data = { + 'close': { + pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998, + pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998, + }, + 'time': { + pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp( + '2017-06-30 01:31:00' + ), + pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp( + '2017-06-30 01:32:00' + ), + } + } + path = str(tempdir / 'data.parquet') + + # Pandas v2 defaults to [ns], but Arrow defaults to [us] time units + # so we need to cast the pandas dtype. Pandas v1 will always silently + # coerce to [ns] due to lack of non-[ns] support. + dfx = pd.DataFrame(data, dtype='datetime64[us]').set_index('time', drop=False) + + tdfx = pa.Table.from_pandas(dfx) + _write_table(tdfx, path) + arrow_table = _read_table(path) + result_df = arrow_table.to_pandas() + tm.assert_frame_equal(result_df, dfx) + + +@pytest.mark.pandas +def test_multiindex_duplicate_values(tempdir): + num_rows = 3 + numbers = list(range(num_rows)) + index = pd.MultiIndex.from_arrays( + [['foo', 'foo', 'bar'], numbers], + names=['foobar', 'some_numbers'], + ) + + df = pd.DataFrame({'numbers': numbers}, index=index) + table = pa.Table.from_pandas(df) + + filename = tempdir / 'dup_multi_index_levels.parquet' + + _write_table(table, filename) + result_table = _read_table(filename) + assert table.equals(result_table) + + result_df = result_table.to_pandas() + tm.assert_frame_equal(result_df, df) + + +@pytest.mark.pandas +def test_backwards_compatible_index_naming(datadir): + expected_string = b"""\ +carat cut color clarity depth table price x y z + 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 + 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 + 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 + 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 + 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 + 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 + 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 + 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 + 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 + 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39""" + expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}', + index_col=None, header=0, engine='python') + table = _read_table(datadir / 'v0.7.1.parquet') + result = table.to_pandas() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.pandas +def test_backwards_compatible_index_multi_level_named(datadir): + expected_string = b"""\ +carat cut color clarity depth table price x y z + 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 + 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 + 0.23 Good E 
VS1 56.9 65.0 327 4.05 4.07 2.31 + 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 + 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 + 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 + 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 + 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 + 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 + 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39""" + expected = pd.read_csv( + io.BytesIO(expected_string), sep=r'\s{2,}', + index_col=['cut', 'color', 'clarity'], + header=0, engine='python' + ).sort_index() + + table = _read_table(datadir / 'v0.7.1.all-named-index.parquet') + result = table.to_pandas() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.pandas +def test_backwards_compatible_index_multi_level_some_named(datadir): + expected_string = b"""\ +carat cut color clarity depth table price x y z + 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 + 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 + 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 + 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 + 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 + 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 + 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 + 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 + 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 + 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39""" + expected = pd.read_csv( + io.BytesIO(expected_string), + sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'], + header=0, engine='python' + ).sort_index() + expected.index = expected.index.set_names(['cut', None, 'clarity']) + + table = _read_table(datadir / 'v0.7.1.some-named-index.parquet') + result = table.to_pandas() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.pandas +def test_backwards_compatible_column_metadata_handling(datadir): + if Version("2.2.0") <= Version(pd.__version__): + # TODO: regression in pandas + # https://github.com/pandas-dev/pandas/issues/56775 + pytest.skip("Regression in pandas 2.2.0") + expected = pd.DataFrame( + {'a': [1, 2, 3], 'b': [.1, .2, .3], + 'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')}) + expected.index = pd.MultiIndex.from_arrays( + [['a', 'b', 'c'], + pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')], + names=['index', None]) + + path = datadir / 'v0.7.1.column-metadata-handling.parquet' + table = _read_table(path) + result = table.to_pandas() + tm.assert_frame_equal(result, expected) + + table = _read_table( + path, columns=['a']) + result = table.to_pandas() + tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True)) + + +@pytest.mark.pandas +def test_categorical_index_survives_roundtrip(): + # ARROW-3652, addressed by ARROW-3246 + df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2']) + df['c1'] = df['c1'].astype('category') + df = df.set_index(['c1']) + + table = pa.Table.from_pandas(df) + bos = pa.BufferOutputStream() + pq.write_table(table, bos) + ref_df = pq.read_pandas(bos.getvalue()).to_pandas() + assert isinstance(ref_df.index, pd.CategoricalIndex) + assert ref_df.index.equals(df.index) + + +@pytest.mark.pandas +def test_categorical_order_survives_roundtrip(): + # ARROW-6302 + df = pd.DataFrame({"a": pd.Categorical( + ["a", "b", "c", "a"], categories=["b", "c", "d"], ordered=True)}) + + table = pa.Table.from_pandas(df) + bos = pa.BufferOutputStream() + pq.write_table(table, bos) + + contents = bos.getvalue() + result = pq.read_pandas(contents).to_pandas() + + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +def 
test_pandas_categorical_na_type_row_groups(): + # ARROW-5085 + df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100}) + df_category = df.astype({"col": "category", "int": "category"}) + table = pa.Table.from_pandas(df) + table_cat = pa.Table.from_pandas(df_category) + buf = pa.BufferOutputStream() + + # it works + pq.write_table(table_cat, buf, version='2.6', chunk_size=10) + result = pq.read_table(buf.getvalue()) + + # Result is non-categorical + assert result[0].equals(table[0]) + assert result[1].equals(table[1]) + + +@pytest.mark.pandas +def test_pandas_categorical_roundtrip(): + # ARROW-5480, this was enabled by ARROW-3246 + + # Have one of the categories unobserved and include a null (-1) + codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32') + categories = ['foo', 'bar', 'baz'] + df = pd.DataFrame({'x': pd.Categorical.from_codes( + codes, categories=categories)}) + + buf = pa.BufferOutputStream() + pq.write_table(pa.table(df), buf) + + result = pq.read_table(buf.getvalue()).to_pandas() + assert result.x.dtype == 'category' + assert (result.x.cat.categories == categories).all() + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +def test_categories_with_string_pyarrow_dtype(tempdir): + # gh-33727: writing to parquet should not fail + if Version(pd.__version__) < Version("1.3.0"): + pytest.skip("PyArrow backed string data type introduced in pandas 1.3.0") + + df1 = pd.DataFrame({"x": ["foo", "bar", "foo"]}, dtype="string[pyarrow]") + df1 = df1.astype("category") + + df2 = pd.DataFrame({"x": ["foo", "bar", "foo"]}) + df2 = df2.astype("category") + + # categories should be converted to pa.Array + assert pa.array(df1["x"]).to_pylist() == pa.array(df2["x"]).to_pylist() + assert pa.array(df1["x"].cat.categories.values).to_pylist() == pa.array( + df2["x"].cat.categories.values).to_pylist() + + path = str(tempdir / 'cat.parquet') + pq.write_table(pa.table(df1), path) + result = pq.read_table(path).to_pandas() + + tm.assert_frame_equal(result, df2) + + +@pytest.mark.pandas +def test_write_to_dataset_pandas_preserve_extensiondtypes(tempdir): + df = pd.DataFrame({'part': 'a', "col": [1, 2, 3]}) + df['col'] = df['col'].astype("Int64") + table = pa.table(df) + + pq.write_to_dataset( + table, str(tempdir / "case1"), partition_cols=['part'], + ) + result = pq.read_table(str(tempdir / "case1")).to_pandas() + tm.assert_frame_equal(result[["col"]], df[["col"]]) + + pq.write_to_dataset(table, str(tempdir / "case2")) + result = pq.read_table(str(tempdir / "case2")).to_pandas() + tm.assert_frame_equal(result[["col"]], df[["col"]]) + + pq.write_table(table, str(tempdir / "data.parquet")) + result = pq.read_table(str(tempdir / "data.parquet")).to_pandas() + tm.assert_frame_equal(result[["col"]], df[["col"]]) + + +@pytest.mark.pandas +def test_write_to_dataset_pandas_preserve_index(tempdir): + # ARROW-8251 - preserve pandas index in roundtrip + + df = pd.DataFrame({'part': ['a', 'a', 'b'], "col": [1, 2, 3]}) + df.index = pd.Index(['a', 'b', 'c'], name="idx") + table = pa.table(df) + df_cat = df[["col", "part"]].copy() + df_cat["part"] = df_cat["part"].astype("category") + + pq.write_to_dataset( + table, str(tempdir / "case1"), partition_cols=['part'], + ) + result = pq.read_table(str(tempdir / "case1")).to_pandas() + tm.assert_frame_equal(result, df_cat) + + pq.write_to_dataset(table, str(tempdir / "case2")) + result = pq.read_table(str(tempdir / "case2")).to_pandas() + tm.assert_frame_equal(result, df) + + pq.write_table(table, str(tempdir / "data.parquet")) + result = 
pq.read_table(str(tempdir / "data.parquet")).to_pandas() + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +@pytest.mark.parametrize('preserve_index', [True, False, None]) +@pytest.mark.parametrize('metadata_fname', ["_metadata", "_common_metadata"]) +def test_dataset_read_pandas_common_metadata( + tempdir, preserve_index, metadata_fname +): + # ARROW-1103 + nfiles = 5 + size = 5 + + dirpath = tempdir / guid() + dirpath.mkdir() + + test_data = [] + frames = [] + paths = [] + for i in range(nfiles): + df = _test_dataframe(size, seed=i) + df.index = pd.Index( + np.arange(i * size, (i + 1) * size, dtype="int64"), name='index' + ) + + path = dirpath / '{}.parquet'.format(i) + + table = pa.Table.from_pandas(df, preserve_index=preserve_index) + + # Obliterate metadata + table = table.replace_schema_metadata(None) + assert table.schema.metadata is None + + _write_table(table, path) + test_data.append(table) + frames.append(df) + paths.append(path) + + # Write _metadata common file + table_for_metadata = pa.Table.from_pandas( + df, preserve_index=preserve_index + ) + pq.write_metadata(table_for_metadata.schema, dirpath / metadata_fname) + + dataset = pq.ParquetDataset(dirpath) + columns = ['uint8', 'strings'] + result = dataset.read_pandas(columns=columns).to_pandas() + expected = pd.concat([x[columns] for x in frames]) + expected.index.name = ( + df.index.name if preserve_index is not False else None) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.pandas +def test_read_pandas_passthrough_keywords(tempdir): + # ARROW-11464 - previously not all keywords were passed through (such as + # the filesystem keyword) + df = pd.DataFrame({'a': [1, 2, 3]}) + + filename = tempdir / 'data.parquet' + _write_table(df, filename) + + result = pq.read_pandas( + 'data.parquet', + filesystem=SubTreeFileSystem(str(tempdir), LocalFileSystem()) + ) + assert result.equals(pa.table(df)) + + +@pytest.mark.pandas +def test_read_pandas_map_fields(tempdir): + # ARROW-10140 - table created from Pandas with mapping fields + df = pd.DataFrame({ + 'col1': pd.Series([ + [('id', 'something'), ('value2', 'else')], + [('id', 'something2'), ('value', 'else2')], + ]), + 'col2': pd.Series(['foo', 'bar']) + }) + + filename = tempdir / 'data.parquet' + + udt = pa.map_(pa.string(), pa.string()) + schema = pa.schema([pa.field('col1', udt), pa.field('col2', pa.string())]) + arrow_table = pa.Table.from_pandas(df, schema) + + _write_table(arrow_table, filename) + + result = pq.read_pandas(filename).to_pandas() + tm.assert_frame_equal(result, df) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_parquet_file.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_parquet_file.py new file mode 100644 index 0000000000000000000000000000000000000000..93097a1afaac96d4260d5ce6ceca9224dfbc5ab2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_parquet_file.py @@ -0,0 +1,336 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import io +import os +import sys + +import pytest + +import pyarrow as pa + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import _write_table +except ImportError: + pq = None + +try: + import pandas as pd + import pandas.testing as tm + + from pyarrow.tests.parquet.common import alltypes_sample +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not parquet' +pytestmark = pytest.mark.parquet + + +@pytest.mark.pandas +def test_pass_separate_metadata(): + # ARROW-471 + df = alltypes_sample(size=10000) + + a_table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(a_table, buf, compression='snappy', version='2.6') + + buf.seek(0) + metadata = pq.read_metadata(buf) + + buf.seek(0) + + fileh = pq.ParquetFile(buf, metadata=metadata) + + tm.assert_frame_equal(df, fileh.read().to_pandas()) + + +@pytest.mark.pandas +def test_read_single_row_group(): + # ARROW-471 + N, K = 10000, 4 + df = alltypes_sample(size=N) + + a_table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(a_table, buf, row_group_size=N / K, + compression='snappy', version='2.6') + + buf.seek(0) + + pf = pq.ParquetFile(buf) + + assert pf.num_row_groups == K + + row_groups = [pf.read_row_group(i) for i in range(K)] + result = pa.concat_tables(row_groups) + tm.assert_frame_equal(df, result.to_pandas()) + + +@pytest.mark.pandas +def test_read_single_row_group_with_column_subset(): + N, K = 10000, 4 + df = alltypes_sample(size=N) + a_table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(a_table, buf, row_group_size=N / K, + compression='snappy', version='2.6') + + buf.seek(0) + pf = pq.ParquetFile(buf) + + cols = list(df.columns[:2]) + row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)] + result = pa.concat_tables(row_groups) + tm.assert_frame_equal(df[cols], result.to_pandas()) + + # ARROW-4267: Selection of duplicate columns still leads to these columns + # being read uniquely. 
+ row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)] + result = pa.concat_tables(row_groups) + tm.assert_frame_equal(df[cols], result.to_pandas()) + + +@pytest.mark.pandas +def test_read_multiple_row_groups(): + N, K = 10000, 4 + df = alltypes_sample(size=N) + + a_table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(a_table, buf, row_group_size=N / K, + compression='snappy', version='2.6') + + buf.seek(0) + + pf = pq.ParquetFile(buf) + + assert pf.num_row_groups == K + + result = pf.read_row_groups(range(K)) + tm.assert_frame_equal(df, result.to_pandas()) + + +@pytest.mark.pandas +def test_read_multiple_row_groups_with_column_subset(): + N, K = 10000, 4 + df = alltypes_sample(size=N) + a_table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(a_table, buf, row_group_size=N / K, + compression='snappy', version='2.6') + + buf.seek(0) + pf = pq.ParquetFile(buf) + + cols = list(df.columns[:2]) + result = pf.read_row_groups(range(K), columns=cols) + tm.assert_frame_equal(df[cols], result.to_pandas()) + + # ARROW-4267: Selection of duplicate columns still leads to these columns + # being read uniquely. + result = pf.read_row_groups(range(K), columns=cols + cols) + tm.assert_frame_equal(df[cols], result.to_pandas()) + + +@pytest.mark.pandas +def test_scan_contents(): + N, K = 10000, 4 + df = alltypes_sample(size=N) + a_table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(a_table, buf, row_group_size=N / K, + compression='snappy', version='2.6') + + buf.seek(0) + pf = pq.ParquetFile(buf) + + assert pf.scan_contents() == 10000 + assert pf.scan_contents(df.columns[:4]) == 10000 + + +def test_parquet_file_pass_directory_instead_of_file(tempdir): + # ARROW-7208 + path = tempdir / 'directory' + os.mkdir(str(path)) + + msg = f"Cannot open for reading: path '{str(path)}' is a directory" + with pytest.raises(IOError) as exc: + pq.ParquetFile(path) + if exc.errisinstance(PermissionError) and sys.platform == 'win32': + return # Windows CI can get a PermissionError here. 
+ exc.match(msg) + + +def test_read_column_invalid_index(): + table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])], + names=['ints', 'strs']) + bio = pa.BufferOutputStream() + pq.write_table(table, bio) + f = pq.ParquetFile(bio.getvalue()) + assert f.reader.read_column(0).to_pylist() == [4, 5] + assert f.reader.read_column(1).to_pylist() == ["foo", "bar"] + for index in (-1, 2): + with pytest.raises((ValueError, IndexError)): + f.reader.read_column(index) + + +@pytest.mark.pandas +@pytest.mark.parametrize('batch_size', [300, 1000, 1300]) +def test_iter_batches_columns_reader(tempdir, batch_size): + total_size = 3000 + chunk_size = 1000 + # TODO: Add categorical support + df = alltypes_sample(size=total_size) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df) + _write_table(arrow_table, filename, version='2.6', + chunk_size=chunk_size) + + file_ = pq.ParquetFile(filename) + for columns in [df.columns[:10], df.columns[10:]]: + batches = file_.iter_batches(batch_size=batch_size, columns=columns) + batch_starts = range(0, total_size+batch_size, batch_size) + for batch, start in zip(batches, batch_starts): + end = min(total_size, start + batch_size) + tm.assert_frame_equal( + batch.to_pandas(), + df.iloc[start:end, :].loc[:, columns].reset_index(drop=True) + ) + + +@pytest.mark.pandas +@pytest.mark.parametrize('chunk_size', [1000]) +def test_iter_batches_reader(tempdir, chunk_size): + df = alltypes_sample(size=10000, categorical=True) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df) + assert arrow_table.schema.pandas_metadata is not None + + _write_table(arrow_table, filename, version='2.6', + chunk_size=chunk_size) + + file_ = pq.ParquetFile(filename) + + def get_all_batches(f): + for row_group in range(f.num_row_groups): + batches = f.iter_batches( + batch_size=900, + row_groups=[row_group], + ) + + for batch in batches: + yield batch + + batches = list(get_all_batches(file_)) + batch_no = 0 + + for i in range(file_.num_row_groups): + tm.assert_frame_equal( + batches[batch_no].to_pandas(), + file_.read_row_groups([i]).to_pandas().head(900) + ) + + batch_no += 1 + + tm.assert_frame_equal( + batches[batch_no].to_pandas().reset_index(drop=True), + file_.read_row_groups([i]).to_pandas().iloc[900:].reset_index( + drop=True + ) + ) + + batch_no += 1 + + +@pytest.mark.pandas +@pytest.mark.parametrize('pre_buffer', [False, True]) +def test_pre_buffer(pre_buffer): + N, K = 10000, 4 + df = alltypes_sample(size=N) + a_table = pa.Table.from_pandas(df) + + buf = io.BytesIO() + _write_table(a_table, buf, row_group_size=N / K, + compression='snappy', version='2.6') + + buf.seek(0) + pf = pq.ParquetFile(buf, pre_buffer=pre_buffer) + assert pf.read().num_rows == N + + +def test_parquet_file_explicitly_closed(tempdir): + """ + Unopened files should be closed explicitly after use, + and previously opened files should be left open. 
+ Applies to read_table, ParquetDataset, and ParquetFile + """ + # create test parquet file + fn = tempdir.joinpath('file.parquet') + table = pa.table({'col1': [0, 1], 'col2': [0, 1]}) + pq.write_table(table, fn) + + # ParquetFile with opened file (will leave open) + with open(fn, 'rb') as f: + with pq.ParquetFile(f) as p: + p.read() + assert not f.closed + assert not p.closed + assert not f.closed # opened input file was not closed + assert not p.closed # parquet file obj reports as not closed + assert f.closed + assert p.closed # parquet file being closed reflects underlying file + + # ParquetFile with unopened file (will close) + with pq.ParquetFile(fn) as p: + p.read() + assert not p.closed + assert p.closed # parquet file obj reports as closed + + +@pytest.mark.s3 +@pytest.mark.parametrize("use_uri", (True, False)) +def test_parquet_file_with_filesystem(s3_example_fs, use_uri): + s3_fs, s3_uri, s3_path = s3_example_fs + + args = (s3_uri if use_uri else s3_path,) + kwargs = {} if use_uri else dict(filesystem=s3_fs) + + table = pa.table({"a": range(10)}) + pq.write_table(table, s3_path, filesystem=s3_fs) + + parquet_file = pq.ParquetFile(*args, **kwargs) + assert parquet_file.read() == table + assert not parquet_file.closed + parquet_file.close() + assert parquet_file.closed + + with pq.ParquetFile(*args, **kwargs) as f: + assert f.read() == table + assert not f.closed + assert f.closed diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_parquet_writer.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_parquet_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..16584684f5c7f21afb63e4bb2ee054afed2f516a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_parquet_writer.py @@ -0,0 +1,391 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +import pyarrow as pa +from pyarrow import fs +from pyarrow.filesystem import FileSystem, LocalFileSystem + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import (_read_table, _test_dataframe, + _range_integers) +except ImportError: + pq = None + + +try: + import pandas as pd + import pandas.testing as tm + +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... 
-m 'not parquet' +pytestmark = pytest.mark.parquet + + +@pytest.mark.pandas +def test_parquet_incremental_file_build(tempdir): + df = _test_dataframe(100) + df['unique_id'] = 0 + + arrow_table = pa.Table.from_pandas(df, preserve_index=False) + out = pa.BufferOutputStream() + + writer = pq.ParquetWriter(out, arrow_table.schema, version='2.6') + + frames = [] + for i in range(10): + df['unique_id'] = i + arrow_table = pa.Table.from_pandas(df, preserve_index=False) + writer.write_table(arrow_table) + + frames.append(df.copy()) + + writer.close() + + buf = out.getvalue() + result = _read_table(pa.BufferReader(buf)) + + expected = pd.concat(frames, ignore_index=True) + tm.assert_frame_equal(result.to_pandas(), expected) + + +def test_validate_schema_write_table(tempdir): + # ARROW-2926 + simple_fields = [ + pa.field('POS', pa.uint32()), + pa.field('desc', pa.string()) + ] + + simple_schema = pa.schema(simple_fields) + + # simple_table schema does not match simple_schema + simple_from_array = [pa.array([1]), pa.array(['bla'])] + simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc']) + + path = tempdir / 'simple_validate_schema.parquet' + + with pq.ParquetWriter(path, simple_schema, + version='2.6', + compression='snappy', flavor='spark') as w: + with pytest.raises(ValueError): + w.write_table(simple_table) + + +def test_parquet_invalid_writer(tempdir): + # avoid segfaults with invalid construction + with pytest.raises(TypeError): + some_schema = pa.schema([pa.field("x", pa.int32())]) + pq.ParquetWriter(None, some_schema) + + with pytest.raises(TypeError): + pq.ParquetWriter(tempdir / "some_path", None) + + +@pytest.mark.pandas +def test_parquet_writer_context_obj(tempdir): + df = _test_dataframe(100) + df['unique_id'] = 0 + + arrow_table = pa.Table.from_pandas(df, preserve_index=False) + out = pa.BufferOutputStream() + + with pq.ParquetWriter(out, arrow_table.schema, version='2.6') as writer: + + frames = [] + for i in range(10): + df['unique_id'] = i + arrow_table = pa.Table.from_pandas(df, preserve_index=False) + writer.write_table(arrow_table) + + frames.append(df.copy()) + + buf = out.getvalue() + result = _read_table(pa.BufferReader(buf)) + + expected = pd.concat(frames, ignore_index=True) + tm.assert_frame_equal(result.to_pandas(), expected) + + +@pytest.mark.pandas +def test_parquet_writer_context_obj_with_exception(tempdir): + df = _test_dataframe(100) + df['unique_id'] = 0 + + arrow_table = pa.Table.from_pandas(df, preserve_index=False) + out = pa.BufferOutputStream() + error_text = 'Artificial Error' + + try: + with pq.ParquetWriter(out, + arrow_table.schema, + version='2.6') as writer: + + frames = [] + for i in range(10): + df['unique_id'] = i + arrow_table = pa.Table.from_pandas(df, preserve_index=False) + writer.write_table(arrow_table) + frames.append(df.copy()) + if i == 5: + raise ValueError(error_text) + except Exception as e: + assert str(e) == error_text + + buf = out.getvalue() + result = _read_table(pa.BufferReader(buf)) + + expected = pd.concat(frames, ignore_index=True) + tm.assert_frame_equal(result.to_pandas(), expected) + + +@pytest.mark.pandas +@pytest.mark.parametrize("filesystem", [ + None, + LocalFileSystem._get_instance(), + fs.LocalFileSystem(), +]) +def test_parquet_writer_write_wrappers(tempdir, filesystem): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + batch = pa.RecordBatch.from_pandas(df, preserve_index=False) + path_table = str(tempdir / 'data_table.parquet') + path_batch = str(tempdir / 
'data_batch.parquet') + + with pq.ParquetWriter( + path_table, table.schema, filesystem=filesystem, version='2.6' + ) as writer: + writer.write_table(table) + + result = _read_table(path_table).to_pandas() + tm.assert_frame_equal(result, df) + + with pq.ParquetWriter( + path_batch, table.schema, filesystem=filesystem, version='2.6' + ) as writer: + writer.write_batch(batch) + + result = _read_table(path_batch).to_pandas() + tm.assert_frame_equal(result, df) + + with pq.ParquetWriter( + path_table, table.schema, filesystem=filesystem, version='2.6' + ) as writer: + writer.write(table) + + result = _read_table(path_table).to_pandas() + tm.assert_frame_equal(result, df) + + with pq.ParquetWriter( + path_batch, table.schema, filesystem=filesystem, version='2.6' + ) as writer: + writer.write(batch) + + result = _read_table(path_batch).to_pandas() + tm.assert_frame_equal(result, df) + + +@pytest.mark.large_memory +@pytest.mark.pandas +def test_parquet_writer_chunk_size(tempdir): + default_chunk_size = 1024 * 1024 + abs_max_chunk_size = 64 * 1024 * 1024 + + def check_chunk_size(data_size, chunk_size, expect_num_chunks): + table = pa.Table.from_arrays([ + _range_integers(data_size, 'b') + ], names=['x']) + if chunk_size is None: + pq.write_table(table, tempdir / 'test.parquet') + else: + pq.write_table(table, tempdir / 'test.parquet', row_group_size=chunk_size) + metadata = pq.read_metadata(tempdir / 'test.parquet') + expected_chunk_size = default_chunk_size if chunk_size is None else chunk_size + assert metadata.num_row_groups == expect_num_chunks + latched_chunk_size = min(expected_chunk_size, abs_max_chunk_size) + # First chunks should be full size + for chunk_idx in range(expect_num_chunks - 1): + assert metadata.row_group(chunk_idx).num_rows == latched_chunk_size + # Last chunk may be smaller + remainder = data_size - (expected_chunk_size * (expect_num_chunks - 1)) + if remainder == 0: + assert metadata.row_group( + expect_num_chunks - 1).num_rows == latched_chunk_size + else: + assert metadata.row_group(expect_num_chunks - 1).num_rows == remainder + + check_chunk_size(default_chunk_size * 2, default_chunk_size - 100, 3) + check_chunk_size(default_chunk_size * 2, default_chunk_size, 2) + check_chunk_size(default_chunk_size * 2, default_chunk_size + 100, 2) + check_chunk_size(default_chunk_size + 100, default_chunk_size + 100, 1) + # Even though the chunk size requested is large enough it will be capped + # by the absolute max chunk size + check_chunk_size(abs_max_chunk_size * 2, abs_max_chunk_size * 2, 2) + + # These tests don't pass a chunk_size to write_table and so the chunk size + # should be default_chunk_size + check_chunk_size(default_chunk_size, None, 1) + check_chunk_size(default_chunk_size + 1, None, 2) + + +@pytest.mark.pandas +@pytest.mark.parametrize("filesystem", [ + None, + LocalFileSystem._get_instance(), + fs.LocalFileSystem(), +]) +def test_parquet_writer_filesystem_local(tempdir, filesystem): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + path = str(tempdir / 'data.parquet') + + with pq.ParquetWriter( + path, table.schema, filesystem=filesystem, version='2.6' + ) as writer: + writer.write_table(table) + + result = _read_table(path).to_pandas() + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_parquet_writer_filesystem_s3(s3_example_fs): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + + fs, uri, path = s3_example_fs + + with pq.ParquetWriter( + path, 
table.schema, filesystem=fs, version='2.6' + ) as writer: + writer.write_table(table) + + result = _read_table(uri).to_pandas() + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_parquet_writer_filesystem_s3_uri(s3_example_fs): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + + fs, uri, path = s3_example_fs + + with pq.ParquetWriter(uri, table.schema, version='2.6') as writer: + writer.write_table(table) + + result = _read_table(path, filesystem=fs).to_pandas() + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +@pytest.mark.s3 +def test_parquet_writer_filesystem_s3fs(s3_example_s3fs): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + + fs, directory = s3_example_s3fs + path = directory + "/test.parquet" + + with pq.ParquetWriter( + path, table.schema, filesystem=fs, version='2.6' + ) as writer: + writer.write_table(table) + + result = _read_table(path, filesystem=fs).to_pandas() + tm.assert_frame_equal(result, df) + + +@pytest.mark.pandas +def test_parquet_writer_filesystem_buffer_raises(): + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + filesystem = fs.LocalFileSystem() + + # Should raise ValueError when filesystem is passed with file-like object + with pytest.raises(ValueError, match="specified path is file-like"): + pq.ParquetWriter( + pa.BufferOutputStream(), table.schema, filesystem=filesystem + ) + + +@pytest.mark.pandas +def test_parquet_writer_with_caller_provided_filesystem(): + out = pa.BufferOutputStream() + + class CustomFS(FileSystem): + def __init__(self): + self.path = None + self.mode = None + + def open(self, path, mode='rb'): + self.path = path + self.mode = mode + return out + + fs = CustomFS() + fname = 'expected_fname.parquet' + df = _test_dataframe(100) + table = pa.Table.from_pandas(df, preserve_index=False) + + with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.6') \ + as writer: + writer.write_table(table) + + assert fs.path == fname + assert fs.mode == 'wb' + assert out.closed + + buf = out.getvalue() + table_read = _read_table(pa.BufferReader(buf)) + df_read = table_read.to_pandas() + tm.assert_frame_equal(df_read, df) + + # Should raise ValueError when filesystem is passed with file-like object + with pytest.raises(ValueError) as err_info: + pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs) + expected_msg = ("filesystem passed but where is file-like, so" + " there is nothing to open with filesystem.") + assert str(err_info) == expected_msg + + +def test_parquet_writer_store_schema(tempdir): + table = pa.table({'a': [1, 2, 3]}) + + # default -> write schema information + path1 = tempdir / 'test_with_schema.parquet' + with pq.ParquetWriter(path1, table.schema) as writer: + writer.write_table(table) + + meta = pq.read_metadata(path1) + assert b'ARROW:schema' in meta.metadata + assert meta.metadata[b'ARROW:schema'] + + # disable adding schema information + path2 = tempdir / 'test_without_schema.parquet' + with pq.ParquetWriter(path2, table.schema, store_schema=False) as writer: + writer.write_table(table) + + meta = pq.read_metadata(path2) + assert meta.metadata is None
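
Not part of the vendored test files above, but the row-group and batch-reading APIs they exercise (ParquetFile.read_row_group, read_row_groups, iter_batches) can be illustrated with a minimal standalone sketch. This is an assumption-level usage example, not code from the diff; the file name and column names are made up for illustration.

import pyarrow as pa
import pyarrow.parquet as pq

# Build a small table and write it with a fixed row-group size,
# mirroring what the tests do with row_group_size=N / K.
table = pa.table({"x": list(range(10_000)), "y": ["a"] * 10_000})
pq.write_table(table, "example.parquet", row_group_size=2_500)

pf = pq.ParquetFile("example.parquet")
assert pf.num_row_groups == 4

# Read a single row group, optionally restricted to a column subset.
first = pf.read_row_group(0, columns=["x"])
assert first.num_rows == 2_500

# Read several row groups at once.
both = pf.read_row_groups([0, 1])
assert both.num_rows == 5_000

# Stream record batches without materializing the whole file,
# as test_iter_batches_columns_reader does.
for batch in pf.iter_batches(batch_size=1_000, columns=["x"]):
    assert batch.num_rows <= 1_000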
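
Similarly, the incremental-write pattern used throughout the writer tests (ParquetWriter as a context manager, one write_table call per chunk, each producing its own row group) can be sketched as follows. Again this is an illustrative example under the assumption of a local path; it is not taken from the vendored files.

import pyarrow as pa
import pyarrow.parquet as pq

schema = pa.schema([("id", pa.int64()), ("value", pa.string())])

# Each write_table() call appends data to the open file;
# closing the context manager finalizes the footer.
with pq.ParquetWriter("incremental.parquet", schema, version="2.6") as writer:
    for i in range(3):
        chunk = pa.table(
            {"id": [i] * 4, "value": [f"row-{i}"] * 4},
            schema=schema,
        )
        writer.write_table(chunk)

assert pq.read_table("incremental.parquet").num_rows == 12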