diff --git a/ckpts/universal/global_step40/zero/19.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/19.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..dfd6b90d9f7bcf3e93339c9a8dd09d93c855e7b7 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ced0f2d1fdbd76cf895d36727809de67b20139df7662cd022b883efe9ecb5622 +size 9387 diff --git a/ckpts/universal/global_step40/zero/19.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/19.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..b10d4877b4efdf5fbb8e37968f7ae0c5543d1183 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bdbea9caf0e7e7b5bb293f6687533d0d585bd9059633a9c31d6d0a2e1680028 +size 9293 diff --git a/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..6198921c36412f8be6db7b2d9a26a16c2e0c9994 --- /dev/null +++ b/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7beded3102893a73e3cd4abef5837f706bc2f9b1537f51dbda8aa73d5209deed +size 33555612 diff --git a/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..1f9891c838d37e1faa06169a44224c8c9946c5da --- /dev/null +++ b/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3676a7f071672905f8ad6b2c66ff7213221cf0254c6f145dbccd47940b460d8b +size 33555627 diff --git a/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..8227d9d95d6411a68e13ae14e22e319f173a75f9 --- /dev/null +++ b/ckpts/universal/global_step40/zero/20.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd96afbbffe38b02733be55c4e3ecc8f22e0e28a982e3a3dae31519806e16c87 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bc243ac62cbe73033f7d1f7028c40b5cd364bf5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/__info__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e99919a6651dbc6ac6b0e287c4d02b8d8f2512b Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbaa29eb4d5459ba32beafb9c7eb9480fb12e63e Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/connection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ec963f66c216b9a26aeaab5f61fefe2e5ea47ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/context.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..531ae98135cd13dccf0a5ea74daeae4d11dcd993 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/forkserver.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b66d89915863830daaf75cd97e8eb4cfa157bd7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/heap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0e862767f2a2a415e8ddb0681310c07aabf5e2f Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/managers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b25c4c86cf1cfacc14ef424afd65b835c421789 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/pool.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aa6bbc9609b4a64cae88ff50989fa6ec03fdb00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_fork.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dfd76cf7682805c4cb58737254c1d2d32dff8ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_forkserver.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d80a0a88cd19465aeeb37d6ebcd6c9c16ba3fecd Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02adc308d9f01856df166ce358585ca8c372f842 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae44dd41113ba5c3a6e19b88954a2a417da1c6df Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/process.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..562a59d29ed3ad443b2aeb48fcafd8b3e42713ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/queues.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38d746d33e2f061bef5b980fd124c3cb37a5446a Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/reduction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14c21c1bc4b3154fdcaa0a276165ffc878fecf47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_sharer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc0e6a1ab9c04225d06c07358891d9949497a417 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/resource_tracker.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab6c9c42bda4936ad6b107eeff42354afa8e640d Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/shared_memory.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b516a473a45dfb14d17edd39f1302dfadfcebb02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/sharedctypes.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b59f2930089e006841c56f5c14d9844628fc08d6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/spawn.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2cc342b4a215fb843e1844fa239cc72061304f40
Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/synchronize.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c3765559facfc02bc1ef5767ffd9e0985546f1b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/__pycache__/util.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/multiprocess/context.py b/venv/lib/python3.10/site-packages/multiprocess/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8f29c65fe98b4c16598cdd3c93be195e1a84676
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/multiprocess/context.py
@@ -0,0 +1,376 @@
+import os
+import sys
+import threading
+
+from . import process
+from . import reduction
+
+__all__ = ()
+
+#
+# Exceptions
+#
+
+class ProcessError(Exception):
+    pass
+
+class BufferTooShort(ProcessError):
+    pass
+
+class TimeoutError(ProcessError):
+    pass
+
+class AuthenticationError(ProcessError):
+    pass
+
+#
+# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py
+#
+
+class BaseContext(object):
+
+    ProcessError = ProcessError
+    BufferTooShort = BufferTooShort
+    TimeoutError = TimeoutError
+    AuthenticationError = AuthenticationError
+
+    current_process = staticmethod(process.current_process)
+    parent_process = staticmethod(process.parent_process)
+    active_children = staticmethod(process.active_children)
+
+    def cpu_count(self):
+        '''Returns the number of CPUs in the system'''
+        num = os.cpu_count()
+        if num is None:
+            raise NotImplementedError('cannot determine number of cpus')
+        else:
+            return num
+
+    def Manager(self):
+        '''Returns a manager associated with a running server process
+
+        The manager's methods such as `Lock()`, `Condition()` and `Queue()`
+        can be used to create shared objects.
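+
+        A minimal usage sketch (illustrative note, not in the original
+        docstring; it assumes the package is imported as `multiprocess`):
+
+            import multiprocess as mp
+
+            with mp.Manager() as m:
+                d = m.dict()   # proxy for a dict held in the server process
+                d['answer'] = 42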
+        '''
+        from .managers import SyncManager
+        m = SyncManager(ctx=self.get_context())
+        m.start()
+        return m
+
+    def Pipe(self, duplex=True):
+        '''Returns two connection objects connected by a pipe'''
+        from .connection import Pipe
+        return Pipe(duplex)
+
+    def Lock(self):
+        '''Returns a non-recursive lock object'''
+        from .synchronize import Lock
+        return Lock(ctx=self.get_context())
+
+    def RLock(self):
+        '''Returns a recursive lock object'''
+        from .synchronize import RLock
+        return RLock(ctx=self.get_context())
+
+    def Condition(self, lock=None):
+        '''Returns a condition object'''
+        from .synchronize import Condition
+        return Condition(lock, ctx=self.get_context())
+
+    def Semaphore(self, value=1):
+        '''Returns a semaphore object'''
+        from .synchronize import Semaphore
+        return Semaphore(value, ctx=self.get_context())
+
+    def BoundedSemaphore(self, value=1):
+        '''Returns a bounded semaphore object'''
+        from .synchronize import BoundedSemaphore
+        return BoundedSemaphore(value, ctx=self.get_context())
+
+    def Event(self):
+        '''Returns an event object'''
+        from .synchronize import Event
+        return Event(ctx=self.get_context())
+
+    def Barrier(self, parties, action=None, timeout=None):
+        '''Returns a barrier object'''
+        from .synchronize import Barrier
+        return Barrier(parties, action, timeout, ctx=self.get_context())
+
+    def Queue(self, maxsize=0):
+        '''Returns a queue object'''
+        from .queues import Queue
+        return Queue(maxsize, ctx=self.get_context())
+
+    def JoinableQueue(self, maxsize=0):
+        '''Returns a queue object'''
+        from .queues import JoinableQueue
+        return JoinableQueue(maxsize, ctx=self.get_context())
+
+    def SimpleQueue(self):
+        '''Returns a queue object'''
+        from .queues import SimpleQueue
+        return SimpleQueue(ctx=self.get_context())
+
+    def Pool(self, processes=None, initializer=None, initargs=(),
+             maxtasksperchild=None):
+        '''Returns a process pool object'''
+        from .pool import Pool
+        return Pool(processes, initializer, initargs, maxtasksperchild,
+                    context=self.get_context())
+
+    def RawValue(self, typecode_or_type, *args):
+        '''Returns a shared object'''
+        from .sharedctypes import RawValue
+        return RawValue(typecode_or_type, *args)
+
+    def RawArray(self, typecode_or_type, size_or_initializer):
+        '''Returns a shared array'''
+        from .sharedctypes import RawArray
+        return RawArray(typecode_or_type, size_or_initializer)
+
+    def Value(self, typecode_or_type, *args, lock=True):
+        '''Returns a synchronized shared object'''
+        from .sharedctypes import Value
+        return Value(typecode_or_type, *args, lock=lock,
+                     ctx=self.get_context())
+
+    def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
+        '''Returns a synchronized shared array'''
+        from .sharedctypes import Array
+        return Array(typecode_or_type, size_or_initializer, lock=lock,
+                     ctx=self.get_context())
+
+    def freeze_support(self):
+        '''Check whether this is a fake forked process in a frozen executable.
+        If so then run code specified by commandline and exit.
+        '''
+        if sys.platform == 'win32' and getattr(sys, 'frozen', False):
+            from .spawn import freeze_support
+            freeze_support()
+
+    def get_logger(self):
+        '''Return package logger -- if it does not already exist then
+        it is created.
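+
+        Illustrative use (added note, not in the original docstring):
+
+            import logging
+            import multiprocess as mp
+
+            logger = mp.get_logger()
+            logger.setLevel(logging.INFO)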
+ ''' + from .util import get_logger + return get_logger() + + def log_to_stderr(self, level=None): + '''Turn on logging and add a handler which prints to stderr''' + from .util import log_to_stderr + return log_to_stderr(level) + + def allow_connection_pickling(self): + '''Install support for sending connections and sockets + between processes + ''' + # This is undocumented. In previous versions of multiprocessing + # its only effect was to make socket objects inheritable on Windows. + from . import connection + + def set_executable(self, executable): + '''Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + ''' + from .spawn import set_executable + set_executable(executable) + + def set_forkserver_preload(self, module_names): + '''Set list of module names to try to load in forkserver process. + This is really just a hint. + ''' + from .forkserver import set_forkserver_preload + set_forkserver_preload(module_names) + + def get_context(self, method=None): + if method is None: + return self + try: + ctx = _concrete_contexts[method] + except KeyError: + raise ValueError('cannot find context for %r' % method) from None + ctx._check_available() + return ctx + + def get_start_method(self, allow_none=False): + return self._name + + def set_start_method(self, method, force=False): + raise ValueError('cannot set start method of concrete context') + + @property + def reducer(self): + '''Controls how objects will be reduced to a form that can be + shared with other processes.''' + return globals().get('reduction') + + @reducer.setter + def reducer(self, reduction): + globals()['reduction'] = reduction + + def _check_available(self): + pass + +# +# Type of default context -- underlying context can be set at most once +# + +class Process(process.BaseProcess): + _start_method = None + @staticmethod + def _Popen(process_obj): + return _default_context.get_context().Process._Popen(process_obj) + + @staticmethod + def _after_fork(): + return _default_context.get_context().Process._after_fork() + +class DefaultContext(BaseContext): + Process = Process + + def __init__(self, context): + self._default_context = context + self._actual_context = None + + def get_context(self, method=None): + if method is None: + if self._actual_context is None: + self._actual_context = self._default_context + return self._actual_context + else: + return super().get_context(method) + + def set_start_method(self, method, force=False): + if self._actual_context is not None and not force: + raise RuntimeError('context has already been set') + if method is None and force: + self._actual_context = None + return + self._actual_context = self.get_context(method) + + def get_start_method(self, allow_none=False): + if self._actual_context is None: + if allow_none: + return None + self._actual_context = self._default_context + return self._actual_context._name + + def get_all_start_methods(self): + if sys.platform == 'win32': + return ['spawn'] + else: + methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] + if reduction.HAVE_SEND_HANDLE: + methods.append('forkserver') + return methods + + +# +# Context types for fixed start method +# + +if sys.platform != 'win32': + + class ForkProcess(process.BaseProcess): + _start_method = 'fork' + @staticmethod + def _Popen(process_obj): + from .popen_fork import Popen + return Popen(process_obj) + + class SpawnProcess(process.BaseProcess): + 
_start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_posix import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class ForkServerProcess(process.BaseProcess): + _start_method = 'forkserver' + @staticmethod + def _Popen(process_obj): + from .popen_forkserver import Popen + return Popen(process_obj) + + class ForkContext(BaseContext): + _name = 'fork' + Process = ForkProcess + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + class ForkServerContext(BaseContext): + _name = 'forkserver' + Process = ForkServerProcess + def _check_available(self): + if not reduction.HAVE_SEND_HANDLE: + raise ValueError('forkserver start method not available') + + _concrete_contexts = { + 'fork': ForkContext(), + 'spawn': SpawnContext(), + 'forkserver': ForkServerContext(), + } + if sys.platform == 'darwin': + # bpo-33725: running arbitrary code after fork() is no longer reliable + # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. + _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn + else: + _default_context = DefaultContext(_concrete_contexts['fork']) + +else: + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_win32 import Popen + return Popen(process_obj) + + @staticmethod + def _after_fork(): + # process is spawned, nothing to do + pass + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + _concrete_contexts = { + 'spawn': SpawnContext(), + } + _default_context = DefaultContext(_concrete_contexts['spawn']) + +# +# Force the start method +# + +def _force_start_method(method): + _default_context._actual_context = _concrete_contexts[method] + +# +# Check that the current thread is spawning a child process +# + +_tls = threading.local() + +def get_spawning_popen(): + return getattr(_tls, 'spawning_popen', None) + +def set_spawning_popen(popen): + _tls.spawning_popen = popen + +def assert_spawning(obj): + if get_spawning_popen() is None: + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(obj).__name__ + ) diff --git a/venv/lib/python3.10/site-packages/multiprocess/dummy/__init__.py b/venv/lib/python3.10/site-packages/multiprocess/dummy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1468609e347b3a0b9281e5c9e6ec311fcb37e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/dummy/__init__.py @@ -0,0 +1,126 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
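+#
+# Illustrative note (added; not in the original header): this module
+# mirrors the multiprocess API using threads, so, for example,
+#
+#     from multiprocess.dummy import Pool
+#     with Pool(4) as p:
+#         lengths = p.map(len, ['a', 'bb', 'ccc'])
+#
+# runs the map in worker threads instead of child processes.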
+# + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' + ] + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from .connection import Pipe +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event, Condition, Barrier +from queue import Queue + +# +# +# + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + if self._parent is not current_process(): + raise RuntimeError( + "Parent is {0!r} but current_process is {1!r}".format( + self._parent, current_process())) + self._start_called = True + if hasattr(self._parent, '_children'): + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + +# +# +# + +Process = DummyProcess +current_process = threading.current_thread +current_process()._children = weakref.WeakKeyDictionary() + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + +def freeze_support(): + pass + +# +# +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +dict = dict +list = list + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + def __repr__(self): + return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) + +def Manager(): + return sys.modules[__name__] + +def shutdown(): + pass + +def Pool(processes=None, initializer=None, initargs=()): + from ..pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..802d389fe3fedcf69c12709d414ceadaa7458c16 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70eb574f2d8dd79f64fd3d47f8210bd5ef8dc629 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/dummy/__pycache__/connection.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/multiprocess/dummy/connection.py b/venv/lib/python3.10/site-packages/multiprocess/dummy/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ce320fcf514083f3a6477e87abf40e9719285a --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/dummy/connection.py @@ -0,0 +1,75 @@ +# +# Analogue of `multiprocessing.connection` which uses queues instead of sockets +# +# multiprocessing/dummy/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe' ] + +from queue import Queue + + +families = [None] + + +class Listener(object): + + def __init__(self, address=None, family=None, backlog=1): + self._backlog_queue = Queue(backlog) + + def accept(self): + return Connection(*self._backlog_queue.get()) + + def close(self): + self._backlog_queue = None + + @property + def address(self): + return self._backlog_queue + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address): + _in, _out = Queue(), Queue() + address.put((_out, _in)) + return Connection(_in, _out) + + +def Pipe(duplex=True): + a, b = Queue(), Queue() + return Connection(a, b), Connection(b, a) + + +class Connection(object): + + def __init__(self, _in, _out): + self._out = _out + self._in = _in + self.send = self.send_bytes = _out.put + self.recv = self.recv_bytes = _in.get + + def poll(self, timeout=0.0): + if self._in.qsize() > 0: + return True + if timeout <= 0.0: + return False + with self._in.not_empty: + self._in.not_empty.wait(timeout) + return self._in.qsize() > 0 + + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() diff --git a/venv/lib/python3.10/site-packages/multiprocess/managers.py b/venv/lib/python3.10/site-packages/multiprocess/managers.py new file mode 100644 index 0000000000000000000000000000000000000000..50989cbe0a841dd91924e02df4eaaf50c9b5b2fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/managers.py @@ -0,0 +1,1378 @@ +# +# Module providing manager classes for dealing +# with shared objects +# +# multiprocessing/managers.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] + +# +# Imports +# + +import sys +import threading +import signal +import array +import queue +import time +import types +import os +from os import getpid + +from traceback import format_exc + +from . import connection +from .context import reduction, get_spawning_popen, ProcessError +from . import pool +from . import process +from . import util +from . import get_context +try: + from . 
import shared_memory +except ImportError: + HAS_SHMEM = False +else: + HAS_SHMEM = True + __all__.append('SharedMemoryManager') + +# +# Register some things for pickling +# + +def reduce_array(a): + return array.array, (a.typecode, a.tobytes()) +reduction.register(array.array, reduce_array) + +view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] +if view_types[0] is not list: # only needed in Py3.0 + def rebuild_as_list(obj): + return list, (list(obj),) + for view_type in view_types: + reduction.register(view_type, rebuild_as_list) + +# +# Type for identifying shared objects +# + +class Token(object): + ''' + Type to uniquely identify a shared object + ''' + __slots__ = ('typeid', 'address', 'id') + + def __init__(self, typeid, address, id): + (self.typeid, self.address, self.id) = (typeid, address, id) + + def __getstate__(self): + return (self.typeid, self.address, self.id) + + def __setstate__(self, state): + (self.typeid, self.address, self.id) = state + + def __repr__(self): + return '%s(typeid=%r, address=%r, id=%r)' % \ + (self.__class__.__name__, self.typeid, self.address, self.id) + +# +# Function for communication with a manager's server process +# + +def dispatch(c, id, methodname, args=(), kwds={}): + ''' + Send a message to manager using connection `c` and return response + ''' + c.send((id, methodname, args, kwds)) + kind, result = c.recv() + if kind == '#RETURN': + return result + raise convert_to_error(kind, result) + +def convert_to_error(kind, result): + if kind == '#ERROR': + return result + elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): + if not isinstance(result, str): + raise TypeError( + "Result {0!r} (kind '{1}') type is {2}, not str".format( + result, kind, type(result))) + if kind == '#UNSERIALIZABLE': + return RemoteError('Unserializable message: %s\n' % result) + else: + return RemoteError(result) + else: + return ValueError('Unrecognized message type {!r}'.format(kind)) + +class RemoteError(Exception): + def __str__(self): + return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) + +# +# Functions for finding the method names of an object +# + +def all_methods(obj): + ''' + Return a list of names of methods of `obj` + ''' + temp = [] + for name in dir(obj): + func = getattr(obj, name) + if callable(func): + temp.append(name) + return temp + +def public_methods(obj): + ''' + Return a list of names of methods of `obj` which do not start with '_' + ''' + return [name for name in all_methods(obj) if name[0] != '_'] + +# +# Server which is run in a process controlled by a manager +# + +class Server(object): + ''' + Server class which runs in a process controlled by a manager object + ''' + public = ['shutdown', 'create', 'accept_connection', 'get_methods', + 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] + + def __init__(self, registry, address, authkey, serializer): + if not isinstance(authkey, bytes): + raise TypeError( + "Authkey {0!r} is type {1!s}, not bytes".format( + authkey, type(authkey))) + self.registry = registry + self.authkey = process.AuthenticationString(authkey) + Listener, Client = listener_client[serializer] + + # do authentication later + self.listener = Listener(address=address, backlog=16) + self.address = self.listener.address + + self.id_to_obj = {'0': (None, ())} + self.id_to_refcount = {} + self.id_to_local_proxy_obj = {} + self.mutex = threading.Lock() + + def serve_forever(self): + ''' + Run the server forever + ''' + self.stop_event = threading.Event() + 
process.current_process()._manager_server = self + try: + accepter = threading.Thread(target=self.accepter) + accepter.daemon = True + accepter.start() + try: + while not self.stop_event.is_set(): + self.stop_event.wait(1) + except (KeyboardInterrupt, SystemExit): + pass + finally: + if sys.stdout != sys.__stdout__: # what about stderr? + util.debug('resetting stdout, stderr') + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + sys.exit(0) + + def accepter(self): + while True: + try: + c = self.listener.accept() + except OSError: + continue + t = threading.Thread(target=self.handle_request, args=(c,)) + t.daemon = True + t.start() + + def _handle_request(self, c): + request = None + try: + connection.deliver_challenge(c, self.authkey) + connection.answer_challenge(c, self.authkey) + request = c.recv() + ignore, funcname, args, kwds = request + assert funcname in self.public, '%r unrecognized' % funcname + func = getattr(self, funcname) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + try: + result = func(c, *args, **kwds) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + msg = ('#RETURN', result) + + try: + c.send(msg) + except Exception as e: + try: + c.send(('#TRACEBACK', format_exc())) + except Exception: + pass + util.info('Failure to send message: %r', msg) + util.info(' ... request was %r', request) + util.info(' ... exception was %r', e) + + def handle_request(self, conn): + ''' + Handle a new connection + ''' + try: + self._handle_request(conn) + except SystemExit: + # Server.serve_client() calls sys.exit(0) on EOF + pass + finally: + conn.close() + + def serve_client(self, conn): + ''' + Handle requests from the proxies in a particular process/thread + ''' + util.debug('starting server thread to service %r', + threading.current_thread().name) + + recv = conn.recv + send = conn.send + id_to_obj = self.id_to_obj + + while not self.stop_event.is_set(): + + try: + methodname = obj = None + request = recv() + ident, methodname, args, kwds = request + try: + obj, exposed, gettypeid = id_to_obj[ident] + except KeyError as ke: + try: + obj, exposed, gettypeid = \ + self.id_to_local_proxy_obj[ident] + except KeyError: + raise ke + + if methodname not in exposed: + raise AttributeError( + 'method %r of %r object is not in exposed=%r' % + (methodname, type(obj), exposed) + ) + + function = getattr(obj, methodname) + + try: + res = function(*args, **kwds) + except Exception as e: + msg = ('#ERROR', e) + else: + typeid = gettypeid and gettypeid.get(methodname, None) + if typeid: + rident, rexposed = self.create(conn, typeid, res) + token = Token(typeid, self.address, rident) + msg = ('#PROXY', (rexposed, token)) + else: + msg = ('#RETURN', res) + + except AttributeError: + if methodname is None: + msg = ('#TRACEBACK', format_exc()) + else: + try: + fallback_func = self.fallback_mapping[methodname] + result = fallback_func( + self, conn, ident, obj, *args, **kwds + ) + msg = ('#RETURN', result) + except Exception: + msg = ('#TRACEBACK', format_exc()) + + except EOFError: + util.debug('got EOF -- exiting thread serving %r', + threading.current_thread().name) + sys.exit(0) + + except Exception: + msg = ('#TRACEBACK', format_exc()) + + try: + try: + send(msg) + except Exception: + send(('#UNSERIALIZABLE', format_exc())) + except Exception as e: + util.info('exception in thread serving %r', + threading.current_thread().name) + util.info(' ... message was %r', msg) + util.info(' ... 
exception was %r', e) + conn.close() + sys.exit(1) + + def fallback_getvalue(self, conn, ident, obj): + return obj + + def fallback_str(self, conn, ident, obj): + return str(obj) + + def fallback_repr(self, conn, ident, obj): + return repr(obj) + + fallback_mapping = { + '__str__':fallback_str, + '__repr__':fallback_repr, + '#GETVALUE':fallback_getvalue + } + + def dummy(self, c): + pass + + def debug_info(self, c): + ''' + Return some info --- useful to spot problems with refcounting + ''' + # Perhaps include debug info about 'c'? + with self.mutex: + result = [] + keys = list(self.id_to_refcount.keys()) + keys.sort() + for ident in keys: + if ident != '0': + result.append(' %s: refcount=%s\n %s' % + (ident, self.id_to_refcount[ident], + str(self.id_to_obj[ident][0])[:75])) + return '\n'.join(result) + + def number_of_objects(self, c): + ''' + Number of shared objects + ''' + # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' + return len(self.id_to_refcount) + + def shutdown(self, c): + ''' + Shutdown this process + ''' + try: + util.debug('manager received shutdown message') + c.send(('#RETURN', None)) + except: + import traceback + traceback.print_exc() + finally: + self.stop_event.set() + + def create(self, c, typeid, /, *args, **kwds): + ''' + Create a new shared object and return its id + ''' + with self.mutex: + callable, exposed, method_to_typeid, proxytype = \ + self.registry[typeid] + + if callable is None: + if kwds or (len(args) != 1): + raise ValueError( + "Without callable, must have one non-keyword argument") + obj = args[0] + else: + obj = callable(*args, **kwds) + + if exposed is None: + exposed = public_methods(obj) + if method_to_typeid is not None: + if not isinstance(method_to_typeid, dict): + raise TypeError( + "Method_to_typeid {0!r}: type {1!s}, not dict".format( + method_to_typeid, type(method_to_typeid))) + exposed = list(exposed) + list(method_to_typeid) + + ident = '%x' % id(obj) # convert to string because xmlrpclib + # only has 32 bit signed integers + util.debug('%r callable returned object with id %r', typeid, ident) + + self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) + if ident not in self.id_to_refcount: + self.id_to_refcount[ident] = 0 + + self.incref(c, ident) + return ident, tuple(exposed) + + def get_methods(self, c, token): + ''' + Return the methods of the shared object indicated by token + ''' + return tuple(self.id_to_obj[token.id][1]) + + def accept_connection(self, c, name): + ''' + Spawn a new thread to serve this connection + ''' + threading.current_thread().name = name + c.send(('#RETURN', None)) + self.serve_client(c) + + def incref(self, c, ident): + with self.mutex: + try: + self.id_to_refcount[ident] += 1 + except KeyError as ke: + # If no external references exist but an internal (to the + # manager) still does and a new external reference is created + # from it, restore the manager's tracking of it from the + # previously stashed internal ref. 
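+                # (Added note: in practice this path is reached when a
+                # client first obtains a proxy for a nested managed object
+                # that, until now, was referenced only inside the manager
+                # itself.)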
+ if ident in self.id_to_local_proxy_obj: + self.id_to_refcount[ident] = 1 + self.id_to_obj[ident] = \ + self.id_to_local_proxy_obj[ident] + obj, exposed, gettypeid = self.id_to_obj[ident] + util.debug('Server re-enabled tracking & INCREF %r', ident) + else: + raise ke + + def decref(self, c, ident): + if ident not in self.id_to_refcount and \ + ident in self.id_to_local_proxy_obj: + util.debug('Server DECREF skipping %r', ident) + return + + with self.mutex: + if self.id_to_refcount[ident] <= 0: + raise AssertionError( + "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( + ident, self.id_to_obj[ident], + self.id_to_refcount[ident])) + self.id_to_refcount[ident] -= 1 + if self.id_to_refcount[ident] == 0: + del self.id_to_refcount[ident] + + if ident not in self.id_to_refcount: + # Two-step process in case the object turns out to contain other + # proxy objects (e.g. a managed list of managed lists). + # Otherwise, deleting self.id_to_obj[ident] would trigger the + # deleting of the stored value (another managed object) which would + # in turn attempt to acquire the mutex that is already held here. + self.id_to_obj[ident] = (None, (), None) # thread-safe + util.debug('disposing of obj with id %r', ident) + with self.mutex: + del self.id_to_obj[ident] + + +# +# Class to represent state of a manager +# + +class State(object): + __slots__ = ['value'] + INITIAL = 0 + STARTED = 1 + SHUTDOWN = 2 + +# +# Mapping from serializer name to Listener and Client types +# + +listener_client = { #XXX: register dill? + 'pickle' : (connection.Listener, connection.Client), + 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) + } + +# +# Definition of BaseManager +# + +class BaseManager(object): + ''' + Base class for managers + ''' + _registry = {} + _Server = Server + + def __init__(self, address=None, authkey=None, serializer='pickle', + ctx=None): + if authkey is None: + authkey = process.current_process().authkey + self._address = address # XXX not final address if eg ('', 0) + self._authkey = process.AuthenticationString(authkey) + self._state = State() + self._state.value = State.INITIAL + self._serializer = serializer + self._Listener, self._Client = listener_client[serializer] + self._ctx = ctx or get_context() + + def get_server(self): + ''' + Return server object with serve_forever() method and address attribute + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return Server(self._registry, self._address, + self._authkey, self._serializer) + + def connect(self): + ''' + Connect manager object to the server process + ''' + Listener, Client = listener_client[self._serializer] + conn = Client(self._address, authkey=self._authkey) + dispatch(conn, None, 'dummy') + self._state.value = State.STARTED + + def start(self, initializer=None, initargs=()): + ''' + Spawn a server process for this manager object + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + # pipe over which we 
will retrieve address of server + reader, writer = connection.Pipe(duplex=False) + + # spawn process which runs a server + self._process = self._ctx.Process( + target=type(self)._run_server, + args=(self._registry, self._address, self._authkey, + self._serializer, writer, initializer, initargs), + ) + ident = ':'.join(str(i) for i in self._process._identity) + self._process.name = type(self).__name__ + '-' + ident + self._process.start() + + # get address of server + writer.close() + self._address = reader.recv() + reader.close() + + # register a finalizer + self._state.value = State.STARTED + self.shutdown = util.Finalize( + self, type(self)._finalize_manager, + args=(self._process, self._address, self._authkey, + self._state, self._Client), + exitpriority=0 + ) + + @classmethod + def _run_server(cls, registry, address, authkey, serializer, writer, + initializer=None, initargs=()): + ''' + Create a server, report its address and run it + ''' + # bpo-36368: protect server process from KeyboardInterrupt signals + signal.signal(signal.SIGINT, signal.SIG_IGN) + + if initializer is not None: + initializer(*initargs) + + # create server + server = cls._Server(registry, address, authkey, serializer) + + # inform parent process of the server's address + writer.send(server.address) + writer.close() + + # run the manager + util.info('manager serving at %r', server.address) + server.serve_forever() + + def _create(self, typeid, /, *args, **kwds): + ''' + Create a new shared object; return the token and exposed tuple + ''' + assert self._state.value == State.STARTED, 'server not yet started' + conn = self._Client(self._address, authkey=self._authkey) + try: + id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) + finally: + conn.close() + return Token(typeid, self._address, id), exposed + + def join(self, timeout=None): + ''' + Join the manager process (if it has been spawned) + ''' + if self._process is not None: + self._process.join(timeout) + if not self._process.is_alive(): + self._process = None + + def _debug_info(self): + ''' + Return some info about the servers shared objects and connections + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'debug_info') + finally: + conn.close() + + def _number_of_objects(self): + ''' + Return the number of shared objects + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'number_of_objects') + finally: + conn.close() + + def __enter__(self): + if self._state.value == State.INITIAL: + self.start() + if self._state.value != State.STARTED: + if self._state.value == State.INITIAL: + raise ProcessError("Unable to start server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown() + + @staticmethod + def _finalize_manager(process, address, authkey, state, _Client): + ''' + Shutdown the manager process; will be registered as a finalizer + ''' + if process.is_alive(): + util.info('sending shutdown message to manager') + try: + conn = _Client(address, authkey=authkey) + try: + dispatch(conn, None, 'shutdown') + finally: + conn.close() + except Exception: + pass + + process.join(timeout=1.0) + if process.is_alive(): + util.info('manager still alive') + if hasattr(process, 'terminate'): + util.info('trying to `terminate()` manager process') + 
process.terminate() + process.join(timeout=1.0) + if process.is_alive(): + util.info('manager still alive after terminate') + + state.value = State.SHUTDOWN + try: + del BaseProxy._address_to_local[address] + except KeyError: + pass + + @property + def address(self): + return self._address + + @classmethod + def register(cls, typeid, callable=None, proxytype=None, exposed=None, + method_to_typeid=None, create_method=True): + ''' + Register a typeid with the manager type + ''' + if '_registry' not in cls.__dict__: + cls._registry = cls._registry.copy() + + if proxytype is None: + proxytype = AutoProxy + + exposed = exposed or getattr(proxytype, '_exposed_', None) + + method_to_typeid = method_to_typeid or \ + getattr(proxytype, '_method_to_typeid_', None) + + if method_to_typeid: + for key, value in list(method_to_typeid.items()): # isinstance? + assert type(key) is str, '%r is not a string' % key + assert type(value) is str, '%r is not a string' % value + + cls._registry[typeid] = ( + callable, exposed, method_to_typeid, proxytype + ) + + if create_method: + def temp(self, /, *args, **kwds): + util.debug('requesting creation of a shared %r object', typeid) + token, exp = self._create(typeid, *args, **kwds) + proxy = proxytype( + token, self._serializer, manager=self, + authkey=self._authkey, exposed=exp + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + temp.__name__ = typeid + setattr(cls, typeid, temp) + +# +# Subclass of set which get cleared after a fork +# + +class ProcessLocalSet(set): + def __init__(self): + util.register_after_fork(self, lambda obj: obj.clear()) + def __reduce__(self): + return type(self), () + +# +# Definition of BaseProxy +# + +class BaseProxy(object): + ''' + A base for proxies of shared objects + ''' + _address_to_local = {} + _mutex = util.ForkAwareThreadLock() + + def __init__(self, token, serializer, manager=None, + authkey=None, exposed=None, incref=True, manager_owned=False): + with BaseProxy._mutex: + tls_idset = BaseProxy._address_to_local.get(token.address, None) + if tls_idset is None: + tls_idset = util.ForkAwareLocal(), ProcessLocalSet() + BaseProxy._address_to_local[token.address] = tls_idset + + # self._tls is used to record the connection used by this + # thread to communicate with the manager at token.address + self._tls = tls_idset[0] + + # self._idset is used to record the identities of all shared + # objects for which the current process owns references and + # which are in the manager at token.address + self._idset = tls_idset[1] + + self._token = token + self._id = self._token.id + self._manager = manager + self._serializer = serializer + self._Client = listener_client[serializer][1] + + # Should be set to True only when a proxy object is being created + # on the manager server; primary use case: nested proxy objects. + # RebuildProxy detects when a proxy is being created on the manager + # and sets this value appropriately. 
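+        # (Added note: when this flag is True, _incref() below is a no-op,
+        # since the server already tracks the referent in its
+        # id_to_local_proxy_obj mapping and no client-side reference
+        # count is needed.)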
+ self._owned_by_manager = manager_owned + + if authkey is not None: + self._authkey = process.AuthenticationString(authkey) + elif self._manager is not None: + self._authkey = self._manager._authkey + else: + self._authkey = process.current_process().authkey + + if incref: + self._incref() + + util.register_after_fork(self, BaseProxy._after_fork) + + def _connect(self): + util.debug('making connection to manager') + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'accept_connection', (name,)) + self._tls.connection = conn + + def _callmethod(self, methodname, args=(), kwds={}): + ''' + Try to call a method of the referent and return a copy of the result + ''' + try: + conn = self._tls.connection + except AttributeError: + util.debug('thread %r does not own a connection', + threading.current_thread().name) + self._connect() + conn = self._tls.connection + + conn.send((self._id, methodname, args, kwds)) + kind, result = conn.recv() + + if kind == '#RETURN': + return result + elif kind == '#PROXY': + exposed, token = result + proxytype = self._manager._registry[token.typeid][-1] + token.address = self._token.address + proxy = proxytype( + token, self._serializer, manager=self._manager, + authkey=self._authkey, exposed=exposed + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + raise convert_to_error(kind, result) + + def _getvalue(self): + ''' + Get a copy of the value of the referent + ''' + return self._callmethod('#GETVALUE') + + def _incref(self): + if self._owned_by_manager: + util.debug('owned_by_manager skipped INCREF of %r', self._token.id) + return + + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'incref', (self._id,)) + util.debug('INCREF %r', self._token.id) + + self._idset.add(self._id) + + state = self._manager and self._manager._state + + self._close = util.Finalize( + self, BaseProxy._decref, + args=(self._token, self._authkey, state, + self._tls, self._idset, self._Client), + exitpriority=10 + ) + + @staticmethod + def _decref(token, authkey, state, tls, idset, _Client): + idset.discard(token.id) + + # check whether manager is still alive + if state is None or state.value == State.STARTED: + # tell manager this process no longer cares about referent + try: + util.debug('DECREF %r', token.id) + conn = _Client(token.address, authkey=authkey) + dispatch(conn, None, 'decref', (token.id,)) + except Exception as e: + util.debug('... 
decref failed %s', e) + + else: + util.debug('DECREF %r -- manager already shutdown', token.id) + + # check whether we can close this thread's connection because + # the process owns no more references to objects for this manager + if not idset and hasattr(tls, 'connection'): + util.debug('thread %r has no more proxies so closing conn', + threading.current_thread().name) + tls.connection.close() + del tls.connection + + def _after_fork(self): + self._manager = None + try: + self._incref() + except Exception as e: + # the proxy may just be for a manager which has shutdown + util.info('incref failed: %s' % e) + + def __reduce__(self): + kwds = {} + if get_spawning_popen() is not None: + kwds['authkey'] = self._authkey + + if getattr(self, '_isauto', False): + kwds['exposed'] = self._exposed_ + return (RebuildProxy, + (AutoProxy, self._token, self._serializer, kwds)) + else: + return (RebuildProxy, + (type(self), self._token, self._serializer, kwds)) + + def __deepcopy__(self, memo): + return self._getvalue() + + def __repr__(self): + return '<%s object, typeid %r at %#x>' % \ + (type(self).__name__, self._token.typeid, id(self)) + + def __str__(self): + ''' + Return representation of the referent (or a fall-back if that fails) + ''' + try: + return self._callmethod('__repr__') + except Exception: + return repr(self)[:-1] + "; '__str__()' failed>" + +# +# Function used for unpickling +# + +def RebuildProxy(func, token, serializer, kwds): + ''' + Function used for unpickling proxy objects. + ''' + server = getattr(process.current_process(), '_manager_server', None) + if server and server.address == token.address: + util.debug('Rebuild a proxy owned by manager, token=%r', token) + kwds['manager_owned'] = True + if token.id not in server.id_to_local_proxy_obj: + server.id_to_local_proxy_obj[token.id] = \ + server.id_to_obj[token.id] + incref = ( + kwds.pop('incref', True) and + not getattr(process.current_process(), '_inheriting', False) + ) + return func(token, serializer, incref=incref, **kwds) + +# +# Functions to create proxies and proxy types +# + +def MakeProxyType(name, exposed, _cache={}): + ''' + Return a proxy type whose methods are given by `exposed` + ''' + exposed = tuple(exposed) + try: + return _cache[(name, exposed)] + except KeyError: + pass + + dic = {} + + for meth in exposed: + exec('''def %s(self, /, *args, **kwds): + return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) + + ProxyType = type(name, (BaseProxy,), dic) + ProxyType._exposed_ = exposed + _cache[(name, exposed)] = ProxyType + return ProxyType + + +def AutoProxy(token, serializer, manager=None, authkey=None, + exposed=None, incref=True, manager_owned=False): + ''' + Return an auto-proxy for `token` + ''' + _Client = listener_client[serializer][1] + + if exposed is None: + conn = _Client(token.address, authkey=authkey) + try: + exposed = dispatch(conn, None, 'get_methods', (token,)) + finally: + conn.close() + + if authkey is None and manager is not None: + authkey = manager._authkey + if authkey is None: + authkey = process.current_process().authkey + + ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) + proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, + incref=incref, manager_owned=manager_owned) + proxy._isauto = True + return proxy + +# +# Types/callables which we will register with SyncManager +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for 
name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + def get(self): + return self._value + def set(self, value): + self._value = value + def __repr__(self): + return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) + value = property(get, set) + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +# +# Proxy types used by SyncManager +# + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__', 'send', 'throw', 'close') + def __iter__(self): + return self + def __next__(self, *args): + return self._callmethod('__next__', args) + def send(self, *args): + return self._callmethod('send', args) + def throw(self, *args): + return self._callmethod('throw', args) + def close(self, *args): + return self._callmethod('close', args) + + +class AcquirerProxy(BaseProxy): + _exposed_ = ('acquire', 'release') + def acquire(self, blocking=True, timeout=None): + args = (blocking,) if timeout is None else (blocking, timeout) + return self._callmethod('acquire', args) + def release(self): + return self._callmethod('release') + def __enter__(self): + return self._callmethod('acquire') + def __exit__(self, exc_type, exc_val, exc_tb): + return self._callmethod('release') + + +class ConditionProxy(AcquirerProxy): + _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def notify(self, n=1): + return self._callmethod('notify', (n,)) + def notify_all(self): + return self._callmethod('notify_all') + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = getattr(time,'monotonic',time.time)() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - getattr(time,'monotonic',time.time)() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class EventProxy(BaseProxy): + _exposed_ = ('is_set', 'set', 'clear', 'wait') + def is_set(self): + return self._callmethod('is_set') + def set(self): + return self._callmethod('set') + def clear(self): + return self._callmethod('clear') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + +class BarrierProxy(BaseProxy): + _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def abort(self): + return self._callmethod('abort') + def reset(self): + return self._callmethod('reset') + @property + def parties(self): + return self._callmethod('__getattribute__', ('parties',)) + @property + def n_waiting(self): + return self._callmethod('__getattribute__', ('n_waiting',)) + @property + def broken(self): + return self._callmethod('__getattribute__', ('broken',)) + + +class NamespaceProxy(BaseProxy): + _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') + def __getattr__(self, key): + if key[0] == '_': + return object.__getattribute__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__getattribute__', (key,)) + def __setattr__(self, key, value): + if key[0] == '_': + return object.__setattr__(self, key, value) + callmethod = object.__getattribute__(self, 
'_callmethod') + return callmethod('__setattr__', (key, value)) + def __delattr__(self, key): + if key[0] == '_': + return object.__delattr__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__delattr__', (key,)) + + +class ValueProxy(BaseProxy): + _exposed_ = ('get', 'set') + def get(self): + return self._callmethod('get') + def set(self, value): + return self._callmethod('set', (value,)) + value = property(get, set) + + __class_getitem__ = classmethod(types.GenericAlias) + + +BaseListProxy = MakeProxyType('BaseListProxy', ( + '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', + '__mul__', '__reversed__', '__rmul__', '__setitem__', + 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', + 'reverse', 'sort', '__imul__' + )) +class ListProxy(BaseListProxy): + def __iadd__(self, value): + self._callmethod('extend', (value,)) + return self + def __imul__(self, value): + self._callmethod('__imul__', (value,)) + return self + + +DictProxy = MakeProxyType('DictProxy', ( + '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', + '__setitem__', 'clear', 'copy', 'get', 'items', + 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' + )) +DictProxy._method_to_typeid_ = { + '__iter__': 'Iterator', + } + + +ArrayProxy = MakeProxyType('ArrayProxy', ( + '__len__', '__getitem__', '__setitem__' + )) + + +BasePoolProxy = MakeProxyType('PoolProxy', ( + 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', + 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', + )) +BasePoolProxy._method_to_typeid_ = { + 'apply_async': 'AsyncResult', + 'map_async': 'AsyncResult', + 'starmap_async': 'AsyncResult', + 'imap': 'Iterator', + 'imap_unordered': 'Iterator' + } +class PoolProxy(BasePoolProxy): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Definition of SyncManager +# + +class SyncManager(BaseManager): + ''' + Subclass of `BaseManager` which supports a number of shared object types. + + The types registered are those intended for the synchronization + of threads, plus `dict`, `list` and `Namespace`. + + The `multiprocess.Manager()` function creates started instances of + this class. + ''' + +SyncManager.register('Queue', queue.Queue) +SyncManager.register('JoinableQueue', queue.Queue) +SyncManager.register('Event', threading.Event, EventProxy) +SyncManager.register('Lock', threading.Lock, AcquirerProxy) +SyncManager.register('RLock', threading.RLock, AcquirerProxy) +SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) +SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, + AcquirerProxy) +SyncManager.register('Condition', threading.Condition, ConditionProxy) +SyncManager.register('Barrier', threading.Barrier, BarrierProxy) +SyncManager.register('Pool', pool.Pool, PoolProxy) +SyncManager.register('list', list, ListProxy) +SyncManager.register('dict', dict, DictProxy) +SyncManager.register('Value', Value, ValueProxy) +SyncManager.register('Array', Array, ArrayProxy) +SyncManager.register('Namespace', Namespace, NamespaceProxy) + +# types returned by methods of PoolProxy +SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) +SyncManager.register('AsyncResult', create_method=False) + +# +# Definition of SharedMemoryManager and SharedMemoryServer +# + +if HAS_SHMEM: + class _SharedMemoryTracker: + "Manages one or more shared memory segments." 
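+        # A hypothetical lifecycle, sketched for orientation (comments only,
+        # names are illustrative):
+        #
+        #     tracker = _SharedMemoryTracker("shm_example")
+        #     tracker.register_segment(sms.name)   # remember a block by name
+        #     tracker.unlink()                     # close()+unlink() them all
+        #
+        # Only segment *names* are stored; destroy_segment() re-attaches via
+        # shared_memory.SharedMemory(name) before unlinking, so the tracker
+        # itself never holds open handles.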
+ + def __init__(self, name, segment_names=[]): + self.shared_memory_context_name = name + self.segment_names = segment_names + + def register_segment(self, segment_name): + "Adds the supplied shared memory block name to tracker." + util.debug(f"Register segment {segment_name!r} in pid {getpid()}") + self.segment_names.append(segment_name) + + def destroy_segment(self, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the list of blocks being tracked.""" + util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") + self.segment_names.remove(segment_name) + segment = shared_memory.SharedMemory(segment_name) + segment.close() + segment.unlink() + + def unlink(self): + "Calls destroy_segment() on all tracked shared memory blocks." + for segment_name in self.segment_names[:]: + self.destroy_segment(segment_name) + + def __del__(self): + util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") + self.unlink() + + def __getstate__(self): + return (self.shared_memory_context_name, self.segment_names) + + def __setstate__(self, state): + self.__init__(*state) + + + class SharedMemoryServer(Server): + + public = Server.public + \ + ['track_segment', 'release_segment', 'list_segments'] + + def __init__(self, *args, **kwargs): + Server.__init__(self, *args, **kwargs) + address = self.address + # The address of Linux abstract namespaces can be bytes + if isinstance(address, bytes): + address = os.fsdecode(address) + self.shared_memory_context = \ + _SharedMemoryTracker(f"shm_{address}_{getpid()}") + util.debug(f"SharedMemoryServer started by pid {getpid()}") + + def create(self, c, typeid, /, *args, **kwargs): + """Create a new distributed-shared object (not backed by a shared + memory block) and return its id to be used in a Proxy Object.""" + # Unless set up as a shared proxy, don't make shared_memory_context + # a standard part of kwargs. This makes things easier for supplying + # simple functions. + if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): + kwargs['shared_memory_context'] = self.shared_memory_context + return Server.create(self, c, typeid, *args, **kwargs) + + def shutdown(self, c): + "Call unlink() on all tracked shared memory, terminate the Server." + self.shared_memory_context.unlink() + return Server.shutdown(self, c) + + def track_segment(self, c, segment_name): + "Adds the supplied shared memory block name to Server's tracker." + self.shared_memory_context.register_segment(segment_name) + + def release_segment(self, c, segment_name): + """Calls unlink() on the shared memory block with the supplied name + and removes it from the tracker instance inside the Server.""" + self.shared_memory_context.destroy_segment(segment_name) + + def list_segments(self, c): + """Returns a list of names of shared memory blocks that the Server + is currently tracking.""" + return self.shared_memory_context.segment_names + + + class SharedMemoryManager(BaseManager): + """Like SyncManager but uses SharedMemoryServer instead of Server. + + It provides methods for creating and returning SharedMemory instances + and for creating a list-like object (ShareableList) backed by shared + memory. It also provides methods that create and return Proxy Objects + that support synchronization across processes (i.e. multi-process-safe + locks and semaphores). 
+ """ + + _Server = SharedMemoryServer + + def __init__(self, *args, **kwargs): + if os.name == "posix": + # bpo-36867: Ensure the resource_tracker is running before + # launching the manager process, so that concurrent + # shared_memory manipulation both in the manager and in the + # current process does not create two resource_tracker + # processes. + from . import resource_tracker + resource_tracker.ensure_running() + BaseManager.__init__(self, *args, **kwargs) + util.debug(f"{self.__class__.__name__} created by pid {getpid()}") + + def __del__(self): + util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") + pass + + def get_server(self): + 'Better than monkeypatching for now; merge into Server ultimately' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started SharedMemoryServer") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("SharedMemoryManager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self._Server(self._registry, self._address, + self._authkey, self._serializer) + + def SharedMemory(self, size): + """Returns a new SharedMemory instance with the specified size in + bytes, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sms = shared_memory.SharedMemory(None, create=True, size=size) + try: + dispatch(conn, None, 'track_segment', (sms.name,)) + except BaseException as e: + sms.unlink() + raise e + return sms + + def ShareableList(self, sequence): + """Returns a new ShareableList instance populated with the values + from the input sequence, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sl = shared_memory.ShareableList(sequence) + try: + dispatch(conn, None, 'track_segment', (sl.shm.name,)) + except BaseException as e: + sl.shm.unlink() + raise e + return sl diff --git a/venv/lib/python3.10/site-packages/multiprocess/popen_fork.py b/venv/lib/python3.10/site-packages/multiprocess/popen_fork.py new file mode 100644 index 0000000000000000000000000000000000000000..fa7c52d589fea22aad4b2ff4ba969db442a9ec1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/popen_fork.py @@ -0,0 +1,83 @@ +import os +import signal + +from . import util + +__all__ = ['Popen'] + +# +# Start child process using fork +# + +class Popen(object): + method = 'fork' + + def __init__(self, process_obj): + util._flush_std_streams() + self.returncode = None + self.finalizer = None + self._launch(process_obj) + + def duplicate_for_child(self, fd): + return fd + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. See #1731717 + # e.errno == errno.ECHILD == 10 + return None + if pid == self.pid: + self.returncode = os.waitstatus_to_exitcode(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + from multiprocess.connection import wait + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. 
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def _send_signal(self, sig): + if self.returncode is None: + try: + os.kill(self.pid, sig) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def terminate(self): + self._send_signal(signal.SIGTERM) + + def kill(self): + self._send_signal(signal.SIGKILL) + + def _launch(self, process_obj): + code = 1 + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + self.pid = os.fork() + if self.pid == 0: + try: + os.close(parent_r) + os.close(parent_w) + code = process_obj._bootstrap(parent_sentinel=child_r) + finally: + os._exit(code) + else: + os.close(child_w) + os.close(child_r) + self.finalizer = util.Finalize(self, util.close_fds, + (parent_r, parent_w,)) + self.sentinel = parent_r + + def close(self): + if self.finalizer is not None: + self.finalizer() diff --git a/venv/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py b/venv/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py new file mode 100644 index 0000000000000000000000000000000000000000..24b8634523e5f2c29cd8bb21022c26d22a4fb13b --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/popen_spawn_posix.py @@ -0,0 +1,72 @@ +import io +import os + +from .context import reduction, set_spawning_popen +from . import popen_fork +from . import spawn +from . import util + +__all__ = ['Popen'] + + +# +# Wrapper for an fd used while launching a process +# + +class _DupFd(object): + def __init__(self, fd): + self.fd = fd + def detach(self): + return self.fd + +# +# Start child process using a fresh interpreter +# + +class Popen(popen_fork.Popen): + method = 'spawn' + DupFd = _DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return fd + + def _launch(self, process_obj): + from . import resource_tracker + tracker_fd = resource_tracker.getfd() + self._fds.append(tracker_fd) + prep_data = spawn.get_preparation_data(process_obj._name) + fp = io.BytesIO() + set_spawning_popen(self) + try: + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + finally: + set_spawning_popen(None) + + parent_r = child_w = child_r = parent_w = None + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + cmd = spawn.get_command_line(tracker_fd=tracker_fd, + pipe_handle=child_r) + self._fds.extend([child_r, child_w]) + self.pid = util.spawnv_passfds(spawn.get_executable(), + cmd, self._fds) + self.sentinel = parent_r + with open(parent_w, 'wb', closefd=False) as f: + f.write(fp.getbuffer()) + finally: + fds_to_close = [] + for fd in (parent_r, parent_w): + if fd is not None: + fds_to_close.append(fd) + self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) + + for fd in (child_r, child_w): + if fd is not None: + os.close(fd) diff --git a/venv/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py b/venv/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py new file mode 100644 index 0000000000000000000000000000000000000000..9c4098d0fa4f1e6e3ec94ecc8e596dd3857d741f --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/popen_spawn_win32.py @@ -0,0 +1,131 @@ +import os +import msvcrt +import signal +import sys +import _winapi + +from .context import reduction, get_spawning_popen, set_spawning_popen +from . import spawn +from . 
import util + +__all__ = ['Popen'] + +# +# +# + +TERMINATE = 0x10000 +WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) +WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + + +def _path_eq(p1, p2): + return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) + +WINENV = not _path_eq(sys.executable, sys._base_executable) + + +def _close_handles(*handles): + for handle in handles: + _winapi.CloseHandle(handle) + + +# +# We define a Popen class similar to the one from subprocess, but +# whose constructor takes a process object as its argument. +# + +class Popen(object): + ''' + Start a subprocess to run the code of a process object + ''' + method = 'spawn' + + def __init__(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name) + + # read end of pipe will be duplicated by the child process + # -- see spawn_main() in spawn.py. + # + # bpo-33929: Previously, the read end of pipe was "stolen" by the child + # process, but it leaked a handle if the child process had been + # terminated before it could steal the handle from the parent process. + rhandle, whandle = _winapi.CreatePipe(None, 0) + wfd = msvcrt.open_osfhandle(whandle, 0) + cmd = spawn.get_command_line(parent_pid=os.getpid(), + pipe_handle=rhandle) + cmd = ' '.join('"%s"' % x for x in cmd) + + python_exe = spawn.get_executable() + + # bpo-35797: When running in a venv, we bypass the redirect + # executor and launch our base Python. + if WINENV and _path_eq(python_exe, sys.executable): + python_exe = sys._base_executable + env = os.environ.copy() + env["__PYVENV_LAUNCHER__"] = sys.executable + else: + env = None + + with open(wfd, 'wb', closefd=True) as to_child: + # start process + try: + hp, ht, pid, tid = _winapi.CreateProcess( + python_exe, cmd, + None, None, False, 0, env, None, None) + _winapi.CloseHandle(ht) + except: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + self.finalizer = util.Finalize(self, _close_handles, + (self.sentinel, int(rhandle))) + + # send information to child + set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + set_spawning_popen(None) + + def duplicate_for_child(self, handle): + assert self is get_spawning_popen() + return reduction.duplicate(handle, self.sentinel) + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is None: + msecs = _winapi.INFINITE + else: + msecs = max(0, int(timeout * 1000 + 0.5)) + + res = _winapi.WaitForSingleObject(int(self._handle), msecs) + if res == _winapi.WAIT_OBJECT_0: + code = _winapi.GetExitCodeProcess(self._handle) + if code == TERMINATE: + code = -signal.SIGTERM + self.returncode = code + + return self.returncode + + def poll(self): + return self.wait(timeout=0) + + def terminate(self): + if self.returncode is None: + try: + _winapi.TerminateProcess(int(self._handle), TERMINATE) + except OSError: + if self.wait(timeout=1.0) is None: + raise + + kill = terminate + + def close(self): + self.finalizer() diff --git a/venv/lib/python3.10/site-packages/multiprocess/process.py b/venv/lib/python3.10/site-packages/multiprocess/process.py new file mode 100644 index 0000000000000000000000000000000000000000..4c887e3b164600f9fee2993315ad8fc2f85f29c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/process.py @@ -0,0 +1,438 @@ +# +# Module providing the `Process` class which emulates `threading.Thread` 
+# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['BaseProcess', 'current_process', 'active_children', + 'parent_process'] + +# +# Imports +# + +import os +import sys +import signal +import itertools +import threading +from _weakrefset import WeakSet + +# +# +# + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +# +# Public functions +# + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + +def active_children(): + ''' + Return list of process objects corresponding to live child processes + ''' + _cleanup() + return list(_children) + + +def parent_process(): + ''' + Return process object representing the parent process + ''' + return _parent_process + +# +# +# + +def _cleanup(): + # check for processes which have finished + for p in list(_children): + if p._popen.poll() is not None: + _children.discard(p) + +# +# The `Process` class +# + +class BaseProcess(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analogous to `threading.Thread` + ''' + def _Popen(self): + raise NotImplementedError + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, + *, daemon=None): + assert group is None, 'group argument must be None for now' + count = next(_process_counter) + self._identity = _current_process._identity + (count,) + self._config = _current_process._config.copy() + self._parent_pid = os.getpid() + self._parent_name = _current_process.name + self._popen = None + self._closed = False + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) + self._name = name or type(self).__name__ + '-' + \ + ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon + _dangling.add(self) + + def _check_closed(self): + if self._closed: + raise ValueError("process object is closed") + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: + self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + self._check_closed() + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + assert not _current_process._config.get('daemon'), \ + 'daemonic processes are not allowed to have children' + _cleanup() + self._popen = self._Popen(self) + self._sentinel = self._popen.sentinel + # Avoid a refcycle if the target function holds an indirect + # reference to the process object (see bpo-30775) + del self._target, self._args, self._kwargs + _children.add(self) + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.terminate() + + def kill(self): + ''' + Terminate process; sends SIGKILL signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.kill() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + self._check_closed() + assert self._parent_pid == os.getpid(), 'can only join a child process' + assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + self._check_closed() + if self is 
_current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + + if self._popen is None: + return False + + returncode = self._popen.poll() + if returncode is None: + return True + else: + _children.discard(self) + return False + + def close(self): + ''' + Close the Process object. + + This method releases resources held by the Process object. It is + an error to call this method if the child process is still running. + ''' + if self._popen is not None: + if self._popen.poll() is None: + raise ValueError("Cannot close a process while it is still running. " + "You should first call join() or terminate().") + self._popen.close() + self._popen = None + del self._sentinel + _children.discard(self) + self._closed = True + + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + assert isinstance(name, str), 'name must be a string' + self._name = name + + @property + def daemon(self): + ''' + Return whether process is a daemon + ''' + return self._config.get('daemon', False) + + @daemon.setter + def daemon(self, daemonic): + ''' + Set whether process is a daemon + ''' + assert self._popen is None, 'process has already started' + self._config['daemon'] = daemonic + + @property + def authkey(self): + return self._config['authkey'] + + @authkey.setter + def authkey(self, authkey): + ''' + Set authorization key of process + ''' + self._config['authkey'] = AuthenticationString(authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + self._check_closed() + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + self._check_closed() + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. + ''' + self._check_closed() + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") from None + + def __repr__(self): + exitcode = None + if self is _current_process: + status = 'started' + elif self._closed: + status = 'closed' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + exitcode = self._popen.poll() + if exitcode is not None: + status = 'stopped' + else: + status = 'started' + + info = [type(self).__name__, 'name=%r' % self._name] + if self._popen is not None: + info.append('pid=%s' % self._popen.pid) + info.append('parent=%s' % self._parent_pid) + info.append(status) + if exitcode is not None: + exitcode = _exitcode_to_name.get(exitcode, exitcode) + info.append('exitcode=%s' % exitcode) + if self.daemon: + info.append('daemon') + return '<%s>' % ' '.join(info) + + ## + + def _bootstrap(self, parent_sentinel=None): + from . 
import util, context + global _current_process, _parent_process, _process_counter, _children + + try: + if self._start_method is not None: + context._force_start_method(self._start_method) + _process_counter = itertools.count(1) + _children = set() + util._close_stdin() + old_process = _current_process + _current_process = self + _parent_process = _ParentProcess( + self._parent_name, self._parent_pid, parent_sentinel) + if threading._HAVE_THREAD_NATIVE_ID: + threading.main_thread()._set_native_id() + try: + self._after_fork() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process calling self.run()') + try: + self.run() + exitcode = 0 + finally: + util._exit_function() + except SystemExit as e: + if e.code is None: + exitcode = 0 + elif isinstance(e.code, int): + exitcode = e.code + else: + sys.stderr.write(str(e.code) + '\n') + exitcode = 1 + except: + exitcode = 1 + import traceback + sys.stderr.write('Process %s:\n' % self.name) + traceback.print_exc() + finally: + threading._shutdown() + util.info('process exiting with exitcode %d' % exitcode) + util._flush_std_streams() + + return exitcode + + @staticmethod + def _after_fork(): + from . import util + util._finalizer_registry.clear() + util._run_after_forkers() + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + +class AuthenticationString(bytes): + def __reduce__(self): + from .context import get_spawning_popen + if get_spawning_popen() is None: + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons' + ) + return AuthenticationString, (bytes(self),) + + +# +# Create object representing the parent process +# + +class _ParentProcess(BaseProcess): + + def __init__(self, name, pid, sentinel): + self._identity = () + self._name = name + self._pid = pid + self._parent_pid = None + self._popen = None + self._closed = False + self._sentinel = sentinel + self._config = {} + + def is_alive(self): + from multiprocess.connection import wait + return not wait([self._sentinel], timeout=0) + + @property + def ident(self): + return self._pid + + def join(self, timeout=None): + ''' + Wait until parent process terminates + ''' + from multiprocess.connection import wait + wait([self._sentinel], timeout=timeout) + + pid = ident + +# +# Create object representing the main process +# + +class _MainProcess(BaseProcess): + + def __init__(self): + self._identity = () + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._closed = False + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': '/mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + # + # On MacOSX in a sandbox it may be necessary to use a + # different prefix -- see #19478. + # + # Everything in self._config will be inherited by descendant + # processes. 
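+        #
+        # Illustrative use of that inheritance (hypothetical, not part of
+        # this module): a child can authenticate to a parent's Listener
+        # because both sides see the same authkey bytes:
+        #
+        #     from multiprocess.connection import Listener, Client
+        #     key = current_process().authkey
+        #     listener = Listener(authkey=key)              # parent side
+        #     conn = Client(listener.address, authkey=key)  # child side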
+ + def close(self): + pass + + +_parent_process = None +_current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in list(signal.__dict__.items()): + if name[:3]=='SIG' and '_' not in name: + _exitcode_to_name[-signum] = f'-{name}' + +# For debug and leak testing +_dangling = WeakSet() diff --git a/venv/lib/python3.10/site-packages/multiprocess/reduction.py b/venv/lib/python3.10/site-packages/multiprocess/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..39b132c5e17067a215184866a25654b70b7bea1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/reduction.py @@ -0,0 +1,284 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from abc import ABCMeta +import copyreg +import functools +import io +import os +try: + import dill as pickle +except ImportError: + import pickle +import socket +import sys + +from . import context + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocess.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args, **kwds): + super().__init__(*args, **kwds) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None, *args, **kwds): + buf = io.BytesIO() + cls(buf, protocol, *args, **kwds).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + +def dump(obj, file, protocol=None, *args, **kwds): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol, *args, **kwds).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False, + *, source_process=None): + '''Duplicate a handle. 
(target_process is a handle not a pid!)''' + current_process = _winapi.GetCurrentProcess() + if source_process is None: + source_process = current_process + if target_process is None: + target_process = current_process + return _winapi.DuplicateHandle( + source_process, handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. + pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. 
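+            # (DUPLICATE_CLOSE_SOURCE below also closes the source handle in
+            # the owning process, one more reason detach() is single-use.)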
+ proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + # On MacOSX we should acknowledge receipt of fds -- see Issue14669 + ACKNOWLEDGE = sys.platform == 'darwin' + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if ACKNOWLEDGE and sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + if ACKNOWLEDGE: + sock.send(b'A') + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + if len(a) % 256 != msg[0]: + raise AssertionError( + "Len is {0:n} but msg[0] is {1!r}".format( + len(a), msg[0])) + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def DupFd(fd): + '''Return a wrapper for an fd.''' + popen_obj = context.get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from . 
import resource_sharer + return resource_sharer.DupFd(fd) + else: + raise ValueError('SCM_RIGHTS appears not to be available') + +# +# Try making some callable types picklable +# + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + def _reduce_socket(s): + from .resource_sharer import DupSocket + return _rebuild_socket, (DupSocket(s),) + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) + + +class AbstractReducer(metaclass=ABCMeta): + '''Abstract base class for use in implementing a Reduction class + suitable for use in replacing the standard reduction mechanism + used in multiprocess.''' + ForkingPickler = ForkingPickler + register = register + dump = dump + send_handle = send_handle + recv_handle = recv_handle + + if sys.platform == 'win32': + steal_handle = steal_handle + duplicate = duplicate + DupHandle = DupHandle + else: + sendfds = sendfds + recvfds = recvfds + DupFd = DupFd + + _reduce_method = _reduce_method + _reduce_method_descriptor = _reduce_method_descriptor + _rebuild_partial = _rebuild_partial + _reduce_socket = _reduce_socket + _rebuild_socket = _rebuild_socket + + def __init__(self, *args): + register(type(_C().f), _reduce_method) + register(type(list.append), _reduce_method_descriptor) + register(type(int.__add__), _reduce_method_descriptor) + register(functools.partial, _reduce_partial) + register(socket.socket, _reduce_socket) diff --git a/venv/lib/python3.10/site-packages/multiprocess/resource_sharer.py b/venv/lib/python3.10/site-packages/multiprocess/resource_sharer.py new file mode 100644 index 0000000000000000000000000000000000000000..66076509a1202e7a1b4d8a481f64621a4bfbbf3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/resource_sharer.py @@ -0,0 +1,154 @@ +# +# We use a background thread for sharing fds on Unix, and for sharing sockets on +# Windows. +# +# A client which wants to pickle a resource registers it with the resource +# sharer and gets an identifier in return. The unpickling process will connect +# to the resource sharer, sends the identifier and its pid, and then receives +# the resource. +# + +import os +import signal +import socket +import sys +import threading + +from . import process +from .context import reduction +from . 
import util + +__all__ = ['stop'] + + +if sys.platform == 'win32': + __all__ += ['DupSocket'] + + class DupSocket(object): + '''Picklable wrapper for a socket.''' + def __init__(self, sock): + new_sock = sock.dup() + def send(conn, pid): + share = new_sock.share(pid) + conn.send_bytes(share) + self._id = _resource_sharer.register(send, new_sock.close) + + def detach(self): + '''Get the socket. This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + share = conn.recv_bytes() + return socket.fromshare(share) + +else: + __all__ += ['DupFd'] + + class DupFd(object): + '''Wrapper for fd which can be used at any time.''' + def __init__(self, fd): + new_fd = os.dup(fd) + def send(conn, pid): + reduction.send_handle(conn, new_fd, pid) + def close(): + os.close(new_fd) + self._id = _resource_sharer.register(send, close) + + def detach(self): + '''Get the fd. This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + return reduction.recv_handle(conn) + + +class _ResourceSharer(object): + '''Manager for resources using background thread.''' + def __init__(self): + self._key = 0 + self._cache = {} + self._lock = threading.Lock() + self._listener = None + self._address = None + self._thread = None + util.register_after_fork(self, _ResourceSharer._afterfork) + + def register(self, send, close): + '''Register resource, returning an identifier.''' + with self._lock: + if self._address is None: + self._start() + self._key += 1 + self._cache[self._key] = (send, close) + return (self._address, self._key) + + @staticmethod + def get_connection(ident): + '''Return connection from which to receive identified resource.''' + from .connection import Client + address, key = ident + c = Client(address, authkey=process.current_process().authkey) + c.send((key, os.getpid())) + return c + + def stop(self, timeout=None): + '''Stop the background thread and clear registered resources.''' + from .connection import Client + with self._lock: + if self._address is not None: + c = Client(self._address, + authkey=process.current_process().authkey) + c.send(None) + c.close() + self._thread.join(timeout) + if self._thread.is_alive(): + util.sub_warning('_ResourceSharer thread did ' + 'not stop when asked') + self._listener.close() + self._thread = None + self._address = None + self._listener = None + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + + def _afterfork(self): + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + self._lock._at_fork_reinit() + if self._listener is not None: + self._listener.close() + self._listener = None + self._address = None + self._thread = None + + def _start(self): + from .connection import Listener + assert self._listener is None, "Already have Listener" + util.debug('starting listener and thread for sending handles') + self._listener = Listener(authkey=process.current_process().authkey) + self._address = self._listener.address + t = threading.Thread(target=self._serve) + t.daemon = True + t.start() + self._thread = t + + def _serve(self): + if hasattr(signal, 'pthread_sigmask'): + signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) + while 1: + try: + with self._listener.accept() as conn: + msg = conn.recv() + if msg is None: + break + key, destination_pid = msg + send, close = self._cache.pop(key) + try: + send(conn, destination_pid) + finally: + close() + except: + if not util.is_exiting(): + sys.excepthook(*sys.exc_info()) + + 
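+# A sketch of the round trip (illustrative, not an exact trace): pickling a
+# DupFd/DupSocket registers a (send, close) pair under a fresh key and ships
+# only (address, key); unpickling connects back and receives the resource:
+#
+#     df = DupFd(fd)       # register() -> df._id == (address, key)
+#     fd2 = df.detach()    # connects; _serve() pops the key, send()s, close()s
+#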
+_resource_sharer = _ResourceSharer() +stop = _resource_sharer.stop diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__init__.py b/venv/lib/python3.10/site-packages/multiprocess/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fee074ed217520894521edd6501e0192f57e8cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/__init__.py @@ -0,0 +1,6000 @@ +# +# Unit tests for the multiprocessing package +# + +import unittest +import unittest.mock +import queue as pyqueue +import textwrap +import time +import io +import itertools +import sys +import os +import gc +import errno +import signal +import array +import socket +import random +import logging +import subprocess +import struct +import operator +import pickle #XXX: use dill? +import weakref +import warnings +import test.support +import test.support.script_helper +from test import support +from test.support import hashlib_helper +from test.support import import_helper +from test.support import os_helper +from test.support import socket_helper +from test.support import threading_helper +from test.support import warnings_helper + + +# Skip tests if _multiprocessing wasn't built. +_multiprocessing = import_helper.import_module('_multiprocessing') +# Skip tests if sem_open implementation is broken. +import_helper.import_module('multiprocess.synchronize') +import threading + +import multiprocess as multiprocessing +import multiprocess.connection +import multiprocess.dummy +import multiprocess.heap +import multiprocess.managers +import multiprocess.pool +import multiprocess.queues + +from multiprocess import util + +try: + from multiprocess import reduction + HAS_REDUCTION = reduction.HAVE_SEND_HANDLE +except ImportError: + HAS_REDUCTION = False + +try: + from multiprocess.sharedctypes import Value, copy + HAS_SHAREDCTYPES = True +except ImportError: + HAS_SHAREDCTYPES = False + +try: + from multiprocess import shared_memory + HAS_SHMEM = True +except ImportError: + HAS_SHMEM = False + +try: + import msvcrt +except ImportError: + msvcrt = None + + +if hasattr(support,'check_sanitizer') and support.check_sanitizer(address=True): + # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to + # work around a libasan race condition: dead lock in pthread_create(). 
+ raise unittest.SkipTest("libasan has a pthread_create() dead lock") + +# Don't ignore user's installed packages +ENV = dict(__cleanenv = False, __isolated = False) + +# Timeout to wait until a process completes #XXX: travis-ci +TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds + +def latin(s): + return s.encode('latin') + + +def close_queue(queue): + if isinstance(queue, multiprocessing.queues.Queue): + queue.close() + queue.join_thread() + + +def join_process(process): + # Since multiprocessing.Process has the same API than threading.Thread + # (join() and is_alive(), the support function can be reused + threading_helper.join_thread(process, timeout=TIMEOUT) + + +if os.name == "posix": + from multiprocess import resource_tracker + + def _resource_unlink(name, rtype): + resource_tracker._CLEANUP_FUNCS[rtype](name) + + +# +# Constants +# + +LOG_LEVEL = util.SUBWARNING +#LOG_LEVEL = logging.DEBUG + +DELTA = 0.1 +CHECK_TIMINGS = False # making true makes tests take a lot longer + # and can sometimes cause some non-serious + # failures because some calls block a bit + # longer than expected +if CHECK_TIMINGS: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 +else: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 + +HAVE_GETVALUE = not getattr(_multiprocessing, + 'HAVE_BROKEN_SEM_GETVALUE', False) + +WIN32 = (sys.platform == "win32") + +from multiprocess.connection import wait + +def wait_for_handle(handle, timeout): + if timeout is not None and timeout < 0.0: + timeout = None + return wait([handle], timeout) + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except: + MAXFD = 256 + +# To speed up tests when using the forkserver, we can preload these: +PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] + +# +# Some tests require ctypes +# + +try: + from ctypes import Structure, c_int, c_double, c_longlong +except ImportError: + Structure = object + c_int = c_double = c_longlong = None + + +def check_enough_semaphores(): + """Check that the system supports enough semaphores to run the test.""" + # minimum number of semaphores available according to POSIX + nsems_min = 256 + try: + nsems = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems == -1 or nsems >= nsems_min: + return + raise unittest.SkipTest("The OS doesn't support enough semaphores " + "to run the test (required: %d)." % nsems_min) + + +# +# Creates a wrapper for a function which records the time it takes to finish +# + +class TimingWrapper(object): + + def __init__(self, func): + self.func = func + self.elapsed = None + + def __call__(self, *args, **kwds): + t = time.monotonic() + try: + return self.func(*args, **kwds) + finally: + self.elapsed = time.monotonic() - t + +# +# Base class for test cases +# + +class BaseTestCase(object): + + ALLOWED_TYPES = ('processes', 'manager', 'threads') + + def assertTimingAlmostEqual(self, a, b): + if CHECK_TIMINGS: + self.assertAlmostEqual(a, b, 1) + + def assertReturnsIfImplemented(self, value, func, *args): + try: + res = func(*args) + except NotImplementedError: + pass + else: + return self.assertEqual(value, res) + + # For the sanity of Windows users, rather than crashing or freezing in + # multiple ways. 
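+    # (In other words: pickling a live test case into a child process is
+    # always a mistake here, so __reduce__ fails fast and loudly.)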
+ def __reduce__(self, *args): + raise NotImplementedError("shouldn't try to pickle a test case") + + __reduce_ex__ = __reduce__ + +# +# Return the value of a semaphore +# + +def get_value(self): + try: + return self.get_value() + except AttributeError: + try: + return self._Semaphore__value + except AttributeError: + try: + return self._value + except AttributeError: + raise NotImplementedError + +# +# Testcases +# + +class DummyCallable: + def __call__(self, q, c): + assert isinstance(c, DummyCallable) + q.put(5) + + +class _TestProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_current(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + current = self.current_process() + authkey = current.authkey + + self.assertTrue(current.is_alive()) + self.assertTrue(not current.daemon) + self.assertIsInstance(authkey, bytes) + self.assertTrue(len(authkey) > 0) + self.assertEqual(current.ident, os.getpid()) + self.assertEqual(current.exitcode, None) + + def test_daemon_argument(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # By default uses the current process's daemon flag. + proc0 = self.Process(target=self._test) + self.assertEqual(proc0.daemon, self.current_process().daemon) + proc1 = self.Process(target=self._test, daemon=True) + self.assertTrue(proc1.daemon) + proc2 = self.Process(target=self._test, daemon=False) + self.assertFalse(proc2.daemon) + + @classmethod + def _test(cls, q, *args, **kwds): + current = cls.current_process() + q.put(args) + q.put(kwds) + q.put(current.name) + if cls.TYPE != 'threads': + q.put(bytes(current.authkey)) + q.put(current.pid) + + def test_parent_process_attributes(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + self.assertIsNone(self.parent_process()) + + rconn, wconn = self.Pipe(duplex=False) + p = self.Process(target=self._test_send_parent_process, args=(wconn,)) + p.start() + p.join() + parent_pid, parent_name = rconn.recv() + self.assertEqual(parent_pid, self.current_process().pid) + self.assertEqual(parent_pid, os.getpid()) + self.assertEqual(parent_name, self.current_process().name) + + @classmethod + def _test_send_parent_process(cls, wconn): + from multiprocess.process import parent_process + wconn.send([parent_process().pid, parent_process().name]) + + def _test_parent_process(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # Launch a child process. Make it launch a grandchild process. Kill the + # child process and make sure that the grandchild notices the death of + # its parent (a.k.a the child process). 
+ rconn, wconn = self.Pipe(duplex=False) + p = self.Process( + target=self._test_create_grandchild_process, args=(wconn, )) + p.start() + + if not rconn.poll(timeout=support.LONG_TIMEOUT): + raise AssertionError("Could not communicate with child process") + parent_process_status = rconn.recv() + self.assertEqual(parent_process_status, "alive") + + p.terminate() + p.join() + + if not rconn.poll(timeout=support.LONG_TIMEOUT): + raise AssertionError("Could not communicate with child process") + parent_process_status = rconn.recv() + self.assertEqual(parent_process_status, "not alive") + + @classmethod + def _test_create_grandchild_process(cls, wconn): + p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) + p.start() + time.sleep(300) + + @classmethod + def _test_report_parent_status(cls, wconn): + from multiprocess.process import parent_process + wconn.send("alive" if parent_process().is_alive() else "not alive") + parent_process().join(timeout=support.SHORT_TIMEOUT) + wconn.send("alive" if parent_process().is_alive() else "not alive") + + def test_process(self): + q = self.Queue(1) + e = self.Event() + args = (q, 1, 2) + kwargs = {'hello':23, 'bye':2.54} + name = 'SomeProcess' + p = self.Process( + target=self._test, args=args, kwargs=kwargs, name=name + ) + p.daemon = True + current = self.current_process() + + if self.TYPE != 'threads': + self.assertEqual(p.authkey, current.authkey) + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.daemon, True) + self.assertNotIn(p, self.active_children()) + self.assertTrue(type(self.active_children()) is list) + self.assertEqual(p.exitcode, None) + + p.start() + + self.assertEqual(p.exitcode, None) + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + + self.assertEqual(q.get(), args[1:]) + self.assertEqual(q.get(), kwargs) + self.assertEqual(q.get(), p.name) + if self.TYPE != 'threads': + self.assertEqual(q.get(), current.authkey) + self.assertEqual(q.get(), p.pid) + + p.join() + + self.assertEqual(p.exitcode, 0) + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + close_queue(q) + + @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") + def test_process_mainthread_native_id(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + current_mainthread_native_id = threading.main_thread().native_id + + q = self.Queue(1) + p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) + p.start() + + child_mainthread_native_id = q.get() + p.join() + close_queue(q) + + self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) + + @classmethod + def _test_process_mainthread_native_id(cls, q): + mainthread_native_id = threading.main_thread().native_id + q.put(mainthread_native_id) + + @classmethod + def _sleep_some(cls): + time.sleep(100) + + @classmethod + def _test_sleep(cls, delay): + time.sleep(delay) + + def _kill_process(self, meth): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + p = self.Process(target=self._sleep_some) + p.daemon = True + p.start() + + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + self.assertEqual(p.exitcode, None) + + join = TimingWrapper(p.join) + + self.assertEqual(join(0), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + self.assertEqual(p.is_alive(), True) + + self.assertEqual(join(-1), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + 
self.assertEqual(p.is_alive(), True) + + # XXX maybe terminating too soon causes the problems on Gentoo... + time.sleep(1) + + meth(p) + + if hasattr(signal, 'alarm'): + # On the Gentoo buildbot waitpid() often seems to block forever. + # We use alarm() to interrupt it if it blocks for too long. + def handler(*args): + raise RuntimeError('join took too long: %s' % p) + old_handler = signal.signal(signal.SIGALRM, handler) + try: + signal.alarm(10) + self.assertEqual(join(), None) + finally: + signal.alarm(0) + signal.signal(signal.SIGALRM, old_handler) + else: + self.assertEqual(join(), None) + + self.assertTimingAlmostEqual(join.elapsed, 0.0) + + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + + p.join() + + return p.exitcode + + def test_terminate(self): + exitcode = self._kill_process(multiprocessing.Process.terminate) + if os.name != 'nt': + self.assertEqual(exitcode, -signal.SIGTERM) + + def test_kill(self): + exitcode = self._kill_process(multiprocessing.Process.kill) + if os.name != 'nt': + self.assertEqual(exitcode, -signal.SIGKILL) + + def test_cpu_count(self): + try: + cpus = multiprocessing.cpu_count() + except NotImplementedError: + cpus = 1 + self.assertTrue(type(cpus) is int) + self.assertTrue(cpus >= 1) + + def test_active_children(self): + self.assertEqual(type(self.active_children()), list) + + p = self.Process(target=time.sleep, args=(DELTA,)) + self.assertNotIn(p, self.active_children()) + + p.daemon = True + p.start() + self.assertIn(p, self.active_children()) + + p.join() + self.assertNotIn(p, self.active_children()) + + @classmethod + def _test_recursion(cls, wconn, id): + wconn.send(id) + if len(id) < 2: + for i in range(2): + p = cls.Process( + target=cls._test_recursion, args=(wconn, id+[i]) + ) + p.start() + p.join() + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_recursion(self): + rconn, wconn = self.Pipe(duplex=False) + self._test_recursion(wconn, []) + + time.sleep(DELTA) + result = [] + while rconn.poll(): + result.append(rconn.recv()) + + expected = [ + [], + [0], + [0, 0], + [0, 1], + [1], + [1, 0], + [1, 1] + ] + self.assertEqual(result, expected) + + @classmethod + def _test_sentinel(cls, event): + event.wait(10.0) + + def test_sentinel(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + event = self.Event() + p = self.Process(target=self._test_sentinel, args=(event,)) + with self.assertRaises(ValueError): + p.sentinel + p.start() + self.addCleanup(p.join) + sentinel = p.sentinel + self.assertIsInstance(sentinel, int) + self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) + event.set() + p.join() + self.assertTrue(wait_for_handle(sentinel, timeout=1)) + + @classmethod + def _test_close(cls, rc=0, q=None): + if q is not None: + q.get() + sys.exit(rc) + + def test_close(self): + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + q = self.Queue() + p = self.Process(target=self._test_close, kwargs={'q': q}) + p.daemon = True + p.start() + self.assertEqual(p.is_alive(), True) + # Child is still alive, cannot close + with self.assertRaises(ValueError): + p.close() + + q.put(None) + p.join() + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.exitcode, 0) + p.close() + with self.assertRaises(ValueError): + p.is_alive() + with self.assertRaises(ValueError): + p.join() + with self.assertRaises(ValueError): + p.terminate() + p.close() + + wr = weakref.ref(p) + del p + gc.collect() + 
self.assertIs(wr(), None) + + close_queue(q) + + def test_many_processes(self): + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sm = multiprocessing.get_start_method() + travis = os.environ.get('COVERAGE') #XXX: travis-ci + N = (1 if travis else 5) if sm == 'spawn' else 100 + + # Try to overwhelm the forkserver loop with events + procs = [self.Process(target=self._test_sleep, args=(0.01,)) + for i in range(N)] + for p in procs: + p.start() + for p in procs: + join_process(p) + for p in procs: + self.assertEqual(p.exitcode, 0) + + procs = [self.Process(target=self._sleep_some) + for i in range(N)] + for p in procs: + p.start() + time.sleep(0.001) # let the children start... + for p in procs: + p.terminate() + for p in procs: + join_process(p) + if os.name != 'nt': + exitcodes = [-signal.SIGTERM] + if sys.platform == 'darwin': + # bpo-31510: On macOS, killing a freshly started process with + # SIGTERM sometimes kills the process with SIGKILL. + exitcodes.append(-signal.SIGKILL) + for p in procs: + self.assertIn(p.exitcode, exitcodes) + + def test_lose_target_ref(self): + c = DummyCallable() + wr = weakref.ref(c) + q = self.Queue() + p = self.Process(target=c, args=(q, c)) + del c + p.start() + p.join() + gc.collect() # For PyPy or other GCs. + self.assertIs(wr(), None) + self.assertEqual(q.get(), 5) + close_queue(q) + + @classmethod + def _test_child_fd_inflation(self, evt, q): + q.put(os_helper.fd_count()) + evt.wait() + + def test_child_fd_inflation(self): + # Number of fds in child processes should not grow with the + # number of running children. + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sm = multiprocessing.get_start_method() + if sm == 'fork': + # The fork method by design inherits all fds from the parent, + # trying to go against it is a lost battle + self.skipTest('test not appropriate for {}'.format(sm)) + + N = 5 + evt = self.Event() + q = self.Queue() + + procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) + for i in range(N)] + for p in procs: + p.start() + + try: + fd_counts = [q.get() for i in range(N)] + self.assertEqual(len(set(fd_counts)), 1, fd_counts) + + finally: + evt.set() + for p in procs: + p.join() + close_queue(q) + + @classmethod + def _test_wait_for_threads(self, evt): + def func1(): + time.sleep(0.5) + evt.set() + + def func2(): + time.sleep(20) + evt.clear() + + threading.Thread(target=func1).start() + threading.Thread(target=func2, daemon=True).start() + + def test_wait_for_threads(self): + # A child process should wait for non-daemonic threads to end + # before exiting + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + evt = self.Event() + proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + + @classmethod + def _test_error_on_stdio_flush(self, evt, break_std_streams={}): + for stream_name, action in break_std_streams.items(): + if action == 'close': + stream = io.StringIO() + stream.close() + else: + assert action == 'remove' + stream = None + setattr(sys, stream_name, None) + evt.set() + + def test_error_on_stdio_flush_1(self): + # Check that Process works with broken standard streams + streams = [io.StringIO(), None] + streams[0].close() + for stream_name in ('stdout', 'stderr'): + for stream in streams: + old_stream = getattr(sys, stream_name) + setattr(sys, stream_name, stream) + try: + evt = self.Event() 
+ proc = self.Process(target=self._test_error_on_stdio_flush, + args=(evt,)) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + self.assertEqual(proc.exitcode, 0) + finally: + setattr(sys, stream_name, old_stream) + + def test_error_on_stdio_flush_2(self): + # Same as test_error_on_stdio_flush_1(), but standard streams are + # broken by the child process + for stream_name in ('stdout', 'stderr'): + for action in ('close', 'remove'): + old_stream = getattr(sys, stream_name) + try: + evt = self.Event() + proc = self.Process(target=self._test_error_on_stdio_flush, + args=(evt, {stream_name: action})) + proc.start() + proc.join() + self.assertTrue(evt.is_set()) + self.assertEqual(proc.exitcode, 0) + finally: + setattr(sys, stream_name, old_stream) + + @classmethod + def _sleep_and_set_event(self, evt, delay=0.0): + time.sleep(delay) + evt.set() + + def check_forkserver_death(self, signum): + # bpo-31308: if the forkserver process has died, we should still + # be able to create and run new Process instances (the forkserver + # is implicitly restarted). + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + sm = multiprocessing.get_start_method() + if sm != 'forkserver': + # The fork method by design inherits all fds from the parent, + # trying to go against it is a lost battle + self.skipTest('test not appropriate for {}'.format(sm)) + + from multiprocess.forkserver import _forkserver + _forkserver.ensure_running() + + # First process sleeps 500 ms + delay = 0.5 + + evt = self.Event() + proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) + proc.start() + + pid = _forkserver._forkserver_pid + os.kill(pid, signum) + # give time to the fork server to die and time to proc to complete + time.sleep(delay * 2.0) + + evt2 = self.Event() + proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) + proc2.start() + proc2.join() + self.assertTrue(evt2.is_set()) + self.assertEqual(proc2.exitcode, 0) + + proc.join() + self.assertTrue(evt.is_set()) + self.assertIn(proc.exitcode, (0, 255)) + + def test_forkserver_sigint(self): + # Catchable signal + self.check_forkserver_death(signal.SIGINT) + + def test_forkserver_sigkill(self): + # Uncatchable signal + if os.name != 'nt': + self.check_forkserver_death(signal.SIGKILL) + + +# +# +# + +class _UpperCaser(multiprocessing.Process): + + def __init__(self): + multiprocessing.Process.__init__(self) + self.child_conn, self.parent_conn = multiprocessing.Pipe() + + def run(self): + self.parent_conn.close() + for s in iter(self.child_conn.recv, None): + self.child_conn.send(s.upper()) + self.child_conn.close() + + def submit(self, s): + assert type(s) is str + self.parent_conn.send(s) + return self.parent_conn.recv() + + def stop(self): + self.parent_conn.send(None) + self.parent_conn.close() + self.child_conn.close() + +class _TestSubclassingProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_subclassing(self): + uppercaser = _UpperCaser() + uppercaser.daemon = True + uppercaser.start() + self.assertEqual(uppercaser.submit('hello'), 'HELLO') + self.assertEqual(uppercaser.submit('world'), 'WORLD') + uppercaser.stop() + uppercaser.join() + + def test_stderr_flush(self): + # sys.stderr is flushed at process shutdown (issue #13812) + if self.TYPE == "threads": + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + testfn = os_helper.TESTFN + self.addCleanup(os_helper.unlink, testfn) + proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) + 
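The _UpperCaser class above is the canonical way to subclass Process around a Pipe: create both connection ends in __init__, close the unused parent end inside run(), and stop the worker with a sentinel. A minimal standalone sketch of the same idiom (Doubler is a hypothetical stand-in, not part of the suite):

import multiprocess as multiprocessing

class Doubler(multiprocessing.Process):
    def __init__(self):
        super().__init__()
        self.child_conn, self.parent_conn = multiprocessing.Pipe()

    def run(self):
        self.parent_conn.close()              # the child uses only its own end
        for n in iter(self.child_conn.recv, None):
            self.child_conn.send(n * 2)
        self.child_conn.close()

if __name__ == '__main__':
    p = Doubler()
    p.daemon = True
    p.start()
    p.parent_conn.send(21)
    print(p.parent_conn.recv())               # 42
    p.parent_conn.send(None)                  # sentinel: ends the child's loop
    p.join()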
proc.start() + proc.join() + with open(testfn, encoding="utf-8") as f: + err = f.read() + # The whole traceback was printed + self.assertIn("ZeroDivisionError", err) + self.assertIn("__init__.py", err) + #self.assertIn("1/0 # MARKER", err) #FIXME + + @classmethod + def _test_stderr_flush(cls, testfn): + fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) + sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) + 1/0 # MARKER + + + @classmethod + def _test_sys_exit(cls, reason, testfn): + fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) + sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) + sys.exit(reason) + + def test_sys_exit(self): + # See Issue 13854 + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + testfn = os_helper.TESTFN + self.addCleanup(os_helper.unlink, testfn) + + for reason in ( + [1, 2, 3], + 'ignore this', + ): + p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) + p.daemon = True + p.start() + join_process(p) + self.assertEqual(p.exitcode, 1) + + with open(testfn, encoding="utf-8") as f: + content = f.read() + self.assertEqual(content.rstrip(), str(reason)) + + os.unlink(testfn) + + cases = [ + ((True,), 1), + ((False,), 0), + ((8,), 8), + ((None,), 0), + ((), 0), + ] + + for args, expected in cases: + with self.subTest(args=args): + p = self.Process(target=sys.exit, args=args) + p.daemon = True + p.start() + join_process(p) + self.assertEqual(p.exitcode, expected) + +# +# +# + +def queue_empty(q): + if hasattr(q, 'empty'): + return q.empty() + else: + return q.qsize() == 0 + +def queue_full(q, maxsize): + if hasattr(q, 'full'): + return q.full() + else: + return q.qsize() == maxsize + + +class _TestQueue(BaseTestCase): + + + @classmethod + def _test_put(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + for i in range(6): + queue.get() + parent_can_continue.set() + + def test_put(self): + MAXSIZE = 6 + queue = self.Queue(maxsize=MAXSIZE) + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_put, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + queue.put(1) + queue.put(2, True) + queue.put(3, True, None) + queue.put(4, False) + queue.put(5, False, None) + queue.put_nowait(6) + + # the values may be in buffer but not yet in pipe so sleep a bit + time.sleep(DELTA) + + self.assertEqual(queue_empty(queue), False) + self.assertEqual(queue_full(queue, MAXSIZE), True) + + put = TimingWrapper(queue.put) + put_nowait = TimingWrapper(queue.put_nowait) + + self.assertRaises(pyqueue.Full, put, 7, False) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, False, None) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put_nowait, 7) + self.assertTimingAlmostEqual(put_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) + + child_can_start.set() + parent_can_continue.wait() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + 
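The battery of put() calls above all assert one contract: once the queue is full, a non-blocking put raises queue.Full at once, and a blocking put with a timeout raises it after roughly that long. A standalone sketch of just that behaviour:

import multiprocess as multiprocessing
import queue as pyqueue

if __name__ == '__main__':
    q = multiprocessing.Queue(maxsize=1)
    q.put('a')                                # fills the queue
    try:
        q.put('b', block=True, timeout=0.1)   # blocks ~0.1 s, then raises
    except pyqueue.Full:
        print('Full after the timeout, as the test expects')
    try:
        q.put_nowait('b')                     # non-blocking variant raises at once
    except pyqueue.Full:
        print('Full immediately')
    q.get()
    q.close()
    q.join_thread()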
proc.join() + close_queue(queue) + + @classmethod + def _test_get(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + #queue.put(1) + queue.put(2) + queue.put(3) + queue.put(4) + queue.put(5) + parent_can_continue.set() + + def test_get(self): + queue = self.Queue() + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_get, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + + child_can_start.set() + parent_can_continue.wait() + + time.sleep(DELTA) + self.assertEqual(queue_empty(queue), False) + + # Hangs unexpectedly, remove for now + #self.assertEqual(queue.get(), 1) + self.assertEqual(queue.get(True, None), 2) + self.assertEqual(queue.get(True), 3) + self.assertEqual(queue.get(timeout=1), 4) + self.assertEqual(queue.get_nowait(), 5) + + self.assertEqual(queue_empty(queue), True) + + get = TimingWrapper(queue.get) + get_nowait = TimingWrapper(queue.get_nowait) + + self.assertRaises(pyqueue.Empty, get, False) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, False, None) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get_nowait) + self.assertTimingAlmostEqual(get_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) + + proc.join() + close_queue(queue) + + @classmethod + def _test_fork(cls, queue): + for i in range(10, 20): + queue.put(i) + # note that at this point the items may only be buffered, so the + # process cannot shutdown until the feeder thread has finished + # pushing items onto the pipe. + + def test_fork(self): + # Old versions of Queue would fail to create a new feeder + # thread for a forked process if the original process had its + # own feeder thread. This test checks that this no longer + # happens. 
+ + queue = self.Queue() + + # put items on queue so that main process starts a feeder thread + for i in range(10): + queue.put(i) + + # wait to make sure thread starts before we fork a new process + time.sleep(DELTA) + + # fork process + p = self.Process(target=self._test_fork, args=(queue,)) + p.daemon = True + p.start() + + # check that all expected items are in the queue + for i in range(20): + self.assertEqual(queue.get(), i) + self.assertRaises(pyqueue.Empty, queue.get, False) + + p.join() + close_queue(queue) + + def test_qsize(self): + q = self.Queue() + try: + self.assertEqual(q.qsize(), 0) + except NotImplementedError: + self.skipTest('qsize method not implemented') + q.put(1) + self.assertEqual(q.qsize(), 1) + q.put(5) + self.assertEqual(q.qsize(), 2) + q.get() + self.assertEqual(q.qsize(), 1) + q.get() + self.assertEqual(q.qsize(), 0) + close_queue(q) + + @classmethod + def _test_task_done(cls, q): + for obj in iter(q.get, None): + time.sleep(DELTA) + q.task_done() + + def test_task_done(self): + queue = self.JoinableQueue() + + workers = [self.Process(target=self._test_task_done, args=(queue,)) + for i in range(4)] + + for p in workers: + p.daemon = True + p.start() + + for i in range(10): + queue.put(i) + + queue.join() + + for p in workers: + queue.put(None) + + for p in workers: + p.join() + close_queue(queue) + + def test_no_import_lock_contention(self): + with os_helper.temp_cwd(): + module_name = 'imported_by_an_imported_module' + with open(module_name + '.py', 'w', encoding="utf-8") as f: + f.write("""if 1: + import multiprocess as multiprocessing + + q = multiprocessing.Queue() + q.put('knock knock') + q.get(timeout=3) + q.close() + del q + """) + + with import_helper.DirsOnSysPath(os.getcwd()): + try: + __import__(module_name) + except pyqueue.Empty: + self.fail("Probable regression on import lock contention;" + " see Issue #22853") + + def test_timeout(self): + q = multiprocessing.Queue() + start = time.monotonic() + self.assertRaises(pyqueue.Empty, q.get, True, 0.200) + delta = time.monotonic() - start + # bpo-30317: Tolerate a delta of 100 ms because of the bad clock + # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once + # failed because the delta was only 135.8 ms. + self.assertGreaterEqual(delta, 0.100) + close_queue(q) + + def test_queue_feeder_donot_stop_onexc(self): + # bpo-30414: verify feeder handles exceptions correctly + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + class NotSerializable(object): + def __reduce__(self): + raise AttributeError + with test.support.captured_stderr(): + q = self.Queue() + q.put(NotSerializable()) + q.put(True) + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + close_queue(q) + + with test.support.captured_stderr(): + # bpo-33078: verify that the queue size is correctly handled + # on errors. + q = self.Queue(maxsize=1) + q.put(NotSerializable()) + q.put(True) + try: + self.assertEqual(q.qsize(), 1) + except NotImplementedError: + # qsize is not available on all platform as it + # relies on sem_getvalue + pass + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + # Check that the size of the queue is correct + self.assertTrue(q.empty()) + close_queue(q) + + def test_queue_feeder_on_queue_feeder_error(self): + # bpo-30006: verify feeder handles exceptions using the + # _on_queue_feeder_error hook. 
+ if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + class NotSerializable(object): + """Mock unserializable object""" + def __init__(self): + self.reduce_was_called = False + self.on_queue_feeder_error_was_called = False + + def __reduce__(self): + self.reduce_was_called = True + raise AttributeError + + class SafeQueue(multiprocessing.queues.Queue): + """Queue with overloaded _on_queue_feeder_error hook""" + @staticmethod + def _on_queue_feeder_error(e, obj): + if (isinstance(e, AttributeError) and + isinstance(obj, NotSerializable)): + obj.on_queue_feeder_error_was_called = True + + not_serializable_obj = NotSerializable() + # The captured_stderr reduces the noise in the test report + with test.support.captured_stderr(): + q = SafeQueue(ctx=multiprocessing.get_context()) + q.put(not_serializable_obj) + + # Verify that q is still functioning correctly + q.put(True) + self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) + + # Assert that the serialization and the hook have been called correctly + self.assertTrue(not_serializable_obj.reduce_was_called) + self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) + + def test_closed_queue_put_get_exceptions(self): + for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): + q.close() + with self.assertRaisesRegex(ValueError, 'is closed'): + q.put('foo') + with self.assertRaisesRegex(ValueError, 'is closed'): + q.get() +# +# +# + +class _TestLock(BaseTestCase): + + def test_lock(self): + lock = self.Lock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(False), False) + self.assertEqual(lock.release(), None) + self.assertRaises((ValueError, threading.ThreadError), lock.release) + + def test_rlock(self): + lock = self.RLock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertRaises((AssertionError, RuntimeError), lock.release) + + def test_lock_context(self): + with self.Lock(): + pass + + +class _TestSemaphore(BaseTestCase): + + def _test_semaphore(self, sem): + self.assertReturnsIfImplemented(2, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.acquire(False), False) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(2, get_value, sem) + + def test_semaphore(self): + sem = self.Semaphore(2) + self._test_semaphore(sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(3, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(4, get_value, sem) + + def test_bounded_semaphore(self): + sem = self.BoundedSemaphore(2) + self._test_semaphore(sem) + # Currently fails on OS/X + #if HAVE_GETVALUE: + # self.assertRaises(ValueError, sem.release) + # self.assertReturnsIfImplemented(2, get_value, sem) + + def test_timeout(self): + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + sem = self.Semaphore(0) + acquire = TimingWrapper(sem.acquire) + + self.assertEqual(acquire(False), False) + 
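The Lock and RLock assertions above encode the ownership rules: a plain Lock is not reentrant and cannot be released when unheld (the test tolerates ValueError or threading.ThreadError across platforms), while an RLock nests and must be released once per acquire. A standalone sketch:

import multiprocess as multiprocessing

if __name__ == '__main__':
    lock = multiprocessing.Lock()
    assert lock.acquire() is True
    assert lock.acquire(False) is False       # not reentrant, even in-process
    lock.release()
    try:
        lock.release()                        # releasing an unheld Lock fails
    except (ValueError, RuntimeError):        # exception type is platform-dependent
        print('cannot release an unheld Lock')

    rlock = multiprocessing.RLock()
    rlock.acquire()
    rlock.acquire()                           # reentrant in the owning process
    rlock.release()
    rlock.release()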
self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, None), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, TIMEOUT1), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0) + + self.assertEqual(acquire(True, TIMEOUT2), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) + + self.assertEqual(acquire(timeout=TIMEOUT3), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) + + +class _TestCondition(BaseTestCase): + + @classmethod + def f(cls, cond, sleeping, woken, timeout=None): + cond.acquire() + sleeping.release() + cond.wait(timeout) + woken.release() + cond.release() + + def assertReachesEventually(self, func, value): + for i in range(10): + try: + if func() == value: + break + except NotImplementedError: + break + time.sleep(DELTA) + time.sleep(DELTA) + self.assertReturnsIfImplemented(value, func) + + def check_invariant(self, cond): + # this is only supposed to succeed when there are no sleepers + if self.TYPE == 'processes': + try: + sleepers = (cond._sleeping_count.get_value() - + cond._woken_count.get_value()) + self.assertEqual(sleepers, 0) + self.assertEqual(cond._wait_semaphore.get_value(), 0) + except NotImplementedError: + pass + + def test_notify(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + # wait for both children to start sleeping + sleeping.acquire() + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake up one process/thread + cond.acquire() + cond.notify() + cond.release() + + # check one process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(1, get_value, woken) + + # wake up another + cond.acquire() + cond.notify() + cond.release() + + # check other has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(2, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + p.join() + + def test_notify_all(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes which will timeout + for i in range(3): + p = self.Process(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them all to sleep + for i in range(6): + sleeping.acquire() + + # check they have all timed out + for i in range(6): + woken.acquire() + self.assertReturnsIfImplemented(0, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + # start some more threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them to all sleep + for i in range(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake them all up + 
cond.acquire() + cond.notify_all() + cond.release() + + # check they have all woken + self.assertReachesEventually(lambda: get_value(woken), 6) + + # check state is not mucked up + self.check_invariant(cond) + + def test_notify_n(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + self.addCleanup(p.join) + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.daemon = True + t.start() + self.addCleanup(t.join) + + # wait for them to all sleep + for i in range(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake some of them up + cond.acquire() + cond.notify(n=2) + cond.release() + + # check 2 have woken + self.assertReachesEventually(lambda: get_value(woken), 2) + + # wake the rest of them + cond.acquire() + cond.notify(n=4) + cond.release() + + self.assertReachesEventually(lambda: get_value(woken), 6) + + # doesn't do anything more + cond.acquire() + cond.notify(n=3) + cond.release() + + self.assertReturnsIfImplemented(6, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + def test_timeout(self): + cond = self.Condition() + wait = TimingWrapper(cond.wait) + cond.acquire() + res = wait(TIMEOUT1) + cond.release() + self.assertEqual(res, False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + @classmethod + def _test_waitfor_f(cls, cond, state): + with cond: + state.value = 0 + cond.notify() + result = cond.wait_for(lambda : state.value==4) + if not result or state.value != 4: + sys.exit(1) + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', -1) + + p = self.Process(target=self._test_waitfor_f, args=(cond, state)) + p.daemon = True + p.start() + + with cond: + result = cond.wait_for(lambda : state.value==0) + self.assertTrue(result) + self.assertEqual(state.value, 0) + + for i in range(4): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + join_process(p) + self.assertEqual(p.exitcode, 0) + + @classmethod + def _test_waitfor_timeout_f(cls, cond, state, success, sem): + sem.release() + with cond: + expected = 0.1 + dt = time.monotonic() + result = cond.wait_for(lambda : state.value==4, timeout=expected) + dt = time.monotonic() - dt + # borrow logic in assertTimeout() from test/lock_tests.py + if not result and expected * 0.6 < dt < expected * 10.0: + success.value = True + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor_timeout(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', 0) + success = self.Value('i', False) + sem = self.Semaphore(0) + + p = self.Process(target=self._test_waitfor_timeout_f, + args=(cond, state, success, sem)) + p.daemon = True + p.start() + self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) + + # Only increment 3 times, so state == 4 is never reached. 
+ for i in range(3): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + join_process(p) + self.assertTrue(success.value) + + @classmethod + def _test_wait_result(cls, c, pid): + with c: + c.notify() + time.sleep(1) + if pid is not None: + os.kill(pid, signal.SIGINT) + + def test_wait_result(self): + if isinstance(self, ProcessesMixin) and sys.platform != 'win32': + pid = os.getpid() + else: + pid = None + + c = self.Condition() + with c: + self.assertFalse(c.wait(0)) + self.assertFalse(c.wait(0.1)) + + p = self.Process(target=self._test_wait_result, args=(c, pid)) + p.start() + + self.assertTrue(c.wait(60)) + if pid is not None: + self.assertRaises(KeyboardInterrupt, c.wait, 60) + + p.join() + + +class _TestEvent(BaseTestCase): + + @classmethod + def _test_event(cls, event): + time.sleep(TIMEOUT2) + event.set() + + def test_event(self): + event = self.Event() + wait = TimingWrapper(event.wait) + + # Removed temporarily, due to API shear, this does not + # work with threading._Event objects. is_set == isSet + self.assertEqual(event.is_set(), False) + + # Removed, threading.Event.wait() will return the value of the __flag + # instead of None. API Shear with the semaphore backed mp.Event + self.assertEqual(wait(0.0), False) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + event.set() + + # See note above on the API differences + self.assertEqual(event.is_set(), True) + self.assertEqual(wait(), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + # self.assertEqual(event.is_set(), True) + + event.clear() + + #self.assertEqual(event.is_set(), False) + + p = self.Process(target=self._test_event, args=(event,)) + p.daemon = True + p.start() + self.assertEqual(wait(), True) + p.join() + +# +# Tests for Barrier - adapted from tests in test/lock_tests.py +# + +# Many of the tests for threading.Barrier use a list as an atomic +# counter: a value is appended to increment the counter, and the +# length of the list gives the value. We use the class DummyList +# for the same purpose. + +class _DummyList(object): + + def __init__(self): + wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) + lock = multiprocessing.Lock() + self.__setstate__((wrapper, lock)) + self._lengthbuf[0] = 0 + + def __setstate__(self, state): + (self._wrapper, self._lock) = state + self._lengthbuf = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._wrapper, self._lock) + + def append(self, _): + with self._lock: + self._lengthbuf[0] += 1 + + def __len__(self): + with self._lock: + return self._lengthbuf[0] + +def _wait(): + # A crude wait/yield function not relying on synchronization primitives. + time.sleep(0.01) + + +class Bunch(object): + """ + A bunch of threads. + """ + def __init__(self, namespace, f, args, n, wait_before_exit=False): + """ + Construct a bunch of `n` threads running the same function `f`. + If `wait_before_exit` is True, the threads won't terminate until + do_finish() is called. 
+ """
+ """ + self.f = f + self.args = args + self.n = n + self.started = namespace.DummyList() + self.finished = namespace.DummyList() + self._can_exit = namespace.Event() + if not wait_before_exit: + self._can_exit.set() + + threads = [] + for i in range(n): + p = namespace.Process(target=self.task) + p.daemon = True + p.start() + threads.append(p) + + def finalize(threads): + for p in threads: + p.join() + + self._finalizer = weakref.finalize(self, finalize, threads) + + def task(self): + pid = os.getpid() + self.started.append(pid) + try: + self.f(*self.args) + finally: + self.finished.append(pid) + self._can_exit.wait(30) + assert self._can_exit.is_set() + + def wait_for_started(self): + while len(self.started) < self.n: + _wait() + + def wait_for_finished(self): + while len(self.finished) < self.n: + _wait() + + def do_finish(self): + self._can_exit.set() + + def close(self): + self._finalizer() + + +class AppendTrue(object): + def __init__(self, obj): + self.obj = obj + def __call__(self): + self.obj.append(True) + + +class _TestBarrier(BaseTestCase): + """ + Tests for Barrier objects. + """ + N = 5 + defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout + + def setUp(self): + self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) + + def tearDown(self): + self.barrier.abort() + self.barrier = None + + def DummyList(self): + if self.TYPE == 'threads': + return [] + elif self.TYPE == 'manager': + return self.manager.list() + else: + return _DummyList() + + def run_threads(self, f, args): + b = Bunch(self, f, args, self.N-1) + try: + f(*args) + b.wait_for_finished() + finally: + b.close() + + @classmethod + def multipass(cls, barrier, results, n): + m = barrier.parties + assert m == cls.N + for i in range(n): + results[0].append(True) + assert len(results[1]) == i * m + barrier.wait() + results[1].append(True) + assert len(results[0]) == (i + 1) * m + barrier.wait() + try: + assert barrier.n_waiting == 0 + except NotImplementedError: + pass + assert not barrier.broken + + def test_barrier(self, passes=1): + """ + Test that a barrier is passed in lockstep + """ + results = [self.DummyList(), self.DummyList()] + self.run_threads(self.multipass, (self.barrier, results, passes)) + + def test_barrier_10(self): + """ + Test that a barrier works for 10 consecutive runs + """ + return self.test_barrier(10) + + @classmethod + def _test_wait_return_f(cls, barrier, queue): + res = barrier.wait() + queue.put(res) + + def test_wait_return(self): + """ + test the return value from barrier.wait + """ + queue = self.Queue() + self.run_threads(self._test_wait_return_f, (self.barrier, queue)) + results = [queue.get() for i in range(self.N)] + self.assertEqual(results.count(0), 1) + close_queue(queue) + + @classmethod + def _test_action_f(cls, barrier, results): + barrier.wait() + if len(results) != 1: + raise RuntimeError + + def test_action(self): + """ + Test the 'action' callback + """ + results = self.DummyList() + barrier = self.Barrier(self.N, action=AppendTrue(results)) + self.run_threads(self._test_action_f, (barrier, results)) + self.assertEqual(len(results), 1) + + @classmethod + def _test_abort_f(cls, barrier, results1, results2): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + + def test_abort(self): + """ + Test that an abort will put the barrier in a broken state + """ + results1 = self.DummyList() + 
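The Barrier machinery above relies on one detail of wait(): each of the `parties` callers receives a distinct integer in range(parties), which is how _test_wait_return_f can assert that exactly one caller sees 0 and how the abort/reset helpers elect a single party to misbehave. A standalone sketch (the party helper is hypothetical):

import multiprocess as multiprocessing

def party(barrier):
    i = barrier.wait()        # unique index in range(barrier.parties)
    if i == 0:
        print('exactly one party is elected')

if __name__ == '__main__':
    b = multiprocessing.Barrier(3)
    procs = [multiprocessing.Process(target=party, args=(b,))
             for _ in range(2)]
    for p in procs:
        p.start()
    party(b)                  # the parent participates as the third party
    for p in procs:
        p.join()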
results2 = self.DummyList() + self.run_threads(self._test_abort_f, + (self.barrier, results1, results2)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertTrue(self.barrier.broken) + + @classmethod + def _test_reset_f(cls, barrier, results1, results2, results3): + i = barrier.wait() + if i == cls.N//2: + # Wait until the other threads are all in the barrier. + while barrier.n_waiting < cls.N-1: + time.sleep(0.001) + barrier.reset() + else: + try: + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + # Now, pass the barrier again + barrier.wait() + results3.append(True) + + def test_reset(self): + """ + Test that a 'reset' on a barrier frees the waiting threads + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + self.run_threads(self._test_reset_f, + (self.barrier, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_abort_and_reset_f(cls, barrier, barrier2, + results1, results2, results3): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + # Synchronize and reset the barrier. Must synchronize first so + # that everyone has left it when we reset, and after so that no + # one enters it before the reset. + if barrier2.wait() == cls.N//2: + barrier.reset() + barrier2.wait() + barrier.wait() + results3.append(True) + + def test_abort_and_reset(self): + """ + Test that a barrier can be reset after being broken. + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + barrier2 = self.Barrier(self.N) + + self.run_threads(self._test_abort_and_reset_f, + (self.barrier, barrier2, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_timeout_f(cls, barrier, results): + i = barrier.wait() + if i == cls.N//2: + # One thread is late! 
+ time.sleep(1.0) + try: + barrier.wait(0.5) + except threading.BrokenBarrierError: + results.append(True) + + def test_timeout(self): + """ + Test wait(timeout) + """ + results = self.DummyList() + self.run_threads(self._test_timeout_f, (self.barrier, results)) + self.assertEqual(len(results), self.barrier.parties) + + @classmethod + def _test_default_timeout_f(cls, barrier, results): + i = barrier.wait(cls.defaultTimeout) + if i == cls.N//2: + # One thread is later than the default timeout + time.sleep(1.0) + try: + barrier.wait() + except threading.BrokenBarrierError: + results.append(True) + + def test_default_timeout(self): + """ + Test the barrier's default timeout + """ + barrier = self.Barrier(self.N, timeout=0.5) + results = self.DummyList() + self.run_threads(self._test_default_timeout_f, (barrier, results)) + self.assertEqual(len(results), barrier.parties) + + def test_single_thread(self): + b = self.Barrier(1) + b.wait() + b.wait() + + @classmethod + def _test_thousand_f(cls, barrier, passes, conn, lock): + for i in range(passes): + barrier.wait() + with lock: + conn.send(i) + + def test_thousand(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + passes = 1000 + lock = self.Lock() + conn, child_conn = self.Pipe(False) + for j in range(self.N): + p = self.Process(target=self._test_thousand_f, + args=(self.barrier, passes, child_conn, lock)) + p.start() + self.addCleanup(p.join) + + for i in range(passes): + for j in range(self.N): + self.assertEqual(conn.recv(), i) + +# +# +# + +class _TestValue(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + codes_values = [ + ('i', 4343, 24234), + ('d', 3.625, -4.25), + ('h', -232, 234), + ('q', 2 ** 33, 2 ** 34), + ('c', latin('x'), latin('y')) + ] + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocess.sharedctypes") + + @classmethod + def _test(cls, values): + for sv, cv in zip(values, cls.codes_values): + sv.value = cv[2] + + + def test_value(self, raw=False): + if raw: + values = [self.RawValue(code, value) + for code, value, _ in self.codes_values] + else: + values = [self.Value(code, value) + for code, value, _ in self.codes_values] + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[1]) + + proc = self.Process(target=self._test, args=(values,)) + proc.daemon = True + proc.start() + proc.join() + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[2]) + + def test_rawvalue(self): + self.test_value(raw=True) + + def test_getobj_getlock(self): + val1 = self.Value('i', 5) + lock1 = val1.get_lock() + obj1 = val1.get_obj() + + val2 = self.Value('i', 5, lock=None) + lock2 = val2.get_lock() + obj2 = val2.get_obj() + + lock = self.Lock() + val3 = self.Value('i', 5, lock=lock) + lock3 = val3.get_lock() + obj3 = val3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Value('i', 5, lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + + self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') + + arr5 = self.RawValue('i', 5) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + + +class _TestArray(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def f(cls, seq): + for i in range(1, len(seq)): + seq[i] += seq[i-1] + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array(self, raw=False): + seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] + if raw: + arr 
= self.RawArray('i', seq) + else: + arr = self.Array('i', seq) + + self.assertEqual(len(arr), len(seq)) + self.assertEqual(arr[3], seq[3]) + self.assertEqual(list(arr[2:7]), list(seq[2:7])) + + arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) + + self.assertEqual(list(arr[:]), seq) + + self.f(seq) + + p = self.Process(target=self.f, args=(arr,)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(list(arr[:]), seq) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array_from_size(self): + size = 10 + # Test for zeroing (see issue #11675). + # The repetition below strengthens the test by increasing the chances + # of previously allocated non-zero memory being used for the new array + # on the 2nd and 3rd loops. + for _ in range(3): + arr = self.Array('i', size) + self.assertEqual(len(arr), size) + self.assertEqual(list(arr), [0] * size) + arr[:] = range(10) + self.assertEqual(list(arr), list(range(10))) + del arr + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_rawarray(self): + self.test_array(raw=True) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_getobj_getlock_obj(self): + arr1 = self.Array('i', list(range(10))) + lock1 = arr1.get_lock() + obj1 = arr1.get_obj() + + arr2 = self.Array('i', list(range(10)), lock=None) + lock2 = arr2.get_lock() + obj2 = arr2.get_obj() + + lock = self.Lock() + arr3 = self.Array('i', list(range(10)), lock=lock) + lock3 = arr3.get_lock() + obj3 = arr3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Array('i', range(10), lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + self.assertRaises(AttributeError, + self.Array, 'i', range(10), lock='notalock') + + arr5 = self.RawArray('i', range(10)) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + +# +# +# + +class _TestContainers(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_list(self): + a = self.list(list(range(10))) + self.assertEqual(a[:], list(range(10))) + + b = self.list() + self.assertEqual(b[:], []) + + b.extend(list(range(5))) + self.assertEqual(b[:], list(range(5))) + + self.assertEqual(b[2], 2) + self.assertEqual(b[2:10], [2,3,4]) + + b *= 2 + self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) + + self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) + + self.assertEqual(a[:], list(range(10))) + + d = [a, b] + e = self.list(d) + self.assertEqual( + [element[:] for element in e], + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] + ) + + f = self.list([a]) + a.append('hello') + self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) + + def test_list_iter(self): + a = self.list(list(range(10))) + it = iter(a) + self.assertEqual(list(it), list(range(10))) + self.assertEqual(list(it), []) # exhausted + # list modified during iteration + it = iter(a) + a[0] = 100 + self.assertEqual(next(it), 100) + + def test_list_proxy_in_list(self): + a = self.list([self.list(range(3)) for _i in range(3)]) + self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) + + a[0][-1] = 55 + self.assertEqual(a[0][:], [0, 1, 55]) + for i in range(1, 3): + self.assertEqual(a[i][:], [0, 1, 2]) + + self.assertEqual(a[1].pop(), 2) + self.assertEqual(len(a[1]), 2) + for i in range(0, 3, 2): + self.assertEqual(len(a[i]), 3) + + del a + + b = self.list() + b.append(b) + del b + + def test_dict(self): + d = self.dict() + indices = list(range(65, 70)) + for i in indices: + d[i] = chr(i) + 
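The lock= variants checked in test_getobj_getlock_obj above come in three flavours: the default wraps the ctypes object in a synchronized wrapper with its own RLock, lock=<a lock> reuses a caller-supplied lock, and lock=False (like RawValue/RawArray) returns the bare unsynchronized object with no get_lock()/get_obj(). A standalone sketch of the default, synchronized case:

import multiprocess as multiprocessing

def bump(v, a):
    with v.get_lock():        # the default Value carries its own RLock
        v.value += 1
    for i in range(len(a)):
        a[i] *= 2

if __name__ == '__main__':
    v = multiprocessing.Value('i', 41)
    a = multiprocessing.Array('i', [1, 2, 3])
    raw = multiprocessing.RawValue('i', 0)
    print(hasattr(raw, 'get_lock'))           # False: raw objects are bare
    p = multiprocessing.Process(target=bump, args=(v, a))
    p.start()
    p.join()
    print(v.value, list(a))                   # 42 [2, 4, 6]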
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) + self.assertEqual(sorted(d.keys()), indices) + self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) + self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) + + def test_dict_iter(self): + d = self.dict() + indices = list(range(65, 70)) + for i in indices: + d[i] = chr(i) + it = iter(d) + self.assertEqual(list(it), indices) + self.assertEqual(list(it), []) # exhausted + # dictionary changed size during iteration + it = iter(d) + d.clear() + self.assertRaises(RuntimeError, next, it) + + def test_dict_proxy_nested(self): + pets = self.dict(ferrets=2, hamsters=4) + supplies = self.dict(water=10, feed=3) + d = self.dict(pets=pets, supplies=supplies) + + self.assertEqual(supplies['water'], 10) + self.assertEqual(d['supplies']['water'], 10) + + d['supplies']['blankets'] = 5 + self.assertEqual(supplies['blankets'], 5) + self.assertEqual(d['supplies']['blankets'], 5) + + d['supplies']['water'] = 7 + self.assertEqual(supplies['water'], 7) + self.assertEqual(d['supplies']['water'], 7) + + del pets + del supplies + self.assertEqual(d['pets']['ferrets'], 2) + d['supplies']['blankets'] = 11 + self.assertEqual(d['supplies']['blankets'], 11) + + pets = d['pets'] + supplies = d['supplies'] + supplies['water'] = 7 + self.assertEqual(supplies['water'], 7) + self.assertEqual(d['supplies']['water'], 7) + + d.clear() + self.assertEqual(len(d), 0) + self.assertEqual(supplies['water'], 7) + self.assertEqual(pets['hamsters'], 4) + + l = self.list([pets, supplies]) + l[0]['marmots'] = 1 + self.assertEqual(pets['marmots'], 1) + self.assertEqual(l[0]['marmots'], 1) + + del pets + del supplies + self.assertEqual(l[0]['marmots'], 1) + + outer = self.list([[88, 99], l]) + self.assertIsInstance(outer[0], list) # Not a ListProxy + self.assertEqual(outer[-1][-1]['feed'], 3) + + def test_nested_queue(self): + a = self.list() # Test queue inside list + a.append(self.Queue()) + a[0].put(123) + self.assertEqual(a[0].get(), 123) + b = self.dict() # Test queue inside dict + b[0] = self.Queue() + b[0].put(456) + self.assertEqual(b[0].get(), 456) + + def test_namespace(self): + n = self.Namespace() + n.name = 'Bob' + n.job = 'Builder' + n._hidden = 'hidden' + self.assertEqual((n.name, n.job), ('Bob', 'Builder')) + del n.job + self.assertEqual(str(n), "Namespace(name='Bob')") + self.assertTrue(hasattr(n, 'name')) + self.assertTrue(not hasattr(n, 'job')) + +# +# +# + +def sqr(x, wait=0.0): + time.sleep(wait) + return x*x + +def mul(x, y): + return x*y + +def raise_large_valuerror(wait): + time.sleep(wait) + raise ValueError("x" * 1024**2) + +def identity(x): + return x + +class CountedObject(object): + n_instances = 0 + + def __new__(cls): + cls.n_instances += 1 + return object.__new__(cls) + + def __del__(self): + type(self).n_instances -= 1 + +class SayWhenError(ValueError): pass + +def exception_throwing_generator(total, when): + if when == -1: + raise SayWhenError("Somebody said when") + for i in range(total): + if i == when: + raise SayWhenError("Somebody said when") + yield i + + +class _TestPool(BaseTestCase): + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.pool = cls.Pool(4) + + @classmethod + def tearDownClass(cls): + cls.pool.terminate() + cls.pool.join() + cls.pool = None + super().tearDownClass() + + def test_apply(self): + papply = self.pool.apply + self.assertEqual(papply(sqr, (5,)), sqr(5)) + self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) + + def test_map(self): + pmap = self.pool.map + 
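Every read and write on the manager containers above is a round-trip to the manager process, and a proxy stored inside another managed container stays live, which is precisely what test_dict_proxy_nested verifies. A standalone sketch:

import multiprocess as multiprocessing

if __name__ == '__main__':
    with multiprocessing.Manager() as m:
        d = m.dict(pets=m.dict(ferrets=2))
        d['pets']['ferrets'] += 1             # write through nested proxies
        print(d['pets']['ferrets'])           # 3
        ns = m.Namespace()
        ns.name = 'Bob'
        print(ns.name)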
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) + self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), + list(map(sqr, list(range(100))))) + + def test_starmap(self): + psmap = self.pool.starmap + tuples = list(zip(range(10), range(9,-1, -1))) + self.assertEqual(psmap(mul, tuples), + list(itertools.starmap(mul, tuples))) + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(psmap(mul, tuples, chunksize=20), + list(itertools.starmap(mul, tuples))) + + def test_starmap_async(self): + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(self.pool.starmap_async(mul, tuples).get(), + list(itertools.starmap(mul, tuples))) + + def test_map_async(self): + self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), + list(map(sqr, list(range(10))))) + + def test_map_async_callbacks(self): + call_args = self.manager.list() if self.TYPE == 'manager' else [] + self.pool.map_async(int, ['1'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(1, len(call_args)) + self.assertEqual([1], call_args[0]) + self.pool.map_async(int, ['a'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(2, len(call_args)) + self.assertIsInstance(call_args[1], ValueError) + + def test_map_unpicklable(self): + # Issue #19425 -- failure to pickle should not cause a hang + if self.TYPE == 'threads': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + class A(object): + def __reduce__(self): + raise RuntimeError('cannot pickle') + with self.assertRaises(RuntimeError): + self.pool.map(sqr, [A()]*10) + + def test_map_chunksize(self): + try: + self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) + except multiprocessing.TimeoutError: + self.fail("pool.map_async with chunksize stalled on null list") + + def test_map_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(1, -1), 1) + # again, make sure it's reentrant + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(1, -1), 1) + + with self.assertRaises(SayWhenError): + self.pool.map(sqr, exception_throwing_generator(10, 3), 1) + + class SpecialIterable: + def __iter__(self): + return self + def __next__(self): + raise SayWhenError + def __len__(self): + return 1 + with self.assertRaises(SayWhenError): + self.pool.map(sqr, SpecialIterable(), 1) + with self.assertRaises(SayWhenError): + self.pool.map(sqr, SpecialIterable(), 1) + + def test_async(self): + res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) + get = TimingWrapper(res.get) + self.assertEqual(get(), 49) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + def test_async_timeout(self): + res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) + get = TimingWrapper(res.get) + self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) + + def test_imap(self): + it = self.pool.imap(sqr, list(range(10))) + self.assertEqual(list(it), list(map(sqr, list(range(10))))) + + it = self.pool.imap(sqr, list(range(10))) + for i in range(10): + self.assertEqual(next(it), i*i) + self.assertRaises(StopIteration, it.__next__) + + it = self.pool.imap(sqr, list(range(1000)), chunksize=100) + for i in range(1000): + self.assertEqual(next(it), i*i) +
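test_imap above pins down the ordering contract: imap yields results in argument order and finishes with StopIteration, while imap_unordered (exercised next) only promises the same multiset of results. A standalone sketch:

import multiprocess as multiprocessing

def sqr(x):
    return x * x

if __name__ == '__main__':
    with multiprocessing.Pool(2) as pool:
        print(pool.map(sqr, range(5)))                     # [0, 1, 4, 9, 16]
        print(list(pool.imap(sqr, range(5))))              # same values, lazily
        print(sorted(pool.imap_unordered(sqr, range(5))))  # order not guaranteed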
self.assertRaises(StopIteration, it.__next__) + + def test_imap_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) + self.assertRaises(SayWhenError, it.__next__) + # again, make sure it's reentrant + it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) + self.assertRaises(SayWhenError, it.__next__) + + it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) + for i in range(3): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + + # SayWhenError seen at start of problematic chunk's results + it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) + for i in range(6): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) + for i in range(4): + self.assertEqual(next(it), i*i) + self.assertRaises(SayWhenError, it.__next__) + + def test_imap_unordered(self): + it = self.pool.imap_unordered(sqr, list(range(10))) + self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) + + it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) + self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) + + def test_imap_unordered_handle_iterable_exception(self): + if self.TYPE == 'manager': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + # SayWhenError seen at the very first of the iterable + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(1, -1), + 1) + self.assertRaises(SayWhenError, it.__next__) + # again, make sure it's reentrant + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(1, -1), + 1) + self.assertRaises(SayWhenError, it.__next__) + + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(10, 3), + 1) + expected_values = list(map(sqr, list(range(10)))) + with self.assertRaises(SayWhenError): + # imap_unordered makes it difficult to anticipate the SayWhenError + for i in range(10): + value = next(it) + self.assertIn(value, expected_values) + expected_values.remove(value) + + it = self.pool.imap_unordered(sqr, + exception_throwing_generator(20, 7), + 2) + expected_values = list(map(sqr, list(range(20)))) + with self.assertRaises(SayWhenError): + for i in range(20): + value = next(it) + self.assertIn(value, expected_values) + expected_values.remove(value) + + def test_make_pool(self): + expected_error = (RemoteError if self.TYPE == 'manager' + else ValueError) + + self.assertRaises(expected_error, self.Pool, -1) + self.assertRaises(expected_error, self.Pool, 0) + + if self.TYPE != 'manager': + p = self.Pool(3) + try: + self.assertEqual(3, len(p._pool)) + finally: + p.close() + p.join() + + def test_terminate(self): + result = self.pool.map_async( + time.sleep, [0.1 for i in range(10000)], chunksize=1 + ) + self.pool.terminate() + join = TimingWrapper(self.pool.join) + join() + # Sanity check the pool didn't wait for all tasks to finish + self.assertLess(join.elapsed, 2.0) + + def test_empty_iterable(self): + # See Issue 12157 + p = self.Pool(1) + + self.assertEqual(p.map(sqr, []), []) + self.assertEqual(list(p.imap(sqr, [])), []) + self.assertEqual(list(p.imap_unordered(sqr, [])), []) + self.assertEqual(p.map_async(sqr, []).get(), []) + + p.close() + p.join() + + def test_context(self): + if self.TYPE == 'processes': + L = list(range(10)) + expected = [sqr(i) 
for i in L] + with self.Pool(2) as p: + r = p.map_async(sqr, L) + self.assertEqual(r.get(), expected) + p.join() + self.assertRaises(ValueError, p.map_async, sqr, L) + + @classmethod + def _test_traceback(cls): + raise RuntimeError(123) # some comment + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_traceback(self): + # We want to ensure that the traceback from the child process is + # contained in the traceback raised in the main process. + if self.TYPE == 'processes': + with self.Pool(1) as p: + try: + p.apply(self._test_traceback) + except Exception as e: + exc = e + else: + self.fail('expected RuntimeError') + p.join() + self.assertIs(type(exc), RuntimeError) + self.assertEqual(exc.args, (123,)) + cause = exc.__cause__ + self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) + self.assertIn('raise RuntimeError(123) # some comment', cause.tb) + + with test.support.captured_stderr() as f1: + try: + raise exc + except RuntimeError: + sys.excepthook(*sys.exc_info()) + self.assertIn('raise RuntimeError(123) # some comment', + f1.getvalue()) + # _helper_reraises_exception should not make the error + # a remote exception + with self.Pool(1) as p: + try: + p.map(sqr, exception_throwing_generator(1, -1), 1) + except Exception as e: + exc = e + else: + self.fail('expected SayWhenError') + self.assertIs(type(exc), SayWhenError) + self.assertIs(exc.__cause__, None) + p.join() + + @classmethod + def _test_wrapped_exception(cls): + raise RuntimeError('foo') + + @unittest.skipIf(True, "fails with is_dill(obj, child=True)") + def test_wrapped_exception(self): + # Issue #20980: Should not wrap exception when using thread pool + with self.Pool(1) as p: + with self.assertRaises(RuntimeError): + p.apply(self._test_wrapped_exception) + p.join() + + def test_map_no_failfast(self): + # Issue #23992: the fail-fast behaviour when an exception is raised + # during map() would make Pool.join() deadlock, because a worker + # process would fill the result queue (after the result handler thread + # terminated, hence not draining it anymore). + + t_start = time.monotonic() + + with self.assertRaises(ValueError): + with self.Pool(2) as p: + try: + p.map(raise_large_valuerror, [0, 1]) + finally: + time.sleep(0.5) + p.close() + p.join() + + # check that we indeed waited for all jobs + self.assertGreater(time.monotonic() - t_start, 0.9) + + def test_release_task_refs(self): + # Issue #29861: task arguments and results should not be kept + # alive after we are done with them. + objs = [CountedObject() for i in range(10)] + refs = [weakref.ref(o) for o in objs] + self.pool.map(identity, objs) + + del objs + gc.collect() # For PyPy or other GCs. + time.sleep(DELTA) # let threaded cleanup code run + self.assertEqual(set(wr() for wr in refs), {None}) + # With a process pool, copies of the objects are returned, check + # they were released too.
+ self.assertEqual(CountedObject.n_instances, 0) + + def test_enter(self): + if self.TYPE == 'manager': + self.skipTest("test not applicable to manager") + + pool = self.Pool(1) + with pool: + pass + # call pool.terminate() + # pool is no longer running + + with self.assertRaises(ValueError): + # bpo-35477: pool.__enter__() fails if the pool is not running + with pool: + pass + pool.join() + + def test_resource_warning(self): + if self.TYPE == 'manager': + self.skipTest("test not applicable to manager") + + pool = self.Pool(1) + pool.terminate() + pool.join() + + # force state to RUN to emit ResourceWarning in __del__() + pool._state = multiprocessing.pool.RUN + + with warnings_helper.check_warnings( + ('unclosed running multiprocessing pool', ResourceWarning)): + pool = None + support.gc_collect() + +def raising(): + raise KeyError("key") + +def unpickleable_result(): + return lambda: 42 + +class _TestPoolWorkerErrors(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_async_error_callback(self): + p = multiprocessing.Pool(2) + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(raising, error_callback=errback) + self.assertRaises(KeyError, res.get) + self.assertTrue(scratchpad[0]) + self.assertIsInstance(scratchpad[0], KeyError) + + p.close() + p.join() + + def _test_unpickleable_result(self): + from multiprocess.pool import MaybeEncodingError + p = multiprocessing.Pool(2) + + # Make sure we don't lose pool processes because of encoding errors. + for iteration in range(20): + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(unpickleable_result, error_callback=errback) + self.assertRaises(MaybeEncodingError, res.get) + wrapped = scratchpad[0] + self.assertTrue(wrapped) + self.assertIsInstance(scratchpad[0], MaybeEncodingError) + self.assertIsNotNone(wrapped.exc) + self.assertIsNotNone(wrapped.value) + + p.close() + p.join() + +class _TestPoolWorkerLifetime(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_pool_worker_lifetime(self): + p = multiprocessing.Pool(3, maxtasksperchild=10) + self.assertEqual(3, len(p._pool)) + origworkerpids = [w.pid for w in p._pool] + # Run many tasks so each worker gets replaced (hopefully) + results = [] + for i in range(100): + results.append(p.apply_async(sqr, (i, ))) + # Fetch the results and verify we got the right answers, + # also ensuring all the tasks have completed. + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + # Refill the pool + p._repopulate_pool() + # Wait until all workers are alive + # (countdown * DELTA = 5 seconds max startup process time) + countdown = 50 + while countdown and not all(w.is_alive() for w in p._pool): + countdown -= 1 + time.sleep(DELTA) + finalworkerpids = [w.pid for w in p._pool] + # All pids should be assigned. See issue #7805. + self.assertNotIn(None, origworkerpids) + self.assertNotIn(None, finalworkerpids) + # Finally, check that the worker pids have changed + self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) + p.close() + p.join() + + def test_pool_worker_lifetime_early_close(self): + # Issue #10332: closing a pool whose workers have limited lifetimes + # before all the tasks completed would make join() hang. 
+ p = multiprocessing.Pool(3, maxtasksperchild=1) + results = [] + for i in range(6): + results.append(p.apply_async(sqr, (i, 0.3))) + p.close() + p.join() + # check the results + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + + def test_pool_maxtasksperchild_invalid(self): + for value in [0, -1, 0.5, "12"]: + with self.assertRaises(ValueError): + multiprocessing.Pool(3, maxtasksperchild=value) + + def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): + # tests cases against bpo-38744 and bpo-39360 + cmd = '''if 1: + from multiprocess import Pool + problem = None + class A: + def __init__(self): + self.pool = Pool(processes=1) + def test(): + global problem + problem = A() + problem.pool.map(float, tuple(range(10))) + if __name__ == "__main__": + test() + ''' + rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) + self.assertEqual(rc, 0) + +# +# Test of creating a customized manager class +# + +from multiprocess.managers import BaseManager, BaseProxy, RemoteError + +class FooBar(object): + def f(self): + return 'f()' + def g(self): + raise ValueError + def _h(self): + return '_h()' + +def baz(): + for i in range(10): + yield i*i + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__',) + def __iter__(self): + return self + def __next__(self): + return self._callmethod('__next__') + +class MyManager(BaseManager): + pass + +MyManager.register('Foo', callable=FooBar) +MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) +MyManager.register('baz', callable=baz, proxytype=IteratorProxy) + + +class _TestMyManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_mymanager(self): + manager = MyManager() + manager.start() + self.common(manager) + manager.shutdown() + + # bpo-30356: BaseManager._finalize_manager() sends SIGTERM + # to the manager process if it takes longer than 1 second to stop, + # which happens on slow buildbots. + self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) + + def test_mymanager_context(self): + with MyManager() as manager: + self.common(manager) + # bpo-30356: BaseManager._finalize_manager() sends SIGTERM + # to the manager process if it takes longer than 1 second to stop, + # which happens on slow buildbots. 
+ self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) + + def test_mymanager_context_prestarted(self): + manager = MyManager() + manager.start() + with manager: + self.common(manager) + self.assertEqual(manager._process.exitcode, 0) + + def common(self, manager): + foo = manager.Foo() + bar = manager.Bar() + baz = manager.baz() + + foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] + bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] + + self.assertEqual(foo_methods, ['f', 'g']) + self.assertEqual(bar_methods, ['f', '_h']) + + self.assertEqual(foo.f(), 'f()') + self.assertRaises(ValueError, foo.g) + self.assertEqual(foo._callmethod('f'), 'f()') + self.assertRaises(RemoteError, foo._callmethod, '_h') + + self.assertEqual(bar.f(), 'f()') + self.assertEqual(bar._h(), '_h()') + self.assertEqual(bar._callmethod('f'), 'f()') + self.assertEqual(bar._callmethod('_h'), '_h()') + + self.assertEqual(list(baz), [i*i for i in range(10)]) + + +# +# Test of connecting to a remote server and using xmlrpclib for serialization +# + +_queue = pyqueue.Queue() +def get_queue(): + return _queue + +class QueueManager(BaseManager): + '''manager class used by server process''' +QueueManager.register('get_queue', callable=get_queue) + +class QueueManager2(BaseManager): + '''manager class which specifies the same interface as QueueManager''' +QueueManager2.register('get_queue') + + +SERIALIZER = 'xmlrpclib' + +class _TestRemoteManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + values = ['hello world', None, True, 2.25, + 'hall\xe5 v\xe4rlden', + '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', + b'hall\xe5 v\xe4rlden', + ] + result = values[:] + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager2( + address=address, authkey=authkey, serializer=SERIALIZER + ) + manager.connect() + queue = manager.get_queue() + # Note that xmlrpclib will deserialize the object as a list, not a tuple + queue.put(tuple(cls.values)) + + def test_remote(self): + authkey = os.urandom(32) + + manager = QueueManager( + address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER + ) + manager.start() + self.addCleanup(manager.shutdown) + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.daemon = True + p.start() + + manager2 = QueueManager2( + address=manager.address, authkey=authkey, serializer=SERIALIZER + ) + manager2.connect() + queue = manager2.get_queue() + + self.assertEqual(queue.get(), self.result) + + # Because we are using xmlrpclib for serialization instead of + # pickle, this will cause a serialization error. + self.assertRaises(Exception, queue.put, time.sleep) + + # Make queue finalizer run before the server is stopped + del queue + + +@hashlib_helper.requires_hashdigest('md5') +class _TestManagerRestart(BaseTestCase): + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager( + address=address, authkey=authkey, serializer=SERIALIZER) + manager.connect() + queue = manager.get_queue() + queue.put('hello world') + + def test_rapid_restart(self): + authkey = os.urandom(32) + manager = QueueManager( + address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) + try: + srvr = manager.get_server() + addr = srvr.address + # Close the connection.Listener socket which gets opened as part + # of manager.get_server(). It's not needed for the test.
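+ # Closing the listener frees addr right away; the restart logic below then checks
+ # that a second manager can bind that same address, retrying once if the old
+ # socket still lingers (EADDRINUSE).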
+ srvr.listener.close() + manager.start() + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.start() + p.join() + queue = manager.get_queue() + self.assertEqual(queue.get(), 'hello world') + del queue + finally: + if hasattr(manager, "shutdown"): + manager.shutdown() + + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + try: + manager.start() + self.addCleanup(manager.shutdown) + except OSError as e: + if e.errno != errno.EADDRINUSE: + raise + # Retry after some time, in case the old socket was lingering + # (sporadic failure on buildbots) + time.sleep(1.0) + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + if hasattr(manager, "shutdown"): + self.addCleanup(manager.shutdown) + +# +# +# + +SENTINEL = latin('') + +class _TestConnection(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _echo(cls, conn): + for msg in iter(conn.recv_bytes, SENTINEL): + conn.send_bytes(msg) + conn.close() + + def test_connection(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + + seq = [1, 2.25, None] + msg = latin('hello world') + longmsg = msg * 10 + arr = array.array('i', list(range(4))) + + if self.TYPE == 'processes': + self.assertEqual(type(conn.fileno()), int) + + self.assertEqual(conn.send(seq), None) + self.assertEqual(conn.recv(), seq) + + self.assertEqual(conn.send_bytes(msg), None) + self.assertEqual(conn.recv_bytes(), msg) + + if self.TYPE == 'processes': + buffer = array.array('i', [0]*10) + expected = list(arr) + [0] * (10 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = array.array('i', [0]*10) + expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = bytearray(latin(' ' * 40)) + self.assertEqual(conn.send_bytes(longmsg), None) + try: + res = conn.recv_bytes_into(buffer) + except multiprocessing.BufferTooShort as e: + self.assertEqual(e.args, (longmsg,)) + else: + self.fail('expected BufferTooShort, got %s' % res) + + poll = TimingWrapper(conn.poll) + + self.assertEqual(poll(), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(-1), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(TIMEOUT1), False) + self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) + + conn.send(None) + time.sleep(.1) + + self.assertEqual(poll(TIMEOUT1), True) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(conn.recv(), None) + + really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb + conn.send_bytes(really_big_msg) + self.assertEqual(conn.recv_bytes(), really_big_msg) + + conn.send_bytes(SENTINEL) # tell child to quit + child_conn.close() + + if self.TYPE == 'processes': + self.assertEqual(conn.readable, True) + self.assertEqual(conn.writable, True) + self.assertRaises(EOFError, conn.recv) + self.assertRaises(EOFError, conn.recv_bytes) + + p.join() + + def test_duplex_false(self): + reader, writer = self.Pipe(duplex=False) + self.assertEqual(writer.send(1), None) + self.assertEqual(reader.recv(), 1) + if self.TYPE == 'processes': + self.assertEqual(reader.readable, True) + 
self.assertEqual(reader.writable, False) + self.assertEqual(writer.readable, False) + self.assertEqual(writer.writable, True) + self.assertRaises(OSError, reader.send, 2) + self.assertRaises(OSError, writer.recv) + self.assertRaises(OSError, writer.poll) + + def test_spawn_close(self): + # We test that a pipe connection can be closed by parent + # process immediately after child is spawned. On Windows this + # would have sometimes failed on old versions because + # child_conn would be closed before the child got a chance to + # duplicate it. + conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() # this might complete before child initializes + + msg = latin('hello') + conn.send_bytes(msg) + self.assertEqual(conn.recv_bytes(), msg) + + conn.send_bytes(SENTINEL) + conn.close() + p.join() + + def test_sendbytes(self): + if self.TYPE != 'processes': + self.skipTest('test not appropriate for {}'.format(self.TYPE)) + + msg = latin('abcdefghijklmnopqrstuvwxyz') + a, b = self.Pipe() + + a.send_bytes(msg) + self.assertEqual(b.recv_bytes(), msg) + + a.send_bytes(msg, 5) + self.assertEqual(b.recv_bytes(), msg[5:]) + + a.send_bytes(msg, 7, 8) + self.assertEqual(b.recv_bytes(), msg[7:7+8]) + + a.send_bytes(msg, 26) + self.assertEqual(b.recv_bytes(), latin('')) + + a.send_bytes(msg, 26, 0) + self.assertEqual(b.recv_bytes(), latin('')) + + self.assertRaises(ValueError, a.send_bytes, msg, 27) + + self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) + + self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) + + self.assertRaises(ValueError, a.send_bytes, msg, -1) + + self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) + + @classmethod + def _is_fd_assigned(cls, fd): + try: + os.fstat(fd) + except OSError as e: + if e.errno == errno.EBADF: + return False + raise + else: + return True + + @classmethod + def _writefd(cls, conn, data, create_dummy_fds=False): + if create_dummy_fds: + for i in range(0, 256): + if not cls._is_fd_assigned(i): + os.dup2(conn.fileno(), i) + fd = reduction.recv_handle(conn) + if msvcrt: + fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) + os.write(fd, data) + os.close(fd) + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + def test_fd_transfer(self): + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"foo")) + p.daemon = True + p.start() + self.addCleanup(os_helper.unlink, os_helper.TESTFN) + with open(os_helper.TESTFN, "wb") as f: + fd = f.fileno() + if msvcrt: + fd = msvcrt.get_osfhandle(fd) + reduction.send_handle(conn, fd, p.pid) + p.join() + with open(os_helper.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"foo") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", + "test semantics don't make sense on Windows") + @unittest.skipIf(MAXFD <= 256, + "largest assignable fd number is too small") + @unittest.skipUnless(hasattr(os, "dup2"), + "test needs os.dup2()") + def test_large_fd_transfer(self): + # With fd > 256 (issue #11657) + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) + p.daemon = True + p.start() + self.addCleanup(os_helper.unlink, os_helper.TESTFN) + with open(os_helper.TESTFN, "wb") as f: + 
fd = f.fileno() + for newfd in range(256, MAXFD): + if not self._is_fd_assigned(newfd): + break + else: + self.fail("could not find an unassigned large file descriptor") + os.dup2(fd, newfd) + try: + reduction.send_handle(conn, newfd, p.pid) + finally: + os.close(newfd) + p.join() + with open(os_helper.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"bar") + + @classmethod + def _send_data_without_fd(cls, conn): + os.write(conn.fileno(), b"\0") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") + def test_missing_fd_transfer(self): + # Check that an exception is raised when the received data is not + # accompanied by a file descriptor in ancillary data. + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) + p.daemon = True + p.start() + self.assertRaises(RuntimeError, reduction.recv_handle, conn) + p.join() + + def test_context(self): + a, b = self.Pipe() + + with a, b: + a.send(1729) + self.assertEqual(b.recv(), 1729) + if self.TYPE == 'processes': + self.assertFalse(a.closed) + self.assertFalse(b.closed) + + if self.TYPE == 'processes': + self.assertTrue(a.closed) + self.assertTrue(b.closed) + self.assertRaises(OSError, a.recv) + self.assertRaises(OSError, b.recv) + +class _TestListener(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_multiple_bind(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + self.addCleanup(l.close) + self.assertRaises(OSError, self.connection.Listener, + l.address, family) + + def test_context(self): + with self.connection.Listener() as l: + with self.connection.Client(l.address) as c: + with l.accept() as d: + c.send(1729) + self.assertEqual(d.recv(), 1729) + + if self.TYPE == 'processes': + self.assertRaises(OSError, l.accept) + + @unittest.skipUnless(util.abstract_sockets_supported, + "test needs abstract socket support") + def test_abstract_socket(self): + with self.connection.Listener("\0something") as listener: + with self.connection.Client(listener.address) as client: + with listener.accept() as d: + client.send(1729) + self.assertEqual(d.recv(), 1729) + + if self.TYPE == 'processes': + self.assertRaises(OSError, listener.accept) + + +class _TestListenerClient(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _test(cls, address): + conn = cls.connection.Client(address) + conn.send('hello') + conn.close() + + def test_listener_client(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + p.join() + l.close() + + def test_issue14725(self): + l = self.connection.Listener() + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + time.sleep(1) + # On Windows the client process should by now have connected, + # written data and closed the pipe handle. This causes + # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue + # 14725.
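+ # accept() should nonetheless succeed and return a connection that still holds
+ # the 'hello' the client already wrote.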
+ conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + conn.close() + p.join() + l.close() + + def test_issue16955(self): + for fam in self.connection.families: + l = self.connection.Listener(family=fam) + c = self.connection.Client(l.address) + a = l.accept() + a.send_bytes(b"hello") + self.assertTrue(c.poll(1)) + a.close() + c.close() + l.close() + +class _TestPoll(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_empty_string(self): + a, b = self.Pipe() + self.assertEqual(a.poll(), False) + b.send_bytes(b'') + self.assertEqual(a.poll(), True) + self.assertEqual(a.poll(), True) + + @classmethod + def _child_strings(cls, conn, strings): + for s in strings: + time.sleep(0.1) + conn.send_bytes(s) + conn.close() + + def test_strings(self): + strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') + a, b = self.Pipe() + p = self.Process(target=self._child_strings, args=(b, strings)) + p.start() + + for s in strings: + for i in range(200): + if a.poll(0.01): + break + x = a.recv_bytes() + self.assertEqual(s, x) + + p.join() + + @classmethod + def _child_boundaries(cls, r): + # Polling may "pull" a message in to the child process, but we + # don't want it to pull only part of a message, as that would + # corrupt the pipe for any other processes which might later + # read from it. + r.poll(5) + + def test_boundaries(self): + r, w = self.Pipe(False) + p = self.Process(target=self._child_boundaries, args=(r,)) + p.start() + time.sleep(2) + L = [b"first", b"second"] + for obj in L: + w.send_bytes(obj) + w.close() + p.join() + self.assertIn(r.recv_bytes(), L) + + @classmethod + def _child_dont_merge(cls, b): + b.send_bytes(b'a') + b.send_bytes(b'b') + b.send_bytes(b'cd') + + def test_dont_merge(self): + a, b = self.Pipe() + self.assertEqual(a.poll(0.0), False) + self.assertEqual(a.poll(0.1), False) + + p = self.Process(target=self._child_dont_merge, args=(b,)) + p.start() + + self.assertEqual(a.recv_bytes(), b'a') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.recv_bytes(), b'b') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(0.0), True) + self.assertEqual(a.recv_bytes(), b'cd') + + p.join() + +# +# Test of sending connection and socket objects between processes +# + +@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") +@hashlib_helper.requires_hashdigest('md5') +class _TestPicklingConnections(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def tearDownClass(cls): + from multiprocess import resource_sharer + resource_sharer.stop(timeout=support.LONG_TIMEOUT) + + @classmethod + def _listener(cls, conn, families): + for fam in families: + l = cls.connection.Listener(family=fam) + conn.send(l.address) + new_conn = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + l = socket.create_server((socket_helper.HOST, 0)) + conn.send(l.getsockname()) + new_conn, addr = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + conn.recv() + + @classmethod + def _remote(cls, conn): + for (address, msg) in iter(conn.recv, None): + client = cls.connection.Client(address) + client.send(msg.upper()) + client.close() + + address, msg = conn.recv() + client = socket.socket() + client.connect(address) + client.sendall(msg.upper()) + client.close() + + conn.close() + + def test_pickling(self): + families = self.connection.families + + lconn, lconn0 = self.Pipe() + lp = self.Process(target=self._listener, 
args=(lconn0, families)) + lp.daemon = True + lp.start() + lconn0.close() + + rconn, rconn0 = self.Pipe() + rp = self.Process(target=self._remote, args=(rconn0,)) + rp.daemon = True + rp.start() + rconn0.close() + + for fam in families: + msg = ('This connection uses family %s' % fam).encode('ascii') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + self.assertEqual(new_conn.recv(), msg.upper()) + + rconn.send(None) + + msg = latin('This connection uses a normal socket') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + buf = [] + while True: + s = new_conn.recv(100) + if not s: + break + buf.append(s) + buf = b''.join(buf) + self.assertEqual(buf, msg.upper()) + new_conn.close() + + lconn.send(None) + + rconn.close() + lconn.close() + + lp.join() + rp.join() + + @classmethod + def child_access(cls, conn): + w = conn.recv() + w.send('all is well') + w.close() + + r = conn.recv() + msg = r.recv() + conn.send(msg*2) + + conn.close() + + def test_access(self): + # On Windows, if we do not specify a destination pid when + # using DupHandle then we need to be careful to use the + # correct access flags for DuplicateHandle(), or else + # DupHandle.detach() will raise PermissionError. For example, + # for a read only pipe handle we should use + # access=FILE_GENERIC_READ. (Unfortunately + # DUPLICATE_SAME_ACCESS does not work.) + conn, child_conn = self.Pipe() + p = self.Process(target=self.child_access, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + + r, w = self.Pipe(duplex=False) + conn.send(w) + w.close() + self.assertEqual(r.recv(), 'all is well') + r.close() + + r, w = self.Pipe(duplex=False) + conn.send(r) + r.close() + w.send('foobar') + w.close() + self.assertEqual(conn.recv(), 'foobar'*2) + + p.join() + +# +# +# + +class _TestHeap(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + super().setUp() + # Make pristine heap for these tests + self.old_heap = multiprocessing.heap.BufferWrapper._heap + multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() + + def tearDown(self): + multiprocessing.heap.BufferWrapper._heap = self.old_heap + super().tearDown() + + def test_heap(self): + iterations = 5000 + maxblocks = 50 + blocks = [] + + # get the heap object + heap = multiprocessing.heap.BufferWrapper._heap + heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 + + # create and destroy lots of blocks of different sizes + for i in range(iterations): + size = int(random.lognormvariate(0, 1) * 1000) + b = multiprocessing.heap.BufferWrapper(size) + blocks.append(b) + if len(blocks) > maxblocks: + i = random.randrange(maxblocks) + del blocks[i] + del b + + # verify the state of the heap + with heap._lock: + all = [] + free = 0 + occupied = 0 + for L in list(heap._len_to_seq.values()): + # count all free blocks in arenas + for arena, start, stop in L: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'free')) + free += (stop-start) + for arena, arena_blocks in heap._allocated_blocks.items(): + # count all allocated blocks in arenas + for start, stop in arena_blocks: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'occupied')) + occupied += (stop-start) + + self.assertEqual(free + occupied, + sum(arena.size for arena in heap._arenas)) + + all.sort() + + for i in range(len(all)-1): + (arena, start, stop) = all[i][:3] + (narena, nstart, nstop) = all[i+1][:3] + if arena != narena: + # Two different arenas + self.assertEqual(stop, 
heap._arenas[arena].size) # last block + self.assertEqual(nstart, 0) # first block + else: + # Same arena: two adjacent blocks + self.assertEqual(stop, nstart) + + # test free'ing all blocks + random.shuffle(blocks) + while blocks: + blocks.pop() + + self.assertEqual(heap._n_frees, heap._n_mallocs) + self.assertEqual(len(heap._pending_free_blocks), 0) + self.assertEqual(len(heap._arenas), 0) + self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) + self.assertEqual(len(heap._len_to_seq), 0) + + def test_free_from_gc(self): + # Check that freeing of blocks by the garbage collector doesn't deadlock + # (issue #12352). + # Make sure the GC is enabled, and set lower collection thresholds to + # make collections more frequent (and increase the probability of + # deadlock). + if not gc.isenabled(): + gc.enable() + self.addCleanup(gc.disable) + thresholds = gc.get_threshold() + self.addCleanup(gc.set_threshold, *thresholds) + gc.set_threshold(10) + + # perform numerous block allocations, with cyclic references to make + # sure objects are collected asynchronously by the gc + for i in range(5000): + a = multiprocessing.heap.BufferWrapper(1) + b = multiprocessing.heap.BufferWrapper(1) + # circular references + a.buddy = b + b.buddy = a + +# +# +# + +class _Foo(Structure): + _fields_ = [ + ('x', c_int), + ('y', c_double), + ('z', c_longlong,) + ] + +class _TestSharedCTypes(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocess.sharedctypes") + + @classmethod + def _double(cls, x, y, z, foo, arr, string): + x.value *= 2 + y.value *= 2 + z.value *= 2 + foo.x *= 2 + foo.y *= 2 + string.value *= 2 + for i in range(len(arr)): + arr[i] *= 2 + + def test_sharedctypes(self, lock=False): + x = Value('i', 7, lock=lock) + y = Value(c_double, 1.0/3.0, lock=lock) + z = Value(c_longlong, 2 ** 33, lock=lock) + foo = Value(_Foo, 3, 2, lock=lock) + arr = self.Array('d', list(range(10)), lock=lock) + string = self.Array('c', 20, lock=lock) + string.value = latin('hello') + + p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(x.value, 14) + self.assertAlmostEqual(y.value, 2.0/3.0) + self.assertEqual(z.value, 2 ** 34) + self.assertEqual(foo.x, 6) + self.assertAlmostEqual(foo.y, 4.0) + for i in range(10): + self.assertAlmostEqual(arr[i], i*2) + self.assertEqual(string.value, latin('hellohello')) + + def test_synchronize(self): + self.test_sharedctypes(lock=True) + + def test_copy(self): + foo = _Foo(2, 5.0, 2 ** 33) + bar = copy(foo) + foo.x = 0 + foo.y = 0 + foo.z = 0 + self.assertEqual(bar.x, 2) + self.assertAlmostEqual(bar.y, 5.0) + self.assertEqual(bar.z, 2 ** 33) + + +@unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") +@hashlib_helper.requires_hashdigest('md5') +class _TestSharedMemory(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @staticmethod + def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): + if isinstance(shmem_name_or_obj, str): + local_sms = shared_memory.SharedMemory(shmem_name_or_obj) + else: + local_sms = shmem_name_or_obj + local_sms.buf[:len(binary_data)] = binary_data + local_sms.close() + + def _new_shm_name(self, prefix): + # Add a PID to the name of a POSIX shared memory object to allow + # running multiprocessing tests (test_multiprocessing_fork, + # test_multiprocessing_spawn, etc) in parallel. 
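+ # e.g. _new_shm_name('test01_tsmb') -> 'test01_tsmb4242' for a hypothetical PID of 4242.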
+ return prefix + str(os.getpid()) + + def test_shared_memory_basics(self): + name_tsmb = self._new_shm_name('test01_tsmb') + sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) + self.addCleanup(sms.unlink) + + # Verify attributes are readable. + self.assertEqual(sms.name, name_tsmb) + self.assertGreaterEqual(sms.size, 512) + self.assertGreaterEqual(len(sms.buf), sms.size) + + # Verify __repr__ + self.assertIn(sms.name, str(sms)) + self.assertIn(str(sms.size), str(sms)) + + # Modify contents of shared memory segment through memoryview. + sms.buf[0] = 42 + self.assertEqual(sms.buf[0], 42) + + # Attach to existing shared memory segment. + also_sms = shared_memory.SharedMemory(name_tsmb) + self.assertEqual(also_sms.buf[0], 42) + also_sms.close() + + # Attach to existing shared memory segment but specify a new size. + same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) + self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. + same_sms.close() + + # Creating a shared memory segment with a negative size + with self.assertRaises(ValueError): + shared_memory.SharedMemory(create=True, size=-2) + + # Attaching to a shared memory segment without a name + with self.assertRaises(ValueError): + shared_memory.SharedMemory(create=False) + + # Test that the shared memory segment is created properly + # when _make_filename returns an existing shared memory segment name + with unittest.mock.patch( + 'multiprocess.shared_memory._make_filename') as mock_make_filename: + + NAME_PREFIX = shared_memory._SHM_NAME_PREFIX + names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] + # Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_'; this is necessary + # because some POSIX-compliant systems require the name to start with '/' + names = [NAME_PREFIX + name for name in names] + + mock_make_filename.side_effect = names + shm1 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm1.unlink) + self.assertEqual(shm1._name, names[0]) + + mock_make_filename.side_effect = names + shm2 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm2.unlink) + self.assertEqual(shm2._name, names[1]) + + if shared_memory._USE_POSIX: + # Posix Shared Memory can only be unlinked once. Here we + # test an implementation detail that is not observed across + # all supported platforms (since WindowsNamedSharedMemory + # manages unlinking on its own and unlink() does nothing). + # True release of shared memory segment does not necessarily + # happen until process exits, depending on the OS platform. + name_dblunlink = self._new_shm_name('test01_dblunlink') + sms_uno = shared_memory.SharedMemory( + name_dblunlink, + create=True, + size=5000 + ) + with self.assertRaises(FileNotFoundError): + try: + self.assertGreaterEqual(sms_uno.size, 5000) + + sms_duo = shared_memory.SharedMemory(name_dblunlink) + sms_duo.unlink() # First shm_unlink() call. + sms_duo.close() + sms_uno.close() + + finally: + sms_uno.unlink() # A second shm_unlink() call is bad. + + with self.assertRaises(FileExistsError): + # Attempting to create a new shared memory segment with a + # name that is already in use triggers an exception. + there_can_only_be_one_sms = shared_memory.SharedMemory( + name_tsmb, + create=True, + size=512 + ) + + if shared_memory._USE_POSIX: + # Requesting creation of a shared memory segment with the option + # to attach to an existing segment, if that name is currently in + # use, should not trigger an exception.
+ # Note: Using a smaller size could possibly cause truncation of + # the existing segment but this is platform dependent. In the + # case of MacOS/darwin, requesting a smaller size is disallowed. + class OptionalAttachSharedMemory(shared_memory.SharedMemory): + _flags = os.O_CREAT | os.O_RDWR + ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) + self.assertEqual(ok_if_exists_sms.size, sms.size) + ok_if_exists_sms.close() + + # Attempting to attach to an existing shared memory segment when + # no segment exists with the supplied name triggers an exception. + with self.assertRaises(FileNotFoundError): + nonexisting_sms = shared_memory.SharedMemory('test01_notthere') + nonexisting_sms.unlink() # Error should occur on prior line. + + sms.close() + + @unittest.skipIf(True, "fails with dill >= 0.3.5") + def test_shared_memory_recreate(self): + # Test that the shared memory segment is created properly + # when _make_filename returns an existing shared memory segment name + with unittest.mock.patch( + 'multiprocess.shared_memory._make_filename') as mock_make_filename: + + NAME_PREFIX = shared_memory._SHM_NAME_PREFIX + names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] + # Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_'; this is necessary + # because some POSIX-compliant systems require the name to start with '/' + names = [NAME_PREFIX + name for name in names] + + mock_make_filename.side_effect = names + shm1 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm1.unlink) + self.assertEqual(shm1._name, names[0]) + + mock_make_filename.side_effect = names + shm2 = shared_memory.SharedMemory(create=True, size=1) + self.addCleanup(shm2.unlink) + self.assertEqual(shm2._name, names[1]) + + def test_invalid_shared_memory_creation(self): + # Test creating a shared memory segment with negative size + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True, size=-1) + + # Test creating a shared memory segment with size 0 + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True, size=0) + + # Test creating a shared memory segment without size argument + with self.assertRaises(ValueError): + sms_invalid = shared_memory.SharedMemory(create=True) + + def test_shared_memory_pickle_unpickle(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sms = shared_memory.SharedMemory(create=True, size=512) + self.addCleanup(sms.unlink) + sms.buf[0:6] = b'pickle' + + # Test pickling + pickled_sms = pickle.dumps(sms, protocol=proto) + + # Test unpickling + sms2 = pickle.loads(pickled_sms) + self.assertIsInstance(sms2, shared_memory.SharedMemory) + self.assertEqual(sms.name, sms2.name) + self.assertEqual(bytes(sms.buf[0:6]), b'pickle') + self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') + + # Test that unpickled version is still the same SharedMemory + sms.buf[0:6] = b'newval' + self.assertEqual(bytes(sms.buf[0:6]), b'newval') + self.assertEqual(bytes(sms2.buf[0:6]), b'newval') + + sms2.buf[0:6] = b'oldval' + self.assertEqual(bytes(sms.buf[0:6]), b'oldval') + self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') + + def test_shared_memory_pickle_unpickle_dead_object(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sms = shared_memory.SharedMemory(create=True, size=512) + sms.buf[0:6] = b'pickle' + pickled_sms = pickle.dumps(sms, protocol=proto) + + # Now, we are going to kill the original object.
+ # So, the unpickled one won't be able to attach to it. + sms.close() + sms.unlink() + + with self.assertRaises(FileNotFoundError): + pickle.loads(pickled_sms) + + def test_shared_memory_across_processes(self): + # bpo-40135: don't define the shared memory block's name, to avoid + # failures when multiprocessing tests run in parallel. + sms = shared_memory.SharedMemory(create=True, size=512) + self.addCleanup(sms.unlink) + + # Verify remote attachment to existing block by name is working. + p = self.Process( + target=self._attach_existing_shmem_then_write, + args=(sms.name, b'howdy') + ) + p.daemon = True + p.start() + p.join() + self.assertEqual(bytes(sms.buf[:5]), b'howdy') + + # Verify pickling of SharedMemory instance also works. + p = self.Process( + target=self._attach_existing_shmem_then_write, + args=(sms, b'HELLO') + ) + p.daemon = True + p.start() + p.join() + self.assertEqual(bytes(sms.buf[:5]), b'HELLO') + + sms.close() + + @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") + def test_shared_memory_SharedMemoryServer_ignores_sigint(self): + # bpo-36368: protect SharedMemoryManager server process from + # KeyboardInterrupt signals. + smm = multiprocessing.managers.SharedMemoryManager() + smm.start() + + # make sure the manager works properly at the beginning + sl = smm.ShareableList(range(10)) + + # the manager's server should ignore KeyboardInterrupt signals, and + # maintain its connection with the current process, and succeed when + # asked to deliver memory segments. + os.kill(smm._process.pid, signal.SIGINT) + + sl2 = smm.ShareableList(range(10)) + + # test that the custom signal handler registered in the Manager does + # not affect signal handling in the parent process. + with self.assertRaises(KeyboardInterrupt): + os.kill(os.getpid(), signal.SIGINT) + + smm.shutdown() + + @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") + def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): + # bpo-36867: test that a SharedMemoryManager uses the + # same resource_tracker process as its parent. + cmd = '''if 1: + from multiprocessing.managers import SharedMemoryManager + + + smm = SharedMemoryManager() + smm.start() + sl = smm.ShareableList(range(10)) + smm.shutdown() + ''' #XXX: ensure correct resource_tracker + rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) + + # Before bpo-36867 was fixed, a SharedMemoryManager not using the same + # resource_tracker process as its parent would make the parent's + # tracker complain about sl being leaked even though smm.shutdown() + # properly released sl. + self.assertFalse(err) + + def test_shared_memory_SharedMemoryManager_basics(self): + smm1 = multiprocessing.managers.SharedMemoryManager() + with self.assertRaises(ValueError): + smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started + smm1.start() + lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] + lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] + doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) + self.assertEqual(len(doppleganger_list0), 5) + doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) + self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) + held_name = lom[0].name + smm1.shutdown() + if sys.platform != "win32": + # Calls to unlink() have no effect on Windows platform; shared + # memory will only be released once final process exits.
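+ # On POSIX, shutdown() above should have unlinked every segment the manager
+ # created, so attaching to held_name must now fail.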
+ with self.assertRaises(FileNotFoundError): + # No longer there to be attached to again. + absent_shm = shared_memory.SharedMemory(name=held_name) + + with multiprocessing.managers.SharedMemoryManager() as smm2: + sl = smm2.ShareableList("howdy") + shm = smm2.SharedMemory(size=128) + held_name = sl.shm.name + if sys.platform != "win32": + with self.assertRaises(FileNotFoundError): + # No longer there to be attached to again. + absent_sl = shared_memory.ShareableList(name=held_name) + + + def test_shared_memory_ShareableList_basics(self): + sl = shared_memory.ShareableList( + ['howdy', b'HoWdY', -273.154, 100, None, True, 42] + ) + self.addCleanup(sl.shm.unlink) + + # Verify __repr__ + self.assertIn(sl.shm.name, str(sl)) + self.assertIn(str(list(sl)), str(sl)) + + # Index Out of Range (get) + with self.assertRaises(IndexError): + sl[7] + + # Index Out of Range (set) + with self.assertRaises(IndexError): + sl[7] = 2 + + # Assign value without format change (str -> str) + current_format = sl._get_packing_format(0) + sl[0] = 'howdy' + self.assertEqual(current_format, sl._get_packing_format(0)) + + # Verify attributes are readable. + self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') + + # Exercise len(). + self.assertEqual(len(sl), 7) + + # Exercise index(). + with warnings.catch_warnings(): + # Suppress BytesWarning when comparing against b'HoWdY'. + warnings.simplefilter('ignore') + with self.assertRaises(ValueError): + sl.index('100') + self.assertEqual(sl.index(100), 3) + + # Exercise retrieving individual values. + self.assertEqual(sl[0], 'howdy') + self.assertEqual(sl[-2], True) + + # Exercise iterability. + self.assertEqual( + tuple(sl), + ('howdy', b'HoWdY', -273.154, 100, None, True, 42) + ) + + # Exercise modifying individual values. + sl[3] = 42 + self.assertEqual(sl[3], 42) + sl[4] = 'some' # Change type at a given position. + self.assertEqual(sl[4], 'some') + self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[4] = 'far too many' + self.assertEqual(sl[4], 'some') + sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data + self.assertEqual(sl[0], 'encodés') + self.assertEqual(sl[1], b'HoWdY') # no spillage + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data + self.assertEqual(sl[1], b'HoWdY') + with self.assertRaisesRegex(ValueError, + "exceeds available storage"): + sl[1] = b'123456789' + self.assertEqual(sl[1], b'HoWdY') + + # Exercise count(). + with warnings.catch_warnings(): + # Suppress BytesWarning when comparing against b'HoWdY'. + warnings.simplefilter('ignore') + self.assertEqual(sl.count(42), 2) + self.assertEqual(sl.count(b'HoWdY'), 1) + self.assertEqual(sl.count(b'adios'), 0) + + # Exercise creating a duplicate. + name_duplicate = self._new_shm_name('test03_duplicate') + sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) + try: + self.assertNotEqual(sl.shm.name, sl_copy.shm.name) + self.assertEqual(name_duplicate, sl_copy.shm.name) + self.assertEqual(list(sl), list(sl_copy)) + self.assertEqual(sl.format, sl_copy.format) + sl_copy[-1] = 77 + self.assertEqual(sl_copy[-1], 77) + self.assertNotEqual(sl[-1], 77) + sl_copy.shm.close() + finally: + sl_copy.shm.unlink() + + # Obtain a second handle on the same ShareableList. 
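+ # Both handles map the same underlying block, so a write through sl_tethered
+ # should be visible through sl (asserted below).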
+ sl_tethered = shared_memory.ShareableList(name=sl.shm.name) + self.assertEqual(sl.shm.name, sl_tethered.shm.name) + sl_tethered[-1] = 880 + self.assertEqual(sl[-1], 880) + sl_tethered.shm.close() + + sl.shm.close() + + # Exercise creating an empty ShareableList. + empty_sl = shared_memory.ShareableList() + try: + self.assertEqual(len(empty_sl), 0) + self.assertEqual(empty_sl.format, '') + self.assertEqual(empty_sl.count('any'), 0) + with self.assertRaises(ValueError): + empty_sl.index(None) + empty_sl.shm.close() + finally: + empty_sl.shm.unlink() + + def test_shared_memory_ShareableList_pickling(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sl = shared_memory.ShareableList(range(10)) + self.addCleanup(sl.shm.unlink) + + serialized_sl = pickle.dumps(sl, protocol=proto) + deserialized_sl = pickle.loads(serialized_sl) + self.assertIsInstance( + deserialized_sl, shared_memory.ShareableList) + self.assertEqual(deserialized_sl[-1], 9) + self.assertIsNot(sl, deserialized_sl) + + deserialized_sl[4] = "changed" + self.assertEqual(sl[4], "changed") + sl[3] = "newvalue" + self.assertEqual(deserialized_sl[3], "newvalue") + + larger_sl = shared_memory.ShareableList(range(400)) + self.addCleanup(larger_sl.shm.unlink) + serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) + self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) + larger_sl.shm.close() + + deserialized_sl.shm.close() + sl.shm.close() + + def test_shared_memory_ShareableList_pickling_dead_object(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto): + sl = shared_memory.ShareableList(range(10)) + serialized_sl = pickle.dumps(sl, protocol=proto) + + # Now, we are going to kill the original object. + # So, unpickled one won't be able to attach to it. + sl.shm.close() + sl.shm.unlink() + + with self.assertRaises(FileNotFoundError): + pickle.loads(serialized_sl) + + def test_shared_memory_cleaned_after_process_termination(self): + cmd = '''if 1: + import os, time, sys + from multiprocessing import shared_memory + + # Create a shared_memory segment, and send the segment name + sm = shared_memory.SharedMemory(create=True, size=10) + sys.stdout.write(sm.name + '\\n') + sys.stdout.flush() + time.sleep(100) + ''' + with subprocess.Popen([sys.executable, '-E', '-c', cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as p: + name = p.stdout.readline().strip().decode() + + # killing abruptly processes holding reference to a shared memory + # segment should not leak the given memory segment. + p.terminate() + p.wait() + + deadline = time.monotonic() + support.LONG_TIMEOUT + t = 0.1 + while time.monotonic() < deadline: + time.sleep(t) + t = min(t*2, 5) + try: + smm = shared_memory.SharedMemory(name, create=False) + except FileNotFoundError: + break + else: + raise AssertionError("A SharedMemory segment was leaked after" + " a process was abruptly terminated.") + + if os.name == 'posix': + # Without this line it was raising warnings like: + # UserWarning: resource_tracker: + # There appear to be 1 leaked shared_memory + # objects to clean up at shutdown + # See: https://bugs.python.org/issue45209 + resource_tracker.unregister(f"/{name}", "shared_memory") + + # A warning was emitted by the subprocess' own + # resource_tracker (on Windows, shared memory segments + # are released automatically by the OS). 
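+ # The subprocess' tracker registered the segment at creation and, finding it
+ # still registered at shutdown, warns and cleans it up; the assertion below
+ # matches that warning text on the captured stderr.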
+ err = p.stderr.read().decode() + self.assertIn( + "resource_tracker: There appear to be 1 leaked " + "shared_memory objects to clean up at shutdown", err) + +# +# Test to verify that `Finalize` works. +# + +class _TestFinalize(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + self.registry_backup = util._finalizer_registry.copy() + util._finalizer_registry.clear() + + def tearDown(self): + gc.collect() # For PyPy or other GCs. + self.assertFalse(util._finalizer_registry) + util._finalizer_registry.update(self.registry_backup) + + @classmethod + def _test_finalize(cls, conn): + class Foo(object): + pass + + a = Foo() + util.Finalize(a, conn.send, args=('a',)) + del a # triggers callback for a + gc.collect() # For PyPy or other GCs. + + b = Foo() + close_b = util.Finalize(b, conn.send, args=('b',)) + close_b() # triggers callback for b + close_b() # does nothing because callback has already been called + del b # does nothing because callback has already been called + gc.collect() # For PyPy or other GCs. + + c = Foo() + util.Finalize(c, conn.send, args=('c',)) + + d10 = Foo() + util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) + + d01 = Foo() + util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) + d02 = Foo() + util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) + d03 = Foo() + util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) + + util.Finalize(None, conn.send, args=('e',), exitpriority=-10) + + util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) + + # call multiprocessing's cleanup function then exit process without + # garbage collecting locals + util._exit_function() + conn.close() + os._exit(0) + + def test_finalize(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._test_finalize, args=(child_conn,)) + p.daemon = True + p.start() + p.join() + + result = [obj for obj in iter(conn.recv, 'STOP')] + self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) + + def test_thread_safety(self): + # bpo-24484: _run_finalizers() should be thread-safe + def cb(): + pass + + class Foo(object): + def __init__(self): + self.ref = self # create reference cycle + # insert finalizer at random key + util.Finalize(self, cb, exitpriority=random.randint(1, 100)) + + finish = False + exc = None + + def run_finalizers(): + nonlocal exc + while not finish: + time.sleep(random.random() * 1e-1) + try: + # A GC run will eventually happen during this, + # collecting stale Foo's and mutating the registry + util._run_finalizers() + except Exception as e: + exc = e + + def make_finalizers(): + nonlocal exc + d = {} + while not finish: + try: + # Old Foo's get gradually replaced and later + # collected by the GC (because of the cyclic ref) + d[random.getrandbits(5)] = {Foo() for i in range(10)} + except Exception as e: + exc = e + d.clear() + + old_interval = sys.getswitchinterval() + old_threshold = gc.get_threshold() + try: + sys.setswitchinterval(1e-6) + gc.set_threshold(5, 5, 5) + threads = [threading.Thread(target=run_finalizers), + threading.Thread(target=make_finalizers)] + with threading_helper.start_threads(threads): + time.sleep(4.0) # Wait a bit to trigger race condition + finish = True + if exc is not None: + raise exc + finally: + sys.setswitchinterval(old_interval) + gc.set_threshold(*old_threshold) + gc.collect() # Collect remaining Foo's + + +# +# Test that from ... 
import * works for each module +# + +class _TestImportStar(unittest.TestCase): + + def get_module_names(self): + import glob + folder = os.path.dirname(multiprocessing.__file__) + pattern = os.path.join(glob.escape(folder), '*.py') + files = glob.glob(pattern) + modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] + modules = ['multiprocess.' + m for m in modules] + modules.remove('multiprocess.__init__') + modules.append('multiprocess') + return modules + + def test_import(self): + modules = self.get_module_names() + if sys.platform == 'win32': + modules.remove('multiprocess.popen_fork') + modules.remove('multiprocess.popen_forkserver') + modules.remove('multiprocess.popen_spawn_posix') + else: + modules.remove('multiprocess.popen_spawn_win32') + if not HAS_REDUCTION: + modules.remove('multiprocess.popen_forkserver') + + if c_int is None: + # This module requires _ctypes + modules.remove('multiprocess.sharedctypes') + + for name in modules: + __import__(name) + mod = sys.modules[name] + self.assertTrue(hasattr(mod, '__all__'), name) + + for attr in mod.__all__: + self.assertTrue( + hasattr(mod, attr), + '%r does not have attribute %r' % (mod, attr) + ) + +# +# Quick test that logging works -- does not test logging output +# + +class _TestLogging(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_enable_logging(self): + logger = multiprocessing.get_logger() + logger.setLevel(util.SUBWARNING) + self.assertTrue(logger is not None) + logger.debug('this will not be printed') + logger.info('nor will this') + logger.setLevel(LOG_LEVEL) + + @classmethod + def _test_level(cls, conn): + logger = multiprocessing.get_logger() + conn.send(logger.getEffectiveLevel()) + + def test_level(self): + LEVEL1 = 32 + LEVEL2 = 37 + + logger = multiprocessing.get_logger() + root_logger = logging.getLogger() + root_level = root_logger.level + + reader, writer = multiprocessing.Pipe(duplex=False) + + logger.setLevel(LEVEL1) + p = self.Process(target=self._test_level, args=(writer,)) + p.start() + self.assertEqual(LEVEL1, reader.recv()) + p.join() + p.close() + + logger.setLevel(logging.NOTSET) + root_logger.setLevel(LEVEL2) + p = self.Process(target=self._test_level, args=(writer,)) + p.start() + self.assertEqual(LEVEL2, reader.recv()) + p.join() + p.close() + + root_logger.setLevel(root_level) + logger.setLevel(level=LOG_LEVEL) + + +# class _TestLoggingProcessName(BaseTestCase): +# +# def handle(self, record): +# assert record.processName == multiprocessing.current_process().name +# self.__handled = True +# +# def test_logging(self): +# handler = logging.Handler() +# handler.handle = self.handle +# self.__handled = False +# # Bypass getLogger() and side-effects +# logger = logging.getLoggerClass()( +# 'multiprocessing.test.TestLoggingProcessName') +# logger.addHandler(handler) +# logger.propagate = False +# +# logger.warn('foo') +# assert self.__handled + +# +# Check that Process.join() retries if os.waitpid() fails with EINTR +# + +class _TestPollEintr(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def _killer(cls, pid): + time.sleep(0.1) + os.kill(pid, signal.SIGUSR1) + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_poll_eintr(self): + got_signal = [False] + def record(*args): + got_signal[0] = True + pid = os.getpid() + oldhandler = signal.signal(signal.SIGUSR1, record) + try: + killer = self.Process(target=self._killer, args=(pid,)) + killer.start() + try: + p = self.Process(target=time.sleep, args=(2,)) + p.start() + p.join() 
+ finally: + killer.join() + self.assertTrue(got_signal[0]) + self.assertEqual(p.exitcode, 0) + finally: + signal.signal(signal.SIGUSR1, oldhandler) + +# +# Test to verify handle verification, see issue 3321 +# + +class TestInvalidHandle(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_handles(self): + conn = multiprocessing.connection.Connection(44977608) + # check that poll() doesn't crash + try: + conn.poll() + except (ValueError, OSError): + pass + finally: + # Hack private attribute _handle to avoid printing an error + # in conn.__del__ + conn._handle = None + self.assertRaises((ValueError, OSError), + multiprocessing.connection.Connection, -1) + + + +@hashlib_helper.requires_hashdigest('md5') +class OtherTest(unittest.TestCase): + # TODO: add more tests for deliver/answer challenge. + def test_deliver_challenge_auth_failure(self): + class _FakeConnection(object): + def recv_bytes(self, size): + return b'something bogus' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.deliver_challenge, + _FakeConnection(), b'abc') + + def test_answer_challenge_auth_failure(self): + class _FakeConnection(object): + def __init__(self): + self.count = 0 + def recv_bytes(self, size): + self.count += 1 + if self.count == 1: + return multiprocessing.connection.CHALLENGE + elif self.count == 2: + return b'something bogus' + return b'' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.answer_challenge, + _FakeConnection(), b'abc') + +# +# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 +# + +def initializer(ns): + ns.test += 1 + +@hashlib_helper.requires_hashdigest('md5') +class TestInitializers(unittest.TestCase): + def setUp(self): + self.mgr = multiprocessing.Manager() + self.ns = self.mgr.Namespace() + self.ns.test = 0 + + def tearDown(self): + self.mgr.shutdown() + self.mgr.join() + + def test_manager_initializer(self): + m = multiprocessing.managers.SyncManager() + self.assertRaises(TypeError, m.start, 1) + m.start(initializer, (self.ns,)) + self.assertEqual(self.ns.test, 1) + m.shutdown() + m.join() + + def test_pool_initializer(self): + self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) + p = multiprocessing.Pool(1, initializer, (self.ns,)) + p.close() + p.join() + self.assertEqual(self.ns.test, 1) + +# +# Issue 5155, 5313, 5331: Test process in processes +# Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior +# + +def _this_sub_process(q): + try: + item = q.get(block=False) + except pyqueue.Empty: + pass + +def _test_process(): + queue = multiprocessing.Queue() + subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) + subProc.daemon = True + subProc.start() + subProc.join() + +def _afunc(x): + return x*x + +def pool_in_process(): + pool = multiprocessing.Pool(processes=4) + x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) + pool.close() + pool.join() + +class _file_like(object): + def __init__(self, delegate): + self._delegate = delegate + self._pid = None + + @property + def cache(self): + pid = os.getpid() + # There are no race conditions since fork keeps only the running thread + if pid != self._pid: + self._pid = pid + self._cache = [] + return self._cache + + def write(self, data): + self.cache.append(data) + + def flush(self): + self._delegate.write(''.join(self.cache)) + self._cache = [] + +class TestStdinBadfiledescriptor(unittest.TestCase): + + def test_queue_in_process(self): + proc = multiprocessing.Process(target=_test_process) + proc.start() + proc.join() + + def test_pool_in_process(self): + p = multiprocessing.Process(target=pool_in_process) + p.start() + p.join() + + def test_flushing(self): + sio = io.StringIO() + flike = _file_like(sio) + flike.write('foo') + proc = multiprocessing.Process(target=lambda: flike.flush()) + flike.flush() + assert sio.getvalue() == 'foo' + + +class TestWait(unittest.TestCase): + + @classmethod + def _child_test_wait(cls, w, slow): + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + w.send((i, os.getpid())) + w.close() + + def test_wait(self, slow=False): + from multiprocess.connection import wait + readers = [] + procs = [] + messages = [] + + for i in range(4): + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) + p.daemon = True + p.start() + w.close() + readers.append(r) + procs.append(p) + self.addCleanup(p.join) + + while readers: + for r in wait(readers): + try: + msg = r.recv() + except EOFError: + readers.remove(r) + r.close() + else: + messages.append(msg) + + messages.sort() + expected = sorted((i, p.pid) for i in range(10) for p in procs) + self.assertEqual(messages, expected) + + @classmethod + def _child_test_wait_socket(cls, address, slow): + s = socket.socket() + s.connect(address) + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + s.sendall(('%s\n' % i).encode('ascii')) + s.close() + + def test_wait_socket(self, slow=False): + from multiprocess.connection import wait + l = socket.create_server((socket_helper.HOST, 0)) + addr = l.getsockname() + readers = [] + procs = [] + dic = {} + + for i in range(4): + p = multiprocessing.Process(target=self._child_test_wait_socket, + args=(addr, slow)) + p.daemon = True + p.start() + procs.append(p) + self.addCleanup(p.join) + + for i in range(4): + r, _ = l.accept() + readers.append(r) + dic[r] = [] + l.close() + + while readers: + for r in wait(readers): + msg = r.recv(32) + if not msg: + readers.remove(r) + r.close() + else: + dic[r].append(msg) + + expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') + for v in dic.values(): + self.assertEqual(b''.join(v), expected) + + def test_wait_slow(self): + self.test_wait(True) + + def test_wait_socket_slow(self): + self.test_wait_socket(True) + + def test_wait_timeout(self): + from multiprocess.connection import wait + + expected = 5 + a, b = multiprocessing.Pipe() + + start = 
time.monotonic() + res = wait([a, b], expected) + delta = time.monotonic() - start + + self.assertEqual(res, []) + self.assertLess(delta, expected * 2) + self.assertGreater(delta, expected * 0.5) + + b.send(None) + + start = time.monotonic() + res = wait([a, b], 20) + delta = time.monotonic() - start + + self.assertEqual(res, [a]) + self.assertLess(delta, 0.4) + + @classmethod + def signal_and_sleep(cls, sem, period): + sem.release() + time.sleep(period) + + def test_wait_integer(self): + from multiprocess.connection import wait + + expected = 3 + sorted_ = lambda l: sorted(l, key=lambda x: id(x)) + sem = multiprocessing.Semaphore(0) + a, b = multiprocessing.Pipe() + p = multiprocessing.Process(target=self.signal_and_sleep, + args=(sem, expected)) + + p.start() + self.assertIsInstance(p.sentinel, int) + self.assertTrue(sem.acquire(timeout=20)) + + start = time.monotonic() + res = wait([a, p.sentinel, b], expected + 20) + delta = time.monotonic() - start + + self.assertEqual(res, [p.sentinel]) + self.assertLess(delta, expected + 2) + self.assertGreater(delta, expected - 2) + + a.send(None) + + start = time.monotonic() + res = wait([a, p.sentinel, b], 20) + delta = time.monotonic() - start + + self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) + self.assertLess(delta, 0.4) + + b.send(None) + + start = time.monotonic() + res = wait([a, p.sentinel, b], 20) + delta = time.monotonic() - start + + self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) + self.assertLess(delta, 0.4) + + p.terminate() + p.join() + + def test_neg_timeout(self): + from multiprocess.connection import wait + a, b = multiprocessing.Pipe() + t = time.monotonic() + res = wait([a], timeout=-1) + t = time.monotonic() - t + self.assertEqual(res, []) + self.assertLess(t, 1) + a.close() + b.close() + +# +# Issue 14151: Test invalid family on invalid environment +# + +class TestInvalidFamily(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_family(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener(r'\\.\test') + + @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") + def test_invalid_family_win32(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener('/var/test.pipe') + +# +# Issue 12098: check sys.flags of child matches that for parent +# + +class TestFlags(unittest.TestCase): + @classmethod + def run_in_grandchild(cls, conn): + conn.send(tuple(sys.flags)) + + @classmethod + def run_in_child(cls): + import json + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) + p.start() + grandchild_flags = r.recv() + p.join() + r.close() + w.close() + flags = (tuple(sys.flags), grandchild_flags) + print(json.dumps(flags)) + + def _test_flags(self): + import json + # start child process using unusual flags + prog = ('from multiprocess.tests import TestFlags; ' + + 'TestFlags.run_in_child()') + data = subprocess.check_output( + [sys.executable, '-E', '-S', '-O', '-c', prog]) + child_flags, grandchild_flags = json.loads(data.decode('ascii')) + self.assertEqual(child_flags, grandchild_flags) + +# +# Test interaction with socket timeouts - see Issue #6056 +# + +class TestTimeouts(unittest.TestCase): + @classmethod + def _test_timeout(cls, child, address): + time.sleep(1) + child.send(123) + child.close() + conn = multiprocessing.connection.Client(address) + conn.send(456) + conn.close() + + def test_timeout(self): + old_timeout = socket.getdefaulttimeout() + try: 
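+ # A 0.1s default timeout applies to every socket created afterwards, including
+ # those used internally by Listener and Client; issue #6056 was that
+ # multiprocessing's connection machinery broke under such a default timeout.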
+ socket.setdefaulttimeout(0.1) + parent, child = multiprocessing.Pipe(duplex=True) + l = multiprocessing.connection.Listener(family='AF_INET') + p = multiprocessing.Process(target=self._test_timeout, + args=(child, l.address)) + p.start() + child.close() + self.assertEqual(parent.recv(), 123) + parent.close() + conn = l.accept() + self.assertEqual(conn.recv(), 456) + conn.close() + l.close() + join_process(p) + finally: + socket.setdefaulttimeout(old_timeout) + +# +# Test what happens with no "if __name__ == '__main__'" +# + +class TestNoForkBomb(unittest.TestCase): + def test_noforkbomb(self): + sm = multiprocessing.get_start_method() + name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') + if sm != 'fork': + rc, out, err = test.support.script_helper.assert_python_failure(name, sm) + self.assertEqual(out, b'') + self.assertIn(b'RuntimeError', err) + else: + rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) + self.assertEqual(out.rstrip(), b'123') + self.assertEqual(err, b'') + +# +# Issue #17555: ForkAwareThreadLock +# + +class TestForkAwareThreadLock(unittest.TestCase): + # We recursively start processes. Issue #17555 meant that the + # after fork registry would get duplicate entries for the same + # lock. The size of the registry at generation n was ~2**n. + + @classmethod + def child(cls, n, conn): + if n > 1: + p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) + p.start() + conn.close() + join_process(p) + else: + conn.send(len(util._afterfork_registry)) + conn.close() + + def test_lock(self): + r, w = multiprocessing.Pipe(False) + l = util.ForkAwareThreadLock() + old_size = len(util._afterfork_registry) + p = multiprocessing.Process(target=self.child, args=(5, w)) + p.start() + w.close() + new_size = r.recv() + join_process(p) + self.assertLessEqual(new_size, old_size) + +# +# Check that non-forked child processes do not inherit unneeded fds/handles +# + +class TestCloseFds(unittest.TestCase): + + def get_high_socket_fd(self): + if WIN32: + # The child process will not have any socket handles, so + # calling socket.fromfd() should produce WSAENOTSOCK even + # if there is a handle of the same number. + return socket.socket().detach() + else: + # We want to produce a socket with an fd high enough that a + # freshly created child process will not have any fds as high. 
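+ # (socket.detach() hands back the raw file descriptor without + # closing it, and each os.dup() call below returns the lowest unused + # descriptor number, so repeated duplication walks the fd upwards.)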
+ fd = socket.socket().detach() + to_close = [] + while fd < 50: + to_close.append(fd) + fd = os.dup(fd) + for x in to_close: + os.close(x) + return fd + + def close(self, fd): + if WIN32: + socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() + else: + os.close(fd) + + @classmethod + def _test_closefds(cls, conn, fd): + try: + s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) + except Exception as e: + conn.send(e) + else: + s.close() + conn.send(None) + + def test_closefd(self): + if not HAS_REDUCTION: + raise unittest.SkipTest('requires fd pickling') + + reader, writer = multiprocessing.Pipe() + fd = self.get_high_socket_fd() + try: + p = multiprocessing.Process(target=self._test_closefds, + args=(writer, fd)) + p.start() + writer.close() + e = reader.recv() + join_process(p) + finally: + self.close(fd) + writer.close() + reader.close() + + if multiprocessing.get_start_method() == 'fork': + self.assertIs(e, None) + else: + WSAENOTSOCK = 10038 + self.assertIsInstance(e, OSError) + self.assertTrue(e.errno == errno.EBADF or + e.winerror == WSAENOTSOCK, e) + +# +# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc +# + +class TestIgnoreEINTR(unittest.TestCase): + + # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block + CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) + + @classmethod + def _test_ignore(cls, conn): + def handler(signum, frame): + pass + signal.signal(signal.SIGUSR1, handler) + conn.send('ready') + x = conn.recv() + conn.send(x) + conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_ignore(self): + conn, child_conn = multiprocessing.Pipe() + try: + p = multiprocessing.Process(target=self._test_ignore, + args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + self.assertEqual(conn.recv(), 'ready') + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + time.sleep(0.1) + conn.send(1234) + self.assertEqual(conn.recv(), 1234) + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) + time.sleep(0.1) + p.join() + finally: + conn.close() + + @classmethod + def _test_ignore_listener(cls, conn): + def handler(signum, frame): + pass + signal.signal(signal.SIGUSR1, handler) + with multiprocessing.connection.Listener() as l: + conn.send(l.address) + a = l.accept() + a.send('welcome') + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_ignore_listener(self): + conn, child_conn = multiprocessing.Pipe() + try: + p = multiprocessing.Process(target=self._test_ignore_listener, + args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + address = conn.recv() + time.sleep(0.1) + os.kill(p.pid, signal.SIGUSR1) + time.sleep(0.1) + client = multiprocessing.connection.Client(address) + self.assertEqual(client.recv(), 'welcome') + p.join() + finally: + conn.close() + +class TestStartMethod(unittest.TestCase): + @classmethod + def _check_context(cls, conn): + conn.send(multiprocessing.get_start_method()) + + def check_context(self, ctx): + r, w = ctx.Pipe(duplex=False) + p = ctx.Process(target=self._check_context, args=(w,)) + p.start() + w.close() + child_method = r.recv() + r.close() + p.join() + self.assertEqual(child_method, ctx.get_start_method()) + + def test_context(self): + for method in ('fork', 'spawn', 'forkserver'): + try: + ctx = multiprocessing.get_context(method) + except ValueError: + continue + 
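+ # A context object mirrors the top-level multiprocessing API with + # the start method pinned, roughly (illustrative sketch only): + # + # ctx = multiprocessing.get_context('spawn') + # p = ctx.Process(target=work) # 'work' is any picklable callable + # p.start(); p.join() # always started with the 'spawn' method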
self.assertEqual(ctx.get_start_method(), method) + self.assertIs(ctx.get_context(), ctx) + self.assertRaises(ValueError, ctx.set_start_method, 'spawn') + self.assertRaises(ValueError, ctx.set_start_method, None) + self.check_context(ctx) + + def test_set_get(self): + multiprocessing.set_forkserver_preload(PRELOAD) + count = 0 + old_method = multiprocessing.get_start_method() + try: + for method in ('fork', 'spawn', 'forkserver'): + try: + multiprocessing.set_start_method(method, force=True) + except ValueError: + continue + self.assertEqual(multiprocessing.get_start_method(), method) + ctx = multiprocessing.get_context() + self.assertEqual(ctx.get_start_method(), method) + self.assertTrue(type(ctx).__name__.lower().startswith(method)) + self.assertTrue( + ctx.Process.__name__.lower().startswith(method)) + self.check_context(multiprocessing) + count += 1 + finally: + multiprocessing.set_start_method(old_method, force=True) + self.assertGreaterEqual(count, 1) + + def test_get_all(self): + methods = multiprocessing.get_all_start_methods() + if sys.platform == 'win32': + self.assertEqual(methods, ['spawn']) + else: + self.assertTrue(methods == ['fork', 'spawn'] or + methods == ['spawn', 'fork'] or + methods == ['fork', 'spawn', 'forkserver'] or + methods == ['spawn', 'fork', 'forkserver']) + + def test_preload_resources(self): + if multiprocessing.get_start_method() != 'forkserver': + self.skipTest("test only relevant for 'forkserver' method") + name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') + rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) + out = out.decode() + err = err.decode() + if out.rstrip() != 'ok' or err != '': + print(out) + print(err) + self.fail("failed spawning forkserver or grandchild") + + +@unittest.skipIf(sys.platform == "win32", + "test semantics don't make sense on Windows") +class TestResourceTracker(unittest.TestCase): + + def _test_resource_tracker(self): + # + # Check that killing process does not leak named semaphores + # + cmd = '''if 1: + import time, os, tempfile + import multiprocess as mp + from multiprocess import resource_tracker + from multiprocess.shared_memory import SharedMemory + + mp.set_start_method("spawn") + rand = tempfile._RandomNameSequence() + + + def create_and_register_resource(rtype): + if rtype == "semaphore": + lock = mp.Lock() + return lock, lock._semlock.name + elif rtype == "shared_memory": + sm = SharedMemory(create=True, size=10) + return sm, sm._name + else: + raise ValueError( + "Resource type {{}} not understood".format(rtype)) + + + resource1, rname1 = create_and_register_resource("{rtype}") + resource2, rname2 = create_and_register_resource("{rtype}") + + os.write({w}, rname1.encode("ascii") + b"\\n") + os.write({w}, rname2.encode("ascii") + b"\\n") + + time.sleep(10) + ''' + for rtype in resource_tracker._CLEANUP_FUNCS: + with self.subTest(rtype=rtype): + if rtype == "noop": + # Artefact resource type used by the resource_tracker + continue + r, w = os.pipe() + p = subprocess.Popen([sys.executable, + '-E', '-c', cmd.format(w=w, rtype=rtype)], + pass_fds=[w], + stderr=subprocess.PIPE) + os.close(w) + with open(r, 'rb', closefd=True) as f: + name1 = f.readline().rstrip().decode('ascii') + name2 = f.readline().rstrip().decode('ascii') + _resource_unlink(name1, rtype) + p.terminate() + p.wait() + + deadline = time.monotonic() + support.LONG_TIMEOUT + while time.monotonic() < deadline: + time.sleep(.5) + try: + _resource_unlink(name2, rtype) + except OSError as e: + # docs say it should be ENOENT, 
but OSX seems to give + # EINVAL + self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) + break + else: + raise AssertionError( + f"A {rtype} resource was leaked after a process was " + f"abruptly terminated.") + err = p.stderr.read().decode('utf-8') + p.stderr.close() + expected = ('resource_tracker: There appear to be 2 leaked {} ' + 'objects'.format( + rtype)) + self.assertRegex(err, expected) + self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) + + def check_resource_tracker_death(self, signum, should_die): + # bpo-31310: if the semaphore tracker process has died, it should + # be restarted implicitly. + from multiprocess.resource_tracker import _resource_tracker + pid = _resource_tracker._pid + if pid is not None: + os.kill(pid, signal.SIGKILL) + support.wait_process(pid, exitcode=-signal.SIGKILL) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + _resource_tracker.ensure_running() + pid = _resource_tracker._pid + + os.kill(pid, signum) + time.sleep(1.0) # give it time to die + + ctx = multiprocessing.get_context("spawn") + with warnings.catch_warnings(record=True) as all_warn: + warnings.simplefilter("always") + sem = ctx.Semaphore() + sem.acquire() + sem.release() + wr = weakref.ref(sem) + # ensure `sem` gets collected, which triggers communication with + # the semaphore tracker + del sem + gc.collect() + self.assertIsNone(wr()) + if should_die: + self.assertEqual(len(all_warn), 1) + the_warn = all_warn[0] + self.assertTrue(issubclass(the_warn.category, UserWarning)) + self.assertTrue("resource_tracker: process died" + in str(the_warn.message)) + else: + self.assertEqual(len(all_warn), 0) + + def test_resource_tracker_sigint(self): + # Catchable signal (ignored by semaphore tracker) + self.check_resource_tracker_death(signal.SIGINT, False) + + def test_resource_tracker_sigterm(self): + # Catchable signal (ignored by semaphore tracker) + self.check_resource_tracker_death(signal.SIGTERM, False) + + def test_resource_tracker_sigkill(self): + # Uncatchable signal. + self.check_resource_tracker_death(signal.SIGKILL, True) + + @staticmethod + def _is_resource_tracker_reused(conn, pid): + from multiprocess.resource_tracker import _resource_tracker + _resource_tracker.ensure_running() + # The pid should be None in the child process, except for the fork + # context. It should not be a new value.
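+ # (With the "fork" method the child inherits the parent's module + # state, so _pid keeps the parent's value; under "spawn" and + # "forkserver" the module is imported afresh and _pid starts as None.)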
+ reused = _resource_tracker._pid in (None, pid) + reused &= _resource_tracker._check_alive() + conn.send(reused) + + def test_resource_tracker_reused(self): + from multiprocess.resource_tracker import _resource_tracker + _resource_tracker.ensure_running() + pid = _resource_tracker._pid + + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=self._is_resource_tracker_reused, + args=(w, pid)) + p.start() + is_resource_tracker_reused = r.recv() + + # Clean up + p.join() + w.close() + r.close() + + self.assertTrue(is_resource_tracker_reused) + + def test_too_long_name_resource(self): + # gh-96819: Resource names that will make the length of a write to a pipe + # greater than PIPE_BUF are not allowed + rtype = "shared_memory" + too_long_name_resource = "a" * (512 - len(rtype)) + with self.assertRaises(ValueError): + resource_tracker.register(too_long_name_resource, rtype) + + +class TestSimpleQueue(unittest.TestCase): + + @classmethod + def _test_empty(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + # issue 30301, could fail under spawn and forkserver + try: + queue.put(queue.empty()) + queue.put(queue.empty()) + finally: + parent_can_continue.set() + + def test_empty(self): + queue = multiprocessing.SimpleQueue() + child_can_start = multiprocessing.Event() + parent_can_continue = multiprocessing.Event() + + proc = multiprocessing.Process( + target=self._test_empty, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertTrue(queue.empty()) + + child_can_start.set() + parent_can_continue.wait() + + self.assertFalse(queue.empty()) + self.assertEqual(queue.get(), True) + self.assertEqual(queue.get(), False) + self.assertTrue(queue.empty()) + + proc.join() + + def test_close(self): + queue = multiprocessing.SimpleQueue() + queue.close() + # closing a queue twice should not fail + queue.close() + + # Test specific to CPython since it tests private attributes + @test.support.cpython_only + def test_closed(self): + queue = multiprocessing.SimpleQueue() + queue.close() + self.assertTrue(queue._reader.closed) + self.assertTrue(queue._writer.closed) + + +class TestPoolNotLeakOnFailure(unittest.TestCase): + + def test_release_unused_processes(self): + # Issue #19675: During pool creation, if we can't create a process, + # don't leak already created ones. + will_fail_in = 3 + forked_processes = [] + + class FailingForkProcess: + def __init__(self, **kwargs): + self.name = 'Fake Process' + self.exitcode = None + self.state = None + forked_processes.append(self) + + def start(self): + nonlocal will_fail_in + if will_fail_in <= 0: + raise OSError("Manually induced OSError") + will_fail_in -= 1 + self.state = 'started' + + def terminate(self): + self.state = 'stopping' + + def join(self): + if self.state == 'stopping': + self.state = 'stopped' + + def is_alive(self): + return self.state == 'started' or self.state == 'stopping' + + with self.assertRaisesRegex(OSError, 'Manually induced OSError'): + p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( + Process=FailingForkProcess)) + p.close() + p.join() + self.assertFalse( + any(process.is_alive() for process in forked_processes)) + + +@hashlib_helper.requires_hashdigest('md5') +class TestSyncManagerTypes(unittest.TestCase): + """Test all the types which can be shared between a parent and a + child process by using a manager which acts as an intermediary + between them. 
+ + In the following unit-tests the base type is created in the parent + process, the @classmethod represents the worker process and the + shared object is readable and editable between the two. + + # The child. + @classmethod + def _test_list(cls, obj): + assert obj[0] == 5 + assert obj.append(6) + + # The parent. + def test_list(self): + o = self.manager.list() + o.append(5) + self.run_worker(self._test_list, o) + assert o[1] == 6 + """ + manager_class = multiprocessing.managers.SyncManager + + def setUp(self): + self.manager = self.manager_class() + self.manager.start() + self.proc = None + + def tearDown(self): + if self.proc is not None and self.proc.is_alive(): + self.proc.terminate() + self.proc.join() + self.manager.shutdown() + self.manager = None + self.proc = None + + @classmethod + def setUpClass(cls): + support.reap_children() + + tearDownClass = setUpClass + + def wait_proc_exit(self): + # Only the manager process should be returned by active_children() + # but this can take a bit on slow machines, so wait a few seconds + # if there are other children too (see #17395). + join_process(self.proc) + start_time = time.monotonic() + t = 0.01 + while len(multiprocessing.active_children()) > 1: + time.sleep(t) + t *= 2 + dt = time.monotonic() - start_time + if dt >= 5.0: + test.support.environment_altered = True + support.print_warning(f"multiprocess.Manager still has " + f"{multiprocessing.active_children()} " + f"active children after {dt} seconds") + break + + def run_worker(self, worker, obj): + self.proc = multiprocessing.Process(target=worker, args=(obj, )) + self.proc.daemon = True + self.proc.start() + self.wait_proc_exit() + self.assertEqual(self.proc.exitcode, 0) + + @classmethod + def _test_event(cls, obj): + assert obj.is_set() + obj.wait() + obj.clear() + obj.wait(0.001) + + def test_event(self): + o = self.manager.Event() + o.set() + self.run_worker(self._test_event, o) + assert not o.is_set() + o.wait(0.001) + + @classmethod + def _test_lock(cls, obj): + obj.acquire() + + def test_lock(self, lname="Lock"): + o = getattr(self.manager, lname)() + self.run_worker(self._test_lock, o) + o.release() + self.assertRaises(RuntimeError, o.release) # already released + + @classmethod + def _test_rlock(cls, obj): + obj.acquire() + obj.release() + + def test_rlock(self, lname="Lock"): + o = getattr(self.manager, lname)() + self.run_worker(self._test_rlock, o) + + @classmethod + def _test_semaphore(cls, obj): + obj.acquire() + + def test_semaphore(self, sname="Semaphore"): + o = getattr(self.manager, sname)() + self.run_worker(self._test_semaphore, o) + o.release() + + def test_bounded_semaphore(self): + self.test_semaphore(sname="BoundedSemaphore") + + @classmethod + def _test_condition(cls, obj): + obj.acquire() + obj.release() + + def test_condition(self): + o = self.manager.Condition() + self.run_worker(self._test_condition, o) + + @classmethod + def _test_barrier(cls, obj): + assert obj.parties == 5 + obj.reset() + + def test_barrier(self): + o = self.manager.Barrier(5) + self.run_worker(self._test_barrier, o) + + @classmethod + def _test_pool(cls, obj): + # TODO: fix https://bugs.python.org/issue35919 + with obj: + pass + + def test_pool(self): + o = self.manager.Pool(processes=4) + self.run_worker(self._test_pool, o) + + @classmethod + def _test_queue(cls, obj): + assert obj.qsize() == 2 + assert obj.full() + assert not obj.empty() + assert obj.get() == 5 + assert not obj.empty() + assert obj.get() == 6 + assert obj.empty() + + def test_queue(self, qname="Queue"): + o = 
getattr(self.manager, qname)(2) + o.put(5) + o.put(6) + self.run_worker(self._test_queue, o) + assert o.empty() + assert not o.full() + + def test_joinable_queue(self): + self.test_queue("JoinableQueue") + + @classmethod + def _test_list(cls, obj): + assert obj[0] == 5 + assert obj.count(5) == 1 + assert obj.index(5) == 0 + obj.sort() + obj.reverse() + for x in obj: + pass + assert len(obj) == 1 + assert obj.pop(0) == 5 + + def test_list(self): + o = self.manager.list() + o.append(5) + self.run_worker(self._test_list, o) + assert not o + self.assertEqual(len(o), 0) + + @classmethod + def _test_dict(cls, obj): + assert len(obj) == 1 + assert obj['foo'] == 5 + assert obj.get('foo') == 5 + assert list(obj.items()) == [('foo', 5)] + assert list(obj.keys()) == ['foo'] + assert list(obj.values()) == [5] + assert obj.copy() == {'foo': 5} + assert obj.popitem() == ('foo', 5) + + def test_dict(self): + o = self.manager.dict() + o['foo'] = 5 + self.run_worker(self._test_dict, o) + assert not o + self.assertEqual(len(o), 0) + + @classmethod + def _test_value(cls, obj): + assert obj.value == 1 + assert obj.get() == 1 + obj.set(2) + + def test_value(self): + o = self.manager.Value('i', 1) + self.run_worker(self._test_value, o) + self.assertEqual(o.value, 2) + self.assertEqual(o.get(), 2) + + @classmethod + def _test_array(cls, obj): + assert obj[0] == 0 + assert obj[1] == 1 + assert len(obj) == 2 + assert list(obj) == [0, 1] + + def test_array(self): + o = self.manager.Array('i', [0, 1]) + self.run_worker(self._test_array, o) + + @classmethod + def _test_namespace(cls, obj): + assert obj.x == 0 + assert obj.y == 1 + + def test_namespace(self): + o = self.manager.Namespace() + o.x = 0 + o.y = 1 + self.run_worker(self._test_namespace, o) + + +class TestNamedResource(unittest.TestCase): + @unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") + def test_global_named_resource_spawn(self): + # + # gh-90549: Check that global named resources in main module + # will not leak by a subprocess, in spawn context. + # + testfn = os_helper.TESTFN + self.addCleanup(os_helper.unlink, testfn) + with open(testfn, 'w', encoding='utf-8') as f: + f.write(textwrap.dedent('''\ + import multiprocess as mp + + ctx = mp.get_context('spawn') + + global_resource = ctx.Semaphore() + + def submain(): pass + + if __name__ == '__main__': + p = ctx.Process(target=submain) + p.start() + p.join() + ''')) + rc, out, err = test.support.script_helper.assert_python_ok(testfn, **ENV) + # on error, err = 'UserWarning: resource_tracker: There appear to + # be 1 leaked semaphore objects to clean up at shutdown' + self.assertEqual(err, b'') + + +class MiscTestCase(unittest.TestCase): + def test__all__(self): + # Just make sure names in not_exported are excluded + support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, + not_exported=['SUBDEBUG', 'SUBWARNING', + 'license', 'citation']) + + +# +# Mixins +# + +class BaseMixin(object): + @classmethod + def setUpClass(cls): + cls.dangling = (multiprocessing.process._dangling.copy(), + threading._dangling.copy()) + + @classmethod + def tearDownClass(cls): + # bpo-26762: Some multiprocessing objects like Pool create reference + # cycles. Trigger a garbage collection to break these cycles. 
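+ # (Such a cycle keeps an object alive past its last reference, so the + # dangling-process check below could report false positives without + # an explicit collection.)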
+ test.support.gc_collect() + + processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) + if processes: + test.support.environment_altered = True + support.print_warning(f'Dangling processes: {processes}') + processes = None + + threads = set(threading._dangling) - set(cls.dangling[1]) + if threads: + test.support.environment_altered = True + support.print_warning(f'Dangling threads: {threads}') + threads = None + + +class ProcessesMixin(BaseMixin): + TYPE = 'processes' + Process = multiprocessing.Process + connection = multiprocessing.connection + current_process = staticmethod(multiprocessing.current_process) + parent_process = staticmethod(multiprocessing.parent_process) + active_children = staticmethod(multiprocessing.active_children) + Pool = staticmethod(multiprocessing.Pool) + Pipe = staticmethod(multiprocessing.Pipe) + Queue = staticmethod(multiprocessing.Queue) + JoinableQueue = staticmethod(multiprocessing.JoinableQueue) + Lock = staticmethod(multiprocessing.Lock) + RLock = staticmethod(multiprocessing.RLock) + Semaphore = staticmethod(multiprocessing.Semaphore) + BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) + Condition = staticmethod(multiprocessing.Condition) + Event = staticmethod(multiprocessing.Event) + Barrier = staticmethod(multiprocessing.Barrier) + Value = staticmethod(multiprocessing.Value) + Array = staticmethod(multiprocessing.Array) + RawValue = staticmethod(multiprocessing.RawValue) + RawArray = staticmethod(multiprocessing.RawArray) + + +class ManagerMixin(BaseMixin): + TYPE = 'manager' + Process = multiprocessing.Process + Queue = property(operator.attrgetter('manager.Queue')) + JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) + Lock = property(operator.attrgetter('manager.Lock')) + RLock = property(operator.attrgetter('manager.RLock')) + Semaphore = property(operator.attrgetter('manager.Semaphore')) + BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) + Condition = property(operator.attrgetter('manager.Condition')) + Event = property(operator.attrgetter('manager.Event')) + Barrier = property(operator.attrgetter('manager.Barrier')) + Value = property(operator.attrgetter('manager.Value')) + Array = property(operator.attrgetter('manager.Array')) + list = property(operator.attrgetter('manager.list')) + dict = property(operator.attrgetter('manager.dict')) + Namespace = property(operator.attrgetter('manager.Namespace')) + + @classmethod + def Pool(cls, *args, **kwds): + return cls.manager.Pool(*args, **kwds) + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.manager = multiprocessing.Manager() + + @classmethod + def tearDownClass(cls): + # only the manager process should be returned by active_children() + # but this can take a bit on slow machines, so wait a few seconds + # if there are other children too (see #17395) + start_time = time.monotonic() + t = 0.01 + while len(multiprocessing.active_children()) > 1: + time.sleep(t) + t *= 2 + dt = time.monotonic() - start_time + if dt >= 5.0: + test.support.environment_altered = True + support.print_warning(f"multiprocess.Manager still has " + f"{multiprocessing.active_children()} " + f"active children after {dt} seconds") + break + + gc.collect() # do garbage collection + if cls.manager._number_of_objects() != 0: + # This is not really an error since some tests do not + # ensure that all processes which hold a reference to a + # managed object have been joined. 
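+ # (Hence we only record that the environment was altered and print + # a warning listing the surviving objects, rather than failing.)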
+ test.support.environment_altered = True + support.print_warning('Shared objects which still exist ' + 'at manager shutdown:') + support.print_warning(cls.manager._debug_info()) + cls.manager.shutdown() + cls.manager.join() + cls.manager = None + + super().tearDownClass() + + +class ThreadsMixin(BaseMixin): + TYPE = 'threads' + Process = multiprocessing.dummy.Process + connection = multiprocessing.dummy.connection + current_process = staticmethod(multiprocessing.dummy.current_process) + active_children = staticmethod(multiprocessing.dummy.active_children) + Pool = staticmethod(multiprocessing.dummy.Pool) + Pipe = staticmethod(multiprocessing.dummy.Pipe) + Queue = staticmethod(multiprocessing.dummy.Queue) + JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) + Lock = staticmethod(multiprocessing.dummy.Lock) + RLock = staticmethod(multiprocessing.dummy.RLock) + Semaphore = staticmethod(multiprocessing.dummy.Semaphore) + BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) + Condition = staticmethod(multiprocessing.dummy.Condition) + Event = staticmethod(multiprocessing.dummy.Event) + Barrier = staticmethod(multiprocessing.dummy.Barrier) + Value = staticmethod(multiprocessing.dummy.Value) + Array = staticmethod(multiprocessing.dummy.Array) + +# +# Functions used to create test cases from the base ones in this module +# + +def install_tests_in_module_dict(remote_globs, start_method): + __module__ = remote_globs['__name__'] + local_globs = globals() + ALL_TYPES = {'processes', 'threads', 'manager'} + + for name, base in local_globs.items(): + if not isinstance(base, type): + continue + if issubclass(base, BaseTestCase): + if base is BaseTestCase: + continue + assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES + for type_ in base.ALLOWED_TYPES: + newname = 'With' + type_.capitalize() + name[1:] + Mixin = local_globs[type_.capitalize() + 'Mixin'] + class Temp(base, Mixin, unittest.TestCase): + pass + if type_ == 'manager': + Temp = hashlib_helper.requires_hashdigest('md5')(Temp) + Temp.__name__ = Temp.__qualname__ = newname + Temp.__module__ = __module__ + remote_globs[newname] = Temp + elif issubclass(base, unittest.TestCase): + class Temp(base, object): + pass + Temp.__name__ = Temp.__qualname__ = name + Temp.__module__ = __module__ + remote_globs[name] = Temp + + dangling = [None, None] + old_start_method = [None] + + def setUpModule(): + multiprocessing.set_forkserver_preload(PRELOAD) + multiprocessing.process._cleanup() + dangling[0] = multiprocessing.process._dangling.copy() + dangling[1] = threading._dangling.copy() + old_start_method[0] = multiprocessing.get_start_method(allow_none=True) + try: + multiprocessing.set_start_method(start_method, force=True) + except ValueError: + raise unittest.SkipTest(start_method + + ' start method not supported') + + if sys.platform.startswith("linux"): + try: + lock = multiprocessing.RLock() + except OSError: + raise unittest.SkipTest("OSError raises on RLock creation, " + "see issue 3111!") + check_enough_semaphores() + util.get_temp_dir() # creates temp directory + multiprocessing.get_logger().setLevel(LOG_LEVEL) + + def tearDownModule(): + need_sleep = False + + # bpo-26762: Some multiprocessing objects like Pool create reference + # cycles. Trigger a garbage collection to break these cycles. 
+ test.support.gc_collect() + + multiprocessing.set_start_method(old_start_method[0], force=True) + # pause a bit so we don't get warning about dangling threads/processes + processes = set(multiprocessing.process._dangling) - set(dangling[0]) + if processes: + need_sleep = True + test.support.environment_altered = True + support.print_warning(f'Dangling processes: {processes}') + processes = None + + threads = set(threading._dangling) - set(dangling[1]) + if threads: + need_sleep = True + test.support.environment_altered = True + support.print_warning(f'Dangling threads: {threads}') + threads = None + + # Sleep 500 ms to give time to child processes to complete. + if need_sleep: + time.sleep(0.5) + + multiprocessing.util._cleanup_tests() + + remote_globs['setUpModule'] = setUpModule + remote_globs['tearDownModule'] = tearDownModule + + +@unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') +@unittest.skipIf(sys.platform != "linux", "Linux only") +@unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") +class SemLockTests(unittest.TestCase): + + def test_semlock_subclass(self): + class SemLock(_multiprocessing.SemLock): + pass + name = f'test_semlock_subclass-{os.getpid()}' + s = SemLock(1, 0, 10, name, False) + _multiprocessing.sem_unlink(name) diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__main__.py b/venv/lib/python3.10/site-packages/multiprocess/tests/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0de127e06a38f895225e79aa06678dc8e58e731 --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/__main__.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE + +import glob +import os +import sys +import subprocess as sp +python = sys.executable +try: + import pox + python = pox.which_python(version=True) or python +except ImportError: + pass +shell = sys.platform[:3] == 'win' + +suite = os.path.dirname(__file__) or os.path.curdir +tests = glob.glob(suite + os.path.sep + 'test_*.py') +tests = glob.glob(suite + os.path.sep + '__init__.py') + \ + [i for i in tests if 'main' not in i] + + +if __name__ == '__main__': + + failed = 0 + for test in tests: + p = sp.Popen([python, test], shell=shell).wait() + if p: + failed = 1 + print('') + exit(failed) diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50ec0bb650b8c4c7d09d9d3e00c5489a06347ea3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67447658cff65e22004f5095475f6cea606949a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63a737ca923910232f82fe36976eb7fc8ca3662d Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2808c26ada9785bff35172012e56ace4ac877e6b Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc9e1d70a2d57c66ae9f7b2c7d2cb1cb0c9d38a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fc351fd82a64cf2a380a6d871b3efe6b85f2778 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a37d5132967161b798aec3b187b3d766bb5c470 Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..106a23c0206e487c35aa9942bee80359ee86909a Binary files /dev/null and b/venv/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py b/venv/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py new file mode 100644 index 0000000000000000000000000000000000000000..017e010ba0e6fd4372356e7c2bef5b0f23717c1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/mp_fork_bomb.py @@ -0,0 +1,18 @@ +import multiprocessing, sys + +def foo(): + print("123") + +# Because "if __name__ == '__main__'" is missing this will not work +# correctly on Windows. However, we should get a RuntimeError rather +# than the Windows equivalent of a fork bomb. + +if len(sys.argv) > 1: + multiprocessing.set_start_method(sys.argv[1]) +else: + multiprocessing.set_start_method('spawn') + +p = multiprocessing.Process(target=foo) +p.start() +p.join() +sys.exit(p.exitcode) diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py b/venv/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py new file mode 100644 index 0000000000000000000000000000000000000000..a5d10606e41616d9e18c4a474f674566b39199a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/mp_preload.py @@ -0,0 +1,18 @@ +import multiprocessing + +multiprocessing.Lock() + + +def f(): + print("ok") + + +if __name__ == "__main__": + ctx = multiprocessing.get_context("forkserver") + modname = "multiprocess.tests.mp_preload" + # Make sure it's importable + __import__(modname) + ctx.set_forkserver_preload([modname]) + proc = ctx.Process(target=f) + proc.start() + proc.join() diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py new file mode 100644 index 0000000000000000000000000000000000000000..f331e381613626c6b406f782bb2e4afb850c658c --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py @@ -0,0 +1,19 @@ +import unittest +from multiprocess.tests import install_tests_in_module_dict + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("fork is not available on Windows") + +if sys.platform == 'darwin': + raise unittest.SkipTest("test may crash on macOS (bpo-33725)") + +install_tests_in_module_dict(globals(), 'fork') + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py 
b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..b3251208c2060ee3ccc3fc30f4904534e98058f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py @@ -0,0 +1,16 @@ +import unittest +from multiprocess.tests import install_tests_in_module_dict + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("forkserver is not available on Windows") + +install_tests_in_module_dict(globals(), 'forkserver') + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..ab88d2261b3c1fa6c2b7ae7065cbc270ac2be676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py @@ -0,0 +1,303 @@ +# tests __main__ module handling in multiprocessing +from test import support +from test.support import import_helper +# Skip tests if _multiprocessing wasn't built. +import_helper.import_module('_multiprocessing') + +import importlib +import importlib.machinery +import unittest +import sys +import os +import os.path +import py_compile + +from test.support import os_helper +from test.support.script_helper import ( + make_pkg, make_script, make_zip_pkg, make_zip_script, + assert_python_ok) + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +# Look up which start methods are available to test +import multiprocess as multiprocessing +AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) + +# Issue #22332: Skip tests if sem_open implementation is broken. +import_helper.import_module('multiprocess.synchronize') + +verbose = support.verbose + +test_source = """\
+# multiprocessing includes all sorts of shenanigans to make __main__ +# attributes accessible in the subprocess in a pickle compatible way. + +# We run the "doesn't work in the interactive interpreter" example from +# the docs to make sure it *does* work from an executed __main__, +# regardless of the invocation mechanism + +import sys +import time +sys.path.extend({0}) +from multiprocess import Pool, set_start_method + +# We use this __main__ defined function in the map call below in order to +# check that multiprocessing is correctly running the unguarded +# code in child processes and then making it available as __main__ +def f(x): + return x*x + +# Check explicit relative imports +if "check_sibling" in __file__: + # We're inside a package and not in a __main__.py file + # so make sure explicit relative imports work correctly + from .
import sibling + +if __name__ == '__main__': + start_method = sys.argv[1] + set_start_method(start_method) + results = [] + with Pool(5) as pool: + pool.map_async(f, [1, 2, 3], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + + results.sort() + print(start_method, "->", results) + + pool.join() +""".format(sys.path) + +test_source_main_skipped_in_children = """\ +# __main__.py files have an implied "if __name__ == '__main__'" so +# multiprocessing should always skip running them in child processes + +# This means we can't use __main__ defined functions in child processes, +# so we just use "int" as a passthrough operation below + +if __name__ != "__main__": + raise RuntimeError("Should only be called as __main__!") + +import sys +import time +sys.path.extend({0}) +from multiprocess import Pool, set_start_method + +start_method = sys.argv[1] +set_start_method(start_method) +results = [] +with Pool(5) as pool: + pool.map_async(int, [1, 4, 9], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + +results.sort() +print(start_method, "->", results) + +pool.join() +""".format(sys.path) + +# These helpers were copied from test_cmd_line_script & tweaked a bit... + +def _make_test_script(script_dir, script_basename, + source=test_source, omit_suffix=False): + to_return = make_script(script_dir, script_basename, + source, omit_suffix) + # Hack to check explicit relative imports + if script_basename == "check_sibling": + make_script(script_dir, "sibling", "") + importlib.invalidate_caches() + return to_return + +def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source=test_source, depth=1): + to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source, depth) + importlib.invalidate_caches() + return to_return + +# There's no easy way to pass the script directory in to get +# -m to work (avoiding that is the whole point of making +# directories and zipfiles executable!) 
+# So we fake it for testing purposes with a custom launch script +launch_source = """\ +import sys, os.path, runpy +sys.path.insert(0, %s) +runpy._run_module_as_main(%r) +""" + +def _make_launch_script(script_dir, script_basename, module_name, path=None): + if path is None: + path = "os.path.dirname(__file__)" + else: + path = repr(path) + source = launch_source % (path, module_name) + to_return = make_script(script_dir, script_basename, source) + importlib.invalidate_caches() + return to_return + +class MultiProcessingCmdLineMixin(): + maxDiff = None # Show full tracebacks on subprocess failure + + def setUp(self): + if self.start_method not in AVAILABLE_START_METHODS: + self.skipTest("%r start method not available" % self.start_method) + + def _check_output(self, script_name, exit_code, out, err): + if verbose > 1: + print("Output from test script %r:" % script_name) + print(repr(out)) + self.assertEqual(exit_code, 0) + self.assertEqual(err.decode('utf-8'), '') + expected_results = "%s -> [1, 4, 9]" % self.start_method + self.assertEqual(out.decode('utf-8').strip(), expected_results) + + def _check_script(self, script_name, *cmd_line_switches): + if not __debug__: + cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) + run_args = cmd_line_switches + (script_name, self.start_method) + rc, out, err = assert_python_ok(*run_args, __isolated=False) + self._check_output(script_name, rc, out, err) + + def test_basic_script(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + self._check_script(script_name) + + def test_basic_script_no_suffix(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script', + omit_suffix=True) + self._check_script(script_name) + + def test_ipython_workaround(self): + # Some versions of the IPython launch script are missing the + # __name__ = "__main__" guard, and multiprocessing has long had + # a workaround for that case + # See https://github.com/ipython/ipython/issues/4698 + source = test_source_main_skipped_in_children + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'ipython', + source=source) + self._check_script(script_name) + script_no_suffix = _make_test_script(script_dir, 'ipython', + source=source, + omit_suffix=True) + self._check_script(script_no_suffix) + + def test_script_compiled(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + self._check_script(pyc_file) + + def test_directory(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + self._check_script(script_dir) + + def test_directory_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + self._check_script(script_dir) + + def test_zipfile(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) + self._check_script(zip_name) + + def 
test_zipfile_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) + self._check_script(zip_name) + + def test_module_in_package(self): + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, 'check_sibling') + launch_name = _make_launch_script(script_dir, 'launch', + 'test_pkg.check_sibling') + self._check_script(launch_name) + + def test_module_in_package_in_zipfile(self): + with os_helper.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_module_in_subpackage_in_zipfile(self): + with os_helper.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_package(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + self._check_script(launch_name) + + def test_package_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + self._check_script(launch_name) + +# Test all supported start methods (setupClass skips as appropriate) + +class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'spawn' + main_in_children_source = test_source_main_skipped_in_children + +class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'fork' + main_in_children_source = test_source + +class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'forkserver' + main_in_children_source = test_source_main_skipped_in_children + +def tearDownModule(): + support.reap_children() + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..e5236c789d4b4828b618e7c5dda80778ba0504de --- /dev/null +++ b/venv/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_spawn.py @@ -0,0 +1,12 @@ +import unittest +from multiprocess.tests import install_tests_in_module_dict + +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +install_tests_in_module_dict(globals(), 'spawn') + +if __name__ == '__main__': + unittest.main() diff --git 
a/venv/lib/python3.10/site-packages/psutil/__init__.py b/venv/lib/python3.10/site-packages/psutil/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8138db41e1a62a863a365ab1e942baee586f7cc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/__init__.py @@ -0,0 +1,2492 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""psutil is a cross-platform library for retrieving information on +running processes and system utilization (CPU, memory, disks, network, +sensors) in Python. Supported platforms: + + - Linux + - Windows + - macOS + - FreeBSD + - OpenBSD + - NetBSD + - Sun Solaris + - AIX + +Works with Python versions 2.7 and 3.6+. +""" + +from __future__ import division + +import collections +import contextlib +import datetime +import functools +import os +import signal +import subprocess +import sys +import threading +import time + + +try: + import pwd +except ImportError: + pwd = None + +from . import _common +from ._common import AIX +from ._common import BSD +from ._common import CONN_CLOSE +from ._common import CONN_CLOSE_WAIT +from ._common import CONN_CLOSING +from ._common import CONN_ESTABLISHED +from ._common import CONN_FIN_WAIT1 +from ._common import CONN_FIN_WAIT2 +from ._common import CONN_LAST_ACK +from ._common import CONN_LISTEN +from ._common import CONN_NONE +from ._common import CONN_SYN_RECV +from ._common import CONN_SYN_SENT +from ._common import CONN_TIME_WAIT +from ._common import FREEBSD # NOQA +from ._common import LINUX +from ._common import MACOS +from ._common import NETBSD # NOQA +from ._common import NIC_DUPLEX_FULL +from ._common import NIC_DUPLEX_HALF +from ._common import NIC_DUPLEX_UNKNOWN +from ._common import OPENBSD # NOQA +from ._common import OSX # deprecated alias +from ._common import POSIX # NOQA +from ._common import POWER_TIME_UNKNOWN +from ._common import POWER_TIME_UNLIMITED +from ._common import STATUS_DEAD +from ._common import STATUS_DISK_SLEEP +from ._common import STATUS_IDLE +from ._common import STATUS_LOCKED +from ._common import STATUS_PARKED +from ._common import STATUS_RUNNING +from ._common import STATUS_SLEEPING +from ._common import STATUS_STOPPED +from ._common import STATUS_TRACING_STOP +from ._common import STATUS_WAITING +from ._common import STATUS_WAKING +from ._common import STATUS_ZOMBIE +from ._common import SUNOS +from ._common import WINDOWS +from ._common import AccessDenied +from ._common import Error +from ._common import NoSuchProcess +from ._common import TimeoutExpired +from ._common import ZombieProcess +from ._common import memoize_when_activated +from ._common import wrap_numbers as _wrap_numbers +from ._compat import PY3 as _PY3 +from ._compat import PermissionError +from ._compat import ProcessLookupError +from ._compat import SubprocessTimeoutExpired as _SubprocessTimeoutExpired +from ._compat import long + + +if LINUX: + # This is public API and it will be retrieved from _pslinux.py + # via sys.modules. + PROCFS_PATH = "/proc" + + from . import _pslinux as _psplatform + from ._pslinux import IOPRIO_CLASS_BE # NOQA + from ._pslinux import IOPRIO_CLASS_IDLE # NOQA + from ._pslinux import IOPRIO_CLASS_NONE # NOQA + from ._pslinux import IOPRIO_CLASS_RT # NOQA + +elif WINDOWS: + from . 
import _pswindows as _psplatform + from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA + from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA + from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA + from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA + from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA + from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA + from ._pswindows import CONN_DELETE_TCB # NOQA + from ._pswindows import IOPRIO_HIGH # NOQA + from ._pswindows import IOPRIO_LOW # NOQA + from ._pswindows import IOPRIO_NORMAL # NOQA + from ._pswindows import IOPRIO_VERYLOW # NOQA + +elif MACOS: + from . import _psosx as _psplatform + +elif BSD: + from . import _psbsd as _psplatform + +elif SUNOS: + from . import _pssunos as _psplatform + from ._pssunos import CONN_BOUND # NOQA + from ._pssunos import CONN_IDLE # NOQA + + # This is public writable API which is read from _pslinux.py and + # _pssunos.py via sys.modules. + PROCFS_PATH = "/proc" + +elif AIX: + from . import _psaix as _psplatform + + # This is public API and it will be retrieved from _pslinux.py + # via sys.modules. + PROCFS_PATH = "/proc" + +else: # pragma: no cover + raise NotImplementedError('platform %s is not supported' % sys.platform) + + +# fmt: off +__all__ = [ + # exceptions + "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied", + "TimeoutExpired", + + # constants + "version_info", "__version__", + + "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP", + "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD", + "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED", + "STATUS_PARKED", + + "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", + "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", + "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE", + # "CONN_IDLE", "CONN_BOUND", + + "AF_LINK", + + "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN", + + "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED", + + "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX", + "SUNOS", "WINDOWS", "AIX", + + # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA", + # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE", + # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE", + # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING", + + # classes + "Process", "Popen", + + # functions + "pid_exists", "pids", "process_iter", "wait_procs", # proc + "virtual_memory", "swap_memory", # memory + "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu + "cpu_stats", # "cpu_freq", "getloadavg" + "net_io_counters", "net_connections", "net_if_addrs", # network + "net_if_stats", + "disk_io_counters", "disk_partitions", "disk_usage", # disk + # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors + "users", "boot_time", # others +] +# fmt: on + + +__all__.extend(_psplatform.__extra__all__) + +# Linux, FreeBSD +if hasattr(_psplatform.Process, "rlimit"): + # Populate global namespace with RLIM* constants. + from . 
import _psutil_posix + + _globals = globals() + _name = None + for _name in dir(_psutil_posix): + if _name.startswith('RLIM') and _name.isupper(): + _globals[_name] = getattr(_psutil_posix, _name) + __all__.append(_name) + del _globals, _name + +AF_LINK = _psplatform.AF_LINK + +__author__ = "Giampaolo Rodola'" +__version__ = "5.9.8" +version_info = tuple([int(num) for num in __version__.split('.')]) + +_timer = getattr(time, 'monotonic', time.time) +_TOTAL_PHYMEM = None +_LOWEST_PID = None +_SENTINEL = object() + +# Sanity check in case the user messed up with psutil installation +# or did something weird with sys.path. In this case we might end +# up importing a python module using a C extension module which +# was compiled for a different version of psutil. +# We want to prevent that by failing sooner rather than later. +# See: https://github.com/giampaolo/psutil/issues/564 +if int(__version__.replace('.', '')) != getattr( + _psplatform.cext, 'version', None +): + msg = "version conflict: %r C extension " % _psplatform.cext.__file__ + msg += "module was built for another version of psutil" + if hasattr(_psplatform.cext, 'version'): + msg += " (%s instead of %s)" % ( + '.'.join([x for x in str(_psplatform.cext.version)]), + __version__, + ) + else: + msg += " (different than %s)" % __version__ + msg += "; you may try to 'pip uninstall psutil', manually remove %s" % ( + getattr( + _psplatform.cext, + "__file__", + "the existing psutil install directory", + ) + ) + msg += " or clean the virtual env somehow, then reinstall" + raise ImportError(msg) + + +# ===================================================================== +# --- Utils +# ===================================================================== + + +if hasattr(_psplatform, 'ppid_map'): + # Faster version (Windows and Linux). + _ppid_map = _psplatform.ppid_map +else: # pragma: no cover + + def _ppid_map(): + """Return a {pid: ppid, ...} dict for all running processes in + one shot. Used to speed up Process.children(). + """ + ret = {} + for pid in pids(): + try: + ret[pid] = _psplatform.Process(pid).ppid() + except (NoSuchProcess, ZombieProcess): + pass + return ret + + +def _pprint_secs(secs): + """Format seconds in a human readable form.""" + now = time.time() + secs_ago = int(now - secs) + fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S" + return datetime.datetime.fromtimestamp(secs).strftime(fmt) + + +# ===================================================================== +# --- Process class +# ===================================================================== + + +class Process(object): # noqa: UP004 + """Represents an OS process with the given PID. + If PID is omitted current process PID (os.getpid()) is used. + Raise NoSuchProcess if PID does not exist. + + Note that most of the methods of this class do not make sure + the PID of the process being queried has been reused over time. + That means you might end up retrieving an information referring + to another process in case the original one this instance + refers to is gone in the meantime. 
+ + The only exceptions for which process identity is pre-emptively + checked and guaranteed are: + + - parent() + - children() + - nice() (set) + - ionice() (set) + - rlimit() (set) + - cpu_affinity (set) + - suspend() + - resume() + - send_signal() + - terminate() + - kill() + + To prevent this problem for all other methods you can: + - use is_running() before querying the process + - if you're continuously iterating over a set of Process + instances use process_iter() which pre-emptively checks + process identity for every yielded instance + """ + + def __init__(self, pid=None): + self._init(pid) + + def _init(self, pid, _ignore_nsp=False): + if pid is None: + pid = os.getpid() + else: + if not _PY3 and not isinstance(pid, (int, long)): + msg = "pid must be an integer (got %r)" % pid + raise TypeError(msg) + if pid < 0: + msg = "pid must be a positive integer (got %s)" % pid + raise ValueError(msg) + try: + _psplatform.cext.check_pid_range(pid) + except OverflowError: + msg = "process PID out of range (got %s)" % pid + raise NoSuchProcess(pid, msg=msg) + + self._pid = pid + self._name = None + self._exe = None + self._create_time = None + self._gone = False + self._pid_reused = False + self._hash = None + self._lock = threading.RLock() + # used for caching on Windows only (on POSIX ppid may change) + self._ppid = None + # platform-specific modules define an _psplatform.Process + # implementation class + self._proc = _psplatform.Process(pid) + self._last_sys_cpu_times = None + self._last_proc_cpu_times = None + self._exitcode = _SENTINEL + # cache creation time for later use in is_running() method + try: + self.create_time() + except AccessDenied: + # We should never get here as AFAIK we're able to get + # process creation time on all platforms even as a + # limited user. + pass + except ZombieProcess: + # Zombies can still be queried by this class (although + # not always) and pids() return them so just go on. + pass + except NoSuchProcess: + if not _ignore_nsp: + msg = "process PID not found" + raise NoSuchProcess(pid, msg=msg) + else: + self._gone = True + # This pair is supposed to identify a Process instance + # univocally over time (the PID alone is not enough as + # it might refer to a process whose PID has been reused). + # This will be used later in __eq__() and is_running(). + self._ident = (self.pid, self._create_time) + + def __str__(self): + info = collections.OrderedDict() + info["pid"] = self.pid + if self._name: + info['name'] = self._name + with self.oneshot(): + try: + info["name"] = self.name() + info["status"] = self.status() + except ZombieProcess: + info["status"] = "zombie" + except NoSuchProcess: + info["status"] = "terminated" + except AccessDenied: + pass + if self._exitcode not in (_SENTINEL, None): + info["exitcode"] = self._exitcode + if self._create_time is not None: + info['started'] = _pprint_secs(self._create_time) + return "%s.%s(%s)" % ( + self.__class__.__module__, + self.__class__.__name__, + ", ".join(["%s=%r" % (k, v) for k, v in info.items()]), + ) + + __repr__ = __str__ + + def __eq__(self, other): + # Test for equality with another Process object based + # on PID and creation time. + if not isinstance(other, Process): + return NotImplemented + if OPENBSD or NETBSD: # pragma: no cover + # Zombie processes on Open/NetBSD have a creation time of + # 0.0. This covers the case when a process started normally + # (so it has a ctime), then it turned into a zombie. It's + # important to do this because is_running() depends on + # __eq__. 
+ pid1, ctime1 = self._ident + pid2, ctime2 = other._ident + if pid1 == pid2: + if ctime1 and not ctime2: + try: + return self.status() == STATUS_ZOMBIE + except Error: + pass + return self._ident == other._ident + + def __ne__(self, other): + return not self == other + + def __hash__(self): + if self._hash is None: + self._hash = hash(self._ident) + return self._hash + + def _raise_if_pid_reused(self): + """Raises NoSuchProcess in case process PID has been reused.""" + if not self.is_running() and self._pid_reused: + # We may directly raise NSP in here already if PID is just + # not running, but I prefer NSP to be raised naturally by + # the actual Process API call. This way unit tests will tell + # us if the API is broken (aka don't raise NSP when it + # should). We also remain consistent with all other "get" + # APIs which don't use _raise_if_pid_reused(). + msg = "process no longer exists and its PID has been reused" + raise NoSuchProcess(self.pid, self._name, msg=msg) + + @property + def pid(self): + """The process PID.""" + return self._pid + + # --- utility methods + + @contextlib.contextmanager + def oneshot(self): + """Utility context manager which considerably speeds up the + retrieval of multiple process information at the same time. + + Internally different process info (e.g. name, ppid, uids, + gids, ...) may be fetched by using the same routine, but + only one information is returned and the others are discarded. + When using this context manager the internal routine is + executed once (in the example below on name()) and the + other info are cached. + + The cache is cleared when exiting the context manager block. + The advice is to use this every time you retrieve more than + one information about the process. If you're lucky, you'll + get a hell of a speedup. + + >>> import psutil + >>> p = psutil.Process() + >>> with p.oneshot(): + ... p.name() # collect multiple info + ... p.cpu_times() # return cached value + ... p.cpu_percent() # return cached value + ... p.create_time() # return cached value + ... + >>> + """ + with self._lock: + if hasattr(self, "_cache"): + # NOOP: this covers the use case where the user enters the + # context twice: + # + # >>> with p.oneshot(): + # ... with p.oneshot(): + # ... + # + # Also, since as_dict() internally uses oneshot() + # I expect that the code below will be a pretty common + # "mistake" that the user will make, so let's guard + # against that: + # + # >>> with p.oneshot(): + # ... p.as_dict() + # ... + yield + else: + try: + # cached in case cpu_percent() is used + self.cpu_times.cache_activate(self) + # cached in case memory_percent() is used + self.memory_info.cache_activate(self) + # cached in case parent() is used + self.ppid.cache_activate(self) + # cached in case username() is used + if POSIX: + self.uids.cache_activate(self) + # specific implementation cache + self._proc.oneshot_enter() + yield + finally: + self.cpu_times.cache_deactivate(self) + self.memory_info.cache_deactivate(self) + self.ppid.cache_deactivate(self) + if POSIX: + self.uids.cache_deactivate(self) + self._proc.oneshot_exit() + + def as_dict(self, attrs=None, ad_value=None): + """Utility method returning process information as a + hashable dictionary. + If *attrs* is specified it must be a list of strings + reflecting available Process class' attribute names + (e.g. ['cpu_times', 'name']) else all public (read + only) attributes are assumed. 
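+
+ A minimal usage sketch (output values are illustrative):
+
+ >>> import psutil
+ >>> psutil.Process().as_dict(attrs=['pid', 'name'])
+ {'pid': 4567, 'name': 'python'}
+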
+ *ad_value* is the value which gets assigned in case + AccessDenied or ZombieProcess exception is raised when + retrieving that particular process information. + """ + valid_names = _as_dict_attrnames + if attrs is not None: + if not isinstance(attrs, (list, tuple, set, frozenset)): + msg = "invalid attrs type %s" % type(attrs) + raise TypeError(msg) + attrs = set(attrs) + invalid_names = attrs - valid_names + if invalid_names: + msg = "invalid attr name%s %s" % ( + "s" if len(invalid_names) > 1 else "", + ", ".join(map(repr, invalid_names)), + ) + raise ValueError(msg) + + retdict = {} + ls = attrs or valid_names + with self.oneshot(): + for name in ls: + try: + if name == 'pid': + ret = self.pid + else: + meth = getattr(self, name) + ret = meth() + except (AccessDenied, ZombieProcess): + ret = ad_value + except NotImplementedError: + # in case of not implemented functionality (may happen + # on old or exotic systems) we want to crash only if + # the user explicitly asked for that particular attr + if attrs: + raise + continue + retdict[name] = ret + return retdict + + def parent(self): + """Return the parent process as a Process object pre-emptively + checking whether PID has been reused. + If no parent is known return None. + """ + lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0] + if self.pid == lowest_pid: + return None + ppid = self.ppid() + if ppid is not None: + ctime = self.create_time() + try: + parent = Process(ppid) + if parent.create_time() <= ctime: + return parent + # ...else ppid has been reused by another process + except NoSuchProcess: + pass + + def parents(self): + """Return the parents of this process as a list of Process + instances. If no parents are known return an empty list. + """ + parents = [] + proc = self.parent() + while proc is not None: + parents.append(proc) + proc = proc.parent() + return parents + + def is_running(self): + """Return whether this process is running. + It also checks if PID has been reused by another process in + which case return False. + """ + if self._gone or self._pid_reused: + return False + try: + # Checking if PID is alive is not enough as the PID might + # have been reused by another process: we also want to + # verify process identity. + # Process identity / uniqueness over time is guaranteed by + # (PID + creation time) and that is verified in __eq__. + self._pid_reused = self != Process(self.pid) + return not self._pid_reused + except ZombieProcess: + # We should never get here as it's already handled in + # Process.__init__; here just for extra safety. + return True + except NoSuchProcess: + self._gone = True + return False + + # --- actual API + + @memoize_when_activated + def ppid(self): + """The process parent PID. + On Windows the return value is cached after first call. + """ + # On POSIX we don't want to cache the ppid as it may unexpectedly + # change to 1 (init) in case this process turns into a zombie: + # https://github.com/giampaolo/psutil/issues/321 + # http://stackoverflow.com/questions/356722/ + + # XXX should we check creation time here rather than in + # Process.parent()? + self._raise_if_pid_reused() + if POSIX: + return self._proc.ppid() + else: # pragma: no cover + self._ppid = self._ppid or self._proc.ppid() + return self._ppid + + def name(self): + """The process name. 
The return value is cached after first call.""" + # Process name is only cached on Windows as on POSIX it may + # change, see: + # https://github.com/giampaolo/psutil/issues/692 + if WINDOWS and self._name is not None: + return self._name + name = self._proc.name() + if POSIX and len(name) >= 15: + # On UNIX the name gets truncated to the first 15 characters. + # If it matches the first part of the cmdline we return that + # one instead because it's usually more explicative. + # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon". + try: + cmdline = self.cmdline() + except (AccessDenied, ZombieProcess): + # Just pass and return the truncated name: it's better + # than nothing. Note: there are actual cases where a + # zombie process can return a name() but not a + # cmdline(), see: + # https://github.com/giampaolo/psutil/issues/2239 + pass + else: + if cmdline: + extended_name = os.path.basename(cmdline[0]) + if extended_name.startswith(name): + name = extended_name + self._name = name + self._proc._name = name + return name + + def exe(self): + """The process executable as an absolute path. + May also be an empty string. + The return value is cached after first call. + """ + + def guess_it(fallback): + # try to guess exe from cmdline[0] in absence of a native + # exe representation + cmdline = self.cmdline() + if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'): + exe = cmdline[0] # the possible exe + # Attempt to guess only in case of an absolute path. + # It is not safe otherwise as the process might have + # changed cwd. + if ( + os.path.isabs(exe) + and os.path.isfile(exe) + and os.access(exe, os.X_OK) + ): + return exe + if isinstance(fallback, AccessDenied): + raise fallback + return fallback + + if self._exe is None: + try: + exe = self._proc.exe() + except AccessDenied as err: + return guess_it(fallback=err) + else: + if not exe: + # underlying implementation can legitimately return an + # empty string; if that's the case we don't want to + # raise AD while guessing from the cmdline + try: + exe = guess_it(fallback=exe) + except AccessDenied: + pass + self._exe = exe + return self._exe + + def cmdline(self): + """The command line this process has been called with.""" + return self._proc.cmdline() + + def status(self): + """The process current status as a STATUS_* constant.""" + try: + return self._proc.status() + except ZombieProcess: + return STATUS_ZOMBIE + + def username(self): + """The name of the user that owns the process. + On UNIX this is calculated by using *real* process uid. + """ + if POSIX: + if pwd is None: + # might happen if python was installed from sources + msg = "requires pwd module shipped with standard python" + raise ImportError(msg) + real_uid = self.uids().real + try: + return pwd.getpwuid(real_uid).pw_name + except KeyError: + # the uid can't be resolved by the system + return str(real_uid) + else: + return self._proc.username() + + def create_time(self): + """The process creation time as a floating point number + expressed in seconds since the epoch. + The return value is cached after first call. 
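+
+ A minimal usage sketch (the float is illustrative):
+
+ >>> import psutil
+ >>> psutil.Process().create_time()
+ 1565374393.47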
+
+ """
+ if self._create_time is None:
+ self._create_time = self._proc.create_time()
+ return self._create_time
+
+ def cwd(self):
+ """Process current working directory as an absolute path."""
+ return self._proc.cwd()
+
+ def nice(self, value=None):
+ """Get or set process niceness (priority)."""
+ if value is None:
+ return self._proc.nice_get()
+ else:
+ self._raise_if_pid_reused()
+ self._proc.nice_set(value)
+
+ if POSIX:
+
+ @memoize_when_activated
+ def uids(self):
+ """Return process UIDs as a (real, effective, saved)
+ namedtuple.
+ """
+ return self._proc.uids()
+
+ def gids(self):
+ """Return process GIDs as a (real, effective, saved)
+ namedtuple.
+ """
+ return self._proc.gids()
+
+ def terminal(self):
+ """The terminal associated with this process, if any,
+ else None.
+ """
+ return self._proc.terminal()
+
+ def num_fds(self):
+ """Return the number of file descriptors opened by this
+ process (POSIX only).
+ """
+ return self._proc.num_fds()
+
+ # Linux, BSD, AIX and Windows only
+ if hasattr(_psplatform.Process, "io_counters"):
+
+ def io_counters(self):
+ """Return process I/O statistics as a
+ (read_count, write_count, read_bytes, write_bytes)
+ namedtuple.
+ Those are the number of read/write calls performed and the
+ amount of bytes read and written by the process.
+ """
+ return self._proc.io_counters()
+
+ # Linux and Windows
+ if hasattr(_psplatform.Process, "ionice_get"):
+
+ def ionice(self, ioclass=None, value=None):
+ """Get or set process I/O niceness (priority).
+
+ On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
+ *value* is a number which goes from 0 to 7. The higher the
+ value, the lower the I/O priority of the process.
+
+ On Windows only *ioclass* is used and it can be set to 2
+ (normal), 1 (low) or 0 (very low).
+
+ Available on Linux and Windows > Vista only.
+ """
+ if ioclass is None:
+ if value is not None:
+ msg = "'ioclass' argument must be specified"
+ raise ValueError(msg)
+ return self._proc.ionice_get()
+ else:
+ self._raise_if_pid_reused()
+ return self._proc.ionice_set(ioclass, value)
+
+ # Linux / FreeBSD only
+ if hasattr(_psplatform.Process, "rlimit"):
+
+ def rlimit(self, resource, limits=None):
+ """Get or set process resource limits as a (soft, hard)
+ tuple.
+
+ *resource* is one of the RLIMIT_* constants.
+ *limits* is supposed to be a (soft, hard) tuple.
+
+ See "man prlimit" for further info.
+ Available on Linux and FreeBSD only.
+ """
+ if limits is not None:
+ self._raise_if_pid_reused()
+ return self._proc.rlimit(resource, limits)
+
+ # Windows, Linux and FreeBSD only
+ if hasattr(_psplatform.Process, "cpu_affinity_get"):
+
+ def cpu_affinity(self, cpus=None):
+ """Get or set process CPU affinity.
+ If specified, *cpus* must be a list of CPUs for which you
+ want to set the affinity (e.g. [0, 1]).
+ If an empty list is passed, all eligible CPUs are assumed
+ (and set).
+ (Windows, Linux and FreeBSD only).
+ """
+ if cpus is None:
+ return sorted(set(self._proc.cpu_affinity_get()))
+ else:
+ self._raise_if_pid_reused()
+ if not cpus:
+ if hasattr(self._proc, "_get_eligible_cpus"):
+ cpus = self._proc._get_eligible_cpus()
+ else:
+ cpus = tuple(range(len(cpu_times(percpu=True))))
+ self._proc.cpu_affinity_set(list(set(cpus)))
+
+ # Linux, FreeBSD, SunOS
+ if hasattr(_psplatform.Process, "cpu_num"):
+
+ def cpu_num(self):
+ """Return what CPU this process is currently running on.
+ The returned number should be <= psutil.cpu_count()
+ and <= len(psutil.cpu_percent(percpu=True)).
+ It may be used in conjunction with
+ psutil.cpu_percent(percpu=True) to observe the system
+ workload distributed across CPUs.
+ """
+ return self._proc.cpu_num()
+
+ # All platforms have it, but maybe not in the future.
+ if hasattr(_psplatform.Process, "environ"):
+
+ def environ(self):
+ """The environment variables of the process as a dict. Note: this
+ might not reflect changes made after the process started.
+ """
+ return self._proc.environ()
+
+ if WINDOWS:
+
+ def num_handles(self):
+ """Return the number of handles opened by this process
+ (Windows only).
+ """
+ return self._proc.num_handles()
+
+ def num_ctx_switches(self):
+ """Return the number of voluntary and involuntary context
+ switches performed by this process.
+ """
+ return self._proc.num_ctx_switches()
+
+ def num_threads(self):
+ """Return the number of threads used by this process."""
+ return self._proc.num_threads()
+
+ if hasattr(_psplatform.Process, "threads"):
+
+ def threads(self):
+ """Return threads opened by process as a list of
+ (id, user_time, system_time) namedtuples representing
+ thread id and thread CPU times (user/system).
+ On OpenBSD this method requires root access.
+ """
+ return self._proc.threads()
+
+ def children(self, recursive=False):
+ """Return the children of this process as a list of Process
+ instances, pre-emptively checking whether PID has been reused.
+ If *recursive* is True return all descendants (children,
+ grandchildren and so on).
+
+ Example (A == this process):
+
+ A ─┐
+ │
+ ├─ B (child) ─┐
+ │ └─ X (grandchild) ─┐
+ │ └─ Y (great grandchild)
+ ├─ C (child)
+ └─ D (child)
+
+ >>> import psutil
+ >>> p = psutil.Process()
+ >>> p.children()
+ B, C, D
+ >>> p.children(recursive=True)
+ B, X, Y, C, D
+
+ Note that in the example above if process X disappears
+ process Y won't be listed as the reference to process A
+ is lost.
+ """
+ self._raise_if_pid_reused()
+ ppid_map = _ppid_map()
+ ret = []
+ if not recursive:
+ for pid, ppid in ppid_map.items():
+ if ppid == self.pid:
+ try:
+ child = Process(pid)
+ # if child happens to be older than its parent
+ # (self) it means child's PID has been reused
+ if self.create_time() <= child.create_time():
+ ret.append(child)
+ except (NoSuchProcess, ZombieProcess):
+ pass
+ else:
+ # Construct a {pid: [child pids]} dict
+ reverse_ppid_map = collections.defaultdict(list)
+ for pid, ppid in ppid_map.items():
+ reverse_ppid_map[ppid].append(pid)
+ # Recursively traverse that dict, starting from self.pid,
+ # such that we only call Process() on actual children
+ seen = set()
+ stack = [self.pid]
+ while stack:
+ pid = stack.pop()
+ if pid in seen:
+ # Since pids can be reused while the ppid_map is
+ # constructed, there may be rare instances where
+ # there's a cycle in the recorded process "tree".
+ continue
+ seen.add(pid)
+ for child_pid in reverse_ppid_map[pid]:
+ try:
+ child = Process(child_pid)
+ # if child happens to be older than its parent
+ # (self) it means child's PID has been reused
+ intime = self.create_time() <= child.create_time()
+ if intime:
+ ret.append(child)
+ stack.append(child_pid)
+ except (NoSuchProcess, ZombieProcess):
+ pass
+ return ret
+
+ def cpu_percent(self, interval=None):
+ """Return a float representing the current process CPU
+ utilization as a percentage.
+
+ When *interval* is 0.0 or None (default) compares process times
+ to system CPU times elapsed since last call, returning
+ immediately (non-blocking). That means that the first time
+ this is called it will return a meaningless 0.0 value which
+ you should ignore.
+ + When *interval* is > 0.0 compares process times to system CPU + times elapsed before and after the interval (blocking). + + In this case is recommended for accuracy that this function + be called with at least 0.1 seconds between calls. + + A value > 100.0 can be returned in case of processes running + multiple threads on different CPU cores. + + The returned value is explicitly NOT split evenly between + all available logical CPUs. This means that a busy loop process + running on a system with 2 logical CPUs will be reported as + having 100% CPU utilization instead of 50%. + + Examples: + + >>> import psutil + >>> p = psutil.Process(os.getpid()) + >>> # blocking + >>> p.cpu_percent(interval=1) + 2.0 + >>> # non-blocking (percentage since last call) + >>> p.cpu_percent(interval=None) + 2.9 + >>> + """ + blocking = interval is not None and interval > 0.0 + if interval is not None and interval < 0: + msg = "interval is not positive (got %r)" % interval + raise ValueError(msg) + num_cpus = cpu_count() or 1 + + def timer(): + return _timer() * num_cpus + + if blocking: + st1 = timer() + pt1 = self._proc.cpu_times() + time.sleep(interval) + st2 = timer() + pt2 = self._proc.cpu_times() + else: + st1 = self._last_sys_cpu_times + pt1 = self._last_proc_cpu_times + st2 = timer() + pt2 = self._proc.cpu_times() + if st1 is None or pt1 is None: + self._last_sys_cpu_times = st2 + self._last_proc_cpu_times = pt2 + return 0.0 + + delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system) + delta_time = st2 - st1 + # reset values for next call in case of interval == None + self._last_sys_cpu_times = st2 + self._last_proc_cpu_times = pt2 + + try: + # This is the utilization split evenly between all CPUs. + # E.g. a busy loop process on a 2-CPU-cores system at this + # point is reported as 50% instead of 100%. + overall_cpus_percent = (delta_proc / delta_time) * 100 + except ZeroDivisionError: + # interval was too low + return 0.0 + else: + # Note 1: + # in order to emulate "top" we multiply the value for the num + # of CPU cores. This way the busy process will be reported as + # having 100% (or more) usage. + # + # Note 2: + # taskmgr.exe on Windows differs in that it will show 50% + # instead. + # + # Note 3: + # a percentage > 100 is legitimate as it can result from a + # process with multiple threads running on different CPU + # cores (top does the same), see: + # http://stackoverflow.com/questions/1032357 + # https://github.com/giampaolo/psutil/issues/474 + single_cpu_percent = overall_cpus_percent * num_cpus + return round(single_cpu_percent, 1) + + @memoize_when_activated + def cpu_times(self): + """Return a (user, system, children_user, children_system) + namedtuple representing the accumulated process time, in + seconds. + This is similar to os.times() but per-process. + On macOS and Windows children_user and children_system are + always set to 0. + """ + return self._proc.cpu_times() + + @memoize_when_activated + def memory_info(self): + """Return a namedtuple with variable fields depending on the + platform, representing memory information about the process. + + The "portable" fields available on all platforms are `rss` and `vms`. + + All numbers are expressed in bytes. 
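+
+ A minimal usage sketch (the number is illustrative):
+
+ >>> import psutil
+ >>> psutil.Process().memory_info().rss # resident set size, in bytes
+ 10039296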
+ """
+ return self._proc.memory_info()
+
+ @_common.deprecated_method(replacement="memory_info")
+ def memory_info_ex(self):
+ return self.memory_info()
+
+ def memory_full_info(self):
+ """This method returns the same information as memory_info(),
+ plus, on some platforms (Linux, macOS, Windows), also provides
+ additional metrics (USS, PSS and swap).
+ The additional metrics provide a better representation of actual
+ process memory usage.
+
+ Namely USS is the memory which is unique to a process and which
+ would be freed if the process was terminated right now.
+
+ It does so by passing through the whole process address space.
+ As such it usually requires higher user privileges than
+ memory_info() and is considerably slower.
+ """
+ return self._proc.memory_full_info()
+
+ def memory_percent(self, memtype="rss"):
+ """Compare process memory to total physical system memory and
+ calculate process memory utilization as a percentage.
+ *memtype* argument is a string that dictates what type of
+ process memory you want to compare against (defaults to "rss").
+ The list of available strings can be obtained like this:
+
+ >>> psutil.Process().memory_info()._fields
+ ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
+ """
+ valid_types = list(_psplatform.pfullmem._fields)
+ if memtype not in valid_types:
+ msg = "invalid memtype %r; valid types are %r" % (
+ memtype,
+ tuple(valid_types),
+ )
+ raise ValueError(msg)
+ fun = (
+ self.memory_info
+ if memtype in _psplatform.pmem._fields
+ else self.memory_full_info
+ )
+ metrics = fun()
+ value = getattr(metrics, memtype)
+
+ # use cached value if available
+ total_phymem = _TOTAL_PHYMEM or virtual_memory().total
+ if not total_phymem > 0:
+ # we should never get here
+ msg = (
+ "can't calculate process memory percent because total physical"
+ " system memory is not positive (%r)" % (total_phymem)
+ )
+ raise ValueError(msg)
+ return (value / float(total_phymem)) * 100
+
+ if hasattr(_psplatform.Process, "memory_maps"):
+
+ def memory_maps(self, grouped=True):
+ """Return process' mapped memory regions as a list of namedtuples
+ whose fields are variable depending on the platform.
+
+ If *grouped* is True the mapped regions with the same 'path'
+ are grouped together and the different memory fields are summed.
+
+ If *grouped* is False every mapped region is shown as a single
+ entity and the namedtuple will also include the mapped region's
+ address space ('addr') and permission set ('perms').
+ """
+ it = self._proc.memory_maps()
+ if grouped:
+ d = {}
+ for tupl in it:
+ path = tupl[2]
+ nums = tupl[3:]
+ try:
+ d[path] = map(lambda x, y: x + y, d[path], nums)
+ except KeyError:
+ d[path] = nums
+ nt = _psplatform.pmmap_grouped
+ return [nt(path, *d[path]) for path in d] # NOQA
+ else:
+ nt = _psplatform.pmmap_ext
+ return [nt(*x) for x in it]
+
+ def open_files(self):
+ """Return files opened by process as a list of
+ (path, fd) namedtuples including the absolute file name
+ and file descriptor number.
+ """
+ return self._proc.open_files()
+
+ def connections(self, kind='inet'):
+ """Return socket connections opened by process as a list of
+ (fd, family, type, laddr, raddr, status) namedtuples.
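+
+ A minimal usage sketch (the namedtuple shown is illustrative
+ and abbreviated):
+
+ >>> import psutil
+ >>> psutil.Process().connections(kind='tcp')
+ [pconn(fd=115, family=2, type=1, ...)]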
+ The *kind* parameter filters for connections that match the + following criteria: + + +------------+----------------------------------------------------+ + | Kind Value | Connections using | + +------------+----------------------------------------------------+ + | inet | IPv4 and IPv6 | + | inet4 | IPv4 | + | inet6 | IPv6 | + | tcp | TCP | + | tcp4 | TCP over IPv4 | + | tcp6 | TCP over IPv6 | + | udp | UDP | + | udp4 | UDP over IPv4 | + | udp6 | UDP over IPv6 | + | unix | UNIX socket (both UDP and TCP protocols) | + | all | the sum of all the possible families and protocols | + +------------+----------------------------------------------------+ + """ + return self._proc.connections(kind) + + # --- signals + + if POSIX: + + def _send_signal(self, sig): + assert not self.pid < 0, self.pid + self._raise_if_pid_reused() + if self.pid == 0: + # see "man 2 kill" + msg = ( + "preventing sending signal to process with PID 0 as it " + "would affect every process in the process group of the " + "calling process (os.getpid()) instead of PID 0" + ) + raise ValueError(msg) + try: + os.kill(self.pid, sig) + except ProcessLookupError: + if OPENBSD and pid_exists(self.pid): + # We do this because os.kill() lies in case of + # zombie processes. + raise ZombieProcess(self.pid, self._name, self._ppid) + else: + self._gone = True + raise NoSuchProcess(self.pid, self._name) + except PermissionError: + raise AccessDenied(self.pid, self._name) + + def send_signal(self, sig): + """Send a signal *sig* to process pre-emptively checking + whether PID has been reused (see signal module constants) . + On Windows only SIGTERM is valid and is treated as an alias + for kill(). + """ + if POSIX: + self._send_signal(sig) + else: # pragma: no cover + self._raise_if_pid_reused() + if sig != signal.SIGTERM and not self.is_running(): + msg = "process no longer exists" + raise NoSuchProcess(self.pid, self._name, msg=msg) + self._proc.send_signal(sig) + + def suspend(self): + """Suspend process execution with SIGSTOP pre-emptively checking + whether PID has been reused. + On Windows this has the effect of suspending all process threads. + """ + if POSIX: + self._send_signal(signal.SIGSTOP) + else: # pragma: no cover + self._raise_if_pid_reused() + self._proc.suspend() + + def resume(self): + """Resume process execution with SIGCONT pre-emptively checking + whether PID has been reused. + On Windows this has the effect of resuming all process threads. + """ + if POSIX: + self._send_signal(signal.SIGCONT) + else: # pragma: no cover + self._raise_if_pid_reused() + self._proc.resume() + + def terminate(self): + """Terminate the process with SIGTERM pre-emptively checking + whether PID has been reused. + On Windows this is an alias for kill(). + """ + if POSIX: + self._send_signal(signal.SIGTERM) + else: # pragma: no cover + self._raise_if_pid_reused() + self._proc.kill() + + def kill(self): + """Kill the current process with SIGKILL pre-emptively checking + whether PID has been reused. + """ + if POSIX: + self._send_signal(signal.SIGKILL) + else: # pragma: no cover + self._raise_if_pid_reused() + self._proc.kill() + + def wait(self, timeout=None): + """Wait for process to terminate and, if process is a children + of os.getpid(), also return its exit code, else None. + On Windows there's no such limitation (exit code is always + returned). + + If the process is already terminated immediately return None + instead of raising NoSuchProcess. + + If *timeout* (in seconds) is specified and process is still + alive raise TimeoutExpired. 
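+
+ A minimal usage sketch (assumes a POSIX "sleep" executable; the
+ exit code shown is illustrative):
+
+ >>> import psutil
+ >>> p = psutil.Popen(["sleep", "2"])
+ >>> p.wait(timeout=5)
+ 0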
+ + To wait for multiple Process(es) use psutil.wait_procs(). + """ + if timeout is not None and not timeout >= 0: + msg = "timeout must be a positive integer" + raise ValueError(msg) + if self._exitcode is not _SENTINEL: + return self._exitcode + self._exitcode = self._proc.wait(timeout) + return self._exitcode + + +# The valid attr names which can be processed by Process.as_dict(). +# fmt: off +_as_dict_attrnames = set( + [x for x in dir(Process) if not x.startswith('_') and x not in + {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait', + 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit', + 'memory_info_ex', 'oneshot'}]) +# fmt: on + + +# ===================================================================== +# --- Popen class +# ===================================================================== + + +class Popen(Process): + """Same as subprocess.Popen, but in addition it provides all + psutil.Process methods in a single class. + For the following methods which are common to both classes, psutil + implementation takes precedence: + + * send_signal() + * terminate() + * kill() + + This is done in order to avoid killing another process in case its + PID has been reused, fixing BPO-6973. + + >>> import psutil + >>> from subprocess import PIPE + >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE) + >>> p.name() + 'python' + >>> p.uids() + user(real=1000, effective=1000, saved=1000) + >>> p.username() + 'giampaolo' + >>> p.communicate() + ('hi', None) + >>> p.terminate() + >>> p.wait(timeout=2) + 0 + >>> + """ + + def __init__(self, *args, **kwargs): + # Explicitly avoid to raise NoSuchProcess in case the process + # spawned by subprocess.Popen terminates too quickly, see: + # https://github.com/giampaolo/psutil/issues/193 + self.__subproc = subprocess.Popen(*args, **kwargs) + self._init(self.__subproc.pid, _ignore_nsp=True) + + def __dir__(self): + return sorted(set(dir(Popen) + dir(subprocess.Popen))) + + def __enter__(self): + if hasattr(self.__subproc, '__enter__'): + self.__subproc.__enter__() + return self + + def __exit__(self, *args, **kwargs): + if hasattr(self.__subproc, '__exit__'): + return self.__subproc.__exit__(*args, **kwargs) + else: + if self.stdout: + self.stdout.close() + if self.stderr: + self.stderr.close() + try: + # Flushing a BufferedWriter may raise an error. + if self.stdin: + self.stdin.close() + finally: + # Wait for the process to terminate, to avoid zombies. + self.wait() + + def __getattribute__(self, name): + try: + return object.__getattribute__(self, name) + except AttributeError: + try: + return object.__getattribute__(self.__subproc, name) + except AttributeError: + msg = "%s instance has no attribute '%s'" % ( + self.__class__.__name__, + name, + ) + raise AttributeError(msg) + + def wait(self, timeout=None): + if self.__subproc.returncode is not None: + return self.__subproc.returncode + ret = super(Popen, self).wait(timeout) # noqa + self.__subproc.returncode = ret + return ret + + +# ===================================================================== +# --- system processes related functions +# ===================================================================== + + +def pids(): + """Return a list of current running PIDs.""" + global _LOWEST_PID + ret = sorted(_psplatform.pids()) + _LOWEST_PID = ret[0] + return ret + + +def pid_exists(pid): + """Return True if given PID exists in the current process list. + This is faster than doing "pid in psutil.pids()" and + should be preferred. 
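+
+ A minimal usage sketch:
+
+ >>> import os, psutil
+ >>> psutil.pid_exists(os.getpid())
+ True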
+ """
+ if pid < 0:
+ return False
+ elif pid == 0 and POSIX:
+ # On POSIX we use os.kill() to determine PID existence.
+ # According to "man 2 kill" PID 0 has a special meaning
+ # though: it refers to "every process in the process group
+ # of the calling process", and that is not what we want
+ # to do here.
+ return pid in pids()
+ else:
+ return _psplatform.pid_exists(pid)
+
+
+_pmap = {}
+
+
+def process_iter(attrs=None, ad_value=None):
+ """Return a generator yielding a Process instance for all
+ running processes.
+
+ Every new Process instance is only created once and then cached
+ into an internal table which is updated every time this is used.
+
+ Cached Process instances are checked for identity so that you're
+ safe in case a PID has been reused by another process, in which
+ case the cached instance is updated.
+
+ The sorting order in which processes are yielded is based on
+ their PIDs.
+
+ *attrs* and *ad_value* have the same meaning as in
+ Process.as_dict(). If *attrs* is specified as_dict() is called
+ and the resulting dict is stored as an 'info' attribute attached
+ to the returned Process instance.
+ If *attrs* is an empty list it will retrieve all process info
+ (slow).
+ """
+ global _pmap
+
+ def add(pid):
+ proc = Process(pid)
+ if attrs is not None:
+ proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
+ pmap[proc.pid] = proc
+ return proc
+
+ def remove(pid):
+ pmap.pop(pid, None)
+
+ pmap = _pmap.copy()
+ a = set(pids())
+ b = set(pmap.keys())
+ new_pids = a - b
+ gone_pids = b - a
+ for pid in gone_pids:
+ remove(pid)
+ try:
+ ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
+ for pid, proc in ls:
+ try:
+ if proc is None: # new process
+ yield add(pid)
+ else:
+ # use is_running() to check whether PID has been
+ # reused by another process in which case yield a
+ # new Process instance
+ if proc.is_running():
+ if attrs is not None:
+ proc.info = proc.as_dict(
+ attrs=attrs, ad_value=ad_value
+ )
+ yield proc
+ else:
+ yield add(pid)
+ except NoSuchProcess:
+ remove(pid)
+ except AccessDenied:
+ # Process creation time can't be determined hence there's
+ # no way to tell whether the pid of the cached process
+ # has been reused. Just return the cached version.
+ if proc is None and pid in pmap:
+ try:
+ yield pmap[pid]
+ except KeyError:
+ # If we get here it is likely that 2 threads were
+ # using process_iter().
+ pass
+ else:
+ raise
+ finally:
+ _pmap = pmap
+
+
+def wait_procs(procs, timeout=None, callback=None):
+ """Convenience function which waits for a list of processes to
+ terminate.
+
+ Return a (gone, alive) tuple indicating which processes
+ are gone and which ones are still alive.
+
+ The gone ones will have a new *returncode* attribute indicating
+ process exit status (may be None).
+
+ *callback* is a function which gets called every time a process
+ terminates (a Process instance is passed as callback argument).
+
+ Function will return as soon as all processes terminate or when
+ *timeout* occurs.
+ Unlike Process.wait() it will not raise TimeoutExpired if
+ *timeout* occurs.
+
+ Typical use case is:
+
+ - send SIGTERM to a list of processes
+ - give them some time to terminate
+ - send SIGKILL to those ones which are still alive
+
+ Example:
+
+ >>> def on_terminate(proc):
+ ... print("process {} terminated".format(proc))
+ ...
+ >>> for p in procs:
+ ... p.terminate()
+ ...
+ >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
+ >>> for p in alive:
+ ...
p.kill() + """ + + def check_gone(proc, timeout): + try: + returncode = proc.wait(timeout=timeout) + except TimeoutExpired: + pass + except _SubprocessTimeoutExpired: + pass + else: + if returncode is not None or not proc.is_running(): + # Set new Process instance attribute. + proc.returncode = returncode + gone.add(proc) + if callback is not None: + callback(proc) + + if timeout is not None and not timeout >= 0: + msg = "timeout must be a positive integer, got %s" % timeout + raise ValueError(msg) + gone = set() + alive = set(procs) + if callback is not None and not callable(callback): + msg = "callback %r is not a callable" % callback + raise TypeError(msg) + if timeout is not None: + deadline = _timer() + timeout + + while alive: + if timeout is not None and timeout <= 0: + break + for proc in alive: + # Make sure that every complete iteration (all processes) + # will last max 1 sec. + # We do this because we don't want to wait too long on a + # single process: in case it terminates too late other + # processes may disappear in the meantime and their PID + # reused. + max_timeout = 1.0 / len(alive) + if timeout is not None: + timeout = min((deadline - _timer()), max_timeout) + if timeout <= 0: + break + check_gone(proc, timeout) + else: + check_gone(proc, max_timeout) + alive = alive - gone + + if alive: + # Last attempt over processes survived so far. + # timeout == 0 won't make this function wait any further. + for proc in alive: + check_gone(proc, 0) + alive = alive - gone + + return (list(gone), list(alive)) + + +# ===================================================================== +# --- CPU related functions +# ===================================================================== + + +def cpu_count(logical=True): + """Return the number of logical CPUs in the system (same as + os.cpu_count() in Python 3.4). + + If *logical* is False return the number of physical cores only + (e.g. hyper thread CPUs are excluded). + + Return None if undetermined. + + The return value is cached after first call. + If desired cache can be cleared like this: + + >>> psutil.cpu_count.cache_clear() + """ + if logical: + ret = _psplatform.cpu_count_logical() + else: + ret = _psplatform.cpu_count_cores() + if ret is not None and ret < 1: + ret = None + return ret + + +def cpu_times(percpu=False): + """Return system-wide CPU times as a namedtuple. + Every CPU time represents the seconds the CPU has spent in the + given mode. The namedtuple's fields availability varies depending on the + platform: + + - user + - system + - idle + - nice (UNIX) + - iowait (Linux) + - irq (Linux, FreeBSD) + - softirq (Linux) + - steal (Linux >= 2.6.11) + - guest (Linux >= 2.6.24) + - guest_nice (Linux >= 3.2.0) + + When *percpu* is True return a list of namedtuples for each CPU. + First element of the list refers to first CPU, second element + to second CPU and so on. + The order of the list is consistent across calls. + """ + if not percpu: + return _psplatform.cpu_times() + else: + return _psplatform.per_cpu_times() + + +try: + _last_cpu_times = {threading.current_thread().ident: cpu_times()} +except Exception: # noqa: BLE001 + # Don't want to crash at import time. + _last_cpu_times = {} + +try: + _last_per_cpu_times = { + threading.current_thread().ident: cpu_times(percpu=True) + } +except Exception: # noqa: BLE001 + # Don't want to crash at import time. + _last_per_cpu_times = {} + + +def _cpu_tot_time(times): + """Given a cpu_time() ntuple calculates the total CPU time + (including idle time). 
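+
+ Illustrative sketch (made-up numbers): on Linux a sample with
+ user=1.0 (of which guest=0.5), system=2.0, idle=7.0, guest=0.5
+ sums to 10.5; subtracting guest yields the true total of 10.0.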
+ """ + tot = sum(times) + if LINUX: + # On Linux guest times are already accounted in "user" or + # "nice" times, so we subtract them from total. + # Htop does the same. References: + # https://github.com/giampaolo/psutil/pull/940 + # http://unix.stackexchange.com/questions/178045 + # https://github.com/torvalds/linux/blob/ + # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/ + # cputime.c#L158 + tot -= getattr(times, "guest", 0) # Linux 2.6.24+ + tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+ + return tot + + +def _cpu_busy_time(times): + """Given a cpu_time() ntuple calculates the busy CPU time. + We do so by subtracting all idle CPU times. + """ + busy = _cpu_tot_time(times) + busy -= times.idle + # Linux: "iowait" is time during which the CPU does not do anything + # (waits for IO to complete). On Linux IO wait is *not* accounted + # in "idle" time so we subtract it. Htop does the same. + # References: + # https://github.com/torvalds/linux/blob/ + # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244 + busy -= getattr(times, "iowait", 0) + return busy + + +def _cpu_times_deltas(t1, t2): + assert t1._fields == t2._fields, (t1, t2) + field_deltas = [] + for field in _psplatform.scputimes._fields: + field_delta = getattr(t2, field) - getattr(t1, field) + # CPU times are always supposed to increase over time + # or at least remain the same and that's because time + # cannot go backwards. + # Surprisingly sometimes this might not be the case (at + # least on Windows and Linux), see: + # https://github.com/giampaolo/psutil/issues/392 + # https://github.com/giampaolo/psutil/issues/645 + # https://github.com/giampaolo/psutil/issues/1210 + # Trim negative deltas to zero to ignore decreasing fields. + # top does the same. Reference: + # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063 + field_delta = max(0, field_delta) + field_deltas.append(field_delta) + return _psplatform.scputimes(*field_deltas) + + +def cpu_percent(interval=None, percpu=False): + """Return a float representing the current system-wide CPU + utilization as a percentage. + + When *interval* is > 0.0 compares system CPU times elapsed before + and after the interval (blocking). + + When *interval* is 0.0 or None compares system CPU times elapsed + since last call or module import, returning immediately (non + blocking). That means the first time this is called it will + return a meaningless 0.0 value which you should ignore. + In this case is recommended for accuracy that this function be + called with at least 0.1 seconds between calls. + + When *percpu* is True returns a list of floats representing the + utilization as a percentage for each CPU. + First element of the list refers to first CPU, second element + to second CPU and so on. + The order of the list is consistent across calls. 
+ + Examples: + + >>> # blocking, system-wide + >>> psutil.cpu_percent(interval=1) + 2.0 + >>> + >>> # blocking, per-cpu + >>> psutil.cpu_percent(interval=1, percpu=True) + [2.0, 1.0] + >>> + >>> # non-blocking (percentage since last call) + >>> psutil.cpu_percent(interval=None) + 2.9 + >>> + """ + tid = threading.current_thread().ident + blocking = interval is not None and interval > 0.0 + if interval is not None and interval < 0: + msg = "interval is not positive (got %r)" % interval + raise ValueError(msg) + + def calculate(t1, t2): + times_delta = _cpu_times_deltas(t1, t2) + all_delta = _cpu_tot_time(times_delta) + busy_delta = _cpu_busy_time(times_delta) + + try: + busy_perc = (busy_delta / all_delta) * 100 + except ZeroDivisionError: + return 0.0 + else: + return round(busy_perc, 1) + + # system-wide usage + if not percpu: + if blocking: + t1 = cpu_times() + time.sleep(interval) + else: + t1 = _last_cpu_times.get(tid) or cpu_times() + _last_cpu_times[tid] = cpu_times() + return calculate(t1, _last_cpu_times[tid]) + # per-cpu usage + else: + ret = [] + if blocking: + tot1 = cpu_times(percpu=True) + time.sleep(interval) + else: + tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True) + _last_per_cpu_times[tid] = cpu_times(percpu=True) + for t1, t2 in zip(tot1, _last_per_cpu_times[tid]): + ret.append(calculate(t1, t2)) + return ret + + +# Use a separate dict for cpu_times_percent(), so it's independent from +# cpu_percent() and they can both be used within the same program. +_last_cpu_times_2 = _last_cpu_times.copy() +_last_per_cpu_times_2 = _last_per_cpu_times.copy() + + +def cpu_times_percent(interval=None, percpu=False): + """Same as cpu_percent() but provides utilization percentages + for each specific CPU time as is returned by cpu_times(). + For instance, on Linux we'll get: + + >>> cpu_times_percent() + cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0, + irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0) + >>> + + *interval* and *percpu* arguments have the same meaning as in + cpu_percent(). + """ + tid = threading.current_thread().ident + blocking = interval is not None and interval > 0.0 + if interval is not None and interval < 0: + msg = "interval is not positive (got %r)" % interval + raise ValueError(msg) + + def calculate(t1, t2): + nums = [] + times_delta = _cpu_times_deltas(t1, t2) + all_delta = _cpu_tot_time(times_delta) + # "scale" is the value to multiply each delta with to get percentages. + # We use "max" to avoid division by zero (if all_delta is 0, then all + # fields are 0 so percentages will be 0 too. 
all_delta cannot be a + # fraction because cpu times are integers) + scale = 100.0 / max(1, all_delta) + for field_delta in times_delta: + field_perc = field_delta * scale + field_perc = round(field_perc, 1) + # make sure we don't return negative values or values over 100% + field_perc = min(max(0.0, field_perc), 100.0) + nums.append(field_perc) + return _psplatform.scputimes(*nums) + + # system-wide usage + if not percpu: + if blocking: + t1 = cpu_times() + time.sleep(interval) + else: + t1 = _last_cpu_times_2.get(tid) or cpu_times() + _last_cpu_times_2[tid] = cpu_times() + return calculate(t1, _last_cpu_times_2[tid]) + # per-cpu usage + else: + ret = [] + if blocking: + tot1 = cpu_times(percpu=True) + time.sleep(interval) + else: + tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True) + _last_per_cpu_times_2[tid] = cpu_times(percpu=True) + for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]): + ret.append(calculate(t1, t2)) + return ret + + +def cpu_stats(): + """Return CPU statistics.""" + return _psplatform.cpu_stats() + + +if hasattr(_psplatform, "cpu_freq"): + + def cpu_freq(percpu=False): + """Return CPU frequency as a namedtuple including current, + min and max frequency expressed in Mhz. + + If *percpu* is True and the system supports per-cpu frequency + retrieval (Linux only) a list of frequencies is returned for + each CPU. If not a list with one element is returned. + """ + ret = _psplatform.cpu_freq() + if percpu: + return ret + else: + num_cpus = float(len(ret)) + if num_cpus == 0: + return None + elif num_cpus == 1: + return ret[0] + else: + currs, mins, maxs = 0.0, 0.0, 0.0 + set_none = False + for cpu in ret: + currs += cpu.current + # On Linux if /proc/cpuinfo is used min/max are set + # to None. + if LINUX and cpu.min is None: + set_none = True + continue + mins += cpu.min + maxs += cpu.max + + current = currs / num_cpus + + if set_none: + min_ = max_ = None + else: + min_ = mins / num_cpus + max_ = maxs / num_cpus + + return _common.scpufreq(current, min_, max_) + + __all__.append("cpu_freq") + + +if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"): + # Perform this hasattr check once on import time to either use the + # platform based code or proxy straight from the os module. + if hasattr(os, "getloadavg"): + getloadavg = os.getloadavg + else: + getloadavg = _psplatform.getloadavg + + __all__.append("getloadavg") + + +# ===================================================================== +# --- system memory related functions +# ===================================================================== + + +def virtual_memory(): + """Return statistics about system memory usage as a namedtuple + including the following fields, expressed in bytes: + + - total: + total physical memory available. + + - available: + the memory that can be given instantly to processes without the + system going into swap. + This is calculated by summing different memory values depending + on the platform and it is supposed to be used to monitor actual + memory usage in a cross platform fashion. 
+
+ - percent:
+ the percentage usage calculated as (total - available) / total * 100
+
+ - used:
+ memory used, calculated differently depending on the platform and
+ designed for informational purposes only:
+ macOS: active + wired
+ BSD: active + wired + cached
+ Linux: total - free
+
+ - free:
+ memory not being used at all (zeroed) that is readily available;
+ note that this doesn't reflect the actual memory available
+ (use 'available' instead)
+
+ Platform-specific fields:
+
+ - active (UNIX):
+ memory currently in use or very recently used, and so it is in RAM.
+
+ - inactive (UNIX):
+ memory that is marked as not used.
+
+ - buffers (BSD, Linux):
+ cache for things like file system metadata.
+
+ - cached (BSD, macOS):
+ cache for various things.
+
+ - wired (macOS, BSD):
+ memory that is marked to always stay in RAM. It is never moved to disk.
+
+ - shared (BSD):
+ memory that may be simultaneously accessed by multiple processes.
+
+ The sum of 'used' and 'available' does not necessarily equal total.
+ On Windows 'available' and 'free' are the same.
+ """
+ global _TOTAL_PHYMEM
+ ret = _psplatform.virtual_memory()
+ # cached for later use in Process.memory_percent()
+ _TOTAL_PHYMEM = ret.total
+ return ret
+
+
+def swap_memory():
+ """Return system swap memory statistics as a namedtuple including
+ the following fields:
+
+ - total: total swap memory in bytes
+ - used: used swap memory in bytes
+ - free: free swap memory in bytes
+ - percent: the percentage usage
+ - sin: no. of bytes the system has swapped in from disk (cumulative)
+ - sout: no. of bytes the system has swapped out from disk (cumulative)
+
+ 'sin' and 'sout' on Windows are meaningless and always set to 0.
+ """
+ return _psplatform.swap_memory()
+
+
+# =====================================================================
+# --- disks/partitions related functions
+# =====================================================================
+
+
+def disk_usage(path):
+ """Return disk usage statistics about the given *path* as a
+ namedtuple including total, used and free space expressed in bytes
+ plus the percentage usage.
+ """
+ return _psplatform.disk_usage(path)
+
+
+def disk_partitions(all=False):
+ """Return mounted partitions as a list of
+ (device, mountpoint, fstype, opts) namedtuple.
+ 'opts' field is a raw string separated by commas indicating mount
+ options which may vary depending on the platform.
+
+ If *all* parameter is False return physical devices only and ignore
+ all others.
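+
+ A minimal usage sketch (the mountpoint shown is illustrative):
+
+ >>> import psutil
+ >>> psutil.disk_partitions()[0].mountpoint
+ '/'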
+ """ + + def pathconf(path, name): + try: + return os.pathconf(path, name) + except (OSError, AttributeError): + pass + + ret = _psplatform.disk_partitions(all) + if POSIX: + new = [] + for item in ret: + nt = item._replace( + maxfile=pathconf(item.mountpoint, 'PC_NAME_MAX'), + maxpath=pathconf(item.mountpoint, 'PC_PATH_MAX'), + ) + new.append(nt) + return new + else: + return ret + + +def disk_io_counters(perdisk=False, nowrap=True): + """Return system disk I/O statistics as a namedtuple including + the following fields: + + - read_count: number of reads + - write_count: number of writes + - read_bytes: number of bytes read + - write_bytes: number of bytes written + - read_time: time spent reading from disk (in ms) + - write_time: time spent writing to disk (in ms) + + Platform specific: + + - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms) + - read_merged_count (Linux): number of merged reads + - write_merged_count (Linux): number of merged writes + + If *perdisk* is True return the same information for every + physical disk installed on the system as a dictionary + with partition names as the keys and the namedtuple + described above as the values. + + If *nowrap* is True it detects and adjust the numbers which overflow + and wrap (restart from 0) and add "old value" to "new value" so that + the returned numbers will always be increasing or remain the same, + but never decrease. + "disk_io_counters.cache_clear()" can be used to invalidate the + cache. + + On recent Windows versions 'diskperf -y' command may need to be + executed first otherwise this function won't find any disk. + """ + kwargs = dict(perdisk=perdisk) if LINUX else {} + rawdict = _psplatform.disk_io_counters(**kwargs) + if not rawdict: + return {} if perdisk else None + if nowrap: + rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters') + nt = getattr(_psplatform, "sdiskio", _common.sdiskio) + if perdisk: + for disk, fields in rawdict.items(): + rawdict[disk] = nt(*fields) + return rawdict + else: + return nt(*(sum(x) for x in zip(*rawdict.values()))) + + +disk_io_counters.cache_clear = functools.partial( + _wrap_numbers.cache_clear, 'psutil.disk_io_counters' +) +disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache" + + +# ===================================================================== +# --- network related functions +# ===================================================================== + + +def net_io_counters(pernic=False, nowrap=True): + """Return network I/O statistics as a namedtuple including + the following fields: + + - bytes_sent: number of bytes sent + - bytes_recv: number of bytes received + - packets_sent: number of packets sent + - packets_recv: number of packets received + - errin: total number of errors while receiving + - errout: total number of errors while sending + - dropin: total number of incoming packets which were dropped + - dropout: total number of outgoing packets which were dropped + (always 0 on macOS and BSD) + + If *pernic* is True return the same information for every + network interface installed on the system as a dictionary + with network interface names as the keys and the namedtuple + described above as the values. + + If *nowrap* is True it detects and adjust the numbers which overflow + and wrap (restart from 0) and add "old value" to "new value" so that + the returned numbers will always be increasing or remain the same, + but never decrease. + "net_io_counters.cache_clear()" can be used to invalidate the + cache. 
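+
+ A minimal usage sketch (numbers are illustrative):
+
+ >>> import psutil
+ >>> psutil.net_io_counters().bytes_sent
+ 14508483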
+ """ + rawdict = _psplatform.net_io_counters() + if not rawdict: + return {} if pernic else None + if nowrap: + rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters') + if pernic: + for nic, fields in rawdict.items(): + rawdict[nic] = _common.snetio(*fields) + return rawdict + else: + return _common.snetio(*[sum(x) for x in zip(*rawdict.values())]) + + +net_io_counters.cache_clear = functools.partial( + _wrap_numbers.cache_clear, 'psutil.net_io_counters' +) +net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache" + + +def net_connections(kind='inet'): + """Return system-wide socket connections as a list of + (fd, family, type, laddr, raddr, status, pid) namedtuples. + In case of limited privileges 'fd' and 'pid' may be set to -1 + and None respectively. + The *kind* parameter filters for connections that fit the + following criteria: + + +------------+----------------------------------------------------+ + | Kind Value | Connections using | + +------------+----------------------------------------------------+ + | inet | IPv4 and IPv6 | + | inet4 | IPv4 | + | inet6 | IPv6 | + | tcp | TCP | + | tcp4 | TCP over IPv4 | + | tcp6 | TCP over IPv6 | + | udp | UDP | + | udp4 | UDP over IPv4 | + | udp6 | UDP over IPv6 | + | unix | UNIX socket (both UDP and TCP protocols) | + | all | the sum of all the possible families and protocols | + +------------+----------------------------------------------------+ + + On macOS this function requires root privileges. + """ + return _psplatform.net_connections(kind) + + +def net_if_addrs(): + """Return the addresses associated to each NIC (network interface + card) installed on the system as a dictionary whose keys are the + NIC names and value is a list of namedtuples for each address + assigned to the NIC. Each namedtuple includes 5 fields: + + - family: can be either socket.AF_INET, socket.AF_INET6 or + psutil.AF_LINK, which refers to a MAC address. + - address: is the primary address and it is always set. + - netmask: and 'broadcast' and 'ptp' may be None. + - ptp: stands for "point to point" and references the + destination address on a point to point interface + (typically a VPN). + - broadcast: and *ptp* are mutually exclusive. + + Note: you can have more than one address of the same family + associated with each interface. + """ + has_enums = _PY3 + if has_enums: + import socket + rawlist = _psplatform.net_if_addrs() + rawlist.sort(key=lambda x: x[1]) # sort by family + ret = collections.defaultdict(list) + for name, fam, addr, mask, broadcast, ptp in rawlist: + if has_enums: + try: + fam = socket.AddressFamily(fam) + except ValueError: + if WINDOWS and fam == -1: + fam = _psplatform.AF_LINK + elif ( + hasattr(_psplatform, "AF_LINK") + and fam == _psplatform.AF_LINK + ): + # Linux defines AF_LINK as an alias for AF_PACKET. 
+                    # We re-set the family here so that repr(family)
+                    # will show AF_LINK rather than AF_PACKET.
+                    fam = _psplatform.AF_LINK
+        if fam == _psplatform.AF_LINK:
+            # The underlying C function may return an incomplete MAC
+            # address in which case we fill it with null bytes, see:
+            # https://github.com/giampaolo/psutil/issues/786
+            separator = ":" if POSIX else "-"
+            while addr.count(separator) < 5:
+                addr += "%s00" % separator
+        ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp))
+    return dict(ret)
+
+
+def net_if_stats():
+    """Return information about each NIC (network interface card)
+    installed on the system as a dictionary whose keys are the
+    NIC names and value is a namedtuple with the following fields:
+
+    - isup: whether the interface is up (bool)
+    - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
+      NIC_DUPLEX_UNKNOWN
+    - speed: the NIC speed expressed in megabits (Mb); if it can't
+      be determined (e.g. 'localhost') it will be set to 0.
+    - mtu: the maximum transmission unit expressed in bytes.
+    """
+    return _psplatform.net_if_stats()
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+# Linux, macOS
+if hasattr(_psplatform, "sensors_temperatures"):
+
+    def sensors_temperatures(fahrenheit=False):
+        """Return hardware temperatures. Each entry is a namedtuple
+        representing a certain hardware sensor (it may be a CPU, a
+        hard disk or something else, depending on the OS and its
+        configuration).
+        All temperatures are expressed in Celsius unless *fahrenheit*
+        is set to True.
+        """
+
+        def convert(n):
+            if n is not None:
+                return (float(n) * 9 / 5) + 32 if fahrenheit else n
+
+        ret = collections.defaultdict(list)
+        rawdict = _psplatform.sensors_temperatures()
+
+        for name, values in rawdict.items():
+            while values:
+                label, current, high, critical = values.pop(0)
+                current = convert(current)
+                high = convert(high)
+                critical = convert(critical)
+
+                if high and not critical:
+                    critical = high
+                elif critical and not high:
+                    high = critical
+
+                ret[name].append(
+                    _common.shwtemp(label, current, high, critical)
+                )
+
+        return dict(ret)
+
+    __all__.append("sensors_temperatures")
+
+
+# Linux
+if hasattr(_psplatform, "sensors_fans"):
+
+    def sensors_fans():
+        """Return fan speeds. Each entry is a namedtuple
+        representing a certain hardware sensor.
+        All speeds are expressed in RPM (revolutions per minute).
+        """
+        return _psplatform.sensors_fans()
+
+    __all__.append("sensors_fans")
+
+
+# Linux, Windows, FreeBSD, macOS
+if hasattr(_psplatform, "sensors_battery"):
+
+    def sensors_battery():
+        """Return battery information. If no battery is installed
+        returns None.
+
+        - percent: battery power left as a percentage.
+        - secsleft: a rough approximation of how many seconds are left
+          before the battery runs out of power. May be
+          POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
+        - power_plugged: True if the AC power cable is connected.
+        """
+        return _psplatform.sensors_battery()
+
+    __all__.append("sensors_battery")
+
+
+# =====================================================================
+# --- other system related functions
+# =====================================================================
+
+
+def boot_time():
+    """Return the system boot time expressed in seconds since the epoch."""
+    # Note: we are not caching this because it is subject to
+    # system clock updates.
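A small sketch of the system-level calls above; the hasattr() guard mirrors the platform checks used in this module, since the sensors_* functions only exist where the platform implementation provides them:

```python
import datetime
import psutil

# boot_time() returns an epoch timestamp (not cached, see note above).
print(datetime.datetime.fromtimestamp(psutil.boot_time()))

if hasattr(psutil, "sensors_battery"):
    batt = psutil.sensors_battery()
    if batt is not None:  # None means no battery installed
        print(batt.percent, batt.power_plugged)
```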
+ return _psplatform.boot_time() + + +def users(): + """Return users currently connected on the system as a list of + namedtuples including the following fields. + + - user: the name of the user + - terminal: the tty or pseudo-tty associated with the user, if any. + - host: the host name associated with the entry, if any. + - started: the creation time as a floating point number expressed in + seconds since the epoch. + """ + return _psplatform.users() + + +# ===================================================================== +# --- Windows services +# ===================================================================== + + +if WINDOWS: + + def win_service_iter(): + """Return a generator yielding a WindowsService instance for all + Windows services installed. + """ + return _psplatform.win_service_iter() + + def win_service_get(name): + """Get a Windows service by *name*. + Raise NoSuchProcess if no service with such name exists. + """ + return _psplatform.win_service_get(name) + + +# ===================================================================== + + +def _set_debug(value): + """Enable or disable PSUTIL_DEBUG option, which prints debugging + messages to stderr. + """ + import psutil._common + + psutil._common.PSUTIL_DEBUG = bool(value) + _psplatform.cext.set_debug(bool(value)) + + +def test(): # pragma: no cover + from ._common import bytes2human + from ._compat import get_terminal_size + + today_day = datetime.date.today() + # fmt: off + templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s" + attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times', + 'create_time', 'memory_info', 'status', 'nice', 'username'] + print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE", # NOQA + "STATUS", "START", "TIME", "CMDLINE")) + # fmt: on + for p in process_iter(attrs, ad_value=None): + if p.info['create_time']: + ctime = datetime.datetime.fromtimestamp(p.info['create_time']) + if ctime.date() == today_day: + ctime = ctime.strftime("%H:%M") + else: + ctime = ctime.strftime("%b%d") + else: + ctime = '' + if p.info['cpu_times']: + cputime = time.strftime( + "%M:%S", time.localtime(sum(p.info['cpu_times'])) + ) + else: + cputime = '' + + user = p.info['username'] or '' + if not user and POSIX: + try: + user = p.uids()[0] + except Error: + pass + if user and WINDOWS and '\\' in user: + user = user.split('\\')[1] + user = user[:9] + vms = ( + bytes2human(p.info['memory_info'].vms) + if p.info['memory_info'] is not None + else '' + ) + rss = ( + bytes2human(p.info['memory_info'].rss) + if p.info['memory_info'] is not None + else '' + ) + memp = ( + round(p.info['memory_percent'], 1) + if p.info['memory_percent'] is not None + else '' + ) + nice = int(p.info['nice']) if p.info['nice'] else '' + if p.info['cmdline']: + cmdline = ' '.join(p.info['cmdline']) + else: + cmdline = p.info['name'] + status = p.info['status'][:5] if p.info['status'] else '' + + line = templ % ( + user[:10], + p.info['pid'], + memp, + vms, + rss, + nice, + status, + ctime, + cputime, + cmdline, + ) + print(line[: get_terminal_size()[0]]) # NOQA + + +del memoize_when_activated, division +if sys.version_info[0] < 3: + del num, x # noqa + +if __name__ == "__main__": + test() diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f606eb7d65054ee04889974c17ecb068358dfa4 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/psutil/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43e9b120b4a7db624b7bea19f8b9a0033597e57e Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03e0f7590c0fdf707c7ad7b6764b70e562949e81 Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_compat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_psaix.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psaix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af215dbd768aaecc8c1e4b40409b69eb00c34b0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psaix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_psbsd.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psbsd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce7089bdd75ce138e5433926d1ee16207f3b992f Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psbsd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_pslinux.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_pslinux.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61606e4bf0f50cf23482072e4e69a5276cf22580 Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_pslinux.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_psosx.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psosx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c00912f6849eba2e27ca397aa49699ee0a54ae36 Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psosx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_psposix.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psposix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c33929a9aefcc9b8bf674e6458cbee9578d329d Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_psposix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_pssunos.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_pssunos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f035600345aa87a370d0d980a7ff0e5904f4805d Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/__pycache__/_pssunos.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/__pycache__/_pswindows.cpython-310.pyc b/venv/lib/python3.10/site-packages/psutil/__pycache__/_pswindows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a7642caafe1e3d21f607224dd432290b5622fd0 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/psutil/__pycache__/_pswindows.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/psutil/_common.py b/venv/lib/python3.10/site-packages/psutil/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..6989feafda6f177199f720452ef5b979fa3e27e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_common.py @@ -0,0 +1,983 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Common objects shared by __init__.py and _ps*.py modules.""" + +# Note: this module is imported by setup.py so it should not import +# psutil or third-party modules. + +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import errno +import functools +import os +import socket +import stat +import sys +import threading +import warnings +from collections import namedtuple +from socket import AF_INET +from socket import SOCK_DGRAM +from socket import SOCK_STREAM + + +try: + from socket import AF_INET6 +except ImportError: + AF_INET6 = None +try: + from socket import AF_UNIX +except ImportError: + AF_UNIX = None + + +# can't take it from _common.py as this script is imported by setup.py +PY3 = sys.version_info[0] >= 3 +if PY3: + import enum +else: + enum = None + + +PSUTIL_DEBUG = bool(os.getenv('PSUTIL_DEBUG')) +_DEFAULT = object() + +# fmt: off +__all__ = [ + # OS constants + 'FREEBSD', 'BSD', 'LINUX', 'NETBSD', 'OPENBSD', 'MACOS', 'OSX', 'POSIX', + 'SUNOS', 'WINDOWS', + # connection constants + 'CONN_CLOSE', 'CONN_CLOSE_WAIT', 'CONN_CLOSING', 'CONN_ESTABLISHED', + 'CONN_FIN_WAIT1', 'CONN_FIN_WAIT2', 'CONN_LAST_ACK', 'CONN_LISTEN', + 'CONN_NONE', 'CONN_SYN_RECV', 'CONN_SYN_SENT', 'CONN_TIME_WAIT', + # net constants + 'NIC_DUPLEX_FULL', 'NIC_DUPLEX_HALF', 'NIC_DUPLEX_UNKNOWN', + # process status constants + 'STATUS_DEAD', 'STATUS_DISK_SLEEP', 'STATUS_IDLE', 'STATUS_LOCKED', + 'STATUS_RUNNING', 'STATUS_SLEEPING', 'STATUS_STOPPED', 'STATUS_SUSPENDED', + 'STATUS_TRACING_STOP', 'STATUS_WAITING', 'STATUS_WAKE_KILL', + 'STATUS_WAKING', 'STATUS_ZOMBIE', 'STATUS_PARKED', + # other constants + 'ENCODING', 'ENCODING_ERRS', 'AF_INET6', + # named tuples + 'pconn', 'pcputimes', 'pctxsw', 'pgids', 'pio', 'pionice', 'popenfile', + 'pthread', 'puids', 'sconn', 'scpustats', 'sdiskio', 'sdiskpart', + 'sdiskusage', 'snetio', 'snicaddr', 'snicstats', 'sswap', 'suser', + # utility functions + 'conn_tmap', 'deprecated_method', 'isfile_strict', 'memoize', + 'parse_environ_block', 'path_exists_strict', 'usage_percent', + 'supports_ipv6', 'sockfam_to_enum', 'socktype_to_enum', "wrap_numbers", + 'open_text', 'open_binary', 'cat', 'bcat', + 'bytes2human', 'conn_to_ntuple', 'debug', + # shell utils + 'hilite', 'term_supports_colors', 'print_color', +] +# fmt: on + + +# =================================================================== +# --- OS constants +# =================================================================== + + +POSIX = os.name == "posix" +WINDOWS = os.name == "nt" +LINUX = sys.platform.startswith("linux") +MACOS = sys.platform.startswith("darwin") +OSX = MACOS # deprecated alias +FREEBSD = sys.platform.startswith(("freebsd", "midnightbsd")) +OPENBSD = sys.platform.startswith("openbsd") +NETBSD = sys.platform.startswith("netbsd") +BSD = FREEBSD or OPENBSD or NETBSD +SUNOS = sys.platform.startswith(("sunos", "solaris")) +AIX = sys.platform.startswith("aix") + + +# 
=================================================================== +# --- API constants +# =================================================================== + + +# Process.status() +STATUS_RUNNING = "running" +STATUS_SLEEPING = "sleeping" +STATUS_DISK_SLEEP = "disk-sleep" +STATUS_STOPPED = "stopped" +STATUS_TRACING_STOP = "tracing-stop" +STATUS_ZOMBIE = "zombie" +STATUS_DEAD = "dead" +STATUS_WAKE_KILL = "wake-kill" +STATUS_WAKING = "waking" +STATUS_IDLE = "idle" # Linux, macOS, FreeBSD +STATUS_LOCKED = "locked" # FreeBSD +STATUS_WAITING = "waiting" # FreeBSD +STATUS_SUSPENDED = "suspended" # NetBSD +STATUS_PARKED = "parked" # Linux + +# Process.connections() and psutil.net_connections() +CONN_ESTABLISHED = "ESTABLISHED" +CONN_SYN_SENT = "SYN_SENT" +CONN_SYN_RECV = "SYN_RECV" +CONN_FIN_WAIT1 = "FIN_WAIT1" +CONN_FIN_WAIT2 = "FIN_WAIT2" +CONN_TIME_WAIT = "TIME_WAIT" +CONN_CLOSE = "CLOSE" +CONN_CLOSE_WAIT = "CLOSE_WAIT" +CONN_LAST_ACK = "LAST_ACK" +CONN_LISTEN = "LISTEN" +CONN_CLOSING = "CLOSING" +CONN_NONE = "NONE" + +# net_if_stats() +if enum is None: + NIC_DUPLEX_FULL = 2 + NIC_DUPLEX_HALF = 1 + NIC_DUPLEX_UNKNOWN = 0 +else: + + class NicDuplex(enum.IntEnum): + NIC_DUPLEX_FULL = 2 + NIC_DUPLEX_HALF = 1 + NIC_DUPLEX_UNKNOWN = 0 + + globals().update(NicDuplex.__members__) + +# sensors_battery() +if enum is None: + POWER_TIME_UNKNOWN = -1 + POWER_TIME_UNLIMITED = -2 +else: + + class BatteryTime(enum.IntEnum): + POWER_TIME_UNKNOWN = -1 + POWER_TIME_UNLIMITED = -2 + + globals().update(BatteryTime.__members__) + +# --- others + +ENCODING = sys.getfilesystemencoding() +if not PY3: + ENCODING_ERRS = "replace" +else: + try: + ENCODING_ERRS = sys.getfilesystemencodeerrors() # py 3.6 + except AttributeError: + ENCODING_ERRS = "surrogateescape" if POSIX else "replace" + + +# =================================================================== +# --- namedtuples +# =================================================================== + +# --- for system functions + +# fmt: off +# psutil.swap_memory() +sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin', + 'sout']) +# psutil.disk_usage() +sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent']) +# psutil.disk_io_counters() +sdiskio = namedtuple('sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_time', 'write_time']) +# psutil.disk_partitions() +sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts', + 'maxfile', 'maxpath']) +# psutil.net_io_counters() +snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv', + 'packets_sent', 'packets_recv', + 'errin', 'errout', + 'dropin', 'dropout']) +# psutil.users() +suser = namedtuple('suser', ['name', 'terminal', 'host', 'started', 'pid']) +# psutil.net_connections() +sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr', + 'status', 'pid']) +# psutil.net_if_addrs() +snicaddr = namedtuple('snicaddr', + ['family', 'address', 'netmask', 'broadcast', 'ptp']) +# psutil.net_if_stats() +snicstats = namedtuple('snicstats', + ['isup', 'duplex', 'speed', 'mtu', 'flags']) +# psutil.cpu_stats() +scpustats = namedtuple( + 'scpustats', ['ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls']) +# psutil.cpu_freq() +scpufreq = namedtuple('scpufreq', ['current', 'min', 'max']) +# psutil.sensors_temperatures() +shwtemp = namedtuple( + 'shwtemp', ['label', 'current', 'high', 'critical']) +# psutil.sensors_battery() +sbattery = namedtuple('sbattery', ['percent', 'secsleft', 'power_plugged']) +# psutil.sensors_fans() +sfan 
= namedtuple('sfan', ['label', 'current']) +# fmt: on + +# --- for Process methods + +# psutil.Process.cpu_times() +pcputimes = namedtuple( + 'pcputimes', ['user', 'system', 'children_user', 'children_system'] +) +# psutil.Process.open_files() +popenfile = namedtuple('popenfile', ['path', 'fd']) +# psutil.Process.threads() +pthread = namedtuple('pthread', ['id', 'user_time', 'system_time']) +# psutil.Process.uids() +puids = namedtuple('puids', ['real', 'effective', 'saved']) +# psutil.Process.gids() +pgids = namedtuple('pgids', ['real', 'effective', 'saved']) +# psutil.Process.io_counters() +pio = namedtuple( + 'pio', ['read_count', 'write_count', 'read_bytes', 'write_bytes'] +) +# psutil.Process.ionice() +pionice = namedtuple('pionice', ['ioclass', 'value']) +# psutil.Process.ctx_switches() +pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary']) +# psutil.Process.connections() +pconn = namedtuple( + 'pconn', ['fd', 'family', 'type', 'laddr', 'raddr', 'status'] +) + +# psutil.connections() and psutil.Process.connections() +addr = namedtuple('addr', ['ip', 'port']) + + +# =================================================================== +# --- Process.connections() 'kind' parameter mapping +# =================================================================== + + +conn_tmap = { + "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]), + "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]), + "tcp4": ([AF_INET], [SOCK_STREAM]), + "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]), + "udp4": ([AF_INET], [SOCK_DGRAM]), + "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]), + "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]), + "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]), +} + +if AF_INET6 is not None: + conn_tmap.update({ + "tcp6": ([AF_INET6], [SOCK_STREAM]), + "udp6": ([AF_INET6], [SOCK_DGRAM]), + }) + +if AF_UNIX is not None: + conn_tmap.update({"unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM])}) + + +# ===================================================================== +# --- Exceptions +# ===================================================================== + + +class Error(Exception): + """Base exception class. All other psutil exceptions inherit + from this one. + """ + + __module__ = 'psutil' + + def _infodict(self, attrs): + info = collections.OrderedDict() + for name in attrs: + value = getattr(self, name, None) + if value: # noqa + info[name] = value + elif name == "pid" and value == 0: + info[name] = value + return info + + def __str__(self): + # invoked on `raise Error` + info = self._infodict(("pid", "ppid", "name")) + if info: + details = "(%s)" % ", ".join( + ["%s=%r" % (k, v) for k, v in info.items()] + ) + else: + details = None + return " ".join([x for x in (getattr(self, "msg", ""), details) if x]) + + def __repr__(self): + # invoked on `repr(Error)` + info = self._infodict(("pid", "ppid", "name", "seconds", "msg")) + details = ", ".join(["%s=%r" % (k, v) for k, v in info.items()]) + return "psutil.%s(%s)" % (self.__class__.__name__, details) + + +class NoSuchProcess(Error): + """Exception raised when a process with a certain PID doesn't + or no longer exists. + """ + + __module__ = 'psutil' + + def __init__(self, pid, name=None, msg=None): + Error.__init__(self) + self.pid = pid + self.name = name + self.msg = msg or "process no longer exists" + + +class ZombieProcess(NoSuchProcess): + """Exception raised when querying a zombie process. This is + raised on macOS, BSD and Solaris only, and not always: depending + on the query the OS may be able to succeed anyway. 
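Callers usually handle this exception hierarchy as sketched below. Since ZombieProcess subclasses NoSuchProcess, it must be caught first (the PID is a made-up example):

```python
import psutil

try:
    proc = psutil.Process(424242)  # hypothetical PID
    print(proc.name())
except psutil.ZombieProcess:
    print("PID exists but is a zombie")
except psutil.NoSuchProcess:
    print("no process with that PID")
except psutil.AccessDenied:
    print("insufficient privileges")
```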
+    On Linux all zombie processes are queryable (hence this is never
+    raised). Windows doesn't have zombie processes.
+    """
+
+    __module__ = 'psutil'
+
+    def __init__(self, pid, name=None, ppid=None, msg=None):
+        NoSuchProcess.__init__(self, pid, name, msg)
+        self.ppid = ppid
+        self.msg = msg or "PID still exists but it's a zombie"
+
+
+class AccessDenied(Error):
+    """Exception raised when permission to perform an action is denied."""
+
+    __module__ = 'psutil'
+
+    def __init__(self, pid=None, name=None, msg=None):
+        Error.__init__(self)
+        self.pid = pid
+        self.name = name
+        self.msg = msg or ""
+
+
+class TimeoutExpired(Error):
+    """Raised on Process.wait(timeout) if timeout expires and process
+    is still alive.
+    """
+
+    __module__ = 'psutil'
+
+    def __init__(self, seconds, pid=None, name=None):
+        Error.__init__(self)
+        self.seconds = seconds
+        self.pid = pid
+        self.name = name
+        self.msg = "timeout after %s seconds" % seconds
+
+
+# ===================================================================
+# --- utils
+# ===================================================================
+
+
+# This should be in _compat.py rather than here, but does not work well
+# with setup.py importing this module via a sys.path trick.
+if PY3:
+    if isinstance(__builtins__, dict):  # cpython
+        exec_ = __builtins__["exec"]
+    else:  # pypy
+        exec_ = getattr(__builtins__, "exec")  # noqa
+
+    exec_("""def raise_from(value, from_value):
+        try:
+            raise value from from_value
+        finally:
+            value = None
+    """)
+else:
+
+    def raise_from(value, from_value):
+        raise value
+
+
+def usage_percent(used, total, round_=None):
+    """Calculate percentage usage of 'used' against 'total'."""
+    try:
+        ret = (float(used) / total) * 100
+    except ZeroDivisionError:
+        return 0.0
+    else:
+        if round_ is not None:
+            ret = round(ret, round_)
+        return ret
+
+
+def memoize(fun):
+    """A simple memoize decorator for functions supporting (hashable)
+    positional arguments.
+    It also provides a cache_clear() function for clearing the cache:
+
+    >>> @memoize
+    ... def foo():
+    ...     return 1
+    ...
+    >>> foo()
+    1
+    >>> foo.cache_clear()
+    >>>
+
+    It supports:
+    - functions
+    - classes (acts as a @singleton)
+    - staticmethods
+    - classmethods
+
+    It does NOT support:
+    - methods
+    """
+
+    @functools.wraps(fun)
+    def wrapper(*args, **kwargs):
+        key = (args, frozenset(sorted(kwargs.items())))
+        try:
+            return cache[key]
+        except KeyError:
+            try:
+                ret = cache[key] = fun(*args, **kwargs)
+            except Exception as err:  # noqa: BLE001
+                raise raise_from(err, None)
+            return ret
+
+    def cache_clear():
+        """Clear cache."""
+        cache.clear()
+
+    cache = {}
+    wrapper.cache_clear = cache_clear
+    return wrapper
+
+
+def memoize_when_activated(fun):
+    """A memoize decorator which is disabled by default. It can be
+    activated and deactivated on request.
+    For efficiency reasons it can be used only against class methods
+    accepting no arguments.
+
+    >>> class Foo:
+    ...     @memoize_when_activated
+    ...     def foo(self):
+    ...         print(1)
+    ...
+ >>> f = Foo() + >>> # deactivated (default) + >>> foo() + 1 + >>> foo() + 1 + >>> + >>> # activated + >>> foo.cache_activate(self) + >>> foo() + 1 + >>> foo() + >>> foo() + >>> + """ + + @functools.wraps(fun) + def wrapper(self): + try: + # case 1: we previously entered oneshot() ctx + ret = self._cache[fun] + except AttributeError: + # case 2: we never entered oneshot() ctx + try: + return fun(self) + except Exception as err: # noqa: BLE001 + raise raise_from(err, None) + except KeyError: + # case 3: we entered oneshot() ctx but there's no cache + # for this entry yet + try: + ret = fun(self) + except Exception as err: # noqa: BLE001 + raise raise_from(err, None) + try: + self._cache[fun] = ret + except AttributeError: + # multi-threading race condition, see: + # https://github.com/giampaolo/psutil/issues/1948 + pass + return ret + + def cache_activate(proc): + """Activate cache. Expects a Process instance. Cache will be + stored as a "_cache" instance attribute. + """ + proc._cache = {} + + def cache_deactivate(proc): + """Deactivate and clear cache.""" + try: + del proc._cache + except AttributeError: + pass + + wrapper.cache_activate = cache_activate + wrapper.cache_deactivate = cache_deactivate + return wrapper + + +def isfile_strict(path): + """Same as os.path.isfile() but does not swallow EACCES / EPERM + exceptions, see: + http://mail.python.org/pipermail/python-dev/2012-June/120787.html. + """ + try: + st = os.stat(path) + except OSError as err: + if err.errno in (errno.EPERM, errno.EACCES): + raise + return False + else: + return stat.S_ISREG(st.st_mode) + + +def path_exists_strict(path): + """Same as os.path.exists() but does not swallow EACCES / EPERM + exceptions. See: + http://mail.python.org/pipermail/python-dev/2012-June/120787.html. + """ + try: + os.stat(path) + except OSError as err: + if err.errno in (errno.EPERM, errno.EACCES): + raise + return False + else: + return True + + +@memoize +def supports_ipv6(): + """Return True if IPv6 is supported on this platform.""" + if not socket.has_ipv6 or AF_INET6 is None: + return False + try: + sock = socket.socket(AF_INET6, socket.SOCK_STREAM) + with contextlib.closing(sock): + sock.bind(("::1", 0)) + return True + except socket.error: + return False + + +def parse_environ_block(data): + """Parse a C environ block of environment variables into a dictionary.""" + # The block is usually raw data from the target process. It might contain + # trailing garbage and lines that do not look like assignments. + ret = {} + pos = 0 + + # localize global variable to speed up access. + WINDOWS_ = WINDOWS + while True: + next_pos = data.find("\0", pos) + # nul byte at the beginning or double nul byte means finish + if next_pos <= pos: + break + # there might not be an equals sign + equal_pos = data.find("=", pos, next_pos) + if equal_pos > pos: + key = data[pos:equal_pos] + value = data[equal_pos + 1 : next_pos] + # Windows expects environment variables to be uppercase only + if WINDOWS_: + key = key.upper() + ret[key] = value + pos = next_pos + 1 + + return ret + + +def sockfam_to_enum(num): + """Convert a numeric socket family value to an IntEnum member. + If it's not a known member, return the numeric value itself. + """ + if enum is None: + return num + else: # pragma: no cover + try: + return socket.AddressFamily(num) + except ValueError: + return num + + +def socktype_to_enum(num): + """Convert a numeric socket type value to an IntEnum member. + If it's not a known member, return the numeric value itself. 
+ """ + if enum is None: + return num + else: # pragma: no cover + try: + return socket.SocketKind(num) + except ValueError: + return num + + +def conn_to_ntuple(fd, fam, type_, laddr, raddr, status, status_map, pid=None): + """Convert a raw connection tuple to a proper ntuple.""" + if fam in (socket.AF_INET, AF_INET6): + if laddr: + laddr = addr(*laddr) + if raddr: + raddr = addr(*raddr) + if type_ == socket.SOCK_STREAM and fam in (AF_INET, AF_INET6): + status = status_map.get(status, CONN_NONE) + else: + status = CONN_NONE # ignore whatever C returned to us + fam = sockfam_to_enum(fam) + type_ = socktype_to_enum(type_) + if pid is None: + return pconn(fd, fam, type_, laddr, raddr, status) + else: + return sconn(fd, fam, type_, laddr, raddr, status, pid) + + +def deprecated_method(replacement): + """A decorator which can be used to mark a method as deprecated + 'replcement' is the method name which will be called instead. + """ + + def outer(fun): + msg = "%s() is deprecated and will be removed; use %s() instead" % ( + fun.__name__, + replacement, + ) + if fun.__doc__ is None: + fun.__doc__ = msg + + @functools.wraps(fun) + def inner(self, *args, **kwargs): + warnings.warn(msg, category=DeprecationWarning, stacklevel=2) + return getattr(self, replacement)(*args, **kwargs) + + return inner + + return outer + + +class _WrapNumbers: + """Watches numbers so that they don't overflow and wrap + (reset to zero). + """ + + def __init__(self): + self.lock = threading.Lock() + self.cache = {} + self.reminders = {} + self.reminder_keys = {} + + def _add_dict(self, input_dict, name): + assert name not in self.cache + assert name not in self.reminders + assert name not in self.reminder_keys + self.cache[name] = input_dict + self.reminders[name] = collections.defaultdict(int) + self.reminder_keys[name] = collections.defaultdict(set) + + def _remove_dead_reminders(self, input_dict, name): + """In case the number of keys changed between calls (e.g. a + disk disappears) this removes the entry from self.reminders. + """ + old_dict = self.cache[name] + gone_keys = set(old_dict.keys()) - set(input_dict.keys()) + for gone_key in gone_keys: + for remkey in self.reminder_keys[name][gone_key]: + del self.reminders[name][remkey] + del self.reminder_keys[name][gone_key] + + def run(self, input_dict, name): + """Cache dict and sum numbers which overflow and wrap. + Return an updated copy of `input_dict`. + """ + if name not in self.cache: + # This was the first call. + self._add_dict(input_dict, name) + return input_dict + + self._remove_dead_reminders(input_dict, name) + + old_dict = self.cache[name] + new_dict = {} + for key in input_dict: + input_tuple = input_dict[key] + try: + old_tuple = old_dict[key] + except KeyError: + # The input dict has a new key (e.g. a new disk or NIC) + # which didn't exist in the previous call. + new_dict[key] = input_tuple + continue + + bits = [] + for i in range(len(input_tuple)): + input_value = input_tuple[i] + old_value = old_tuple[i] + remkey = (key, i) + if input_value < old_value: + # it wrapped! 
+ self.reminders[name][remkey] += old_value + self.reminder_keys[name][key].add(remkey) + bits.append(input_value + self.reminders[name][remkey]) + + new_dict[key] = tuple(bits) + + self.cache[name] = input_dict + return new_dict + + def cache_clear(self, name=None): + """Clear the internal cache, optionally only for function 'name'.""" + with self.lock: + if name is None: + self.cache.clear() + self.reminders.clear() + self.reminder_keys.clear() + else: + self.cache.pop(name, None) + self.reminders.pop(name, None) + self.reminder_keys.pop(name, None) + + def cache_info(self): + """Return internal cache dicts as a tuple of 3 elements.""" + with self.lock: + return (self.cache, self.reminders, self.reminder_keys) + + +def wrap_numbers(input_dict, name): + """Given an `input_dict` and a function `name`, adjust the numbers + which "wrap" (restart from zero) across different calls by adding + "old value" to "new value" and return an updated dict. + """ + with _wn.lock: + return _wn.run(input_dict, name) + + +_wn = _WrapNumbers() +wrap_numbers.cache_clear = _wn.cache_clear +wrap_numbers.cache_info = _wn.cache_info + + +# The read buffer size for open() builtin. This (also) dictates how +# much data we read(2) when iterating over file lines as in: +# >>> with open(file) as f: +# ... for line in f: +# ... ... +# Default per-line buffer size for binary files is 1K. For text files +# is 8K. We use a bigger buffer (32K) in order to have more consistent +# results when reading /proc pseudo files on Linux, see: +# https://github.com/giampaolo/psutil/issues/2050 +# On Python 2 this also speeds up the reading of big files: +# (namely /proc/{pid}/smaps and /proc/net/*): +# https://github.com/giampaolo/psutil/issues/708 +FILE_READ_BUFFER_SIZE = 32 * 1024 + + +def open_binary(fname): + return open(fname, "rb", buffering=FILE_READ_BUFFER_SIZE) + + +def open_text(fname): + """On Python 3 opens a file in text mode by using fs encoding and + a proper en/decoding errors handler. + On Python 2 this is just an alias for open(name, 'rt'). + """ + if not PY3: + return open(fname, buffering=FILE_READ_BUFFER_SIZE) + + # See: + # https://github.com/giampaolo/psutil/issues/675 + # https://github.com/giampaolo/psutil/pull/733 + fobj = open( + fname, + buffering=FILE_READ_BUFFER_SIZE, + encoding=ENCODING, + errors=ENCODING_ERRS, + ) + try: + # Dictates per-line read(2) buffer size. Defaults is 8k. See: + # https://github.com/giampaolo/psutil/issues/2050#issuecomment-1013387546 + fobj._CHUNK_SIZE = FILE_READ_BUFFER_SIZE + except AttributeError: + pass + except Exception: + fobj.close() + raise + + return fobj + + +def cat(fname, fallback=_DEFAULT, _open=open_text): + """Read entire file content and return it as a string. File is + opened in text mode. If specified, `fallback` is the value + returned in case of error, either if the file does not exist or + it can't be read(). + """ + if fallback is _DEFAULT: + with _open(fname) as f: + return f.read() + else: + try: + with _open(fname) as f: + return f.read() + except (IOError, OSError): + return fallback + + +def bcat(fname, fallback=_DEFAULT): + """Same as above but opens file in binary mode.""" + return cat(fname, fallback=fallback, _open=open_binary) + + +def bytes2human(n, format="%(value).1f%(symbol)s"): + """Used by various scripts. See: http://goo.gl/zeJZl. 
+ + >>> bytes2human(10000) + '9.8K' + >>> bytes2human(100001221) + '95.4M' + """ + symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') + prefix = {} + for i, s in enumerate(symbols[1:]): + prefix[s] = 1 << (i + 1) * 10 + for symbol in reversed(symbols[1:]): + if abs(n) >= prefix[symbol]: + value = float(n) / prefix[symbol] + return format % locals() + return format % dict(symbol=symbols[0], value=n) + + +def get_procfs_path(): + """Return updated psutil.PROCFS_PATH constant.""" + return sys.modules['psutil'].PROCFS_PATH + + +if PY3: + + def decode(s): + return s.decode(encoding=ENCODING, errors=ENCODING_ERRS) + +else: + + def decode(s): + return s + + +# ===================================================================== +# --- shell utils +# ===================================================================== + + +@memoize +def term_supports_colors(file=sys.stdout): # pragma: no cover + if os.name == 'nt': + return True + try: + import curses + + assert file.isatty() + curses.setupterm() + assert curses.tigetnum("colors") > 0 + except Exception: # noqa: BLE001 + return False + else: + return True + + +def hilite(s, color=None, bold=False): # pragma: no cover + """Return an highlighted version of 'string'.""" + if not term_supports_colors(): + return s + attr = [] + colors = dict( + blue='34', + brown='33', + darkgrey='30', + green='32', + grey='37', + lightblue='36', + red='91', + violet='35', + yellow='93', + ) + colors[None] = '29' + try: + color = colors[color] + except KeyError: + raise ValueError( + "invalid color %r; choose between %s" % (list(colors.keys())) + ) + attr.append(color) + if bold: + attr.append('1') + return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s) + + +def print_color( + s, color=None, bold=False, file=sys.stdout +): # pragma: no cover + """Print a colorized version of string.""" + if not term_supports_colors(): + print(s, file=file) # NOQA + elif POSIX: + print(hilite(s, color, bold), file=file) # NOQA + else: + import ctypes + + DEFAULT_COLOR = 7 + GetStdHandle = ctypes.windll.Kernel32.GetStdHandle + SetConsoleTextAttribute = ( + ctypes.windll.Kernel32.SetConsoleTextAttribute + ) + + colors = dict(green=2, red=4, brown=6, yellow=6) + colors[None] = DEFAULT_COLOR + try: + color = colors[color] + except KeyError: + raise ValueError( + "invalid color %r; choose between %r" + % (color, list(colors.keys())) + ) + if bold and color <= 7: + color += 8 + + handle_id = -12 if file is sys.stderr else -11 + GetStdHandle.restype = ctypes.c_ulong + handle = GetStdHandle(handle_id) + SetConsoleTextAttribute(handle, color) + try: + print(s, file=file) # NOQA + finally: + SetConsoleTextAttribute(handle, DEFAULT_COLOR) + + +def debug(msg): + """If PSUTIL_DEBUG env var is set, print a debug message to stderr.""" + if PSUTIL_DEBUG: + import inspect + + fname, lineno, _, lines, index = inspect.getframeinfo( + inspect.currentframe().f_back + ) + if isinstance(msg, Exception): + if isinstance(msg, (OSError, IOError, EnvironmentError)): + # ...because str(exc) may contain info about the file name + msg = "ignoring %s" % msg + else: + msg = "ignoring %r" % msg + print( # noqa + "psutil-debug [%s:%s]> %s" % (fname, lineno, msg), file=sys.stderr + ) diff --git a/venv/lib/python3.10/site-packages/psutil/_compat.py b/venv/lib/python3.10/site-packages/psutil/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..3db56c6019cc95a8aabb365c2aa098758271c17f --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_compat.py @@ -0,0 +1,483 @@ +# Copyright (c) 
2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Module which provides compatibility with older Python versions. +This is more future-compatible rather than the opposite (prefer latest +Python 3 way of doing things). +""" + +import collections +import contextlib +import errno +import functools +import os +import sys +import types + + +# fmt: off +__all__ = [ + # constants + "PY3", + # builtins + "long", "range", "super", "unicode", "basestring", + # literals + "u", "b", + # collections module + "lru_cache", + # shutil module + "which", "get_terminal_size", + # contextlib module + "redirect_stderr", + # python 3 exceptions + "FileNotFoundError", "PermissionError", "ProcessLookupError", + "InterruptedError", "ChildProcessError", "FileExistsError", +] +# fmt: on + + +PY3 = sys.version_info[0] >= 3 +_SENTINEL = object() + +if PY3: + long = int + xrange = range + unicode = str + basestring = str + range = range + + def u(s): + return s + + def b(s): + return s.encode("latin-1") + +else: + long = long + range = xrange + unicode = unicode + basestring = basestring + + def u(s): + return unicode(s, "unicode_escape") + + def b(s): + return s + + +# --- builtins + + +# Python 3 super(). +# Taken from "future" package. +# Credit: Ryan Kelly +if PY3: + super = super +else: + _builtin_super = super + + def super(type_=_SENTINEL, type_or_obj=_SENTINEL, framedepth=1): + """Like Python 3 builtin super(). If called without any arguments + it attempts to infer them at runtime. + """ + if type_ is _SENTINEL: + f = sys._getframe(framedepth) + try: + # Get the function's first positional argument. + type_or_obj = f.f_locals[f.f_code.co_varnames[0]] + except (IndexError, KeyError): + msg = 'super() used in a function with no args' + raise RuntimeError(msg) + try: + # Get the MRO so we can crawl it. + mro = type_or_obj.__mro__ + except (AttributeError, RuntimeError): + try: + mro = type_or_obj.__class__.__mro__ + except AttributeError: + msg = 'super() used in a non-newstyle class' + raise RuntimeError(msg) + for type_ in mro: + # Find the class that owns the currently-executing method. + for meth in type_.__dict__.values(): + # Drill down through any wrappers to the underlying func. + # This handles e.g. classmethod() and staticmethod(). + try: + while not isinstance(meth, types.FunctionType): + if isinstance(meth, property): + # Calling __get__ on the property will invoke + # user code which might throw exceptions or + # have side effects + meth = meth.fget + else: + try: + meth = meth.__func__ + except AttributeError: + meth = meth.__get__(type_or_obj, type_) + except (AttributeError, TypeError): + continue + if meth.func_code is f.f_code: + break # found + else: + # Not found. Move onto the next class in MRO. + continue + break # found + else: + msg = 'super() called outside a method' + raise RuntimeError(msg) + + # Dispatch to builtin super(). 
+ if type_or_obj is not _SENTINEL: + return _builtin_super(type_, type_or_obj) + return _builtin_super(type_) + + +# --- exceptions + + +if PY3: + FileNotFoundError = FileNotFoundError # NOQA + PermissionError = PermissionError # NOQA + ProcessLookupError = ProcessLookupError # NOQA + InterruptedError = InterruptedError # NOQA + ChildProcessError = ChildProcessError # NOQA + FileExistsError = FileExistsError # NOQA +else: + # https://github.com/PythonCharmers/python-future/blob/exceptions/ + # src/future/types/exceptions/pep3151.py + import platform + + def _instance_checking_exception(base_exception=Exception): + def wrapped(instance_checker): + class TemporaryClass(base_exception): + def __init__(self, *args, **kwargs): + if len(args) == 1 and isinstance(args[0], TemporaryClass): + unwrap_me = args[0] + for attr in dir(unwrap_me): + if not attr.startswith('__'): + setattr(self, attr, getattr(unwrap_me, attr)) + else: + super(TemporaryClass, self).__init__( # noqa + *args, **kwargs + ) + + class __metaclass__(type): + def __instancecheck__(cls, inst): + return instance_checker(inst) + + def __subclasscheck__(cls, classinfo): + value = sys.exc_info()[1] + return isinstance(value, cls) + + TemporaryClass.__name__ = instance_checker.__name__ + TemporaryClass.__doc__ = instance_checker.__doc__ + return TemporaryClass + + return wrapped + + @_instance_checking_exception(EnvironmentError) + def FileNotFoundError(inst): + return getattr(inst, 'errno', _SENTINEL) == errno.ENOENT + + @_instance_checking_exception(EnvironmentError) + def ProcessLookupError(inst): + return getattr(inst, 'errno', _SENTINEL) == errno.ESRCH + + @_instance_checking_exception(EnvironmentError) + def PermissionError(inst): + return getattr(inst, 'errno', _SENTINEL) in (errno.EACCES, errno.EPERM) + + @_instance_checking_exception(EnvironmentError) + def InterruptedError(inst): + return getattr(inst, 'errno', _SENTINEL) == errno.EINTR + + @_instance_checking_exception(EnvironmentError) + def ChildProcessError(inst): + return getattr(inst, 'errno', _SENTINEL) == errno.ECHILD + + @_instance_checking_exception(EnvironmentError) + def FileExistsError(inst): + return getattr(inst, 'errno', _SENTINEL) == errno.EEXIST + + if platform.python_implementation() != "CPython": + try: + raise OSError(errno.EEXIST, "perm") + except FileExistsError: + pass + except OSError: + msg = ( + "broken or incompatible Python implementation, see: " + "https://github.com/giampaolo/psutil/issues/1659" + ) + raise RuntimeError(msg) + + +# --- stdlib additions + + +# py 3.2 functools.lru_cache +# Taken from: http://code.activestate.com/recipes/578078 +# Credit: Raymond Hettinger +try: + from functools import lru_cache +except ImportError: + try: + from threading import RLock + except ImportError: + from dummy_threading import RLock + + _CacheInfo = collections.namedtuple( + "CacheInfo", ["hits", "misses", "maxsize", "currsize"] + ) + + class _HashedSeq(list): + __slots__ = ('hashvalue',) + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + + def _make_key( + args, + kwds, + typed, + kwd_mark=(_SENTINEL,), + fasttypes=set((int, str, frozenset, type(None))), # noqa + sorted=sorted, + tuple=tuple, + type=type, + len=len, + ): + key = args + if kwds: + sorted_items = sorted(kwds.items()) + key += kwd_mark + for item in sorted_items: + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for k, v in sorted_items) + elif len(key) == 1 
and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + + def lru_cache(maxsize=100, typed=False): + """Least-recently-used cache decorator, see: + http://docs.python.org/3/library/functools.html#functools.lru_cache. + """ + + def decorating_function(user_function): + cache = {} + stats = [0, 0] + HITS, MISSES = 0, 1 + make_key = _make_key + cache_get = cache.get + _len = len + lock = RLock() + root = [] + root[:] = [root, root, None, None] + nonlocal_root = [root] + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 + if maxsize == 0: + + def wrapper(*args, **kwds): + result = user_function(*args, **kwds) + stats[MISSES] += 1 + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + key = make_key(args, kwds, typed) + result = cache_get(key, root) + if result is not root: + stats[HITS] += 1 + return result + result = user_function(*args, **kwds) + cache[key] = result + stats[MISSES] += 1 + return result + + else: + + def wrapper(*args, **kwds): + if kwds or typed: + key = make_key(args, kwds, typed) + else: + key = args + lock.acquire() + try: + link = cache_get(key) + if link is not None: + (root,) = nonlocal_root + link_prev, link_next, key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + stats[HITS] += 1 + return result + finally: + lock.release() + result = user_function(*args, **kwds) + lock.acquire() + try: + (root,) = nonlocal_root + if key in cache: + pass + elif _len(cache) >= maxsize: + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + root = nonlocal_root[0] = oldroot[NEXT] + oldkey = root[KEY] + root[KEY] = root[RESULT] = None + del cache[oldkey] + cache[key] = oldroot + else: + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + stats[MISSES] += 1 + finally: + lock.release() + return result + + def cache_info(): + """Report cache statistics.""" + lock.acquire() + try: + return _CacheInfo( + stats[HITS], stats[MISSES], maxsize, len(cache) + ) + finally: + lock.release() + + def cache_clear(): + """Clear the cache and cache statistics.""" + lock.acquire() + try: + cache.clear() + root = nonlocal_root[0] + root[:] = [root, root, None, None] + stats[:] = [0, 0] + finally: + lock.release() + + wrapper.__wrapped__ = user_function + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return functools.update_wrapper(wrapper, user_function) + + return decorating_function + + +# python 3.3 +try: + from shutil import which +except ImportError: + + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. 
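The backport above reproduces the stdlib interface, so a sketch written against functools behaves identically under either implementation:

```python
from functools import lru_cache  # the fallback above mirrors this API

@lru_cache(maxsize=128)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040
print(fib.cache_info())  # CacheInfo(hits=28, misses=31, maxsize=128, currsize=31)
fib.cache_clear()
```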
+ """ + + def _access_check(fn, mode): + return ( + os.path.exists(fn) + and os.access(fn, mode) + and not os.path.isdir(fn) + ) + + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + if os.curdir not in path: + path.insert(0, os.curdir) + + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if normdir not in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + + +# python 3.3 +try: + from shutil import get_terminal_size +except ImportError: + + def get_terminal_size(fallback=(80, 24)): + try: + import fcntl + import struct + import termios + except ImportError: + return fallback + else: + try: + # This should work on Linux. + res = struct.unpack( + 'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234') + ) + return (res[1], res[0]) + except Exception: # noqa: BLE001 + return fallback + + +# python 3.3 +try: + from subprocess import TimeoutExpired as SubprocessTimeoutExpired +except ImportError: + + class SubprocessTimeoutExpired(Exception): + pass + + +# python 3.5 +try: + from contextlib import redirect_stderr +except ImportError: + + @contextlib.contextmanager + def redirect_stderr(new_target): + original = sys.stderr + try: + sys.stderr = new_target + yield new_target + finally: + sys.stderr = original diff --git a/venv/lib/python3.10/site-packages/psutil/_psaix.py b/venv/lib/python3.10/site-packages/psutil/_psaix.py new file mode 100644 index 0000000000000000000000000000000000000000..7310ab6c3d87cfc82712efbb6149acca4b5ee247 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_psaix.py @@ -0,0 +1,582 @@ +# Copyright (c) 2009, Giampaolo Rodola' +# Copyright (c) 2017, Arnon Yaari +# All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""AIX platform implementation.""" + +import functools +import glob +import os +import re +import subprocess +import sys +from collections import namedtuple + +from . import _common +from . import _psposix +from . import _psutil_aix as cext +from . 
import _psutil_posix as cext_posix +from ._common import NIC_DUPLEX_FULL +from ._common import NIC_DUPLEX_HALF +from ._common import NIC_DUPLEX_UNKNOWN +from ._common import AccessDenied +from ._common import NoSuchProcess +from ._common import ZombieProcess +from ._common import conn_to_ntuple +from ._common import get_procfs_path +from ._common import memoize_when_activated +from ._common import usage_percent +from ._compat import PY3 +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError + + +__extra__all__ = ["PROCFS_PATH"] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +HAS_THREADS = hasattr(cext, "proc_threads") +HAS_NET_IO_COUNTERS = hasattr(cext, "net_io_counters") +HAS_PROC_IO_COUNTERS = hasattr(cext, "proc_io_counters") + +PAGE_SIZE = cext_posix.getpagesize() +AF_LINK = cext_posix.AF_LINK + +PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SACTIVE: _common.STATUS_RUNNING, + cext.SSWAP: _common.STATUS_RUNNING, # TODO what status is this? + cext.SSTOP: _common.STATUS_STOPPED, +} + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +proc_info_map = dict( + ppid=0, + rss=1, + vms=2, + create_time=3, + nice=4, + num_threads=5, + status=6, + ttynr=7, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms']) +# psutil.Process.memory_full_info() +pfullmem = pmem +# psutil.Process.cpu_times() +scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait']) +# psutil.virtual_memory() +svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + total, avail, free, pinned, inuse = cext.virtual_mem() + percent = usage_percent((total - avail), total, round_=1) + return svmem(total, avail, percent, inuse, free) + + +def swap_memory(): + """Swap system memory as a (total, used, free, sin, sout) tuple.""" + total, free, sin, sout = cext.swap_mem() + used = total - free + percent = usage_percent(used, total, round_=1) + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system-wide CPU times as a named tuple.""" + ret = cext.per_cpu_times() + return scputimes(*[sum(x) for x in zip(*ret)]) + + +def per_cpu_times(): + """Return system per-CPU times as a list of named tuples.""" + ret = cext.per_cpu_times() + return [scputimes(*x) for x in ret] + + +def 
cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except ValueError: + # mimic os.cpu_count() behavior + return None + + +def cpu_count_cores(): + cmd = ["lsdev", "-Cc", "processor"] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = ( + x.decode(sys.stdout.encoding) for x in (stdout, stderr) + ) + if p.returncode != 0: + raise RuntimeError("%r command error\n%s" % (cmd, stderr)) + processors = stdout.strip().splitlines() + return len(processors) or None + + +def cpu_stats(): + """Return various CPU stats as a named tuple.""" + ctx_switches, interrupts, soft_interrupts, syscalls = cext.cpu_stats() + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls + ) + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_io_counters = cext.disk_io_counters +disk_usage = _psposix.disk_usage + + +def disk_partitions(all=False): + """Return system disk partitions.""" + # TODO - the filtering logic should be better checked so that + # it tries to reflect 'df' as much as possible + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if not all: + # Differently from, say, Linux, we don't have a list of + # common fs types so the best we can do, AFAIK, is to + # filter by filesystem having a total size > 0. + if not disk_usage(mountpoint).total: + continue + maxfile = maxpath = None # set later + ntuple = _common.sdiskpart( + device, mountpoint, fstype, opts, maxfile, maxpath + ) + retlist.append(ntuple) + return retlist + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_if_addrs = cext_posix.net_if_addrs + +if HAS_NET_IO_COUNTERS: + net_io_counters = cext.net_io_counters + + +def net_connections(kind, _pid=-1): + """Return socket connections. If pid == -1 return system-wide + connections (as opposed to connections opened by one process only). + """ + cmap = _common.conn_tmap + if kind not in cmap: + raise ValueError( + "invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in cmap])) + ) + families, types = _common.conn_tmap[kind] + rawlist = cext.net_connections(_pid) + ret = [] + for item in rawlist: + fd, fam, type_, laddr, raddr, status, pid = item + if fam not in families: + continue + if type_ not in types: + continue + nt = conn_to_ntuple( + fd, + fam, + type_, + laddr, + raddr, + status, + TCP_STATUSES, + pid=pid if _pid == -1 else None, + ) + ret.append(nt) + return ret + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + duplex_map = {"Full": NIC_DUPLEX_FULL, "Half": NIC_DUPLEX_HALF} + names = set([x[0] for x in net_if_addrs()]) + ret = {} + for name in names: + mtu = cext_posix.net_if_mtu(name) + flags = cext_posix.net_if_flags(name) + + # try to get speed and duplex + # TODO: rewrite this in C (entstat forks, so use truss -f to follow. + # looks like it is using an undocumented ioctl?) 
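cpu_count_cores() above (and net_if_stats() below) shell out with the same Popen/communicate/decode pattern; here is a self-contained sketch of that pattern, substituting sys.executable for the AIX tools so it runs anywhere:

```python
import subprocess
import sys

cmd = [sys.executable, "-c", "print('proc0'); print('proc1')"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
    raise RuntimeError("%r command error\n%s" % (cmd, stderr.decode()))
# Count the output lines, like the lsdev-based core count does.
print(len(stdout.decode().strip().splitlines()))  # -> 2
```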
+ duplex = "" + speed = 0 + p = subprocess.Popen( + ["/usr/bin/entstat", "-d", name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = ( + x.decode(sys.stdout.encoding) for x in (stdout, stderr) + ) + if p.returncode == 0: + re_result = re.search( + r"Running: (\d+) Mbps.*?(\w+) Duplex", stdout + ) + if re_result is not None: + speed = int(re_result.group(1)) + duplex = re_result.group(2) + + output_flags = ','.join(flags) + isup = 'running' in flags + duplex = duplex_map.get(duplex, NIC_DUPLEX_UNKNOWN) + ret[name] = _common.snicstats(isup, duplex, speed, mtu, output_flags) + return ret + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + return cext.boot_time() + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + localhost = (':0.0', ':0') + for item in rawlist: + user, tty, hostname, tstamp, user_process, pid = item + # note: the underlying C function includes entries about + # system boot, run level and others. We might want + # to use them in the future. + if not user_process: + continue + if hostname in localhost: + hostname = 'localhost' + nt = _common.suser(user, tty, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- processes +# ===================================================================== + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + return [int(x) for x in os.listdir(get_procfs_path()) if x.isdigit()] + + +def pid_exists(pid): + """Check for the existence of a unix pid.""" + return os.path.exists(os.path.join(get_procfs_path(), str(pid), "psinfo")) + + +def wrap_exceptions(fun): + """Call callable into a try/except clause and translate ENOENT, + EACCES and EPERM in NoSuchProcess or AccessDenied exceptions. + """ + + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except (FileNotFoundError, ProcessLookupError): + # ENOENT (no such file or directory) gets raised on open(). + # ESRCH (no such process) can get raised on read() if + # process is gone in meantime. 
+ if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + except PermissionError: + raise AccessDenied(self.pid, self._name) + + return wrapper + + +class Process: + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + self._procfs_path = get_procfs_path() + + def oneshot_enter(self): + self._proc_basic_info.cache_activate(self) + self._proc_cred.cache_activate(self) + + def oneshot_exit(self): + self._proc_basic_info.cache_deactivate(self) + self._proc_cred.cache_deactivate(self) + + @wrap_exceptions + @memoize_when_activated + def _proc_basic_info(self): + return cext.proc_basic_info(self.pid, self._procfs_path) + + @wrap_exceptions + @memoize_when_activated + def _proc_cred(self): + return cext.proc_cred(self.pid, self._procfs_path) + + @wrap_exceptions + def name(self): + if self.pid == 0: + return "swapper" + # note: max 16 characters + return cext.proc_name(self.pid, self._procfs_path).rstrip("\x00") + + @wrap_exceptions + def exe(self): + # there is no way to get executable path in AIX other than to guess, + # and guessing is more complex than what's in the wrapping class + cmdline = self.cmdline() + if not cmdline: + return '' + exe = cmdline[0] + if os.path.sep in exe: + # relative or absolute path + if not os.path.isabs(exe): + # if cwd has changed, we're out of luck - this may be wrong! + exe = os.path.abspath(os.path.join(self.cwd(), exe)) + if ( + os.path.isabs(exe) + and os.path.isfile(exe) + and os.access(exe, os.X_OK) + ): + return exe + # not found, move to search in PATH using basename only + exe = os.path.basename(exe) + # search for exe name PATH + for path in os.environ["PATH"].split(":"): + possible_exe = os.path.abspath(os.path.join(path, exe)) + if os.path.isfile(possible_exe) and os.access( + possible_exe, os.X_OK + ): + return possible_exe + return '' + + @wrap_exceptions + def cmdline(self): + return cext.proc_args(self.pid) + + @wrap_exceptions + def environ(self): + return cext.proc_environ(self.pid) + + @wrap_exceptions + def create_time(self): + return self._proc_basic_info()[proc_info_map['create_time']] + + @wrap_exceptions + def num_threads(self): + return self._proc_basic_info()[proc_info_map['num_threads']] + + if HAS_THREADS: + + @wrap_exceptions + def threads(self): + rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + # The underlying C implementation retrieves all OS threads + # and filters them by PID. At this point we can't tell whether + # an empty list means there were no connections for process or + # process is no longer active so we force NSP in case the PID + # is no longer there. + if not retlist: + # will raise NSP if process is gone + os.stat('%s/%s' % (self._procfs_path, self.pid)) + return retlist + + @wrap_exceptions + def connections(self, kind='inet'): + ret = net_connections(kind, _pid=self.pid) + # The underlying C implementation retrieves all OS connections + # and filters them by PID. At this point we can't tell whether + # an empty list means there were no connections for process or + # process is no longer active so we force NSP in case the PID + # is no longer there. 
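+        # (added note, not in the original; output shape assumed) os.stat()
+        # below raises FileNotFoundError once /proc/<pid> is gone, which
+        # @wrap_exceptions converts, e.g.:
+        #   >>> Process(12345).connections()   # hypothetical vanished PID
+        #   NoSuchProcess: process no longer exists (pid=12345)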
+ if not ret: + # will raise NSP if process is gone + os.stat('%s/%s' % (self._procfs_path, self.pid)) + return ret + + @wrap_exceptions + def nice_get(self): + return cext_posix.getpriority(self.pid) + + @wrap_exceptions + def nice_set(self, value): + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def ppid(self): + self._ppid = self._proc_basic_info()[proc_info_map['ppid']] + return self._ppid + + @wrap_exceptions + def uids(self): + real, effective, saved, _, _, _ = self._proc_cred() + return _common.puids(real, effective, saved) + + @wrap_exceptions + def gids(self): + _, _, _, real, effective, saved = self._proc_cred() + return _common.puids(real, effective, saved) + + @wrap_exceptions + def cpu_times(self): + t = cext.proc_cpu_times(self.pid, self._procfs_path) + return _common.pcputimes(*t) + + @wrap_exceptions + def terminal(self): + ttydev = self._proc_basic_info()[proc_info_map['ttynr']] + # convert from 64-bit dev_t to 32-bit dev_t and then map the device + ttydev = ((ttydev & 0x0000FFFF00000000) >> 16) | (ttydev & 0xFFFF) + # try to match rdev of /dev/pts/* files ttydev + for dev in glob.glob("/dev/**/*"): + if os.stat(dev).st_rdev == ttydev: + return dev + return None + + @wrap_exceptions + def cwd(self): + procfs_path = self._procfs_path + try: + result = os.readlink("%s/%s/cwd" % (procfs_path, self.pid)) + return result.rstrip('/') + except FileNotFoundError: + os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD + return "" + + @wrap_exceptions + def memory_info(self): + ret = self._proc_basic_info() + rss = ret[proc_info_map['rss']] * 1024 + vms = ret[proc_info_map['vms']] * 1024 + return pmem(rss, vms) + + memory_full_info = memory_info + + @wrap_exceptions + def status(self): + code = self._proc_basic_info()[proc_info_map['status']] + # XXX is '?' legit? 
(we're not supposed to return it anyway) + return PROC_STATUSES.get(code, '?') + + def open_files(self): + # TODO rewrite without using procfiles (stat /proc/pid/fd/* and then + # find matching name of the inode) + p = subprocess.Popen( + ["/usr/bin/procfiles", "-n", str(self.pid)], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = ( + x.decode(sys.stdout.encoding) for x in (stdout, stderr) + ) + if "no such process" in stderr.lower(): + raise NoSuchProcess(self.pid, self._name) + procfiles = re.findall(r"(\d+): S_IFREG.*\s*.*name:(.*)\n", stdout) + retlist = [] + for fd, path in procfiles: + path = path.strip() + if path.startswith("//"): + path = path[1:] + if path.lower() == "cannot be retrieved": + continue + retlist.append(_common.popenfile(path, int(fd))) + return retlist + + @wrap_exceptions + def num_fds(self): + if self.pid == 0: # no /proc/0/fd + return 0 + return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) + + @wrap_exceptions + def num_ctx_switches(self): + return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid)) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + if HAS_PROC_IO_COUNTERS: + + @wrap_exceptions + def io_counters(self): + try: + rc, wc, rb, wb = cext.proc_io_counters(self.pid) + except OSError: + # if process is terminated, proc_io_counters returns OSError + # instead of NSP + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + raise + return _common.pio(rc, wc, rb, wb) diff --git a/venv/lib/python3.10/site-packages/psutil/_psbsd.py b/venv/lib/python3.10/site-packages/psutil/_psbsd.py new file mode 100644 index 0000000000000000000000000000000000000000..da68f5efd570373d83836612fa422eafcfc67147 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_psbsd.py @@ -0,0 +1,977 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""FreeBSD, OpenBSD and NetBSD platforms implementation.""" + +import contextlib +import errno +import functools +import os +from collections import defaultdict +from collections import namedtuple +from xml.etree import ElementTree + +from . import _common +from . import _psposix +from . import _psutil_bsd as cext +from . 
import _psutil_posix as cext_posix +from ._common import FREEBSD +from ._common import NETBSD +from ._common import OPENBSD +from ._common import AccessDenied +from ._common import NoSuchProcess +from ._common import ZombieProcess +from ._common import conn_tmap +from ._common import conn_to_ntuple +from ._common import debug +from ._common import memoize +from ._common import memoize_when_activated +from ._common import usage_percent +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError +from ._compat import which + + +__extra__all__ = [] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +if FREEBSD: + PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SRUN: _common.STATUS_RUNNING, + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SWAIT: _common.STATUS_WAITING, + cext.SLOCK: _common.STATUS_LOCKED, + } +elif OPENBSD: + PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SSTOP: _common.STATUS_STOPPED, + # According to /usr/include/sys/proc.h SZOMB is unused. + # test_zombie_process() shows that SDEAD is the right + # equivalent. Also it appears there's no equivalent of + # psutil.STATUS_DEAD. SDEAD really means STATUS_ZOMBIE. + # cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SDEAD: _common.STATUS_ZOMBIE, + cext.SZOMB: _common.STATUS_ZOMBIE, + # From http://www.eecs.harvard.edu/~margo/cs161/videos/proc.h.txt + # OpenBSD has SRUN and SONPROC: SRUN indicates that a process + # is runnable but *not* yet running, i.e. is on a run queue. + # SONPROC indicates that the process is actually executing on + # a CPU, i.e. it is no longer on a run queue. 
+ # As such we'll map SRUN to STATUS_WAKING and SONPROC to + # STATUS_RUNNING + cext.SRUN: _common.STATUS_WAKING, + cext.SONPROC: _common.STATUS_RUNNING, + } +elif NETBSD: + PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SRUN: _common.STATUS_WAKING, + cext.SONPROC: _common.STATUS_RUNNING, + } + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +PAGESIZE = cext_posix.getpagesize() +AF_LINK = cext_posix.AF_LINK + +HAS_PER_CPU_TIMES = hasattr(cext, "per_cpu_times") +HAS_PROC_NUM_THREADS = hasattr(cext, "proc_num_threads") +HAS_PROC_OPEN_FILES = hasattr(cext, 'proc_open_files') +HAS_PROC_NUM_FDS = hasattr(cext, 'proc_num_fds') + +kinfo_proc_map = dict( + ppid=0, + status=1, + real_uid=2, + effective_uid=3, + saved_uid=4, + real_gid=5, + effective_gid=6, + saved_gid=7, + ttynr=8, + create_time=9, + ctx_switches_vol=10, + ctx_switches_unvol=11, + read_io_count=12, + write_io_count=13, + user_time=14, + sys_time=15, + ch_user_time=16, + ch_sys_time=17, + rss=18, + vms=19, + memtext=20, + memdata=21, + memstack=22, + cpunum=23, + name=24, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# fmt: off +# psutil.virtual_memory() +svmem = namedtuple( + 'svmem', ['total', 'available', 'percent', 'used', 'free', + 'active', 'inactive', 'buffers', 'cached', 'shared', 'wired']) +# psutil.cpu_times() +scputimes = namedtuple( + 'scputimes', ['user', 'nice', 'system', 'idle', 'irq']) +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms', 'text', 'data', 'stack']) +# psutil.Process.memory_full_info() +pfullmem = pmem +# psutil.Process.cpu_times() +pcputimes = namedtuple('pcputimes', + ['user', 'system', 'children_user', 'children_system']) +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple( + 'pmmap_grouped', 'path rss, private, ref_count, shadow_count') +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count') +# psutil.disk_io_counters() +if FREEBSD: + sdiskio = namedtuple('sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_time', 'write_time', + 'busy_time']) +else: + sdiskio = namedtuple('sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes']) +# fmt: on + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + mem = cext.virtual_mem() + if NETBSD: + total, free, active, inactive, wired, cached = mem + # On NetBSD buffers and shared mem is determined via /proc. + # The C ext set them to 0. 
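+        # (illustrative, format assumed) the Linux-style meminfo lines are
+        # kB counts, e.g.:
+        #   Buffers:    123456 kB
+        #   MemShared:   78901 kB
+        # hence the "* 1024" below converting field [1] to bytes.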
+ with open('/proc/meminfo', 'rb') as f: + for line in f: + if line.startswith(b'Buffers:'): + buffers = int(line.split()[1]) * 1024 + elif line.startswith(b'MemShared:'): + shared = int(line.split()[1]) * 1024 + # Before avail was calculated as (inactive + cached + free), + # same as zabbix, but it turned out it could exceed total (see + # #2233), so zabbix seems to be wrong. Htop calculates it + # differently, and the used value seem more realistic, so let's + # match htop. + # https://github.com/htop-dev/htop/blob/e7f447b/netbsd/NetBSDProcessList.c#L162 # noqa + # https://github.com/zabbix/zabbix/blob/af5e0f8/src/libs/zbxsysinfo/netbsd/memory.c#L135 # noqa + used = active + wired + avail = total - used + else: + total, free, active, inactive, wired, cached, buffers, shared = mem + # matches freebsd-memory CLI: + # * https://people.freebsd.org/~rse/dist/freebsd-memory + # * https://www.cyberciti.biz/files/scripts/freebsd-memory.pl.txt + # matches zabbix: + # * https://github.com/zabbix/zabbix/blob/af5e0f8/src/libs/zbxsysinfo/freebsd/memory.c#L143 # noqa + avail = inactive + cached + free + used = active + wired + cached + + percent = usage_percent((total - avail), total, round_=1) + return svmem( + total, + avail, + percent, + used, + free, + active, + inactive, + buffers, + cached, + shared, + wired, + ) + + +def swap_memory(): + """System swap memory as (total, used, free, sin, sout) namedtuple.""" + total, used, free, sin, sout = cext.swap_mem() + percent = usage_percent(used, total, round_=1) + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system per-CPU times as a namedtuple.""" + user, nice, system, idle, irq = cext.cpu_times() + return scputimes(user, nice, system, idle, irq) + + +if HAS_PER_CPU_TIMES: + + def per_cpu_times(): + """Return system CPU times as a namedtuple.""" + ret = [] + for cpu_t in cext.per_cpu_times(): + user, nice, system, idle, irq = cpu_t + item = scputimes(user, nice, system, idle, irq) + ret.append(item) + return ret + +else: + # XXX + # Ok, this is very dirty. + # On FreeBSD < 8 we cannot gather per-cpu information, see: + # https://github.com/giampaolo/psutil/issues/226 + # If num cpus > 1, on first call we return single cpu times to avoid a + # crash at psutil import time. + # Next calls will fail with NotImplementedError + def per_cpu_times(): + """Return system CPU times as a namedtuple.""" + if cpu_count_logical() == 1: + return [cpu_times()] + if per_cpu_times.__called__: + msg = "supported only starting from FreeBSD 8" + raise NotImplementedError(msg) + per_cpu_times.__called__ = True + return [cpu_times()] + + per_cpu_times.__called__ = False + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + return cext.cpu_count_logical() + + +if OPENBSD or NETBSD: + + def cpu_count_cores(): + # OpenBSD and NetBSD do not implement this. + return 1 if cpu_count_logical() == 1 else None + +else: + + def cpu_count_cores(): + """Return the number of CPU cores in the system.""" + # From the C module we'll get an XML string similar to this: + # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html + # We may get None in case "sysctl kern.sched.topology_spec" + # is not supported on this BSD version, in which case we'll mimic + # os.cpu_count() and return None. 
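+        # (illustrative sketch; shape assumed from the smp(4) man page)
+        # kern.sched.topology_spec yields XML roughly like:
+        #   <groups>
+        #     <group level="1">
+        #       <children>
+        #         <group level="2"><cpu count="2">0, 1</cpu></group>
+        #         <group level="2"><cpu count="2">2, 3</cpu></group>
+        #       </children>
+        #     </group>
+        #   </groups>
+        # so 'group/children/group/cpu' matches one <cpu> node per core
+        # group (2 cores in this example).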
+        ret = None
+        s = cext.cpu_topology()
+        if s is not None:
+            # get rid of padding chars appended at the end of the string
+            index = s.rfind("</groups>")
+            if index != -1:
+                s = s[: index + 9]
+                root = ElementTree.fromstring(s)
+                try:
+                    ret = len(root.findall('group/children/group/cpu')) or None
+                finally:
+                    # needed otherwise it will memleak
+                    root.clear()
+        if not ret:
+            # If logical CPUs == 1 it's obvious we have only 1 core.
+            if cpu_count_logical() == 1:
+                return 1
+        return ret
+
+
+def cpu_stats():
+    """Return various CPU stats as a named tuple."""
+    if FREEBSD:
+        # Note: the C ext is returning some metrics we are not exposing:
+        # traps.
+        ctxsw, intrs, soft_intrs, syscalls, traps = cext.cpu_stats()
+    elif NETBSD:
+        # XXX
+        # Note about intrs: the C extension returns 0. intrs
+        # can be determined via /proc/stat; it has the same value as
+        # soft_intrs though, so the kernel is faking it (?).
+        #
+        # Note about syscalls: the C extension always sets it to 0 (?).
+        #
+        # Note: the C ext is returning some metrics we are not exposing:
+        # traps, faults and forks.
+        ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = (
+            cext.cpu_stats()
+        )
+        with open('/proc/stat', 'rb') as f:
+            for line in f:
+                if line.startswith(b'intr'):
+                    intrs = int(line.split()[1])
+    elif OPENBSD:
+        # Note: the C ext is returning some metrics we are not exposing:
+        # traps, faults and forks.
+        ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = (
+            cext.cpu_stats()
+        )
+    return _common.scpustats(ctxsw, intrs, soft_intrs, syscalls)
+
+
+if FREEBSD:
+
+    def cpu_freq():
+        """Return frequency metrics for CPUs. As of Dec 2018 only
+        CPU 0 appears to be supported by FreeBSD and all other cores
+        match the frequency of CPU 0.
+        """
+        ret = []
+        num_cpus = cpu_count_logical()
+        for cpu in range(num_cpus):
+            try:
+                current, available_freq = cext.cpu_freq(cpu)
+            except NotImplementedError:
+                continue
+            if available_freq:
+                try:
+                    min_freq = int(available_freq.split(" ")[-1].split("/")[0])
+                except (IndexError, ValueError):
+                    min_freq = None
+                try:
+                    max_freq = int(available_freq.split(" ")[0].split("/")[0])
+                except (IndexError, ValueError):
+                    max_freq = None
+            ret.append(_common.scpufreq(current, min_freq, max_freq))
+        return ret
+
+elif OPENBSD:
+
+    def cpu_freq():
+        curr = float(cext.cpu_freq())
+        return [_common.scpufreq(curr, 0.0, 0.0)]
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+def disk_partitions(all=False):
+    """Return mounted disk partitions as a list of namedtuples.
+    'all' argument is ignored, see:
+    https://github.com/giampaolo/psutil/issues/906.
+ """ + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + maxfile = maxpath = None # set later + ntuple = _common.sdiskpart( + device, mountpoint, fstype, opts, maxfile, maxpath + ) + retlist.append(ntuple) + return retlist + + +disk_usage = _psposix.disk_usage +disk_io_counters = cext.disk_io_counters + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_io_counters = cext.net_io_counters +net_if_addrs = cext_posix.net_if_addrs + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + names = net_io_counters().keys() + ret = {} + for name in names: + try: + mtu = cext_posix.net_if_mtu(name) + flags = cext_posix.net_if_flags(name) + duplex, speed = cext_posix.net_if_duplex_speed(name) + except OSError as err: + # https://github.com/giampaolo/psutil/issues/1279 + if err.errno != errno.ENODEV: + raise + else: + if hasattr(_common, 'NicDuplex'): + duplex = _common.NicDuplex(duplex) + output_flags = ','.join(flags) + isup = 'running' in flags + ret[name] = _common.snicstats( + isup, duplex, speed, mtu, output_flags + ) + return ret + + +def net_connections(kind): + """System-wide network connections.""" + if kind not in _common.conn_tmap: + raise ValueError( + "invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap])) + ) + families, types = conn_tmap[kind] + ret = set() + + if OPENBSD: + rawlist = cext.net_connections(-1, families, types) + elif NETBSD: + rawlist = cext.net_connections(-1, kind) + else: # FreeBSD + rawlist = cext.net_connections(families, types) + + for item in rawlist: + fd, fam, type, laddr, raddr, status, pid = item + nt = conn_to_ntuple( + fd, fam, type, laddr, raddr, status, TCP_STATUSES, pid + ) + ret.add(nt) + return list(ret) + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +if FREEBSD: + + def sensors_battery(): + """Return battery info.""" + try: + percent, minsleft, power_plugged = cext.sensors_battery() + except NotImplementedError: + # See: https://github.com/giampaolo/psutil/issues/1074 + return None + power_plugged = power_plugged == 1 + if power_plugged: + secsleft = _common.POWER_TIME_UNLIMITED + elif minsleft == -1: + secsleft = _common.POWER_TIME_UNKNOWN + else: + secsleft = minsleft * 60 + return _common.sbattery(percent, secsleft, power_plugged) + + def sensors_temperatures(): + """Return CPU cores temperatures if available, else an empty dict.""" + ret = defaultdict(list) + num_cpus = cpu_count_logical() + for cpu in range(num_cpus): + try: + current, high = cext.sensors_cpu_temperature(cpu) + if high <= 0: + high = None + name = "Core %s" % cpu + ret["coretemp"].append( + _common.shwtemp(name, current, high, high) + ) + except NotImplementedError: + pass + + return ret + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + return cext.boot_time() + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + for item in rawlist: + user, tty, hostname, tstamp, pid = item + if pid == -1: + assert OPENBSD + 
pid = None + if tty == '~': + continue # reboot or shutdown + nt = _common.suser(user, tty or None, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- processes +# ===================================================================== + + +@memoize +def _pid_0_exists(): + try: + Process(0).name() + except NoSuchProcess: + return False + except AccessDenied: + return True + else: + return True + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + ret = cext.pids() + if OPENBSD and (0 not in ret) and _pid_0_exists(): + # On OpenBSD the kernel does not return PID 0 (neither does + # ps) but it's actually querable (Process(0) will succeed). + ret.insert(0, 0) + return ret + + +if OPENBSD or NETBSD: + + def pid_exists(pid): + """Return True if pid exists.""" + exists = _psposix.pid_exists(pid) + if not exists: + # We do this because _psposix.pid_exists() lies in case of + # zombie processes. + return pid in pids() + else: + return True + +else: + pid_exists = _psposix.pid_exists + + +def is_zombie(pid): + try: + st = cext.proc_oneshot_info(pid)[kinfo_proc_map['status']] + return PROC_STATUSES.get(st) == _common.STATUS_ZOMBIE + except OSError: + return False + + +def wrap_exceptions(fun): + """Decorator which translates bare OSError exceptions into + NoSuchProcess and AccessDenied. + """ + + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except ProcessLookupError: + if is_zombie(self.pid): + raise ZombieProcess(self.pid, self._name, self._ppid) + else: + raise NoSuchProcess(self.pid, self._name) + except PermissionError: + raise AccessDenied(self.pid, self._name) + except OSError: + if self.pid == 0: + if 0 in pids(): + raise AccessDenied(self.pid, self._name) + else: + raise + raise + + return wrapper + + +@contextlib.contextmanager +def wrap_exceptions_procfs(inst): + """Same as above, for routines relying on reading /proc fs.""" + try: + yield + except (ProcessLookupError, FileNotFoundError): + # ENOENT (no such file or directory) gets raised on open(). + # ESRCH (no such process) can get raised on read() if + # process is gone in meantime. + if is_zombie(inst.pid): + raise ZombieProcess(inst.pid, inst._name, inst._ppid) + else: + raise NoSuchProcess(inst.pid, inst._name) + except PermissionError: + raise AccessDenied(inst.pid, inst._name) + + +class Process: + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid", "_cache"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + + def _assert_alive(self): + """Raise NSP if the process disappeared on us.""" + # For those C function who do not raise NSP, possibly returning + # incorrect or incomplete result. 
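+        # (added note, not in the original) cext.proc_name() raises
+        # ProcessLookupError for a vanished PID; the @wrap_exceptions
+        # decorator on the calling method then surfaces it as
+        # NoSuchProcess.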
+ cext.proc_name(self.pid) + + @wrap_exceptions + @memoize_when_activated + def oneshot(self): + """Retrieves multiple process info in one shot as a raw tuple.""" + ret = cext.proc_oneshot_info(self.pid) + assert len(ret) == len(kinfo_proc_map) + return ret + + def oneshot_enter(self): + self.oneshot.cache_activate(self) + + def oneshot_exit(self): + self.oneshot.cache_deactivate(self) + + @wrap_exceptions + def name(self): + name = self.oneshot()[kinfo_proc_map['name']] + return name if name is not None else cext.proc_name(self.pid) + + @wrap_exceptions + def exe(self): + if FREEBSD: + if self.pid == 0: + return '' # else NSP + return cext.proc_exe(self.pid) + elif NETBSD: + if self.pid == 0: + # /proc/0 dir exists but /proc/0/exe doesn't + return "" + with wrap_exceptions_procfs(self): + return os.readlink("/proc/%s/exe" % self.pid) + else: + # OpenBSD: exe cannot be determined; references: + # https://chromium.googlesource.com/chromium/src/base/+/ + # master/base_paths_posix.cc + # We try our best guess by using which against the first + # cmdline arg (may return None). + cmdline = self.cmdline() + if cmdline: + return which(cmdline[0]) or "" + else: + return "" + + @wrap_exceptions + def cmdline(self): + if OPENBSD and self.pid == 0: + return [] # ...else it crashes + elif NETBSD: + # XXX - most of the times the underlying sysctl() call on + # NetBSD and OpenBSD returns a truncated string. Also + # /proc/pid/cmdline behaves the same so it looks like this + # is a kernel bug. + try: + return cext.proc_cmdline(self.pid) + except OSError as err: + if err.errno == errno.EINVAL: + if is_zombie(self.pid): + raise ZombieProcess(self.pid, self._name, self._ppid) + elif not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name, self._ppid) + else: + # XXX: this happens with unicode tests. It means the C + # routine is unable to decode invalid unicode chars. 
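+                    # (added summary, not in the original) EINVAL triage:
+                    #   1. zombie PID        -> ZombieProcess
+                    #   2. PID gone entirely -> NoSuchProcess
+                    #   3. otherwise (e.g. undecodable args) -> log via
+                    #      debug() and return [], as done just below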
+ debug("ignoring %r and returning an empty list" % err) + return [] + else: + raise + else: + return cext.proc_cmdline(self.pid) + + @wrap_exceptions + def environ(self): + return cext.proc_environ(self.pid) + + @wrap_exceptions + def terminal(self): + tty_nr = self.oneshot()[kinfo_proc_map['ttynr']] + tmap = _psposix.get_terminal_map() + try: + return tmap[tty_nr] + except KeyError: + return None + + @wrap_exceptions + def ppid(self): + self._ppid = self.oneshot()[kinfo_proc_map['ppid']] + return self._ppid + + @wrap_exceptions + def uids(self): + rawtuple = self.oneshot() + return _common.puids( + rawtuple[kinfo_proc_map['real_uid']], + rawtuple[kinfo_proc_map['effective_uid']], + rawtuple[kinfo_proc_map['saved_uid']], + ) + + @wrap_exceptions + def gids(self): + rawtuple = self.oneshot() + return _common.pgids( + rawtuple[kinfo_proc_map['real_gid']], + rawtuple[kinfo_proc_map['effective_gid']], + rawtuple[kinfo_proc_map['saved_gid']], + ) + + @wrap_exceptions + def cpu_times(self): + rawtuple = self.oneshot() + return _common.pcputimes( + rawtuple[kinfo_proc_map['user_time']], + rawtuple[kinfo_proc_map['sys_time']], + rawtuple[kinfo_proc_map['ch_user_time']], + rawtuple[kinfo_proc_map['ch_sys_time']], + ) + + if FREEBSD: + + @wrap_exceptions + def cpu_num(self): + return self.oneshot()[kinfo_proc_map['cpunum']] + + @wrap_exceptions + def memory_info(self): + rawtuple = self.oneshot() + return pmem( + rawtuple[kinfo_proc_map['rss']], + rawtuple[kinfo_proc_map['vms']], + rawtuple[kinfo_proc_map['memtext']], + rawtuple[kinfo_proc_map['memdata']], + rawtuple[kinfo_proc_map['memstack']], + ) + + memory_full_info = memory_info + + @wrap_exceptions + def create_time(self): + return self.oneshot()[kinfo_proc_map['create_time']] + + @wrap_exceptions + def num_threads(self): + if HAS_PROC_NUM_THREADS: + # FreeBSD + return cext.proc_num_threads(self.pid) + else: + return len(self.threads()) + + @wrap_exceptions + def num_ctx_switches(self): + rawtuple = self.oneshot() + return _common.pctxsw( + rawtuple[kinfo_proc_map['ctx_switches_vol']], + rawtuple[kinfo_proc_map['ctx_switches_unvol']], + ) + + @wrap_exceptions + def threads(self): + # Note: on OpenSBD this (/dev/mem) requires root access. 
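+        # (added note, not in the original) each raw item is a
+        # (thread_id, utime, stime) triple, e.g. (1, 0.05, 0.01), mapped
+        # onto pthread(id, user_time, system_time); times are seconds.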
+ rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + if OPENBSD: + self._assert_alive() + return retlist + + @wrap_exceptions + def connections(self, kind='inet'): + if kind not in conn_tmap: + raise ValueError( + "invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap])) + ) + families, types = conn_tmap[kind] + ret = [] + + if NETBSD: + rawlist = cext.net_connections(self.pid, kind) + elif OPENBSD: + rawlist = cext.net_connections(self.pid, families, types) + else: + rawlist = cext.proc_connections(self.pid, families, types) + + for item in rawlist: + fd, fam, type, laddr, raddr, status = item[:6] + if FREEBSD: + if (fam not in families) or (type not in types): + continue + nt = conn_to_ntuple( + fd, fam, type, laddr, raddr, status, TCP_STATUSES + ) + ret.append(nt) + + self._assert_alive() + return ret + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + @wrap_exceptions + def nice_get(self): + return cext_posix.getpriority(self.pid) + + @wrap_exceptions + def nice_set(self, value): + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def status(self): + code = self.oneshot()[kinfo_proc_map['status']] + # XXX is '?' legit? (we're not supposed to return it anyway) + return PROC_STATUSES.get(code, '?') + + @wrap_exceptions + def io_counters(self): + rawtuple = self.oneshot() + return _common.pio( + rawtuple[kinfo_proc_map['read_io_count']], + rawtuple[kinfo_proc_map['write_io_count']], + -1, + -1, + ) + + @wrap_exceptions + def cwd(self): + """Return process current working directory.""" + # sometimes we get an empty string, in which case we turn + # it into None + if OPENBSD and self.pid == 0: + return "" # ...else it would raise EINVAL + elif NETBSD or HAS_PROC_OPEN_FILES: + # FreeBSD < 8 does not support functions based on + # kinfo_getfile() and kinfo_getvmmap() + return cext.proc_cwd(self.pid) + else: + raise NotImplementedError( + "supported only starting from FreeBSD 8" if FREEBSD else "" + ) + + nt_mmap_grouped = namedtuple( + 'mmap', 'path rss, private, ref_count, shadow_count' + ) + nt_mmap_ext = namedtuple( + 'mmap', 'addr, perms path rss, private, ref_count, shadow_count' + ) + + def _not_implemented(self): + raise NotImplementedError + + # FreeBSD < 8 does not support functions based on kinfo_getfile() + # and kinfo_getvmmap() + if HAS_PROC_OPEN_FILES: + + @wrap_exceptions + def open_files(self): + """Return files opened by process as a list of namedtuples.""" + rawlist = cext.proc_open_files(self.pid) + return [_common.popenfile(path, fd) for path, fd in rawlist] + + else: + open_files = _not_implemented + + # FreeBSD < 8 does not support functions based on kinfo_getfile() + # and kinfo_getvmmap() + if HAS_PROC_NUM_FDS: + + @wrap_exceptions + def num_fds(self): + """Return the number of file descriptors opened by this process.""" + ret = cext.proc_num_fds(self.pid) + if NETBSD: + self._assert_alive() + return ret + + else: + num_fds = _not_implemented + + # --- FreeBSD only APIs + + if FREEBSD: + + @wrap_exceptions + def cpu_affinity_get(self): + return cext.proc_cpu_affinity_get(self.pid) + + @wrap_exceptions + def cpu_affinity_set(self, cpus): + # Pre-emptively check if CPUs are valid because the C + # function has a weird behavior in case of invalid CPUs, + # see: https://github.com/giampaolo/psutil/issues/586 + allcpus = 
tuple(range(len(per_cpu_times()))) + for cpu in cpus: + if cpu not in allcpus: + raise ValueError( + "invalid CPU #%i (choose between %s)" % (cpu, allcpus) + ) + try: + cext.proc_cpu_affinity_set(self.pid, cpus) + except OSError as err: + # 'man cpuset_setaffinity' about EDEADLK: + # <> + if err.errno in (errno.EINVAL, errno.EDEADLK): + for cpu in cpus: + if cpu not in allcpus: + raise ValueError( + "invalid CPU #%i (choose between %s)" + % (cpu, allcpus) + ) + raise + + @wrap_exceptions + def memory_maps(self): + return cext.proc_memory_maps(self.pid) + + @wrap_exceptions + def rlimit(self, resource, limits=None): + if limits is None: + return cext.proc_getrlimit(self.pid, resource) + else: + if len(limits) != 2: + raise ValueError( + "second argument must be a (soft, hard) tuple, got %s" + % repr(limits) + ) + soft, hard = limits + return cext.proc_setrlimit(self.pid, resource, soft, hard) diff --git a/venv/lib/python3.10/site-packages/psutil/_pslinux.py b/venv/lib/python3.10/site-packages/psutil/_pslinux.py new file mode 100644 index 0000000000000000000000000000000000000000..798dd3651ef37fdf464cff247b9497ba8130ba15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_pslinux.py @@ -0,0 +1,2366 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Linux platform implementation.""" + +from __future__ import division + +import base64 +import collections +import errno +import functools +import glob +import os +import re +import socket +import struct +import sys +import warnings +from collections import defaultdict +from collections import namedtuple + +from . import _common +from . import _psposix +from . import _psutil_linux as cext +from . 
import _psutil_posix as cext_posix +from ._common import NIC_DUPLEX_FULL +from ._common import NIC_DUPLEX_HALF +from ._common import NIC_DUPLEX_UNKNOWN +from ._common import AccessDenied +from ._common import NoSuchProcess +from ._common import ZombieProcess +from ._common import bcat +from ._common import cat +from ._common import debug +from ._common import decode +from ._common import get_procfs_path +from ._common import isfile_strict +from ._common import memoize +from ._common import memoize_when_activated +from ._common import open_binary +from ._common import open_text +from ._common import parse_environ_block +from ._common import path_exists_strict +from ._common import supports_ipv6 +from ._common import usage_percent +from ._compat import PY3 +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError +from ._compat import b +from ._compat import basestring + + +if PY3: + import enum +else: + enum = None + + +# fmt: off +__extra__all__ = [ + # + 'PROCFS_PATH', + # io prio constants + "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE", + "IOPRIO_CLASS_IDLE", + # connection status constants + "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", + "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", + "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", +] +# fmt: on + + +# ===================================================================== +# --- globals +# ===================================================================== + + +POWER_SUPPLY_PATH = "/sys/class/power_supply" +HAS_PROC_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid()) +HAS_PROC_SMAPS_ROLLUP = os.path.exists('/proc/%s/smaps_rollup' % os.getpid()) +HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get") +HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get") + +# Number of clock ticks per second +CLOCK_TICKS = os.sysconf("SC_CLK_TCK") +PAGESIZE = cext_posix.getpagesize() +BOOT_TIME = None # set later +LITTLE_ENDIAN = sys.byteorder == 'little' + +# "man iostat" states that sectors are equivalent with blocks and have +# a size of 512 bytes. Despite this value can be queried at runtime +# via /sys/block/{DISK}/queue/hw_sector_size and results may vary +# between 1k, 2k, or 4k... 
512 appears to be a magic constant used +# throughout Linux source code: +# * https://stackoverflow.com/a/38136179/376587 +# * https://lists.gt.net/linux/kernel/2241060 +# * https://github.com/giampaolo/psutil/issues/1305 +# * https://github.com/torvalds/linux/blob/ +# 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99 +# * https://lkml.org/lkml/2015/8/17/234 +DISK_SECTOR_SIZE = 512 + +if enum is None: + AF_LINK = socket.AF_PACKET +else: + AddressFamily = enum.IntEnum( + 'AddressFamily', {'AF_LINK': int(socket.AF_PACKET)} + ) + AF_LINK = AddressFamily.AF_LINK + +# ioprio_* constants http://linux.die.net/man/2/ioprio_get +if enum is None: + IOPRIO_CLASS_NONE = 0 + IOPRIO_CLASS_RT = 1 + IOPRIO_CLASS_BE = 2 + IOPRIO_CLASS_IDLE = 3 +else: + + class IOPriority(enum.IntEnum): + IOPRIO_CLASS_NONE = 0 + IOPRIO_CLASS_RT = 1 + IOPRIO_CLASS_BE = 2 + IOPRIO_CLASS_IDLE = 3 + + globals().update(IOPriority.__members__) + +# See: +# https://github.com/torvalds/linux/blame/master/fs/proc/array.c +# ...and (TASK_* constants): +# https://github.com/torvalds/linux/blob/master/include/linux/sched.h +PROC_STATUSES = { + "R": _common.STATUS_RUNNING, + "S": _common.STATUS_SLEEPING, + "D": _common.STATUS_DISK_SLEEP, + "T": _common.STATUS_STOPPED, + "t": _common.STATUS_TRACING_STOP, + "Z": _common.STATUS_ZOMBIE, + "X": _common.STATUS_DEAD, + "x": _common.STATUS_DEAD, + "K": _common.STATUS_WAKE_KILL, + "W": _common.STATUS_WAKING, + "I": _common.STATUS_IDLE, + "P": _common.STATUS_PARKED, +} + +# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h +TCP_STATUSES = { + "01": _common.CONN_ESTABLISHED, + "02": _common.CONN_SYN_SENT, + "03": _common.CONN_SYN_RECV, + "04": _common.CONN_FIN_WAIT1, + "05": _common.CONN_FIN_WAIT2, + "06": _common.CONN_TIME_WAIT, + "07": _common.CONN_CLOSE, + "08": _common.CONN_CLOSE_WAIT, + "09": _common.CONN_LAST_ACK, + "0A": _common.CONN_LISTEN, + "0B": _common.CONN_CLOSING, +} + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# fmt: off +# psutil.virtual_memory() +svmem = namedtuple( + 'svmem', ['total', 'available', 'percent', 'used', 'free', + 'active', 'inactive', 'buffers', 'cached', 'shared', 'slab']) +# psutil.disk_io_counters() +sdiskio = namedtuple( + 'sdiskio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_time', 'write_time', + 'read_merged_count', 'write_merged_count', + 'busy_time']) +# psutil.Process().open_files() +popenfile = namedtuple( + 'popenfile', ['path', 'fd', 'position', 'mode', 'flags']) +# psutil.Process().memory_info() +pmem = namedtuple('pmem', 'rss vms shared text lib data dirty') +# psutil.Process().memory_full_info() +pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap')) +# psutil.Process().memory_maps(grouped=True) +pmmap_grouped = namedtuple( + 'pmmap_grouped', + ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty', + 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap']) +# psutil.Process().memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) +# psutil.Process.io_counters() +pio = namedtuple('pio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'read_chars', 'write_chars']) +# psutil.Process.cpu_times() +pcputimes = namedtuple('pcputimes', + ['user', 'system', 'children_user', 'children_system', + 'iowait']) +# fmt: on + + +# 
===================================================================== +# --- utils +# ===================================================================== + + +def readlink(path): + """Wrapper around os.readlink().""" + assert isinstance(path, basestring), path + path = os.readlink(path) + # readlink() might return paths containing null bytes ('\x00') + # resulting in "TypeError: must be encoded string without NULL + # bytes, not str" errors when the string is passed to other + # fs-related functions (os.*, open(), ...). + # Apparently everything after '\x00' is garbage (we can have + # ' (deleted)', 'new' and possibly others), see: + # https://github.com/giampaolo/psutil/issues/717 + path = path.split('\x00')[0] + # Certain paths have ' (deleted)' appended. Usually this is + # bogus as the file actually exists. Even if it doesn't we + # don't care. + if path.endswith(' (deleted)') and not path_exists_strict(path): + path = path[:-10] + return path + + +def file_flags_to_mode(flags): + """Convert file's open() flags into a readable string. + Used by Process.open_files(). + """ + modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'} + mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)] + if flags & os.O_APPEND: + mode = mode.replace('w', 'a', 1) + mode = mode.replace('w+', 'r+') + # possible values: r, w, a, r+, a+ + return mode + + +def is_storage_device(name): + """Return True if the given name refers to a root device (e.g. + "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1", + "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram") + return True. + """ + # Re-adapted from iostat source code, see: + # https://github.com/sysstat/sysstat/blob/ + # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208 + # Some devices may have a slash in their name (e.g. cciss/c0d0...). + name = name.replace('/', '!') + including_virtual = True + if including_virtual: + path = "/sys/block/%s" % name + else: + path = "/sys/block/%s/device" % name + return os.access(path, os.F_OK) + + +@memoize +def set_scputimes_ntuple(procfs_path): + """Set a namedtuple of variable fields depending on the CPU times + available on this Linux kernel version which may be: + (user, nice, system, idle, iowait, irq, softirq, [steal, [guest, + [guest_nice]]]) + Used by cpu_times() function. + """ + global scputimes + with open_binary('%s/stat' % procfs_path) as f: + values = f.readline().split()[1:] + fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] + vlen = len(values) + if vlen >= 8: + # Linux >= 2.6.11 + fields.append('steal') + if vlen >= 9: + # Linux >= 2.6.24 + fields.append('guest') + if vlen >= 10: + # Linux >= 3.2.0 + fields.append('guest_nice') + scputimes = namedtuple('scputimes', fields) + + +try: + set_scputimes_ntuple("/proc") +except Exception as err: # noqa: BLE001 + # Don't want to crash at import time. + debug("ignoring exception on import: %r" % err) + scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0) + + +# ===================================================================== +# --- prlimit +# ===================================================================== + +# Backport of resource.prlimit() for Python 2. Originally this was done +# in C, but CentOS-6 which we use to create manylinux wheels is too old +# and does not support prlimit() syscall. As such the resulting wheel +# would not include prlimit(), even when installed on newer systems. +# This is the only part of psutil using ctypes. 
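+# (illustrative usage, not in the original; numbers are made up) once
+# resolved, the stdlib function and the ctypes fallback behave alike:
+#   >>> prlimit(os.getpid(), cext.RLIMIT_NOFILE)             # query
+#   (1024, 4096)
+#   >>> prlimit(os.getpid(), cext.RLIMIT_NOFILE, (2048, 4096))  # set
+# with the RLIMIT_* constants taken from the C extension, as exposed
+# via __extra__all__ below.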
+ +prlimit = None +try: + from resource import prlimit # python >= 3.4 +except ImportError: + import ctypes + + libc = ctypes.CDLL(None, use_errno=True) + + if hasattr(libc, "prlimit"): + + def prlimit(pid, resource_, limits=None): + class StructRlimit(ctypes.Structure): + _fields_ = [ + ('rlim_cur', ctypes.c_longlong), + ('rlim_max', ctypes.c_longlong), + ] + + current = StructRlimit() + if limits is None: + # get + ret = libc.prlimit(pid, resource_, None, ctypes.byref(current)) + else: + # set + new = StructRlimit() + new.rlim_cur = limits[0] + new.rlim_max = limits[1] + ret = libc.prlimit( + pid, resource_, ctypes.byref(new), ctypes.byref(current) + ) + + if ret != 0: + errno_ = ctypes.get_errno() + raise OSError(errno_, os.strerror(errno_)) + return (current.rlim_cur, current.rlim_max) + + +if prlimit is not None: + __extra__all__.extend( + [x for x in dir(cext) if x.startswith('RLIM') and x.isupper()] + ) + + +# ===================================================================== +# --- system memory +# ===================================================================== + + +def calculate_avail_vmem(mems): + """Fallback for kernels < 3.14 where /proc/meminfo does not provide + "MemAvailable", see: + https://blog.famzah.net/2014/09/24/. + + This code reimplements the algorithm outlined here: + https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ + commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 + + We use this function also when "MemAvailable" returns 0 (possibly a + kernel bug, see: https://github.com/giampaolo/psutil/issues/1915). + In that case this routine matches "free" CLI tool result ("available" + column). + + XXX: on recent kernels this calculation may differ by ~1.5% compared + to "MemAvailable:", as it's calculated slightly differently. + It is still way more realistic than doing (free + cached) though. + See: + * https://gitlab.com/procps-ng/procps/issues/42 + * https://github.com/famzah/linux-memavailable-procfs/issues/2 + """ + # Note about "fallback" value. According to: + # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ + # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 + # ...long ago "available" memory was calculated as (free + cached), + # We use fallback when one of these is missing from /proc/meminfo: + # "Active(file)": introduced in 2.6.28 / Dec 2008 + # "Inactive(file)": introduced in 2.6.28 / Dec 2008 + # "SReclaimable": introduced in 2.6.19 / Nov 2006 + # /proc/zoneinfo: introduced in 2.6.13 / Aug 2005 + free = mems[b'MemFree:'] + fallback = free + mems.get(b"Cached:", 0) + try: + lru_active_file = mems[b'Active(file):'] + lru_inactive_file = mems[b'Inactive(file):'] + slab_reclaimable = mems[b'SReclaimable:'] + except KeyError as err: + debug( + "%s is missing from /proc/meminfo; using an approximation for " + "calculating available memory" + % err.args[0] + ) + return fallback + try: + f = open_binary('%s/zoneinfo' % get_procfs_path()) + except IOError: + return fallback # kernel 2.6.13 + + watermark_low = 0 + with f: + for line in f: + line = line.strip() + if line.startswith(b'low'): + watermark_low += int(line.split()[1]) + watermark_low *= PAGESIZE + + avail = free - watermark_low + pagecache = lru_active_file + lru_inactive_file + pagecache -= min(pagecache / 2, watermark_low) + avail += pagecache + avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low) + return int(avail) + + +def virtual_memory(): + """Report virtual memory stats. 
+ This implementation mimicks procps-ng-3.3.12, aka "free" CLI tool: + https://gitlab.com/procps-ng/procps/blob/ + 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L778-791 + The returned values are supposed to match both "free" and "vmstat -s" + CLI tools. + """ + missing_fields = [] + mems = {} + with open_binary('%s/meminfo' % get_procfs_path()) as f: + for line in f: + fields = line.split() + mems[fields[0]] = int(fields[1]) * 1024 + + # /proc doc states that the available fields in /proc/meminfo vary + # by architecture and compile options, but these 3 values are also + # returned by sysinfo(2); as such we assume they are always there. + total = mems[b'MemTotal:'] + free = mems[b'MemFree:'] + try: + buffers = mems[b'Buffers:'] + except KeyError: + # https://github.com/giampaolo/psutil/issues/1010 + buffers = 0 + missing_fields.append('buffers') + try: + cached = mems[b"Cached:"] + except KeyError: + cached = 0 + missing_fields.append('cached') + else: + # "free" cmdline utility sums reclaimable to cached. + # Older versions of procps used to add slab memory instead. + # This got changed in: + # https://gitlab.com/procps-ng/procps/commit/ + # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e + cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19 + + try: + shared = mems[b'Shmem:'] # since kernel 2.6.32 + except KeyError: + try: + shared = mems[b'MemShared:'] # kernels 2.4 + except KeyError: + shared = 0 + missing_fields.append('shared') + + try: + active = mems[b"Active:"] + except KeyError: + active = 0 + missing_fields.append('active') + + try: + inactive = mems[b"Inactive:"] + except KeyError: + try: + inactive = ( + mems[b"Inact_dirty:"] + + mems[b"Inact_clean:"] + + mems[b"Inact_laundry:"] + ) + except KeyError: + inactive = 0 + missing_fields.append('inactive') + + try: + slab = mems[b"Slab:"] + except KeyError: + slab = 0 + + used = total - free - cached - buffers + if used < 0: + # May be symptomatic of running within a LCX container where such + # values will be dramatically distorted over those of the host. + used = total - free + + # - starting from 4.4.0 we match free's "available" column. + # Before 4.4.0 we calculated it as (free + buffers + cached) + # which matched htop. + # - free and htop available memory differs as per: + # http://askubuntu.com/a/369589 + # http://unix.stackexchange.com/a/65852/168884 + # - MemAvailable has been introduced in kernel 3.14 + try: + avail = mems[b'MemAvailable:'] + except KeyError: + avail = calculate_avail_vmem(mems) + else: + if avail == 0: + # Yes, it can happen (probably a kernel bug): + # https://github.com/giampaolo/psutil/issues/1915 + # In this case "free" CLI tool makes an estimate. We do the same, + # and it matches "free" CLI tool. + avail = calculate_avail_vmem(mems) + + if avail < 0: + avail = 0 + missing_fields.append('available') + elif avail > total: + # If avail is greater than total or our calculation overflows, + # that's symptomatic of running within a LCX container where such + # values will be dramatically distorted over those of the host. + # https://gitlab.com/procps-ng/procps/blob/ + # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764 + avail = free + + percent = usage_percent((total - avail), total, round_=1) + + # Warn about missing metrics which are set to 0. 
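+    # (illustrative, not in the original) e.g. if
+    # missing_fields == ['cached', 'shared'], the warning reads:
+    #   cached, shared memory stats couldn't be determined
+    #   and were set to 0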
+ if missing_fields: + msg = "%s memory stats couldn't be determined and %s set to 0" % ( + ", ".join(missing_fields), + "was" if len(missing_fields) == 1 else "were", + ) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + + return svmem( + total, + avail, + percent, + used, + free, + active, + inactive, + buffers, + cached, + shared, + slab, + ) + + +def swap_memory(): + """Return swap memory metrics.""" + mems = {} + with open_binary('%s/meminfo' % get_procfs_path()) as f: + for line in f: + fields = line.split() + mems[fields[0]] = int(fields[1]) * 1024 + # We prefer /proc/meminfo over sysinfo() syscall so that + # psutil.PROCFS_PATH can be used in order to allow retrieval + # for linux containers, see: + # https://github.com/giampaolo/psutil/issues/1015 + try: + total = mems[b'SwapTotal:'] + free = mems[b'SwapFree:'] + except KeyError: + _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo() + total *= unit_multiplier + free *= unit_multiplier + + used = total - free + percent = usage_percent(used, total, round_=1) + # get pgin/pgouts + try: + f = open_binary("%s/vmstat" % get_procfs_path()) + except IOError as err: + # see https://github.com/giampaolo/psutil/issues/722 + msg = ( + "'sin' and 'sout' swap memory stats couldn't " + + "be determined and were set to 0 (%s)" % str(err) + ) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + sin = sout = 0 + else: + with f: + sin = sout = None + for line in f: + # values are expressed in 4 kilo bytes, we want + # bytes instead + if line.startswith(b'pswpin'): + sin = int(line.split(b' ')[1]) * 4 * 1024 + elif line.startswith(b'pswpout'): + sout = int(line.split(b' ')[1]) * 4 * 1024 + if sin is not None and sout is not None: + break + else: + # we might get here when dealing with exotic Linux + # flavors, see: + # https://github.com/giampaolo/psutil/issues/313 + msg = "'sin' and 'sout' swap memory stats couldn't " + msg += "be determined and were set to 0" + warnings.warn(msg, RuntimeWarning, stacklevel=2) + sin = sout = 0 + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return a named tuple representing the following system-wide + CPU times: + (user, nice, system, idle, iowait, irq, softirq [steal, [guest, + [guest_nice]]]) + Last 3 fields may not be available on all Linux kernel versions. + """ + procfs_path = get_procfs_path() + set_scputimes_ntuple(procfs_path) + with open_binary('%s/stat' % procfs_path) as f: + values = f.readline().split() + fields = values[1 : len(scputimes._fields) + 1] + fields = [float(x) / CLOCK_TICKS for x in fields] + return scputimes(*fields) + + +def per_cpu_times(): + """Return a list of namedtuple representing the CPU times + for every CPU available on the system. 
+ """ + procfs_path = get_procfs_path() + set_scputimes_ntuple(procfs_path) + cpus = [] + with open_binary('%s/stat' % procfs_path) as f: + # get rid of the first line which refers to system wide CPU stats + f.readline() + for line in f: + if line.startswith(b'cpu'): + values = line.split() + fields = values[1 : len(scputimes._fields) + 1] + fields = [float(x) / CLOCK_TICKS for x in fields] + entry = scputimes(*fields) + cpus.append(entry) + return cpus + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except ValueError: + # as a second fallback we try to parse /proc/cpuinfo + num = 0 + with open_binary('%s/cpuinfo' % get_procfs_path()) as f: + for line in f: + if line.lower().startswith(b'processor'): + num += 1 + + # unknown format (e.g. amrel/sparc architectures), see: + # https://github.com/giampaolo/psutil/issues/200 + # try to parse /proc/stat as a last resort + if num == 0: + search = re.compile(r'cpu\d') + with open_text('%s/stat' % get_procfs_path()) as f: + for line in f: + line = line.split(' ')[0] + if search.match(line): + num += 1 + + if num == 0: + # mimic os.cpu_count() + return None + return num + + +def cpu_count_cores(): + """Return the number of CPU cores in the system.""" + # Method #1 + ls = set() + # These 2 files are the same but */core_cpus_list is newer while + # */thread_siblings_list is deprecated and may disappear in the future. + # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst + # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964 + # https://lkml.org/lkml/2019/2/26/41 + p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list" + p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list" + for path in glob.glob(p1) or glob.glob(p2): + with open_binary(path) as f: + ls.add(f.read().strip()) + result = len(ls) + if result != 0: + return result + + # Method #2 + mapping = {} + current_info = {} + with open_binary('%s/cpuinfo' % get_procfs_path()) as f: + for line in f: + line = line.strip().lower() + if not line: + # new section + try: + mapping[current_info[b'physical id']] = current_info[ + b'cpu cores' + ] + except KeyError: + pass + current_info = {} + else: + # ongoing section + if line.startswith((b'physical id', b'cpu cores')): + key, value = line.split(b'\t:', 1) + current_info[key] = int(value) + + result = sum(mapping.values()) + return result or None # mimic os.cpu_count() + + +def cpu_stats(): + """Return various CPU stats as a named tuple.""" + with open_binary('%s/stat' % get_procfs_path()) as f: + ctx_switches = None + interrupts = None + soft_interrupts = None + for line in f: + if line.startswith(b'ctxt'): + ctx_switches = int(line.split()[1]) + elif line.startswith(b'intr'): + interrupts = int(line.split()[1]) + elif line.startswith(b'softirq'): + soft_interrupts = int(line.split()[1]) + if ( + ctx_switches is not None + and soft_interrupts is not None + and interrupts is not None + ): + break + syscalls = 0 + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls + ) + + +def _cpu_get_cpuinfo_freq(): + """Return current CPU frequency from cpuinfo if available.""" + ret = [] + with open_binary('%s/cpuinfo' % get_procfs_path()) as f: + for line in f: + if line.lower().startswith(b'cpu mhz'): + ret.append(float(line.split(b':', 1)[1])) + return ret + + +if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or os.path.exists( + "/sys/devices/system/cpu/cpu0/cpufreq" +): 
+ + def cpu_freq(): + """Return frequency metrics for all CPUs. + Contrarily to other OSes, Linux updates these values in + real-time. + """ + cpuinfo_freqs = _cpu_get_cpuinfo_freq() + paths = glob.glob( + "/sys/devices/system/cpu/cpufreq/policy[0-9]*" + ) or glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq") + paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group())) + ret = [] + pjoin = os.path.join + for i, path in enumerate(paths): + if len(paths) == len(cpuinfo_freqs): + # take cached value from cpuinfo if available, see: + # https://github.com/giampaolo/psutil/issues/1851 + curr = cpuinfo_freqs[i] * 1000 + else: + curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None) + if curr is None: + # Likely an old RedHat, see: + # https://github.com/giampaolo/psutil/issues/1071 + curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None) + if curr is None: + msg = "can't find current frequency file" + raise NotImplementedError(msg) + curr = int(curr) / 1000 + max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000 + min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000 + ret.append(_common.scpufreq(curr, min_, max_)) + return ret + +else: + + def cpu_freq(): + """Alternate implementation using /proc/cpuinfo. + min and max frequencies are not available and are set to None. + """ + return [_common.scpufreq(x, 0.0, 0.0) for x in _cpu_get_cpuinfo_freq()] + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_if_addrs = cext_posix.net_if_addrs + + +class _Ipv6UnsupportedError(Exception): + pass + + +class Connections: + """A wrapper on top of /proc/net/* files, retrieving per-process + and system-wide open connections (TCP, UDP, UNIX) similarly to + "netstat -an". + + Note: in case of UNIX sockets we're only able to determine the + local endpoint/path, not the one it's connected to. + According to [1] it would be possible but not easily. + + [1] http://serverfault.com/a/417946 + """ + + def __init__(self): + # The string represents the basename of the corresponding + # /proc/net/{proto_name} file. 
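+        # Hypothetical usage through the public API (each kind maps to
+        # the /proc/net/* files registered below):
+        #
+        #   >>> psutil.net_connections(kind='tcp4')  # parses /proc/net/tcp
+        #   >>> psutil.net_connections(kind='udp6')  # parses /proc/net/udp6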
+ tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM) + tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM) + udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM) + udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM) + unix = ("unix", socket.AF_UNIX, None) + self.tmap = { + "all": (tcp4, tcp6, udp4, udp6, unix), + "tcp": (tcp4, tcp6), + "tcp4": (tcp4,), + "tcp6": (tcp6,), + "udp": (udp4, udp6), + "udp4": (udp4,), + "udp6": (udp6,), + "unix": (unix,), + "inet": (tcp4, tcp6, udp4, udp6), + "inet4": (tcp4, udp4), + "inet6": (tcp6, udp6), + } + self._procfs_path = None + + def get_proc_inodes(self, pid): + inodes = defaultdict(list) + for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)): + try: + inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd)) + except (FileNotFoundError, ProcessLookupError): + # ENOENT == file which is gone in the meantime; + # os.stat('/proc/%s' % self.pid) will be done later + # to force NSP (if it's the case) + continue + except OSError as err: + if err.errno == errno.EINVAL: + # not a link + continue + if err.errno == errno.ENAMETOOLONG: + # file name too long + debug(err) + continue + raise + else: + if inode.startswith('socket:['): + # the process is using a socket + inode = inode[8:][:-1] + inodes[inode].append((pid, int(fd))) + return inodes + + def get_all_inodes(self): + inodes = {} + for pid in pids(): + try: + inodes.update(self.get_proc_inodes(pid)) + except (FileNotFoundError, ProcessLookupError, PermissionError): + # os.listdir() is gonna raise a lot of access denied + # exceptions in case of unprivileged user; that's fine + # as we'll just end up returning a connection with PID + # and fd set to None anyway. + # Both netstat -an and lsof does the same so it's + # unlikely we can do any better. + # ENOENT just means a PID disappeared on us. + continue + return inodes + + @staticmethod + def decode_address(addr, family): + """Accept an "ip:port" address as displayed in /proc/net/* + and convert it into a human readable form, like: + + "0500000A:0016" -> ("10.0.0.5", 22) + "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521) + + The IP address portion is a little or big endian four-byte + hexadecimal number; that is, the least significant byte is listed + first, so we need to reverse the order of the bytes to convert it + to an IP address. + The port is represented as a two-byte hexadecimal number. 
+ + Reference: + http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html + """ + ip, port = addr.split(':') + port = int(port, 16) + # this usually refers to a local socket in listen mode with + # no end-points connected + if not port: + return () + if PY3: + ip = ip.encode('ascii') + if family == socket.AF_INET: + # see: https://github.com/giampaolo/psutil/issues/201 + if LITTLE_ENDIAN: + ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1]) + else: + ip = socket.inet_ntop(family, base64.b16decode(ip)) + else: # IPv6 + ip = base64.b16decode(ip) + try: + # see: https://github.com/giampaolo/psutil/issues/201 + if LITTLE_ENDIAN: + ip = socket.inet_ntop( + socket.AF_INET6, + struct.pack('>4I', *struct.unpack('<4I', ip)), + ) + else: + ip = socket.inet_ntop( + socket.AF_INET6, + struct.pack('<4I', *struct.unpack('<4I', ip)), + ) + except ValueError: + # see: https://github.com/giampaolo/psutil/issues/623 + if not supports_ipv6(): + raise _Ipv6UnsupportedError + else: + raise + return _common.addr(ip, port) + + @staticmethod + def process_inet(file, family, type_, inodes, filter_pid=None): + """Parse /proc/net/tcp* and /proc/net/udp* files.""" + if file.endswith('6') and not os.path.exists(file): + # IPv6 not supported + return + with open_text(file) as f: + f.readline() # skip the first line + for lineno, line in enumerate(f, 1): + try: + _, laddr, raddr, status, _, _, _, _, _, inode = ( + line.split()[:10] + ) + except ValueError: + raise RuntimeError( + "error while parsing %s; malformed line %s %r" + % (file, lineno, line) + ) + if inode in inodes: + # # We assume inet sockets are unique, so we error + # # out if there are multiple references to the + # # same inode. We won't do this for UNIX sockets. + # if len(inodes[inode]) > 1 and family != socket.AF_UNIX: + # raise ValueError("ambiguous inode with multiple " + # "PIDs references") + pid, fd = inodes[inode][0] + else: + pid, fd = None, -1 + if filter_pid is not None and filter_pid != pid: + continue + else: + if type_ == socket.SOCK_STREAM: + status = TCP_STATUSES[status] + else: + status = _common.CONN_NONE + try: + laddr = Connections.decode_address(laddr, family) + raddr = Connections.decode_address(raddr, family) + except _Ipv6UnsupportedError: + continue + yield (fd, family, type_, laddr, raddr, status, pid) + + @staticmethod + def process_unix(file, family, inodes, filter_pid=None): + """Parse /proc/net/unix files.""" + with open_text(file) as f: + f.readline() # skip the first line + for line in f: + tokens = line.split() + try: + _, _, _, _, type_, _, inode = tokens[0:7] + except ValueError: + if ' ' not in line: + # see: https://github.com/giampaolo/psutil/issues/766 + continue + raise RuntimeError( + "error while parsing %s; malformed line %r" + % (file, line) + ) + if inode in inodes: # noqa + # With UNIX sockets we can have a single inode + # referencing many file descriptors. 
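+                    # For example, a socketpair() inherited across fork()
+                    # may map one inode to several (pid, fd) pairs here,
+                    # e.g. (hypothetical sketch):
+                    #   {'15734': [(1289, 4), (1290, 4)]}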
+ pairs = inodes[inode] + else: + pairs = [(None, -1)] + for pid, fd in pairs: + if filter_pid is not None and filter_pid != pid: + continue + else: + path = tokens[-1] if len(tokens) == 8 else '' + type_ = _common.socktype_to_enum(int(type_)) + # XXX: determining the remote endpoint of a + # UNIX socket on Linux is not possible, see: + # https://serverfault.com/questions/252723/ + raddr = "" + status = _common.CONN_NONE + yield (fd, family, type_, path, raddr, status, pid) + + def retrieve(self, kind, pid=None): + if kind not in self.tmap: + raise ValueError( + "invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in self.tmap])) + ) + self._procfs_path = get_procfs_path() + if pid is not None: + inodes = self.get_proc_inodes(pid) + if not inodes: + # no connections for this process + return [] + else: + inodes = self.get_all_inodes() + ret = set() + for proto_name, family, type_ in self.tmap[kind]: + path = "%s/net/%s" % (self._procfs_path, proto_name) + if family in (socket.AF_INET, socket.AF_INET6): + ls = self.process_inet( + path, family, type_, inodes, filter_pid=pid + ) + else: + ls = self.process_unix(path, family, inodes, filter_pid=pid) + for fd, family, type_, laddr, raddr, status, bound_pid in ls: + if pid: + conn = _common.pconn( + fd, family, type_, laddr, raddr, status + ) + else: + conn = _common.sconn( + fd, family, type_, laddr, raddr, status, bound_pid + ) + ret.add(conn) + return list(ret) + + +_connections = Connections() + + +def net_connections(kind='inet'): + """Return system-wide open connections.""" + return _connections.retrieve(kind) + + +def net_io_counters(): + """Return network I/O statistics for every network interface + installed on the system as a dict of raw tuples. + """ + with open_text("%s/net/dev" % get_procfs_path()) as f: + lines = f.readlines() + retdict = {} + for line in lines[2:]: + colon = line.rfind(':') + assert colon > 0, repr(line) + name = line[:colon].strip() + fields = line[colon + 1 :].strip().split() + + # in + ( + bytes_recv, + packets_recv, + errin, + dropin, + fifoin, # unused + framein, # unused + compressedin, # unused + multicastin, # unused + # out + bytes_sent, + packets_sent, + errout, + dropout, + fifoout, # unused + collisionsout, # unused + carrierout, # unused + compressedout, + ) = map(int, fields) + + retdict[name] = ( + bytes_sent, + bytes_recv, + packets_sent, + packets_recv, + errin, + errout, + dropin, + dropout, + ) + return retdict + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + duplex_map = { + cext.DUPLEX_FULL: NIC_DUPLEX_FULL, + cext.DUPLEX_HALF: NIC_DUPLEX_HALF, + cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN, + } + names = net_io_counters().keys() + ret = {} + for name in names: + try: + mtu = cext_posix.net_if_mtu(name) + flags = cext_posix.net_if_flags(name) + duplex, speed = cext.net_if_duplex_speed(name) + except OSError as err: + # https://github.com/giampaolo/psutil/issues/1279 + if err.errno != errno.ENODEV: + raise + else: + debug(err) + else: + output_flags = ','.join(flags) + isup = 'running' in flags + ret[name] = _common.snicstats( + isup, duplex_map[duplex], speed, mtu, output_flags + ) + return ret + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_usage = _psposix.disk_usage + + +def disk_io_counters(perdisk=False): + """Return disk I/O statistics for every disk installed on the + system as a dict of raw tuples. 
+ """ + + def read_procfs(): + # OK, this is a bit confusing. The format of /proc/diskstats can + # have 3 variations. + # On Linux 2.4 each line has always 15 fields, e.g.: + # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8" + # On Linux 2.6+ each line *usually* has 14 fields, and the disk + # name is in another position, like this: + # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8" + # ...unless (Linux 2.6) the line refers to a partition instead + # of a disk, in which case the line has less fields (7): + # "3 1 hda1 8 8 8 8" + # 4.18+ has 4 fields added: + # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0" + # 5.5 has 2 more fields. + # See: + # https://www.kernel.org/doc/Documentation/iostats.txt + # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats + with open_text("%s/diskstats" % get_procfs_path()) as f: + lines = f.readlines() + for line in lines: + fields = line.split() + flen = len(fields) + # fmt: off + if flen == 15: + # Linux 2.4 + name = fields[3] + reads = int(fields[2]) + (reads_merged, rbytes, rtime, writes, writes_merged, + wbytes, wtime, _, busy_time, _) = map(int, fields[4:14]) + elif flen == 14 or flen >= 18: + # Linux 2.6+, line referring to a disk + name = fields[2] + (reads, reads_merged, rbytes, rtime, writes, writes_merged, + wbytes, wtime, _, busy_time, _) = map(int, fields[3:14]) + elif flen == 7: + # Linux 2.6+, line referring to a partition + name = fields[2] + reads, rbytes, writes, wbytes = map(int, fields[3:]) + rtime = wtime = reads_merged = writes_merged = busy_time = 0 + else: + raise ValueError("not sure how to interpret line %r" % line) + yield (name, reads, writes, rbytes, wbytes, rtime, wtime, + reads_merged, writes_merged, busy_time) + # fmt: on + + def read_sysfs(): + for block in os.listdir('/sys/block'): + for root, _, files in os.walk(os.path.join('/sys/block', block)): + if 'stat' not in files: + continue + with open_text(os.path.join(root, 'stat')) as f: + fields = f.read().strip().split() + name = os.path.basename(root) + # fmt: off + (reads, reads_merged, rbytes, rtime, writes, writes_merged, + wbytes, wtime, _, busy_time) = map(int, fields[:10]) + yield (name, reads, writes, rbytes, wbytes, rtime, + wtime, reads_merged, writes_merged, busy_time) + # fmt: on + + if os.path.exists('%s/diskstats' % get_procfs_path()): + gen = read_procfs() + elif os.path.exists('/sys/block'): + gen = read_sysfs() + else: + raise NotImplementedError( + "%s/diskstats nor /sys/block filesystem are available on this " + "system" + % get_procfs_path() + ) + + retdict = {} + for entry in gen: + # fmt: off + (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged, + writes_merged, busy_time) = entry + if not perdisk and not is_storage_device(name): + # perdisk=False means we want to calculate totals so we skip + # partitions (e.g. 'sda1', 'nvme0n1p1') and only include + # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks + # include a total of all their partitions + some extra size + # of their own: + # $ cat /proc/diskstats + # 259 0 sda 10485760 ... + # 259 1 sda1 5186039 ... + # 259 1 sda2 5082039 ... + # See: + # https://github.com/giampaolo/psutil/pull/1313 + continue + + rbytes *= DISK_SECTOR_SIZE + wbytes *= DISK_SECTOR_SIZE + retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime, + reads_merged, writes_merged, busy_time) + # fmt: on + + return retdict + + +class RootFsDeviceFinder: + """disk_partitions() may return partitions with device == "/dev/root" + or "rootfs". This container class uses different strategies to try to + obtain the real device path. 
Resources: + https://bootlin.com/blog/find-root-device/ + https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/. + """ + + __slots__ = ['major', 'minor'] + + def __init__(self): + dev = os.stat("/").st_dev + self.major = os.major(dev) + self.minor = os.minor(dev) + + def ask_proc_partitions(self): + with open_text("%s/partitions" % get_procfs_path()) as f: + for line in f.readlines()[2:]: + fields = line.split() + if len(fields) < 4: # just for extra safety + continue + major = int(fields[0]) if fields[0].isdigit() else None + minor = int(fields[1]) if fields[1].isdigit() else None + name = fields[3] + if major == self.major and minor == self.minor: + if name: # just for extra safety + return "/dev/%s" % name + + def ask_sys_dev_block(self): + path = "/sys/dev/block/%s:%s/uevent" % (self.major, self.minor) + with open_text(path) as f: + for line in f: + if line.startswith("DEVNAME="): + name = line.strip().rpartition("DEVNAME=")[2] + if name: # just for extra safety + return "/dev/%s" % name + + def ask_sys_class_block(self): + needle = "%s:%s" % (self.major, self.minor) + files = glob.iglob("/sys/class/block/*/dev") + for file in files: + try: + f = open_text(file) + except FileNotFoundError: # race condition + continue + else: + with f: + data = f.read().strip() + if data == needle: + name = os.path.basename(os.path.dirname(file)) + return "/dev/%s" % name + + def find(self): + path = None + if path is None: + try: + path = self.ask_proc_partitions() + except (IOError, OSError) as err: + debug(err) + if path is None: + try: + path = self.ask_sys_dev_block() + except (IOError, OSError) as err: + debug(err) + if path is None: + try: + path = self.ask_sys_class_block() + except (IOError, OSError) as err: + debug(err) + # We use exists() because the "/dev/*" part of the path is hard + # coded, so we want to be sure. + if path is not None and os.path.exists(path): + return path + + +def disk_partitions(all=False): + """Return mounted disk partitions as a list of namedtuples.""" + fstypes = set() + procfs_path = get_procfs_path() + if not all: + with open_text("%s/filesystems" % procfs_path) as f: + for line in f: + line = line.strip() + if not line.startswith("nodev"): + fstypes.add(line.strip()) + else: + # ignore all lines starting with "nodev" except "nodev zfs" + fstype = line.split("\t")[1] + if fstype == "zfs": + fstypes.add("zfs") + + # See: https://github.com/giampaolo/psutil/issues/1307 + if procfs_path == "/proc" and os.path.isfile('/etc/mtab'): + mounts_path = os.path.realpath("/etc/mtab") + else: + mounts_path = os.path.realpath("%s/self/mounts" % procfs_path) + + retlist = [] + partitions = cext.disk_partitions(mounts_path) + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if device in ("/dev/root", "rootfs"): + device = RootFsDeviceFinder().find() or device + if not all: + if device == '' or fstype not in fstypes: + continue + maxfile = maxpath = None # set later + ntuple = _common.sdiskpart( + device, mountpoint, fstype, opts, maxfile, maxpath + ) + retlist.append(ntuple) + + return retlist + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +def sensors_temperatures(): + """Return hardware (CPU and others) temperatures as a dict + including hardware name, label, current, max and critical + temperatures. 
+ + Implementation notes: + - /sys/class/hwmon looks like the most recent interface to + retrieve this info, and this implementation relies on it + only (old distros will probably use something else) + - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon + - /sys/class/thermal/thermal_zone* is another one but it's more + difficult to parse + """ + ret = collections.defaultdict(list) + basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') + # CentOS has an intermediate /device directory: + # https://github.com/giampaolo/psutil/issues/971 + # https://github.com/nicolargo/glances/issues/1060 + basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) + basenames = sorted(set([x.split('_')[0] for x in basenames])) + + # Only add the coretemp hwmon entries if they're not already in + # /sys/class/hwmon/ + # https://github.com/giampaolo/psutil/issues/1708 + # https://github.com/giampaolo/psutil/pull/1648 + basenames2 = glob.glob( + '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*' + ) + repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') + for name in basenames2: + altname = repl.sub('/sys/class/hwmon/', name) + if altname not in basenames: + basenames.append(name) + + for base in basenames: + try: + path = base + '_input' + current = float(bcat(path)) / 1000.0 + path = os.path.join(os.path.dirname(base), 'name') + unit_name = cat(path).strip() + except (IOError, OSError, ValueError): + # A lot of things can go wrong here, so let's just skip the + # whole entry. Sure thing is Linux's /sys/class/hwmon really + # is a stinky broken mess. + # https://github.com/giampaolo/psutil/issues/1009 + # https://github.com/giampaolo/psutil/issues/1101 + # https://github.com/giampaolo/psutil/issues/1129 + # https://github.com/giampaolo/psutil/issues/1245 + # https://github.com/giampaolo/psutil/issues/1323 + continue + + high = bcat(base + '_max', fallback=None) + critical = bcat(base + '_crit', fallback=None) + label = cat(base + '_label', fallback='').strip() + + if high is not None: + try: + high = float(high) / 1000.0 + except ValueError: + high = None + if critical is not None: + try: + critical = float(critical) / 1000.0 + except ValueError: + critical = None + + ret[unit_name].append((label, current, high, critical)) + + # Indication that no sensors were detected in /sys/class/hwmon/ + if not basenames: + basenames = glob.glob('/sys/class/thermal/thermal_zone*') + basenames = sorted(set(basenames)) + + for base in basenames: + try: + path = os.path.join(base, 'temp') + current = float(bcat(path)) / 1000.0 + path = os.path.join(base, 'type') + unit_name = cat(path).strip() + except (IOError, OSError, ValueError) as err: + debug(err) + continue + + trip_paths = glob.glob(base + '/trip_point*') + trip_points = set([ + '_'.join(os.path.basename(p).split('_')[0:3]) + for p in trip_paths + ]) + critical = None + high = None + for trip_point in trip_points: + path = os.path.join(base, trip_point + "_type") + trip_type = cat(path, fallback='').strip() + if trip_type == 'critical': + critical = bcat( + os.path.join(base, trip_point + "_temp"), fallback=None + ) + elif trip_type == 'high': + high = bcat( + os.path.join(base, trip_point + "_temp"), fallback=None + ) + + if high is not None: + try: + high = float(high) / 1000.0 + except ValueError: + high = None + if critical is not None: + try: + critical = float(critical) / 1000.0 + except ValueError: + critical = None + + ret[unit_name].append(('', current, high, critical)) + + return dict(ret) + + +def sensors_fans(): + """Return 
hardware fans info (for CPU and other peripherals) as a + dict including hardware label and current speed. + + Implementation notes: + - /sys/class/hwmon looks like the most recent interface to + retrieve this info, and this implementation relies on it + only (old distros will probably use something else) + - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon + """ + ret = collections.defaultdict(list) + basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*') + if not basenames: + # CentOS has an intermediate /device directory: + # https://github.com/giampaolo/psutil/issues/971 + basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*') + + basenames = sorted(set([x.split('_')[0] for x in basenames])) + for base in basenames: + try: + current = int(bcat(base + '_input')) + except (IOError, OSError) as err: + debug(err) + continue + unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip() + label = cat(base + '_label', fallback='').strip() + ret[unit_name].append(_common.sfan(label, current)) + + return dict(ret) + + +def sensors_battery(): + """Return battery information. + Implementation note: it appears /sys/class/power_supply/BAT0/ + directory structure may vary and provide files with the same + meaning but under different names, see: + https://github.com/giampaolo/psutil/issues/966. + """ + null = object() + + def multi_bcat(*paths): + """Attempt to read the content of multiple files which may + not exist. If none of them exist return None. + """ + for path in paths: + ret = bcat(path, fallback=null) + if ret != null: + try: + return int(ret) + except ValueError: + return ret.strip() + return None + + bats = [ + x + for x in os.listdir(POWER_SUPPLY_PATH) + if x.startswith('BAT') or 'battery' in x.lower() + ] + if not bats: + return None + # Get the first available battery. Usually this is "BAT0", except + # some rare exceptions: + # https://github.com/giampaolo/psutil/issues/1238 + root = os.path.join(POWER_SUPPLY_PATH, sorted(bats)[0]) + + # Base metrics. + energy_now = multi_bcat(root + "/energy_now", root + "/charge_now") + power_now = multi_bcat(root + "/power_now", root + "/current_now") + energy_full = multi_bcat(root + "/energy_full", root + "/charge_full") + time_to_empty = multi_bcat(root + "/time_to_empty_now") + + # Percent. If we have energy_full the percentage will be more + # accurate compared to reading /capacity file (float vs. int). + if energy_full is not None and energy_now is not None: + try: + percent = 100.0 * energy_now / energy_full + except ZeroDivisionError: + percent = 0.0 + else: + percent = int(cat(root + "/capacity", fallback=-1)) + if percent == -1: + return None + + # Is AC power cable plugged in? + # Note: AC0 is not always available and sometimes (e.g. CentOS7) + # it's called "AC". + power_plugged = None + online = multi_bcat( + os.path.join(POWER_SUPPLY_PATH, "AC0/online"), + os.path.join(POWER_SUPPLY_PATH, "AC/online"), + ) + if online is not None: + power_plugged = online == 1 + else: + status = cat(root + "/status", fallback="").strip().lower() + if status == "discharging": + power_plugged = False + elif status in ("charging", "full"): + power_plugged = True + + # Seconds left. 
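+    # Worked example for the energy/power branch below (illustrative
+    # numbers): energy_now=24000000 (µWh) and power_now=12000000 (µW)
+    # give 24000000 / 12000000 * 3600 = 7200 seconds (2 hours) left.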
+ # Note to self: we may also calculate the charging ETA as per: + # https://github.com/thialfihar/dotfiles/blob/ + # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55 + if power_plugged: + secsleft = _common.POWER_TIME_UNLIMITED + elif energy_now is not None and power_now is not None: + try: + secsleft = int(energy_now / power_now * 3600) + except ZeroDivisionError: + secsleft = _common.POWER_TIME_UNKNOWN + elif time_to_empty is not None: + secsleft = int(time_to_empty * 60) + if secsleft < 0: + secsleft = _common.POWER_TIME_UNKNOWN + else: + secsleft = _common.POWER_TIME_UNKNOWN + + return _common.sbattery(percent, secsleft, power_plugged) + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + for item in rawlist: + user, tty, hostname, tstamp, pid = item + nt = _common.suser(user, tty or None, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +def boot_time(): + """Return the system boot time expressed in seconds since the epoch.""" + global BOOT_TIME + path = '%s/stat' % get_procfs_path() + with open_binary(path) as f: + for line in f: + if line.startswith(b'btime'): + ret = float(line.strip().split()[1]) + BOOT_TIME = ret + return ret + raise RuntimeError("line 'btime' not found in %s" % path) + + +# ===================================================================== +# --- processes +# ===================================================================== + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()] + + +def pid_exists(pid): + """Check for the existence of a unix PID. Linux TIDs are not + supported (always return False). + """ + if not _psposix.pid_exists(pid): + return False + else: + # Linux's apparently does not distinguish between PIDs and TIDs + # (thread IDs). + # listdir("/proc") won't show any TID (only PIDs) but + # os.stat("/proc/{tid}") will succeed if {tid} exists. + # os.kill() can also be passed a TID. This is quite confusing. + # In here we want to enforce this distinction and support PIDs + # only, see: + # https://github.com/giampaolo/psutil/issues/687 + try: + # Note: already checked that this is faster than using a + # regular expr. Also (a lot) faster than doing + # 'return pid in pids()' + path = "%s/%s/status" % (get_procfs_path(), pid) + with open_binary(path) as f: + for line in f: + if line.startswith(b"Tgid:"): + tgid = int(line.split()[1]) + # If tgid and pid are the same then we're + # dealing with a process PID. + return tgid == pid + raise ValueError("'Tgid' line not found in %s" % path) + except (EnvironmentError, ValueError): + return pid in pids() + + +def ppid_map(): + """Obtain a {pid: ppid, ...} dict for all running processes in + one shot. Used to speed up Process.children(). + """ + ret = {} + procfs_path = get_procfs_path() + for pid in pids(): + try: + with open_binary("%s/%s/stat" % (procfs_path, pid)) as f: + data = f.read() + except (FileNotFoundError, ProcessLookupError): + # Note: we should be able to access /stat for all processes + # aka it's unlikely we'll bump into EPERM, which is good. 
+            pass
+        else:
+            rpar = data.rfind(b')')
+            dset = data[rpar + 2 :].split()
+            ppid = int(dset[1])
+            ret[pid] = ppid
+    return ret
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError and IOError exceptions
+    into NoSuchProcess and AccessDenied.
+    """
+
+    @functools.wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except PermissionError:
+            raise AccessDenied(self.pid, self._name)
+        except ProcessLookupError:
+            self._raise_if_zombie()
+            raise NoSuchProcess(self.pid, self._name)
+        except FileNotFoundError:
+            self._raise_if_zombie()
+            if not os.path.exists("%s/%s" % (self._procfs_path, self.pid)):
+                raise NoSuchProcess(self.pid, self._name)
+            raise
+
+    return wrapper
+
+
+class Process:
+    """Linux process implementation."""
+
+    __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+        self._ppid = None
+        self._procfs_path = get_procfs_path()
+
+    def _is_zombie(self):
+        # Note: most of the time Linux is able to return info about the
+        # process even if it's a zombie, and /proc/{pid} will exist.
+        # There are some exceptions though, like exe(), cmdline() and
+        # memory_maps(). In these cases /proc/{pid}/{file} exists but
+        # it's empty. Instead of returning a "null" value we'll raise an
+        # exception.
+        try:
+            data = bcat("%s/%s/stat" % (self._procfs_path, self.pid))
+        except (IOError, OSError):
+            return False
+        else:
+            rpar = data.rfind(b')')
+            status = data[rpar + 2 : rpar + 3]
+            return status == b"Z"
+
+    def _raise_if_zombie(self):
+        if self._is_zombie():
+            raise ZombieProcess(self.pid, self._name, self._ppid)
+
+    def _raise_if_not_alive(self):
+        """Raise NSP if the process disappeared on us."""
+        # For C functions which do not raise NSP and may return an
+        # incorrect or incomplete result.
+        os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+    @wrap_exceptions
+    @memoize_when_activated
+    def _parse_stat_file(self):
+        """Parse /proc/{pid}/stat file and return a dict with various
+        process info.
+        Using "man proc" as a reference: where "man proc" refers to
+        position N, always subtract 3 (e.g. ppid is position 4 in
+        'man proc' and position 1 here).
+        The return value is cached in case oneshot() ctx manager is
+        in use.
+        """
+        data = bcat("%s/%s/stat" % (self._procfs_path, self.pid))
+        # Process name is between parentheses. It can contain spaces and
+        # other parentheses. This is taken into account by looking for
+        # the first occurrence of "(" and the last occurrence of ")".
+        rpar = data.rfind(b')')
+        name = data[data.find(b'(') + 1 : rpar]
+        fields = data[rpar + 2 :].split()
+
+        ret = {}
+        ret['name'] = name
+        ret['status'] = fields[0]
+        ret['ppid'] = fields[1]
+        ret['ttynr'] = fields[4]
+        ret['utime'] = fields[11]
+        ret['stime'] = fields[12]
+        ret['children_utime'] = fields[13]
+        ret['children_stime'] = fields[14]
+        ret['create_time'] = fields[19]
+        ret['cpu_num'] = fields[36]
+        ret['blkio_ticks'] = fields[39]  # aka 'delayacct_blkio_ticks'
+
+        return ret
+
+    @wrap_exceptions
+    @memoize_when_activated
+    def _read_status_file(self):
+        """Read /proc/{pid}/status file and return its content.
+        The return value is cached in case oneshot() ctx manager is
+        in use.
+ """ + with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f: + return f.read() + + @wrap_exceptions + @memoize_when_activated + def _read_smaps_file(self): + with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid)) as f: + return f.read().strip() + + def oneshot_enter(self): + self._parse_stat_file.cache_activate(self) + self._read_status_file.cache_activate(self) + self._read_smaps_file.cache_activate(self) + + def oneshot_exit(self): + self._parse_stat_file.cache_deactivate(self) + self._read_status_file.cache_deactivate(self) + self._read_smaps_file.cache_deactivate(self) + + @wrap_exceptions + def name(self): + name = self._parse_stat_file()['name'] + if PY3: + name = decode(name) + # XXX - gets changed later and probably needs refactoring + return name + + @wrap_exceptions + def exe(self): + try: + return readlink("%s/%s/exe" % (self._procfs_path, self.pid)) + except (FileNotFoundError, ProcessLookupError): + self._raise_if_zombie() + # no such file error; might be raised also if the + # path actually exists for system processes with + # low pids (about 0-20) + if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)): + return "" + raise + + @wrap_exceptions + def cmdline(self): + with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f: + data = f.read() + if not data: + # may happen in case of zombie process + self._raise_if_zombie() + return [] + # 'man proc' states that args are separated by null bytes '\0' + # and last char is supposed to be a null byte. Nevertheless + # some processes may change their cmdline after being started + # (via setproctitle() or similar), they are usually not + # compliant with this rule and use spaces instead. Google + # Chrome process is an example. See: + # https://github.com/giampaolo/psutil/issues/1179 + sep = '\x00' if data.endswith('\x00') else ' ' + if data.endswith(sep): + data = data[:-1] + cmdline = data.split(sep) + # Sometimes last char is a null byte '\0' but the args are + # separated by spaces, see: https://github.com/giampaolo/psutil/ + # issues/1179#issuecomment-552984549 + if sep == '\x00' and len(cmdline) == 1 and ' ' in data: + cmdline = data.split(' ') + return cmdline + + @wrap_exceptions + def environ(self): + with open_text("%s/%s/environ" % (self._procfs_path, self.pid)) as f: + data = f.read() + return parse_environ_block(data) + + @wrap_exceptions + def terminal(self): + tty_nr = int(self._parse_stat_file()['ttynr']) + tmap = _psposix.get_terminal_map() + try: + return tmap[tty_nr] + except KeyError: + return None + + # May not be available on old kernels. 
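+    # A typical /proc/<pid>/io looks like this (values illustrative):
+    #
+    #   rchar: 323934931
+    #   wchar: 323929600
+    #   syscr: 632687
+    #   syscw: 632675
+    #   read_bytes: 812012
+    #   write_bytes: 323932160
+    #
+    # io_counters() below maps these fields into a pio namedtuple.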
+ if os.path.exists('/proc/%s/io' % os.getpid()): + + @wrap_exceptions + def io_counters(self): + fname = "%s/%s/io" % (self._procfs_path, self.pid) + fields = {} + with open_binary(fname) as f: + for line in f: + # https://github.com/giampaolo/psutil/issues/1004 + line = line.strip() + if line: + try: + name, value = line.split(b': ') + except ValueError: + # https://github.com/giampaolo/psutil/issues/1004 + continue + else: + fields[name] = int(value) + if not fields: + raise RuntimeError("%s file was empty" % fname) + try: + return pio( + fields[b'syscr'], # read syscalls + fields[b'syscw'], # write syscalls + fields[b'read_bytes'], # read bytes + fields[b'write_bytes'], # write bytes + fields[b'rchar'], # read chars + fields[b'wchar'], # write chars + ) + except KeyError as err: + raise ValueError( + "%r field was not found in %s; found fields are %r" + % (err.args[0], fname, fields) + ) + + @wrap_exceptions + def cpu_times(self): + values = self._parse_stat_file() + utime = float(values['utime']) / CLOCK_TICKS + stime = float(values['stime']) / CLOCK_TICKS + children_utime = float(values['children_utime']) / CLOCK_TICKS + children_stime = float(values['children_stime']) / CLOCK_TICKS + iowait = float(values['blkio_ticks']) / CLOCK_TICKS + return pcputimes(utime, stime, children_utime, children_stime, iowait) + + @wrap_exceptions + def cpu_num(self): + """What CPU the process is on.""" + return int(self._parse_stat_file()['cpu_num']) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + @wrap_exceptions + def create_time(self): + ctime = float(self._parse_stat_file()['create_time']) + # According to documentation, starttime is in field 21 and the + # unit is jiffies (clock ticks). + # We first divide it for clock ticks and then add uptime returning + # seconds since the epoch. + # Also use cached value if available. + bt = BOOT_TIME or boot_time() + return (ctime / CLOCK_TICKS) + bt + + @wrap_exceptions + def memory_info(self): + # ============================================================ + # | FIELD | DESCRIPTION | AKA | TOP | + # ============================================================ + # | rss | resident set size | | RES | + # | vms | total program size | size | VIRT | + # | shared | shared pages (from shared mappings) | | SHR | + # | text | text ('code') | trs | CODE | + # | lib | library (unused in Linux 2.6) | lrs | | + # | data | data + stack | drs | DATA | + # | dirty | dirty pages (unused in Linux 2.6) | dt | | + # ============================================================ + with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f: + vms, rss, shared, text, lib, data, dirty = ( + int(x) * PAGESIZE for x in f.readline().split()[:7] + ) + return pmem(rss, vms, shared, text, lib, data, dirty) + + if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS: + + def _parse_smaps_rollup(self): + # /proc/pid/smaps_rollup was added to Linux in 2017. Faster + # than /proc/pid/smaps. It reports higher PSS than */smaps + # (from 1k up to 200k higher; tested against all processes). + # IMPORTANT: /proc/pid/smaps_rollup is weird, because it + # raises ESRCH / ENOENT for many PIDs, even if they're alive + # (also as root). In that case we'll use /proc/pid/smaps as + # fallback, which is slower but has a +50% success rate + # compared to /proc/pid/smaps_rollup. 
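+            # /proc/<pid>/smaps_rollup excerpt (kB values illustrative):
+            #
+            #   Rss:               4200 kB
+            #   Pss:               1024 kB
+            #   Private_Clean:      256 kB
+            #   Private_Dirty:      512 kB
+            #   Swap:                 0 kB
+            #
+            # The parser below sums the Private_* lines into USS and
+            # converts everything from kB to bytes (* 1024).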
+ uss = pss = swap = 0 + with open_binary( + "{}/{}/smaps_rollup".format(self._procfs_path, self.pid) + ) as f: + for line in f: + if line.startswith(b"Private_"): + # Private_Clean, Private_Dirty, Private_Hugetlb + uss += int(line.split()[1]) * 1024 + elif line.startswith(b"Pss:"): + pss = int(line.split()[1]) * 1024 + elif line.startswith(b"Swap:"): + swap = int(line.split()[1]) * 1024 + return (uss, pss, swap) + + @wrap_exceptions + def _parse_smaps( + self, + # Gets Private_Clean, Private_Dirty, Private_Hugetlb. + _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"), + _pss_re=re.compile(br"\nPss\:\s+(\d+)"), + _swap_re=re.compile(br"\nSwap\:\s+(\d+)"), + ): + # /proc/pid/smaps does not exist on kernels < 2.6.14 or if + # CONFIG_MMU kernel configuration option is not enabled. + + # Note: using 3 regexes is faster than reading the file + # line by line. + # XXX: on Python 3 the 2 regexes are 30% slower than on + # Python 2 though. Figure out why. + # + # You might be tempted to calculate USS by subtracting + # the "shared" value from the "resident" value in + # /proc//statm. But at least on Linux, statm's "shared" + # value actually counts pages backed by files, which has + # little to do with whether the pages are actually shared. + # /proc/self/smaps on the other hand appears to give us the + # correct information. + smaps_data = self._read_smaps_file() + # Note: smaps file can be empty for certain processes. + # The code below will not crash though and will result to 0. + uss = sum(map(int, _private_re.findall(smaps_data))) * 1024 + pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024 + swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024 + return (uss, pss, swap) + + @wrap_exceptions + def memory_full_info(self): + if HAS_PROC_SMAPS_ROLLUP: # faster + try: + uss, pss, swap = self._parse_smaps_rollup() + except (ProcessLookupError, FileNotFoundError): + uss, pss, swap = self._parse_smaps() + else: + uss, pss, swap = self._parse_smaps() + basic_mem = self.memory_info() + return pfullmem(*basic_mem + (uss, pss, swap)) + + else: + memory_full_info = memory_info + + if HAS_PROC_SMAPS: + + @wrap_exceptions + def memory_maps(self): + """Return process's mapped memory regions as a list of named + tuples. Fields are explained in 'man proc'; here is an updated + (Apr 2012) version: http://goo.gl/fmebo. + + /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if + CONFIG_MMU kernel configuration option is not enabled. + """ + + def get_blocks(lines, current_block): + data = {} + for line in lines: + fields = line.split(None, 5) + if not fields[0].endswith(b':'): + # new block section + yield (current_block.pop(), data) + current_block.append(line) + else: + try: + data[fields[0]] = int(fields[1]) * 1024 + except ValueError: + if fields[0].startswith(b'VmFlags:'): + # see issue #369 + continue + else: + raise ValueError( + "don't know how to interpret line %r" + % line + ) + yield (current_block.pop(), data) + + data = self._read_smaps_file() + # Note: smaps file can be empty for certain processes or for + # zombies. 
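+            # Each /proc/<pid>/smaps mapping starts with a header line
+            # such as (illustrative):
+            #   00400000-004be000 r-xp 00000000 08:01 1304134  /usr/bin/python3
+            # followed by "Field: <n> kB" lines which get_blocks() above
+            # collects into a per-mapping dict.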
+            if not data:
+                self._raise_if_zombie()
+                return []
+            lines = data.split(b'\n')
+            ls = []
+            first_line = lines.pop(0)
+            current_block = [first_line]
+            for header, data in get_blocks(lines, current_block):
+                hfields = header.split(None, 5)
+                try:
+                    addr, perms, offset, dev, inode, path = hfields
+                except ValueError:
+                    addr, perms, offset, dev, inode, path = hfields + ['']
+                if not path:
+                    path = '[anon]'
+                else:
+                    if PY3:
+                        path = decode(path)
+                    path = path.strip()
+                    if path.endswith(' (deleted)') and not path_exists_strict(
+                        path
+                    ):
+                        path = path[:-10]
+                ls.append((
+                    decode(addr),
+                    decode(perms),
+                    path,
+                    data.get(b'Rss:', 0),
+                    data.get(b'Size:', 0),
+                    data.get(b'Pss:', 0),
+                    data.get(b'Shared_Clean:', 0),
+                    data.get(b'Shared_Dirty:', 0),
+                    data.get(b'Private_Clean:', 0),
+                    data.get(b'Private_Dirty:', 0),
+                    data.get(b'Referenced:', 0),
+                    data.get(b'Anonymous:', 0),
+                    data.get(b'Swap:', 0),
+                ))
+            return ls
+
+    @wrap_exceptions
+    def cwd(self):
+        return readlink("%s/%s/cwd" % (self._procfs_path, self.pid))
+
+    @wrap_exceptions
+    def num_ctx_switches(
+        self, _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')
+    ):
+        data = self._read_status_file()
+        ctxsw = _ctxsw_re.findall(data)
+        if not ctxsw:
+            raise NotImplementedError(
+                "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches' "
+                "lines were not found in %s/%s/status; the kernel is "
+                "probably older than 2.6.23" % (self._procfs_path, self.pid)
+            )
+        else:
+            return _common.pctxsw(int(ctxsw[0]), int(ctxsw[1]))
+
+    @wrap_exceptions
+    def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')):
+        # Note: on Python 3 using a re is faster than iterating over the
+        # file line by line. On Python 2 it's the exact opposite, and
+        # iterating over a file on Python 3 is slower than on Python 2.
+        data = self._read_status_file()
+        return int(_num_threads_re.findall(data)[0])
+
+    @wrap_exceptions
+    def threads(self):
+        thread_ids = os.listdir("%s/%s/task" % (self._procfs_path, self.pid))
+        thread_ids.sort()
+        retlist = []
+        hit_enoent = False
+        for thread_id in thread_ids:
+            fname = "%s/%s/task/%s/stat" % (
+                self._procfs_path,
+                self.pid,
+                thread_id,
+            )
+            try:
+                with open_binary(fname) as f:
+                    st = f.read().strip()
+            except (FileNotFoundError, ProcessLookupError):
+                # no such file or directory or no such process;
+                # it means thread disappeared on us
+                hit_enoent = True
+                continue
+            # ignore the first two values ("pid (exe)")
+            st = st[st.find(b')') + 2 :]
+            values = st.split(b' ')
+            utime = float(values[11]) / CLOCK_TICKS
+            stime = float(values[12]) / CLOCK_TICKS
+            ntuple = _common.pthread(int(thread_id), utime, stime)
+            retlist.append(ntuple)
+        if hit_enoent:
+            self._raise_if_not_alive()
+        return retlist
+
+    @wrap_exceptions
+    def nice_get(self):
+        # with open_text('%s/%s/stat' % (self._procfs_path, self.pid)) as f:
+        #     data = f.read()
+        # return int(data.split()[18])
+
+        # Use the C implementation instead.
+        return cext_posix.getpriority(self.pid)
+
+    @wrap_exceptions
+    def nice_set(self, value):
+        return cext_posix.setpriority(self.pid, value)
+
+    # starting from CentOS 6.
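+    # Sketch of the status file line backing _get_eligible_cpus() below
+    # (illustrative): "Cpus_allowed_list:\t0-3" is expanded into
+    # [0, 1, 2, 3] via range(0, 3 + 1).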
+ if HAS_CPU_AFFINITY: + + @wrap_exceptions + def cpu_affinity_get(self): + return cext.proc_cpu_affinity_get(self.pid) + + def _get_eligible_cpus( + self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)") + ): + # See: https://github.com/giampaolo/psutil/issues/956 + data = self._read_status_file() + match = _re.findall(data) + if match: + return list(range(int(match[0][0]), int(match[0][1]) + 1)) + else: + return list(range(len(per_cpu_times()))) + + @wrap_exceptions + def cpu_affinity_set(self, cpus): + try: + cext.proc_cpu_affinity_set(self.pid, cpus) + except (OSError, ValueError) as err: + if isinstance(err, ValueError) or err.errno == errno.EINVAL: + eligible_cpus = self._get_eligible_cpus() + all_cpus = tuple(range(len(per_cpu_times()))) + for cpu in cpus: + if cpu not in all_cpus: + raise ValueError( + "invalid CPU number %r; choose between %s" + % (cpu, eligible_cpus) + ) + if cpu not in eligible_cpus: + raise ValueError( + "CPU number %r is not eligible; choose " + "between %s" % (cpu, eligible_cpus) + ) + raise + + # only starting from kernel 2.6.13 + if HAS_PROC_IO_PRIORITY: + + @wrap_exceptions + def ionice_get(self): + ioclass, value = cext.proc_ioprio_get(self.pid) + if enum is not None: + ioclass = IOPriority(ioclass) + return _common.pionice(ioclass, value) + + @wrap_exceptions + def ionice_set(self, ioclass, value): + if value is None: + value = 0 + if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE): + raise ValueError("%r ioclass accepts no value" % ioclass) + if value < 0 or value > 7: + msg = "value not in 0-7 range" + raise ValueError(msg) + return cext.proc_ioprio_set(self.pid, ioclass, value) + + if prlimit is not None: + + @wrap_exceptions + def rlimit(self, resource_, limits=None): + # If pid is 0 prlimit() applies to the calling process and + # we don't want that. We should never get here though as + # PID 0 is not supported on Linux. + if self.pid == 0: + msg = "can't use prlimit() against PID 0 process" + raise ValueError(msg) + try: + if limits is None: + # get + return prlimit(self.pid, resource_) + else: + # set + if len(limits) != 2: + msg = ( + "second argument must be a (soft, hard) " + + "tuple, got %s" % repr(limits) + ) + raise ValueError(msg) + prlimit(self.pid, resource_, limits) + except OSError as err: + if err.errno == errno.ENOSYS: + # I saw this happening on Travis: + # https://travis-ci.org/giampaolo/psutil/jobs/51368273 + self._raise_if_zombie() + raise + + @wrap_exceptions + def status(self): + letter = self._parse_stat_file()['status'] + if PY3: + letter = letter.decode() + # XXX is '?' legit? (we're not supposed to return it anyway) + return PROC_STATUSES.get(letter, '?') + + @wrap_exceptions + def open_files(self): + retlist = [] + files = os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)) + hit_enoent = False + for fd in files: + file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd) + try: + path = readlink(file) + except (FileNotFoundError, ProcessLookupError): + # ENOENT == file which is gone in the meantime + hit_enoent = True + continue + except OSError as err: + if err.errno == errno.EINVAL: + # not a link + continue + if err.errno == errno.ENAMETOOLONG: + # file name too long + debug(err) + continue + raise + else: + # If path is not an absolute there's no way to tell + # whether it's a regular file or not, so we skip it. + # A regular file is always supposed to be have an + # absolute path though. + if path.startswith('/') and isfile_strict(path): + # Get file position and flags. 
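+                    # The first two lines of /proc/<pid>/fdinfo/<fd> look
+                    # like this (illustrative):
+                    #   pos:    4096
+                    #   flags:  0100002
+                    # "flags" is octal, hence the int(..., 8) below.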
+ file = "%s/%s/fdinfo/%s" % ( + self._procfs_path, + self.pid, + fd, + ) + try: + with open_binary(file) as f: + pos = int(f.readline().split()[1]) + flags = int(f.readline().split()[1], 8) + except (FileNotFoundError, ProcessLookupError): + # fd gone in the meantime; process may + # still be alive + hit_enoent = True + else: + mode = file_flags_to_mode(flags) + ntuple = popenfile( + path, int(fd), int(pos), mode, flags + ) + retlist.append(ntuple) + if hit_enoent: + self._raise_if_not_alive() + return retlist + + @wrap_exceptions + def connections(self, kind='inet'): + ret = _connections.retrieve(kind, self.pid) + self._raise_if_not_alive() + return ret + + @wrap_exceptions + def num_fds(self): + return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) + + @wrap_exceptions + def ppid(self): + return int(self._parse_stat_file()['ppid']) + + @wrap_exceptions + def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')): + data = self._read_status_file() + real, effective, saved = _uids_re.findall(data)[0] + return _common.puids(int(real), int(effective), int(saved)) + + @wrap_exceptions + def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')): + data = self._read_status_file() + real, effective, saved = _gids_re.findall(data)[0] + return _common.pgids(int(real), int(effective), int(saved)) diff --git a/venv/lib/python3.10/site-packages/psutil/_psosx.py b/venv/lib/python3.10/site-packages/psutil/_psosx.py new file mode 100644 index 0000000000000000000000000000000000000000..673ac0db755329f57f1672ed880b56125abf1102 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_psosx.py @@ -0,0 +1,555 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""macOS platform implementation.""" + +import errno +import functools +import os +from collections import namedtuple + +from . import _common +from . import _psposix +from . import _psutil_osx as cext +from . 
import _psutil_posix as cext_posix +from ._common import AccessDenied +from ._common import NoSuchProcess +from ._common import ZombieProcess +from ._common import conn_tmap +from ._common import conn_to_ntuple +from ._common import isfile_strict +from ._common import memoize_when_activated +from ._common import parse_environ_block +from ._common import usage_percent +from ._compat import PermissionError +from ._compat import ProcessLookupError + + +__extra__all__ = [] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +PAGESIZE = cext_posix.getpagesize() +AF_LINK = cext_posix.AF_LINK + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +PROC_STATUSES = { + cext.SIDL: _common.STATUS_IDLE, + cext.SRUN: _common.STATUS_RUNNING, + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SZOMB: _common.STATUS_ZOMBIE, +} + +kinfo_proc_map = dict( + ppid=0, + ruid=1, + euid=2, + suid=3, + rgid=4, + egid=5, + sgid=6, + ttynr=7, + ctime=8, + status=9, + name=10, +) + +pidtaskinfo_map = dict( + cpuutime=0, + cpustime=1, + rss=2, + vms=3, + pfaults=4, + pageins=5, + numthreads=6, + volctxsw=7, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# fmt: off +# psutil.cpu_times() +scputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle']) +# psutil.virtual_memory() +svmem = namedtuple( + 'svmem', ['total', 'available', 'percent', 'used', 'free', + 'active', 'inactive', 'wired']) +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms', 'pfaults', 'pageins']) +# psutil.Process.memory_full_info() +pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', )) +# fmt: on + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + """System virtual memory as a namedtuple.""" + total, active, inactive, wired, free, speculative = cext.virtual_mem() + # This is how Zabbix calculate avail and used mem: + # https://github.com/zabbix/zabbix/blob/trunk/src/libs/zbxsysinfo/ + # osx/memory.c + # Also see: https://github.com/giampaolo/psutil/issues/1277 + avail = inactive + free + used = active + wired + # This is NOT how Zabbix calculates free mem but it matches "free" + # cmdline utility. 
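+    # Worked example for the arithmetic above (illustrative numbers):
+    # active=4G, wired=2G, inactive=1G, free=1G ->
+    #   avail = inactive + free = 2G ; used = active + wired = 6G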
+ free -= speculative + percent = usage_percent((total - avail), total, round_=1) + return svmem(total, avail, percent, used, free, active, inactive, wired) + + +def swap_memory(): + """Swap system memory as a (total, used, free, sin, sout) tuple.""" + total, used, free, sin, sout = cext.swap_mem() + percent = usage_percent(used, total, round_=1) + return _common.sswap(total, used, free, percent, sin, sout) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system CPU times as a namedtuple.""" + user, nice, system, idle = cext.cpu_times() + return scputimes(user, nice, system, idle) + + +def per_cpu_times(): + """Return system CPU times as a named tuple.""" + ret = [] + for cpu_t in cext.per_cpu_times(): + user, nice, system, idle = cpu_t + item = scputimes(user, nice, system, idle) + ret.append(item) + return ret + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + return cext.cpu_count_logical() + + +def cpu_count_cores(): + """Return the number of CPU cores in the system.""" + return cext.cpu_count_cores() + + +def cpu_stats(): + ctx_switches, interrupts, soft_interrupts, syscalls, traps = ( + cext.cpu_stats() + ) + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls + ) + + +def cpu_freq(): + """Return CPU frequency. + On macOS per-cpu frequency is not supported. + Also, the returned frequency never changes, see: + https://arstechnica.com/civis/viewtopic.php?f=19&t=465002. + """ + curr, min_, max_ = cext.cpu_freq() + return [_common.scpufreq(curr, min_, max_)] + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_usage = _psposix.disk_usage +disk_io_counters = cext.disk_io_counters + + +def disk_partitions(all=False): + """Return mounted disk partitions as a list of namedtuples.""" + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if not all: + if not os.path.isabs(device) or not os.path.exists(device): + continue + maxfile = maxpath = None # set later + ntuple = _common.sdiskpart( + device, mountpoint, fstype, opts, maxfile, maxpath + ) + retlist.append(ntuple) + return retlist + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +def sensors_battery(): + """Return battery information.""" + try: + percent, minsleft, power_plugged = cext.sensors_battery() + except NotImplementedError: + # no power source - return None according to interface + return None + power_plugged = power_plugged == 1 + if power_plugged: + secsleft = _common.POWER_TIME_UNLIMITED + elif minsleft == -1: + secsleft = _common.POWER_TIME_UNKNOWN + else: + secsleft = minsleft * 60 + return _common.sbattery(percent, secsleft, power_plugged) + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_io_counters = cext.net_io_counters +net_if_addrs = cext_posix.net_if_addrs + + +def net_connections(kind='inet'): + """System-wide network connections.""" + # Note: on macOS this will fail with AccessDenied unless + # the process is owned by root. 
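+    # Hypothetical root session on macOS:
+    #
+    #   >>> psutil.net_connections('tcp')
+    #   [sconn(fd=3, family=<AddressFamily.AF_INET: 2>, type=1, ...), ...]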
+    ret = []
+    for pid in pids():
+        try:
+            cons = Process(pid).connections(kind)
+        except NoSuchProcess:
+            continue
+        else:
+            if cons:
+                for c in cons:
+                    c = list(c) + [pid]
+                    ret.append(_common.sconn(*c))
+    return ret
+
+
+def net_if_stats():
+    """Get NIC stats (isup, duplex, speed, mtu)."""
+    names = net_io_counters().keys()
+    ret = {}
+    for name in names:
+        try:
+            mtu = cext_posix.net_if_mtu(name)
+            flags = cext_posix.net_if_flags(name)
+            duplex, speed = cext_posix.net_if_duplex_speed(name)
+        except OSError as err:
+            # https://github.com/giampaolo/psutil/issues/1279
+            if err.errno != errno.ENODEV:
+                raise
+        else:
+            if hasattr(_common, 'NicDuplex'):
+                duplex = _common.NicDuplex(duplex)
+            output_flags = ','.join(flags)
+            isup = 'running' in flags
+            ret[name] = _common.snicstats(
+                isup, duplex, speed, mtu, output_flags
+            )
+    return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+    """The system boot time expressed in seconds since the epoch."""
+    return cext.boot_time()
+
+
+def users():
+    """Return currently connected users as a list of namedtuples."""
+    retlist = []
+    rawlist = cext.users()
+    for item in rawlist:
+        user, tty, hostname, tstamp, pid = item
+        if tty == '~':
+            continue  # reboot or shutdown
+        if not tstamp:
+            continue
+        nt = _common.suser(user, tty or None, hostname or None, tstamp, pid)
+        retlist.append(nt)
+    return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+    ls = cext.pids()
+    if 0 not in ls:
+        # On certain macOS versions the C pids() function doesn't return
+        # PID 0, but "ps" does, and the process is queryable via sysctl():
+        # https://travis-ci.org/giampaolo/psutil/jobs/309619941
+        try:
+            Process(0).create_time()
+            ls.insert(0, 0)
+        except NoSuchProcess:
+            pass
+        except AccessDenied:
+            ls.insert(0, 0)
+    return ls
+
+
+pid_exists = _psposix.pid_exists
+
+
+def is_zombie(pid):
+    try:
+        st = cext.proc_kinfo_oneshot(pid)[kinfo_proc_map['status']]
+        return st == cext.SZOMB
+    except OSError:
+        return False
+
+
+def wrap_exceptions(fun):
+    """Decorator which translates bare OSError exceptions into
+    NoSuchProcess and AccessDenied.
+    """
+
+    @functools.wraps(fun)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return fun(self, *args, **kwargs)
+        except ProcessLookupError:
+            if is_zombie(self.pid):
+                raise ZombieProcess(self.pid, self._name, self._ppid)
+            else:
+                raise NoSuchProcess(self.pid, self._name)
+        except PermissionError:
+            raise AccessDenied(self.pid, self._name)
+
+    return wrapper
+
+
+class Process:
+    """Wrapper class around underlying C implementation."""
+
+    __slots__ = ["pid", "_name", "_ppid", "_cache"]
+
+    def __init__(self, pid):
+        self.pid = pid
+        self._name = None
+        self._ppid = None
+
+    @wrap_exceptions
+    @memoize_when_activated
+    def _get_kinfo_proc(self):
+        # Note: should work with all PIDs without permission issues.
+        ret = cext.proc_kinfo_oneshot(self.pid)
+        assert len(ret) == len(kinfo_proc_map)
+        return ret
+
+    @wrap_exceptions
+    @memoize_when_activated
+    def _get_pidtaskinfo(self):
+        # Note: should work for PIDs owned by the current user only.
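+        # Sketch: inside a Process.oneshot() context both cached getters
+        # are computed at most once, e.g.:
+        #
+        #   >>> p = psutil.Process(os.getpid())
+        #   >>> with p.oneshot():
+        #   ...     p.memory_info(); p.cpu_times()  # one C call, cached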
+ ret = cext.proc_pidtaskinfo_oneshot(self.pid) + assert len(ret) == len(pidtaskinfo_map) + return ret + + def oneshot_enter(self): + self._get_kinfo_proc.cache_activate(self) + self._get_pidtaskinfo.cache_activate(self) + + def oneshot_exit(self): + self._get_kinfo_proc.cache_deactivate(self) + self._get_pidtaskinfo.cache_deactivate(self) + + @wrap_exceptions + def name(self): + name = self._get_kinfo_proc()[kinfo_proc_map['name']] + return name if name is not None else cext.proc_name(self.pid) + + @wrap_exceptions + def exe(self): + return cext.proc_exe(self.pid) + + @wrap_exceptions + def cmdline(self): + return cext.proc_cmdline(self.pid) + + @wrap_exceptions + def environ(self): + return parse_environ_block(cext.proc_environ(self.pid)) + + @wrap_exceptions + def ppid(self): + self._ppid = self._get_kinfo_proc()[kinfo_proc_map['ppid']] + return self._ppid + + @wrap_exceptions + def cwd(self): + return cext.proc_cwd(self.pid) + + @wrap_exceptions + def uids(self): + rawtuple = self._get_kinfo_proc() + return _common.puids( + rawtuple[kinfo_proc_map['ruid']], + rawtuple[kinfo_proc_map['euid']], + rawtuple[kinfo_proc_map['suid']], + ) + + @wrap_exceptions + def gids(self): + rawtuple = self._get_kinfo_proc() + return _common.puids( + rawtuple[kinfo_proc_map['rgid']], + rawtuple[kinfo_proc_map['egid']], + rawtuple[kinfo_proc_map['sgid']], + ) + + @wrap_exceptions + def terminal(self): + tty_nr = self._get_kinfo_proc()[kinfo_proc_map['ttynr']] + tmap = _psposix.get_terminal_map() + try: + return tmap[tty_nr] + except KeyError: + return None + + @wrap_exceptions + def memory_info(self): + rawtuple = self._get_pidtaskinfo() + return pmem( + rawtuple[pidtaskinfo_map['rss']], + rawtuple[pidtaskinfo_map['vms']], + rawtuple[pidtaskinfo_map['pfaults']], + rawtuple[pidtaskinfo_map['pageins']], + ) + + @wrap_exceptions + def memory_full_info(self): + basic_mem = self.memory_info() + uss = cext.proc_memory_uss(self.pid) + return pfullmem(*basic_mem + (uss,)) + + @wrap_exceptions + def cpu_times(self): + rawtuple = self._get_pidtaskinfo() + return _common.pcputimes( + rawtuple[pidtaskinfo_map['cpuutime']], + rawtuple[pidtaskinfo_map['cpustime']], + # children user / system times are not retrievable (set to 0) + 0.0, + 0.0, + ) + + @wrap_exceptions + def create_time(self): + return self._get_kinfo_proc()[kinfo_proc_map['ctime']] + + @wrap_exceptions + def num_ctx_switches(self): + # Unvoluntary value seems not to be available; + # getrusage() numbers seems to confirm this theory. + # We set it to 0. 
+ vol = self._get_pidtaskinfo()[pidtaskinfo_map['volctxsw']] + return _common.pctxsw(vol, 0) + + @wrap_exceptions + def num_threads(self): + return self._get_pidtaskinfo()[pidtaskinfo_map['numthreads']] + + @wrap_exceptions + def open_files(self): + if self.pid == 0: + return [] + files = [] + rawlist = cext.proc_open_files(self.pid) + for path, fd in rawlist: + if isfile_strict(path): + ntuple = _common.popenfile(path, fd) + files.append(ntuple) + return files + + @wrap_exceptions + def connections(self, kind='inet'): + if kind not in conn_tmap: + raise ValueError( + "invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap])) + ) + families, types = conn_tmap[kind] + rawlist = cext.proc_connections(self.pid, families, types) + ret = [] + for item in rawlist: + fd, fam, type, laddr, raddr, status = item + nt = conn_to_ntuple( + fd, fam, type, laddr, raddr, status, TCP_STATUSES + ) + ret.append(nt) + return ret + + @wrap_exceptions + def num_fds(self): + if self.pid == 0: + return 0 + return cext.proc_num_fds(self.pid) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) + + @wrap_exceptions + def nice_get(self): + return cext_posix.getpriority(self.pid) + + @wrap_exceptions + def nice_set(self, value): + return cext_posix.setpriority(self.pid, value) + + @wrap_exceptions + def status(self): + code = self._get_kinfo_proc()[kinfo_proc_map['status']] + # XXX is '?' legit? (we're not supposed to return it anyway) + return PROC_STATUSES.get(code, '?') + + @wrap_exceptions + def threads(self): + rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + return retlist diff --git a/venv/lib/python3.10/site-packages/psutil/_psposix.py b/venv/lib/python3.10/site-packages/psutil/_psposix.py new file mode 100644 index 0000000000000000000000000000000000000000..42bdfa7ef6c7fc28521b5b65b6b61567745ce250 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_psposix.py @@ -0,0 +1,243 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Routines common to all posix systems.""" + +import glob +import os +import signal +import sys +import time + +from ._common import MACOS +from ._common import TimeoutExpired +from ._common import memoize +from ._common import sdiskusage +from ._common import usage_percent +from ._compat import PY3 +from ._compat import ChildProcessError +from ._compat import FileNotFoundError +from ._compat import InterruptedError +from ._compat import PermissionError +from ._compat import ProcessLookupError +from ._compat import unicode + + +if MACOS: + from . import _psutil_osx + + +if PY3: + import enum +else: + enum = None + + +__all__ = ['pid_exists', 'wait_pid', 'disk_usage', 'get_terminal_map'] + + +def pid_exists(pid): + """Check whether pid exists in the current process table.""" + if pid == 0: + # According to "man 2 kill" PID 0 has a special meaning: + # it refers to <> so we don't want to go any further. + # If we get here it means this UNIX platform *does* have + # a process with id 0. 
+ return True + try: + os.kill(pid, 0) + except ProcessLookupError: + return False + except PermissionError: + # EPERM clearly means there's a process to deny access to + return True + # According to "man 2 kill" possible error values are + # (EINVAL, EPERM, ESRCH) + else: + return True + + +# Python 3.5 signals enum (contributed by me ^^): +# https://bugs.python.org/issue21076 +if enum is not None and hasattr(signal, "Signals"): + Negsignal = enum.IntEnum( + 'Negsignal', dict([(x.name, -x.value) for x in signal.Signals]) + ) + + def negsig_to_enum(num): + """Convert a negative signal value to an enum.""" + try: + return Negsignal(num) + except ValueError: + return num + +else: # pragma: no cover + + def negsig_to_enum(num): + return num + + +def wait_pid( + pid, + timeout=None, + proc_name=None, + _waitpid=os.waitpid, + _timer=getattr(time, 'monotonic', time.time), # noqa: B008 + _min=min, + _sleep=time.sleep, + _pid_exists=pid_exists, +): + """Wait for a process PID to terminate. + + If the process terminated normally by calling exit(3) or _exit(2), + or by returning from main(), the return value is the positive integer + passed to *exit(). + + If it was terminated by a signal it returns the negated value of the + signal which caused the termination (e.g. -SIGTERM). + + If PID is not a children of os.getpid() (current process) just + wait until the process disappears and return None. + + If PID does not exist at all return None immediately. + + If *timeout* != None and process is still alive raise TimeoutExpired. + timeout=0 is also possible (either return immediately or raise). + """ + if pid <= 0: + # see "man waitpid" + msg = "can't wait for PID 0" + raise ValueError(msg) + interval = 0.0001 + flags = 0 + if timeout is not None: + flags |= os.WNOHANG + stop_at = _timer() + timeout + + def sleep(interval): + # Sleep for some time and return a new increased interval. + if timeout is not None: + if _timer() >= stop_at: + raise TimeoutExpired(timeout, pid=pid, name=proc_name) + _sleep(interval) + return _min(interval * 2, 0.04) + + # See: https://linux.die.net/man/2/waitpid + while True: + try: + retpid, status = os.waitpid(pid, flags) + except InterruptedError: + interval = sleep(interval) + except ChildProcessError: + # This has two meanings: + # - PID is not a child of os.getpid() in which case + # we keep polling until it's gone + # - PID never existed in the first place + # In both cases we'll eventually return None as we + # can't determine its exit status code. + while _pid_exists(pid): + interval = sleep(interval) + return + else: + if retpid == 0: + # WNOHANG flag was used and PID is still running. + interval = sleep(interval) + continue + + if os.WIFEXITED(status): + # Process terminated normally by calling exit(3) or _exit(2), + # or by returning from main(). The return value is the + # positive integer passed to *exit(). + return os.WEXITSTATUS(status) + elif os.WIFSIGNALED(status): + # Process exited due to a signal. Return the negative value + # of that signal. + return negsig_to_enum(-os.WTERMSIG(status)) + # elif os.WIFSTOPPED(status): + # # Process was stopped via SIGSTOP or is being traced, and + # # waitpid() was called with WUNTRACED flag. PID is still + # # alive. From now on waitpid() will keep returning (0, 0) + # # until the process state doesn't change. + # # It may make sense to catch/enable this since stopped PIDs + # # ignore SIGTERM. 
+ # interval = sleep(interval) + # continue + # elif os.WIFCONTINUED(status): + # # Process was resumed via SIGCONT and waitpid() was called + # # with WCONTINUED flag. + # interval = sleep(interval) + # continue + else: + # Should never happen. + raise ValueError("unknown process exit status %r" % status) + + +def disk_usage(path): + """Return disk usage associated with path. + Note: UNIX usually reserves 5% disk space which is not accessible + by user. In this function "total" and "used" values reflect the + total and used disk space whereas "free" and "percent" represent + the "free" and "used percent" user disk space. + """ + if PY3: + st = os.statvfs(path) + else: # pragma: no cover + # os.statvfs() does not support unicode on Python 2: + # - https://github.com/giampaolo/psutil/issues/416 + # - http://bugs.python.org/issue18695 + try: + st = os.statvfs(path) + except UnicodeEncodeError: + if isinstance(path, unicode): + try: + path = path.encode(sys.getfilesystemencoding()) + except UnicodeEncodeError: + pass + st = os.statvfs(path) + else: + raise + + # Total space which is only available to root (unless changed + # at system level). + total = st.f_blocks * st.f_frsize + # Remaining free space usable by root. + avail_to_root = st.f_bfree * st.f_frsize + # Remaining free space usable by user. + avail_to_user = st.f_bavail * st.f_frsize + # Total space being used in general. + used = total - avail_to_root + if MACOS: + # see: https://github.com/giampaolo/psutil/pull/2152 + used = _psutil_osx.disk_usage_used(path, used) + # Total space which is available to user (same as 'total' but + # for the user). + total_user = used + avail_to_user + # User usage percent compared to the total amount of space + # the user can use. This number would be higher if compared + # to root's because the user has less space (usually -5%). + usage_percent_user = usage_percent(used, total_user, round_=1) + + # NB: the percentage is -5% than what shown by df due to + # reserved blocks that we are currently not considering: + # https://github.com/giampaolo/psutil/issues/829#issuecomment-223750462 + return sdiskusage( + total=total, used=used, free=avail_to_user, percent=usage_percent_user + ) + + +@memoize +def get_terminal_map(): + """Get a map of device-id -> path as a dict. + Used by Process.terminal(). + """ + ret = {} + ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*') + for name in ls: + assert name not in ret, name + try: + ret[os.stat(name).st_rdev] = name + except FileNotFoundError: + pass + return ret diff --git a/venv/lib/python3.10/site-packages/psutil/_pssunos.py b/venv/lib/python3.10/site-packages/psutil/_pssunos.py new file mode 100644 index 0000000000000000000000000000000000000000..dddbece1f35af93e807619442a4fcb6c7e24cc47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_pssunos.py @@ -0,0 +1,756 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Sun OS Solaris platform implementation.""" + +import errno +import functools +import os +import socket +import subprocess +import sys +from collections import namedtuple +from socket import AF_INET + +from . import _common +from . import _psposix +from . import _psutil_posix as cext_posix +from . 
import _psutil_sunos as cext +from ._common import AF_INET6 +from ._common import AccessDenied +from ._common import NoSuchProcess +from ._common import ZombieProcess +from ._common import debug +from ._common import get_procfs_path +from ._common import isfile_strict +from ._common import memoize_when_activated +from ._common import sockfam_to_enum +from ._common import socktype_to_enum +from ._common import usage_percent +from ._compat import PY3 +from ._compat import FileNotFoundError +from ._compat import PermissionError +from ._compat import ProcessLookupError +from ._compat import b + + +__extra__all__ = ["CONN_IDLE", "CONN_BOUND", "PROCFS_PATH"] + + +# ===================================================================== +# --- globals +# ===================================================================== + + +PAGE_SIZE = cext_posix.getpagesize() +AF_LINK = cext_posix.AF_LINK +IS_64_BIT = sys.maxsize > 2**32 + +CONN_IDLE = "IDLE" +CONN_BOUND = "BOUND" + +PROC_STATUSES = { + cext.SSLEEP: _common.STATUS_SLEEPING, + cext.SRUN: _common.STATUS_RUNNING, + cext.SZOMB: _common.STATUS_ZOMBIE, + cext.SSTOP: _common.STATUS_STOPPED, + cext.SIDL: _common.STATUS_IDLE, + cext.SONPROC: _common.STATUS_RUNNING, # same as run + cext.SWAIT: _common.STATUS_WAITING, +} + +TCP_STATUSES = { + cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, + cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, + cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV, + cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, + cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, + cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.TCPS_CLOSED: _common.CONN_CLOSE, + cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, + cext.TCPS_LISTEN: _common.CONN_LISTEN, + cext.TCPS_CLOSING: _common.CONN_CLOSING, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, + cext.TCPS_IDLE: CONN_IDLE, # sunos specific + cext.TCPS_BOUND: CONN_BOUND, # sunos specific +} + +proc_info_map = dict( + ppid=0, + rss=1, + vms=2, + create_time=3, + nice=4, + num_threads=5, + status=6, + ttynr=7, + uid=8, + euid=9, + gid=10, + egid=11, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# psutil.cpu_times() +scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait']) +# psutil.cpu_times(percpu=True) +pcputimes = namedtuple( + 'pcputimes', ['user', 'system', 'children_user', 'children_system'] +) +# psutil.virtual_memory() +svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) +# psutil.Process.memory_info() +pmem = namedtuple('pmem', ['rss', 'vms']) +pfullmem = pmem +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple( + 'pmmap_grouped', ['path', 'rss', 'anonymous', 'locked'] +) +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields) +) + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + """Report virtual memory metrics.""" + # we could have done this with kstat, but IMHO this is good enough + total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE + # note: there's no difference on Solaris + free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE + used = total - free + percent = usage_percent(used, total, round_=1) + return svmem(total, avail, percent, used, free) + + +def 
swap_memory(): + """Report swap memory metrics.""" + sin, sout = cext.swap_mem() + # XXX + # we are supposed to get total/free by doing so: + # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/ + # usr/src/cmd/swap/swap.c + # ...nevertheless I can't manage to obtain the same numbers as 'swap' + # cmdline utility, so let's parse its output (sigh!) + p = subprocess.Popen( + [ + '/usr/bin/env', + 'PATH=/usr/sbin:/sbin:%s' % os.environ['PATH'], + 'swap', + '-l', + ], + stdout=subprocess.PIPE, + ) + stdout, _ = p.communicate() + if PY3: + stdout = stdout.decode(sys.stdout.encoding) + if p.returncode != 0: + raise RuntimeError("'swap -l' failed (retcode=%s)" % p.returncode) + + lines = stdout.strip().split('\n')[1:] + if not lines: + msg = 'no swap device(s) configured' + raise RuntimeError(msg) + total = free = 0 + for line in lines: + line = line.split() + t, f = line[3:5] + total += int(int(t) * 512) + free += int(int(f) * 512) + used = total - free + percent = usage_percent(used, total, round_=1) + return _common.sswap( + total, used, free, percent, sin * PAGE_SIZE, sout * PAGE_SIZE + ) + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system-wide CPU times as a named tuple.""" + ret = cext.per_cpu_times() + return scputimes(*[sum(x) for x in zip(*ret)]) + + +def per_cpu_times(): + """Return system per-CPU times as a list of named tuples.""" + ret = cext.per_cpu_times() + return [scputimes(*x) for x in ret] + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except ValueError: + # mimic os.cpu_count() behavior + return None + + +def cpu_count_cores(): + """Return the number of CPU cores in the system.""" + return cext.cpu_count_cores() + + +def cpu_stats(): + """Return various CPU stats as a named tuple.""" + ctx_switches, interrupts, syscalls, traps = cext.cpu_stats() + soft_interrupts = 0 + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls + ) + + +# ===================================================================== +# --- disks +# ===================================================================== + + +disk_io_counters = cext.disk_io_counters +disk_usage = _psposix.disk_usage + + +def disk_partitions(all=False): + """Return system disk partitions.""" + # TODO - the filtering logic should be better checked so that + # it tries to reflect 'df' as much as possible + retlist = [] + partitions = cext.disk_partitions() + for partition in partitions: + device, mountpoint, fstype, opts = partition + if device == 'none': + device = '' + if not all: + # Differently from, say, Linux, we don't have a list of + # common fs types so the best we can do, AFAIK, is to + # filter by filesystem having a total size > 0. 
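+            # Illustrative assumption: pseudo filesystems such as /proc
+            # typically report a total size of 0 and are therefore
+            # skipped here unless all=True, roughly mirroring 'df'.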
+ try: + if not disk_usage(mountpoint).total: + continue + except OSError as err: + # https://github.com/giampaolo/psutil/issues/1674 + debug("skipping %r: %s" % (mountpoint, err)) + continue + maxfile = maxpath = None # set later + ntuple = _common.sdiskpart( + device, mountpoint, fstype, opts, maxfile, maxpath + ) + retlist.append(ntuple) + return retlist + + +# ===================================================================== +# --- network +# ===================================================================== + + +net_io_counters = cext.net_io_counters +net_if_addrs = cext_posix.net_if_addrs + + +def net_connections(kind, _pid=-1): + """Return socket connections. If pid == -1 return system-wide + connections (as opposed to connections opened by one process only). + Only INET sockets are returned (UNIX are not). + """ + cmap = _common.conn_tmap.copy() + if _pid == -1: + cmap.pop('unix', 0) + if kind not in cmap: + raise ValueError( + "invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in cmap])) + ) + families, types = _common.conn_tmap[kind] + rawlist = cext.net_connections(_pid) + ret = set() + for item in rawlist: + fd, fam, type_, laddr, raddr, status, pid = item + if fam not in families: + continue + if type_ not in types: + continue + # TODO: refactor and use _common.conn_to_ntuple. + if fam in (AF_INET, AF_INET6): + if laddr: + laddr = _common.addr(*laddr) + if raddr: + raddr = _common.addr(*raddr) + status = TCP_STATUSES[status] + fam = sockfam_to_enum(fam) + type_ = socktype_to_enum(type_) + if _pid == -1: + nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid) + else: + nt = _common.pconn(fd, fam, type_, laddr, raddr, status) + ret.add(nt) + return list(ret) + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + ret = cext.net_if_stats() + for name, items in ret.items(): + isup, duplex, speed, mtu = items + if hasattr(_common, 'NicDuplex'): + duplex = _common.NicDuplex(duplex) + ret[name] = _common.snicstats(isup, duplex, speed, mtu, '') + return ret + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + return cext.boot_time() + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + localhost = (':0.0', ':0') + for item in rawlist: + user, tty, hostname, tstamp, user_process, pid = item + # note: the underlying C function includes entries about + # system boot, run level and others. We might want + # to use them in the future. + if not user_process: + continue + if hostname in localhost: + hostname = 'localhost' + nt = _common.suser(user, tty, hostname, tstamp, pid) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- processes +# ===================================================================== + + +def pids(): + """Returns a list of PIDs currently running on the system.""" + return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()] + + +def pid_exists(pid): + """Check for the existence of a unix pid.""" + return _psposix.pid_exists(pid) + + +def wrap_exceptions(fun): + """Call callable into a try/except clause and translate ENOENT, + EACCES and EPERM in NoSuchProcess or AccessDenied exceptions. 
+ """ + + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except (FileNotFoundError, ProcessLookupError): + # ENOENT (no such file or directory) gets raised on open(). + # ESRCH (no such process) can get raised on read() if + # process is gone in meantime. + if not pid_exists(self.pid): + raise NoSuchProcess(self.pid, self._name) + else: + raise ZombieProcess(self.pid, self._name, self._ppid) + except PermissionError: + raise AccessDenied(self.pid, self._name) + except OSError: + if self.pid == 0: + if 0 in pids(): + raise AccessDenied(self.pid, self._name) + else: + raise + raise + + return wrapper + + +class Process: + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + self._procfs_path = get_procfs_path() + + def _assert_alive(self): + """Raise NSP if the process disappeared on us.""" + # For those C function who do not raise NSP, possibly returning + # incorrect or incomplete result. + os.stat('%s/%s' % (self._procfs_path, self.pid)) + + def oneshot_enter(self): + self._proc_name_and_args.cache_activate(self) + self._proc_basic_info.cache_activate(self) + self._proc_cred.cache_activate(self) + + def oneshot_exit(self): + self._proc_name_and_args.cache_deactivate(self) + self._proc_basic_info.cache_deactivate(self) + self._proc_cred.cache_deactivate(self) + + @wrap_exceptions + @memoize_when_activated + def _proc_name_and_args(self): + return cext.proc_name_and_args(self.pid, self._procfs_path) + + @wrap_exceptions + @memoize_when_activated + def _proc_basic_info(self): + if self.pid == 0 and not os.path.exists( + '%s/%s/psinfo' % (self._procfs_path, self.pid) + ): + raise AccessDenied(self.pid) + ret = cext.proc_basic_info(self.pid, self._procfs_path) + assert len(ret) == len(proc_info_map) + return ret + + @wrap_exceptions + @memoize_when_activated + def _proc_cred(self): + return cext.proc_cred(self.pid, self._procfs_path) + + @wrap_exceptions + def name(self): + # note: max len == 15 + return self._proc_name_and_args()[0] + + @wrap_exceptions + def exe(self): + try: + return os.readlink( + "%s/%s/path/a.out" % (self._procfs_path, self.pid) + ) + except OSError: + pass # continue and guess the exe name from the cmdline + # Will be guessed later from cmdline but we want to explicitly + # invoke cmdline here in order to get an AccessDenied + # exception if the user has not enough privileges. + self.cmdline() + return "" + + @wrap_exceptions + def cmdline(self): + return self._proc_name_and_args()[1].split(' ') + + @wrap_exceptions + def environ(self): + return cext.proc_environ(self.pid, self._procfs_path) + + @wrap_exceptions + def create_time(self): + return self._proc_basic_info()[proc_info_map['create_time']] + + @wrap_exceptions + def num_threads(self): + return self._proc_basic_info()[proc_info_map['num_threads']] + + @wrap_exceptions + def nice_get(self): + # Note #1: getpriority(3) doesn't work for realtime processes. + # Psinfo is what ps uses, see: + # https://github.com/giampaolo/psutil/issues/1194 + return self._proc_basic_info()[proc_info_map['nice']] + + @wrap_exceptions + def nice_set(self, value): + if self.pid in (2, 3): + # Special case PIDs: internally setpriority(3) return ESRCH + # (no such process), no matter what. + # The process actually exists though, as it has a name, + # creation time, etc. 
+            raise AccessDenied(self.pid, self._name)
+        return cext_posix.setpriority(self.pid, value)
+
+    @wrap_exceptions
+    def ppid(self):
+        self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
+        return self._ppid
+
+    @wrap_exceptions
+    def uids(self):
+        try:
+            real, effective, saved, _, _, _ = self._proc_cred()
+        except AccessDenied:
+            real = self._proc_basic_info()[proc_info_map['uid']]
+            effective = self._proc_basic_info()[proc_info_map['euid']]
+            saved = None
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def gids(self):
+        try:
+            _, _, _, real, effective, saved = self._proc_cred()
+        except AccessDenied:
+            real = self._proc_basic_info()[proc_info_map['gid']]
+            effective = self._proc_basic_info()[proc_info_map['egid']]
+            saved = None
+        return _common.puids(real, effective, saved)
+
+    @wrap_exceptions
+    def cpu_times(self):
+        try:
+            times = cext.proc_cpu_times(self.pid, self._procfs_path)
+        except OSError as err:
+            if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+                # We may get here if we attempt to query a 64bit process
+                # with a 32bit python.
+                # Error originates from read() and also tools like "cat"
+                # fail in the same way (!).
+                # Since there simply is no way to determine CPU times we
+                # return 0.0 as a fallback. See:
+                # https://github.com/giampaolo/psutil/issues/857
+                times = (0.0, 0.0, 0.0, 0.0)
+            else:
+                raise
+        return _common.pcputimes(*times)
+
+    @wrap_exceptions
+    def cpu_num(self):
+        return cext.proc_cpu_num(self.pid, self._procfs_path)
+
+    @wrap_exceptions
+    def terminal(self):
+        procfs_path = self._procfs_path
+        hit_enoent = False
+        tty = self._proc_basic_info()[proc_info_map['ttynr']]
+        if tty != cext.PRNODEV:
+            for x in (0, 1, 2, 255):
+                try:
+                    return os.readlink(
+                        '%s/%d/path/%d' % (procfs_path, self.pid, x)
+                    )
+                except FileNotFoundError:
+                    hit_enoent = True
+                    continue
+        if hit_enoent:
+            self._assert_alive()
+
+    @wrap_exceptions
+    def cwd(self):
+        # /proc/PID/path/cwd may not be resolved by readlink() even if
+        # it exists (ls shows it). If that's the case and the process
+        # is still alive return an empty string.
+        # Reference: http://goo.gl/55XgO
+        procfs_path = self._procfs_path
+        try:
+            return os.readlink("%s/%s/path/cwd" % (procfs_path, self.pid))
+        except FileNotFoundError:
+            os.stat("%s/%s" % (procfs_path, self.pid))  # raise NSP or AD
+            return ""
+
+    @wrap_exceptions
+    def memory_info(self):
+        ret = self._proc_basic_info()
+        rss = ret[proc_info_map['rss']] * 1024
+        vms = ret[proc_info_map['vms']] * 1024
+        return pmem(rss, vms)
+
+    memory_full_info = memory_info
+
+    @wrap_exceptions
+    def status(self):
+        code = self._proc_basic_info()[proc_info_map['status']]
+        # XXX is '?' legit? (we're not supposed to return it anyway)
+        return PROC_STATUSES.get(code, '?')
+
+    @wrap_exceptions
+    def threads(self):
+        procfs_path = self._procfs_path
+        ret = []
+        tids = os.listdir('%s/%d/lwp' % (procfs_path, self.pid))
+        hit_enoent = False
+        for tid in tids:
+            tid = int(tid)
+            try:
+                utime, stime = cext.query_process_thread(
+                    self.pid, tid, procfs_path
+                )
+            except EnvironmentError as err:
+                if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+                    # We may get here if we attempt to query a 64bit process
+                    # with a 32bit python.
+                    # Error originates from read() and also tools like "cat"
+                    # fail in the same way (!).
+                    # Since there simply is no way to determine CPU times we
+                    # return 0.0 as a fallback.
See: + # https://github.com/giampaolo/psutil/issues/857 + continue + # ENOENT == thread gone in meantime + if err.errno == errno.ENOENT: + hit_enoent = True + continue + raise + else: + nt = _common.pthread(tid, utime, stime) + ret.append(nt) + if hit_enoent: + self._assert_alive() + return ret + + @wrap_exceptions + def open_files(self): + retlist = [] + hit_enoent = False + procfs_path = self._procfs_path + pathdir = '%s/%d/path' % (procfs_path, self.pid) + for fd in os.listdir('%s/%d/fd' % (procfs_path, self.pid)): + path = os.path.join(pathdir, fd) + if os.path.islink(path): + try: + file = os.readlink(path) + except FileNotFoundError: + hit_enoent = True + continue + else: + if isfile_strict(file): + retlist.append(_common.popenfile(file, int(fd))) + if hit_enoent: + self._assert_alive() + return retlist + + def _get_unix_sockets(self, pid): + """Get UNIX sockets used by process by parsing 'pfiles' output.""" + # TODO: rewrite this in C (...but the damn netstat source code + # does not include this part! Argh!!) + cmd = ["pfiles", str(pid)] + p = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + stdout, stderr = p.communicate() + if PY3: + stdout, stderr = ( + x.decode(sys.stdout.encoding) for x in (stdout, stderr) + ) + if p.returncode != 0: + if 'permission denied' in stderr.lower(): + raise AccessDenied(self.pid, self._name) + if 'no such process' in stderr.lower(): + raise NoSuchProcess(self.pid, self._name) + raise RuntimeError("%r command error\n%s" % (cmd, stderr)) + + lines = stdout.split('\n')[2:] + for i, line in enumerate(lines): + line = line.lstrip() + if line.startswith('sockname: AF_UNIX'): + path = line.split(' ', 2)[2] + type = lines[i - 2].strip() + if type == 'SOCK_STREAM': + type = socket.SOCK_STREAM + elif type == 'SOCK_DGRAM': + type = socket.SOCK_DGRAM + else: + type = -1 + yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE) + + @wrap_exceptions + def connections(self, kind='inet'): + ret = net_connections(kind, _pid=self.pid) + # The underlying C implementation retrieves all OS connections + # and filters them by PID. At this point we can't tell whether + # an empty list means there were no connections for process or + # process is no longer active so we force NSP in case the PID + # is no longer there. + if not ret: + # will raise NSP if process is gone + os.stat('%s/%s' % (self._procfs_path, self.pid)) + + # UNIX sockets + if kind in ('all', 'unix'): + ret.extend([ + _common.pconn(*conn) + for conn in self._get_unix_sockets(self.pid) + ]) + return ret + + nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked') + nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked') + + @wrap_exceptions + def memory_maps(self): + def toaddr(start, end): + return '%s-%s' % ( + hex(start)[2:].strip('L'), + hex(end)[2:].strip('L'), + ) + + procfs_path = self._procfs_path + retlist = [] + try: + rawlist = cext.proc_memory_maps(self.pid, procfs_path) + except OSError as err: + if err.errno == errno.EOVERFLOW and not IS_64_BIT: + # We may get here if we attempt to query a 64bit process + # with a 32bit python. + # Error originates from read() and also tools like "cat" + # fail in the same way (!). + # Since there simply is no way to determine CPU times we + # return 0.0 as a fallback. 
See: + # https://github.com/giampaolo/psutil/issues/857 + return [] + else: + raise + hit_enoent = False + for item in rawlist: + addr, addrsize, perm, name, rss, anon, locked = item + addr = toaddr(addr, addrsize) + if not name.startswith('['): + try: + name = os.readlink( + '%s/%s/path/%s' % (procfs_path, self.pid, name) + ) + except OSError as err: + if err.errno == errno.ENOENT: + # sometimes the link may not be resolved by + # readlink() even if it exists (ls shows it). + # If that's the case we just return the + # unresolved link path. + # This seems an incosistency with /proc similar + # to: http://goo.gl/55XgO + name = '%s/%s/path/%s' % (procfs_path, self.pid, name) + hit_enoent = True + else: + raise + retlist.append((addr, perm, name, rss, anon, locked)) + if hit_enoent: + self._assert_alive() + return retlist + + @wrap_exceptions + def num_fds(self): + return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) + + @wrap_exceptions + def num_ctx_switches(self): + return _common.pctxsw( + *cext.proc_num_ctx_switches(self.pid, self._procfs_path) + ) + + @wrap_exceptions + def wait(self, timeout=None): + return _psposix.wait_pid(self.pid, timeout, self._name) diff --git a/venv/lib/python3.10/site-packages/psutil/_psutil_linux.abi3.so b/venv/lib/python3.10/site-packages/psutil/_psutil_linux.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..e08c8ce9cf544039cfba215199b8e127a4cfd3b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/_psutil_linux.abi3.so differ diff --git a/venv/lib/python3.10/site-packages/psutil/_psutil_posix.abi3.so b/venv/lib/python3.10/site-packages/psutil/_psutil_posix.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..1f83194fa380abfa36fbe7dd3ca5ac60248fb4f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/psutil/_psutil_posix.abi3.so differ diff --git a/venv/lib/python3.10/site-packages/psutil/_pswindows.py b/venv/lib/python3.10/site-packages/psutil/_pswindows.py new file mode 100644 index 0000000000000000000000000000000000000000..2d3a0c9fdb8c4aa27e80b342ffe19b04d2661149 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/_pswindows.py @@ -0,0 +1,1170 @@ +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Windows platform implementation.""" + +import contextlib +import errno +import functools +import os +import signal +import sys +import time +from collections import namedtuple + +from . import _common +from ._common import ENCODING +from ._common import ENCODING_ERRS +from ._common import AccessDenied +from ._common import NoSuchProcess +from ._common import TimeoutExpired +from ._common import conn_tmap +from ._common import conn_to_ntuple +from ._common import debug +from ._common import isfile_strict +from ._common import memoize +from ._common import memoize_when_activated +from ._common import parse_environ_block +from ._common import usage_percent +from ._compat import PY3 +from ._compat import long +from ._compat import lru_cache +from ._compat import range +from ._compat import unicode +from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS +from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS +from ._psutil_windows import HIGH_PRIORITY_CLASS +from ._psutil_windows import IDLE_PRIORITY_CLASS +from ._psutil_windows import NORMAL_PRIORITY_CLASS +from ._psutil_windows import REALTIME_PRIORITY_CLASS + + +try: + from . 
import _psutil_windows as cext +except ImportError as err: + if ( + str(err).lower().startswith("dll load failed") + and sys.getwindowsversion()[0] < 6 + ): + # We may get here if: + # 1) we are on an old Windows version + # 2) psutil was installed via pip + wheel + # See: https://github.com/giampaolo/psutil/issues/811 + msg = "this Windows version is too old (< Windows Vista); " + msg += "psutil 3.4.2 is the latest version which supports Windows " + msg += "2000, XP and 2003 server" + raise RuntimeError(msg) + else: + raise + +if PY3: + import enum +else: + enum = None + +# process priority constants, import from __init__.py: +# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx +# fmt: off +__extra__all__ = [ + "win_service_iter", "win_service_get", + # Process priority + "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS", + "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", "NORMAL_PRIORITY_CLASS", + "REALTIME_PRIORITY_CLASS", + # IO priority + "IOPRIO_VERYLOW", "IOPRIO_LOW", "IOPRIO_NORMAL", "IOPRIO_HIGH", + # others + "CONN_DELETE_TCB", "AF_LINK", +] +# fmt: on + + +# ===================================================================== +# --- globals +# ===================================================================== + +CONN_DELETE_TCB = "DELETE_TCB" +ERROR_PARTIAL_COPY = 299 +PYPY = '__pypy__' in sys.builtin_module_names + +if enum is None: + AF_LINK = -1 +else: + AddressFamily = enum.IntEnum('AddressFamily', {'AF_LINK': -1}) + AF_LINK = AddressFamily.AF_LINK + +TCP_STATUSES = { + cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED, + cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT, + cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV, + cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1, + cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2, + cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT, + cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE, + cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, + cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK, + cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN, + cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING, + cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB, + cext.PSUTIL_CONN_NONE: _common.CONN_NONE, +} + +if enum is not None: + + class Priority(enum.IntEnum): + ABOVE_NORMAL_PRIORITY_CLASS = ABOVE_NORMAL_PRIORITY_CLASS + BELOW_NORMAL_PRIORITY_CLASS = BELOW_NORMAL_PRIORITY_CLASS + HIGH_PRIORITY_CLASS = HIGH_PRIORITY_CLASS + IDLE_PRIORITY_CLASS = IDLE_PRIORITY_CLASS + NORMAL_PRIORITY_CLASS = NORMAL_PRIORITY_CLASS + REALTIME_PRIORITY_CLASS = REALTIME_PRIORITY_CLASS + + globals().update(Priority.__members__) + +if enum is None: + IOPRIO_VERYLOW = 0 + IOPRIO_LOW = 1 + IOPRIO_NORMAL = 2 + IOPRIO_HIGH = 3 +else: + + class IOPriority(enum.IntEnum): + IOPRIO_VERYLOW = 0 + IOPRIO_LOW = 1 + IOPRIO_NORMAL = 2 + IOPRIO_HIGH = 3 + + globals().update(IOPriority.__members__) + +pinfo_map = dict( + num_handles=0, + ctx_switches=1, + user_time=2, + kernel_time=3, + create_time=4, + num_threads=5, + io_rcount=6, + io_wcount=7, + io_rbytes=8, + io_wbytes=9, + io_count_others=10, + io_bytes_others=11, + num_page_faults=12, + peak_wset=13, + wset=14, + peak_paged_pool=15, + paged_pool=16, + peak_non_paged_pool=17, + non_paged_pool=18, + pagefile=19, + peak_pagefile=20, + mem_private=21, +) + + +# ===================================================================== +# --- named tuples +# ===================================================================== + + +# fmt: off +# psutil.cpu_times() +scputimes = namedtuple('scputimes', 
+ ['user', 'system', 'idle', 'interrupt', 'dpc']) +# psutil.virtual_memory() +svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) +# psutil.Process.memory_info() +pmem = namedtuple( + 'pmem', ['rss', 'vms', + 'num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool', + 'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool', + 'pagefile', 'peak_pagefile', 'private']) +# psutil.Process.memory_full_info() +pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', )) +# psutil.Process.memory_maps(grouped=True) +pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss']) +# psutil.Process.memory_maps(grouped=False) +pmmap_ext = namedtuple( + 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) +# psutil.Process.io_counters() +pio = namedtuple('pio', ['read_count', 'write_count', + 'read_bytes', 'write_bytes', + 'other_count', 'other_bytes']) +# fmt: on + + +# ===================================================================== +# --- utils +# ===================================================================== + + +@lru_cache(maxsize=512) +def convert_dos_path(s): + r"""Convert paths using native DOS format like: + "\Device\HarddiskVolume1\Windows\systemew\file.txt" + into: + "C:\Windows\systemew\file.txt". + """ + rawdrive = '\\'.join(s.split('\\')[:3]) + driveletter = cext.QueryDosDevice(rawdrive) + remainder = s[len(rawdrive) :] + return os.path.join(driveletter, remainder) + + +def py2_strencode(s): + """Encode a unicode string to a byte string by using the default fs + encoding + "replace" error handler. + """ + if PY3: + return s + else: + if isinstance(s, str): + return s + else: + return s.encode(ENCODING, ENCODING_ERRS) + + +@memoize +def getpagesize(): + return cext.getpagesize() + + +# ===================================================================== +# --- memory +# ===================================================================== + + +def virtual_memory(): + """System virtual memory as a namedtuple.""" + mem = cext.virtual_mem() + totphys, availphys, totsys, availsys = mem + # + total = totphys + avail = availphys + free = availphys + used = total - avail + percent = usage_percent((total - avail), total, round_=1) + return svmem(total, avail, percent, used, free) + + +def swap_memory(): + """Swap system memory as a (total, used, free, sin, sout) tuple.""" + mem = cext.virtual_mem() + + total_phys = mem[0] + total_system = mem[2] + + # system memory (commit total/limit) is the sum of physical and swap + # thus physical memory values need to be subtracted to get swap values + total = total_system - total_phys + # commit total is incremented immediately (decrementing free_system) + # while the corresponding free physical value is not decremented until + # pages are accessed, so we can't use free system memory for swap. + # instead, we calculate page file usage based on performance counter + if total > 0: + percentswap = cext.swap_percent() + used = int(0.01 * percentswap * total) + else: + percentswap = 0.0 + used = 0 + + free = total - used + percent = round(percentswap, 1) + return _common.sswap(total, used, free, percent, 0, 0) + + +# ===================================================================== +# --- disk +# ===================================================================== + + +disk_io_counters = cext.disk_io_counters + + +def disk_usage(path): + """Return disk usage associated with path.""" + if PY3 and isinstance(path, bytes): + # XXX: do we want to use "strict"? Probably yes, in order + # to fail immediately. 
After all we are accepting input here... + path = path.decode(ENCODING, errors="strict") + total, free = cext.disk_usage(path) + used = total - free + percent = usage_percent(used, total, round_=1) + return _common.sdiskusage(total, used, free, percent) + + +def disk_partitions(all): + """Return disk partitions.""" + rawlist = cext.disk_partitions(all) + return [_common.sdiskpart(*x) for x in rawlist] + + +# ===================================================================== +# --- CPU +# ===================================================================== + + +def cpu_times(): + """Return system CPU times as a named tuple.""" + user, system, idle = cext.cpu_times() + # Internally, GetSystemTimes() is used, and it doesn't return + # interrupt and dpc times. cext.per_cpu_times() does, so we + # rely on it to get those only. + percpu_summed = scputimes(*[sum(n) for n in zip(*cext.per_cpu_times())]) + return scputimes( + user, system, idle, percpu_summed.interrupt, percpu_summed.dpc + ) + + +def per_cpu_times(): + """Return system per-CPU times as a list of named tuples.""" + ret = [] + for user, system, idle, interrupt, dpc in cext.per_cpu_times(): + item = scputimes(user, system, idle, interrupt, dpc) + ret.append(item) + return ret + + +def cpu_count_logical(): + """Return the number of logical CPUs in the system.""" + return cext.cpu_count_logical() + + +def cpu_count_cores(): + """Return the number of CPU cores in the system.""" + return cext.cpu_count_cores() + + +def cpu_stats(): + """Return CPU statistics.""" + ctx_switches, interrupts, dpcs, syscalls = cext.cpu_stats() + soft_interrupts = 0 + return _common.scpustats( + ctx_switches, interrupts, soft_interrupts, syscalls + ) + + +def cpu_freq(): + """Return CPU frequency. + On Windows per-cpu frequency is not supported. + """ + curr, max_ = cext.cpu_freq() + min_ = 0.0 + return [_common.scpufreq(float(curr), min_, float(max_))] + + +_loadavg_inititialized = False + + +def getloadavg(): + """Return the number of processes in the system run queue averaged + over the last 1, 5, and 15 minutes respectively as a tuple. + """ + global _loadavg_inititialized + + if not _loadavg_inititialized: + cext.init_loadavg_counter() + _loadavg_inititialized = True + + # Drop to 2 decimal points which is what Linux does + raw_loads = cext.getloadavg() + return tuple([round(load, 2) for load in raw_loads]) + + +# ===================================================================== +# --- network +# ===================================================================== + + +def net_connections(kind, _pid=-1): + """Return socket connections. If pid == -1 return system-wide + connections (as opposed to connections opened by one process only). 
+ """ + if kind not in conn_tmap: + raise ValueError( + "invalid %r kind argument; choose between %s" + % (kind, ', '.join([repr(x) for x in conn_tmap])) + ) + families, types = conn_tmap[kind] + rawlist = cext.net_connections(_pid, families, types) + ret = set() + for item in rawlist: + fd, fam, type, laddr, raddr, status, pid = item + nt = conn_to_ntuple( + fd, + fam, + type, + laddr, + raddr, + status, + TCP_STATUSES, + pid=pid if _pid == -1 else None, + ) + ret.add(nt) + return list(ret) + + +def net_if_stats(): + """Get NIC stats (isup, duplex, speed, mtu).""" + ret = {} + rawdict = cext.net_if_stats() + for name, items in rawdict.items(): + if not PY3: + assert isinstance(name, unicode), type(name) + name = py2_strencode(name) + isup, duplex, speed, mtu = items + if hasattr(_common, 'NicDuplex'): + duplex = _common.NicDuplex(duplex) + ret[name] = _common.snicstats(isup, duplex, speed, mtu, '') + return ret + + +def net_io_counters(): + """Return network I/O statistics for every network interface + installed on the system as a dict of raw tuples. + """ + ret = cext.net_io_counters() + return dict([(py2_strencode(k), v) for k, v in ret.items()]) + + +def net_if_addrs(): + """Return the addresses associated to each NIC.""" + ret = [] + for items in cext.net_if_addrs(): + items = list(items) + items[0] = py2_strencode(items[0]) + ret.append(items) + return ret + + +# ===================================================================== +# --- sensors +# ===================================================================== + + +def sensors_battery(): + """Return battery information.""" + # For constants meaning see: + # https://msdn.microsoft.com/en-us/library/windows/desktop/ + # aa373232(v=vs.85).aspx + acline_status, flags, percent, secsleft = cext.sensors_battery() + power_plugged = acline_status == 1 + no_battery = bool(flags & 128) + charging = bool(flags & 8) + + if no_battery: + return None + if power_plugged or charging: + secsleft = _common.POWER_TIME_UNLIMITED + elif secsleft == -1: + secsleft = _common.POWER_TIME_UNKNOWN + + return _common.sbattery(percent, secsleft, power_plugged) + + +# ===================================================================== +# --- other system functions +# ===================================================================== + + +_last_btime = 0 + + +def boot_time(): + """The system boot time expressed in seconds since the epoch.""" + # This dirty hack is to adjust the precision of the returned + # value which may have a 1 second fluctuation, see: + # https://github.com/giampaolo/psutil/issues/1007 + global _last_btime + ret = float(cext.boot_time()) + if abs(ret - _last_btime) <= 1: + return _last_btime + else: + _last_btime = ret + return ret + + +def users(): + """Return currently connected users as a list of namedtuples.""" + retlist = [] + rawlist = cext.users() + for item in rawlist: + user, hostname, tstamp = item + user = py2_strencode(user) + nt = _common.suser(user, None, hostname, tstamp, None) + retlist.append(nt) + return retlist + + +# ===================================================================== +# --- Windows services +# ===================================================================== + + +def win_service_iter(): + """Yields a list of WindowsService instances.""" + for name, display_name in cext.winservice_enumerate(): + yield WindowsService(py2_strencode(name), py2_strencode(display_name)) + + +def win_service_get(name): + """Open a Windows service and return it as a WindowsService instance.""" + service = 
WindowsService(name, None) + service._display_name = service._query_config()['display_name'] + return service + + +class WindowsService: + """Represents an installed Windows service.""" + + def __init__(self, name, display_name): + self._name = name + self._display_name = display_name + + def __str__(self): + details = "(name=%r, display_name=%r)" % ( + self._name, + self._display_name, + ) + return "%s%s" % (self.__class__.__name__, details) + + def __repr__(self): + return "<%s at %s>" % (self.__str__(), id(self)) + + def __eq__(self, other): + # Test for equality with another WindosService object based + # on name. + if not isinstance(other, WindowsService): + return NotImplemented + return self._name == other._name + + def __ne__(self, other): + return not self == other + + def _query_config(self): + with self._wrap_exceptions(): + display_name, binpath, username, start_type = ( + cext.winservice_query_config(self._name) + ) + # XXX - update _self.display_name? + return dict( + display_name=py2_strencode(display_name), + binpath=py2_strencode(binpath), + username=py2_strencode(username), + start_type=py2_strencode(start_type), + ) + + def _query_status(self): + with self._wrap_exceptions(): + status, pid = cext.winservice_query_status(self._name) + if pid == 0: + pid = None + return dict(status=status, pid=pid) + + @contextlib.contextmanager + def _wrap_exceptions(self): + """Ctx manager which translates bare OSError and WindowsError + exceptions into NoSuchProcess and AccessDenied. + """ + try: + yield + except OSError as err: + if is_permission_err(err): + msg = ( + "service %r is not querable (not enough privileges)" + % self._name + ) + raise AccessDenied(pid=None, name=self._name, msg=msg) + elif err.winerror in ( + cext.ERROR_INVALID_NAME, + cext.ERROR_SERVICE_DOES_NOT_EXIST, + ): + msg = "service %r does not exist" % self._name + raise NoSuchProcess(pid=None, name=self._name, msg=msg) + else: + raise + + # config query + + def name(self): + """The service name. This string is how a service is referenced + and can be passed to win_service_get() to get a new + WindowsService instance. + """ + return self._name + + def display_name(self): + """The service display name. The value is cached when this class + is instantiated. + """ + return self._display_name + + def binpath(self): + """The fully qualified path to the service binary/exe file as + a string, including command line arguments. + """ + return self._query_config()['binpath'] + + def username(self): + """The name of the user that owns this service.""" + return self._query_config()['username'] + + def start_type(self): + """A string which can either be "automatic", "manual" or + "disabled". + """ + return self._query_config()['start_type'] + + # status query + + def pid(self): + """The process PID, if any, else None. This can be passed + to Process class to control the service's process. + """ + return self._query_status()['pid'] + + def status(self): + """Service status as a string.""" + return self._query_status()['status'] + + def description(self): + """Service long description.""" + return py2_strencode(cext.winservice_query_descr(self.name())) + + # utils + + def as_dict(self): + """Utility method retrieving all the information above as a + dictionary. 
+ """ + d = self._query_config() + d.update(self._query_status()) + d['name'] = self.name() + d['display_name'] = self.display_name() + d['description'] = self.description() + return d + + # actions + # XXX: the necessary C bindings for start() and stop() are + # implemented but for now I prefer not to expose them. + # I may change my mind in the future. Reasons: + # - they require Administrator privileges + # - can't implement a timeout for stop() (unless by using a thread, + # which sucks) + # - would require adding ServiceAlreadyStarted and + # ServiceAlreadyStopped exceptions, adding two new APIs. + # - we might also want to have modify(), which would basically mean + # rewriting win32serviceutil.ChangeServiceConfig, which involves a + # lot of stuff (and API constants which would pollute the API), see: + # http://pyxr.sourceforge.net/PyXR/c/python24/lib/site-packages/ + # win32/lib/win32serviceutil.py.html#0175 + # - psutil is typically about "read only" monitoring stuff; + # win_service_* APIs should only be used to retrieve a service and + # check whether it's running + + # def start(self, timeout=None): + # with self._wrap_exceptions(): + # cext.winservice_start(self.name()) + # if timeout: + # giveup_at = time.time() + timeout + # while True: + # if self.status() == "running": + # return + # else: + # if time.time() > giveup_at: + # raise TimeoutExpired(timeout) + # else: + # time.sleep(.1) + + # def stop(self): + # # Note: timeout is not implemented because it's just not + # # possible, see: + # # http://stackoverflow.com/questions/11973228/ + # with self._wrap_exceptions(): + # return cext.winservice_stop(self.name()) + + +# ===================================================================== +# --- processes +# ===================================================================== + + +pids = cext.pids +pid_exists = cext.pid_exists +ppid_map = cext.ppid_map # used internally by Process.children() + + +def is_permission_err(exc): + """Return True if this is a permission error.""" + assert isinstance(exc, OSError), exc + if exc.errno in (errno.EPERM, errno.EACCES): + return True + # On Python 2 OSError doesn't always have 'winerror'. Sometimes + # it does, in which case the original exception was WindowsError + # (which is a subclass of OSError). + if getattr(exc, "winerror", -1) in ( + cext.ERROR_ACCESS_DENIED, + cext.ERROR_PRIVILEGE_NOT_HELD, + ): + return True + return False + + +def convert_oserror(exc, pid=None, name=None): + """Convert OSError into NoSuchProcess or AccessDenied.""" + assert isinstance(exc, OSError), exc + if is_permission_err(exc): + return AccessDenied(pid=pid, name=name) + if exc.errno == errno.ESRCH: + return NoSuchProcess(pid=pid, name=name) + raise exc + + +def wrap_exceptions(fun): + """Decorator which converts OSError into NoSuchProcess or AccessDenied.""" + + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + try: + return fun(self, *args, **kwargs) + except OSError as err: + raise convert_oserror(err, pid=self.pid, name=self._name) + + return wrapper + + +def retry_error_partial_copy(fun): + """Workaround for https://github.com/giampaolo/psutil/issues/875. + See: https://stackoverflow.com/questions/4457745#4457745. 
+ """ + + @functools.wraps(fun) + def wrapper(self, *args, **kwargs): + delay = 0.0001 + times = 33 + for _ in range(times): # retries for roughly 1 second + try: + return fun(self, *args, **kwargs) + except WindowsError as _: + err = _ + if err.winerror == ERROR_PARTIAL_COPY: + time.sleep(delay) + delay = min(delay * 2, 0.04) + continue + raise + msg = ( + "{} retried {} times, converted to AccessDenied as it's still" + "returning {}".format(fun, times, err) + ) + raise AccessDenied(pid=self.pid, name=self._name, msg=msg) + + return wrapper + + +class Process: + """Wrapper class around underlying C implementation.""" + + __slots__ = ["pid", "_name", "_ppid", "_cache"] + + def __init__(self, pid): + self.pid = pid + self._name = None + self._ppid = None + + # --- oneshot() stuff + + def oneshot_enter(self): + self._proc_info.cache_activate(self) + self.exe.cache_activate(self) + + def oneshot_exit(self): + self._proc_info.cache_deactivate(self) + self.exe.cache_deactivate(self) + + @memoize_when_activated + def _proc_info(self): + """Return multiple information about this process as a + raw tuple. + """ + ret = cext.proc_info(self.pid) + assert len(ret) == len(pinfo_map) + return ret + + def name(self): + """Return process name, which on Windows is always the final + part of the executable. + """ + # This is how PIDs 0 and 4 are always represented in taskmgr + # and process-hacker. + if self.pid == 0: + return "System Idle Process" + if self.pid == 4: + return "System" + return os.path.basename(self.exe()) + + @wrap_exceptions + @memoize_when_activated + def exe(self): + if PYPY: + try: + exe = cext.proc_exe(self.pid) + except WindowsError as err: + # 24 = ERROR_TOO_MANY_OPEN_FILES. Not sure why this happens + # (perhaps PyPy's JIT delaying garbage collection of files?). + if err.errno == 24: + debug("%r translated into AccessDenied" % err) + raise AccessDenied(self.pid, self._name) + raise + else: + exe = cext.proc_exe(self.pid) + if not PY3: + exe = py2_strencode(exe) + if exe.startswith('\\'): + return convert_dos_path(exe) + return exe # May be "Registry", "MemCompression", ... 
+ + @wrap_exceptions + @retry_error_partial_copy + def cmdline(self): + if cext.WINVER >= cext.WINDOWS_8_1: + # PEB method detects cmdline changes but requires more + # privileges: https://github.com/giampaolo/psutil/pull/1398 + try: + ret = cext.proc_cmdline(self.pid, use_peb=True) + except OSError as err: + if is_permission_err(err): + ret = cext.proc_cmdline(self.pid, use_peb=False) + else: + raise + else: + ret = cext.proc_cmdline(self.pid, use_peb=True) + if PY3: + return ret + else: + return [py2_strencode(s) for s in ret] + + @wrap_exceptions + @retry_error_partial_copy + def environ(self): + ustr = cext.proc_environ(self.pid) + if ustr and not PY3: + assert isinstance(ustr, unicode), type(ustr) + return parse_environ_block(py2_strencode(ustr)) + + def ppid(self): + try: + return ppid_map()[self.pid] + except KeyError: + raise NoSuchProcess(self.pid, self._name) + + def _get_raw_meminfo(self): + try: + return cext.proc_memory_info(self.pid) + except OSError as err: + if is_permission_err(err): + # TODO: the C ext can probably be refactored in order + # to get this from cext.proc_info() + info = self._proc_info() + return ( + info[pinfo_map['num_page_faults']], + info[pinfo_map['peak_wset']], + info[pinfo_map['wset']], + info[pinfo_map['peak_paged_pool']], + info[pinfo_map['paged_pool']], + info[pinfo_map['peak_non_paged_pool']], + info[pinfo_map['non_paged_pool']], + info[pinfo_map['pagefile']], + info[pinfo_map['peak_pagefile']], + info[pinfo_map['mem_private']], + ) + raise + + @wrap_exceptions + def memory_info(self): + # on Windows RSS == WorkingSetSize and VSM == PagefileUsage. + # Underlying C function returns fields of PROCESS_MEMORY_COUNTERS + # struct. + t = self._get_raw_meminfo() + rss = t[2] # wset + vms = t[7] # pagefile + return pmem(*(rss, vms) + t) + + @wrap_exceptions + def memory_full_info(self): + basic_mem = self.memory_info() + uss = cext.proc_memory_uss(self.pid) + uss *= getpagesize() + return pfullmem(*basic_mem + (uss,)) + + def memory_maps(self): + try: + raw = cext.proc_memory_maps(self.pid) + except OSError as err: + # XXX - can't use wrap_exceptions decorator as we're + # returning a generator; probably needs refactoring. + raise convert_oserror(err, self.pid, self._name) + else: + for addr, perm, path, rss in raw: + path = convert_dos_path(path) + if not PY3: + path = py2_strencode(path) + addr = hex(addr) + yield (addr, perm, path, rss) + + @wrap_exceptions + def kill(self): + return cext.proc_kill(self.pid) + + @wrap_exceptions + def send_signal(self, sig): + if sig == signal.SIGTERM: + cext.proc_kill(self.pid) + # py >= 2.7 + elif sig in ( + getattr(signal, "CTRL_C_EVENT", object()), + getattr(signal, "CTRL_BREAK_EVENT", object()), + ): + os.kill(self.pid, sig) + else: + msg = ( + "only SIGTERM, CTRL_C_EVENT and CTRL_BREAK_EVENT signals " + "are supported on Windows" + ) + raise ValueError(msg) + + @wrap_exceptions + def wait(self, timeout=None): + if timeout is None: + cext_timeout = cext.INFINITE + else: + # WaitForSingleObject() expects time in milliseconds. + cext_timeout = int(timeout * 1000) + + timer = getattr(time, 'monotonic', time.time) + stop_at = timer() + timeout if timeout is not None else None + + try: + # Exit code is supposed to come from GetExitCodeProcess(). + # May also be None if OpenProcess() failed with + # ERROR_INVALID_PARAMETER, meaning PID is already gone. + exit_code = cext.proc_wait(self.pid, cext_timeout) + except cext.TimeoutExpired: + # WaitForSingleObject() returned WAIT_TIMEOUT. Just raise. 
+ raise TimeoutExpired(timeout, self.pid, self._name) + except cext.TimeoutAbandoned: + # WaitForSingleObject() returned WAIT_ABANDONED, see: + # https://github.com/giampaolo/psutil/issues/1224 + # We'll just rely on the internal polling and return None + # when the PID disappears. Subprocess module does the same + # (return None): + # https://github.com/python/cpython/blob/ + # be50a7b627d0aa37e08fa8e2d5568891f19903ce/ + # Lib/subprocess.py#L1193-L1194 + exit_code = None + + # At this point WaitForSingleObject() returned WAIT_OBJECT_0, + # meaning the process is gone. Stupidly there are cases where + # its PID may still stick around so we do a further internal + # polling. + delay = 0.0001 + while True: + if not pid_exists(self.pid): + return exit_code + if stop_at and timer() >= stop_at: + raise TimeoutExpired(timeout, pid=self.pid, name=self._name) + time.sleep(delay) + delay = min(delay * 2, 0.04) # incremental delay + + @wrap_exceptions + def username(self): + if self.pid in (0, 4): + return 'NT AUTHORITY\\SYSTEM' + domain, user = cext.proc_username(self.pid) + return py2_strencode(domain) + '\\' + py2_strencode(user) + + @wrap_exceptions + def create_time(self): + # Note: proc_times() not put under oneshot() because create_time() + # is already cached by the main Process class. + try: + user, system, created = cext.proc_times(self.pid) + return created + except OSError as err: + if is_permission_err(err): + return self._proc_info()[pinfo_map['create_time']] + raise + + @wrap_exceptions + def num_threads(self): + return self._proc_info()[pinfo_map['num_threads']] + + @wrap_exceptions + def threads(self): + rawlist = cext.proc_threads(self.pid) + retlist = [] + for thread_id, utime, stime in rawlist: + ntuple = _common.pthread(thread_id, utime, stime) + retlist.append(ntuple) + return retlist + + @wrap_exceptions + def cpu_times(self): + try: + user, system, created = cext.proc_times(self.pid) + except OSError as err: + if not is_permission_err(err): + raise + info = self._proc_info() + user = info[pinfo_map['user_time']] + system = info[pinfo_map['kernel_time']] + # Children user/system times are not retrievable (set to 0). + return _common.pcputimes(user, system, 0.0, 0.0) + + @wrap_exceptions + def suspend(self): + cext.proc_suspend_or_resume(self.pid, True) + + @wrap_exceptions + def resume(self): + cext.proc_suspend_or_resume(self.pid, False) + + @wrap_exceptions + @retry_error_partial_copy + def cwd(self): + if self.pid in (0, 4): + raise AccessDenied(self.pid, self._name) + # return a normalized pathname since the native C function appends + # "\\" at the end of the path + path = cext.proc_cwd(self.pid) + return py2_strencode(os.path.normpath(path)) + + @wrap_exceptions + def open_files(self): + if self.pid in (0, 4): + return [] + ret = set() + # Filenames come in native format like: + # "\Device\HarddiskVolume1\Windows\system32\file.txt" + # Convert the first part into the corresponding drive letter + # (e.g.
"C:\") by using Windows's QueryDosDevice() + raw_file_names = cext.proc_open_files(self.pid) + for _file in raw_file_names: + _file = convert_dos_path(_file) + if isfile_strict(_file): + if not PY3: + _file = py2_strencode(_file) + ntuple = _common.popenfile(_file, -1) + ret.add(ntuple) + return list(ret) + + @wrap_exceptions + def connections(self, kind='inet'): + return net_connections(kind, _pid=self.pid) + + @wrap_exceptions + def nice_get(self): + value = cext.proc_priority_get(self.pid) + if enum is not None: + value = Priority(value) + return value + + @wrap_exceptions + def nice_set(self, value): + return cext.proc_priority_set(self.pid, value) + + @wrap_exceptions + def ionice_get(self): + ret = cext.proc_io_priority_get(self.pid) + if enum is not None: + ret = IOPriority(ret) + return ret + + @wrap_exceptions + def ionice_set(self, ioclass, value): + if value: + msg = "value argument not accepted on Windows" + raise TypeError(msg) + if ioclass not in ( + IOPRIO_VERYLOW, + IOPRIO_LOW, + IOPRIO_NORMAL, + IOPRIO_HIGH, + ): + raise ValueError("%s is not a valid priority" % ioclass) + cext.proc_io_priority_set(self.pid, ioclass) + + @wrap_exceptions + def io_counters(self): + try: + ret = cext.proc_io_counters(self.pid) + except OSError as err: + if not is_permission_err(err): + raise + info = self._proc_info() + ret = ( + info[pinfo_map['io_rcount']], + info[pinfo_map['io_wcount']], + info[pinfo_map['io_rbytes']], + info[pinfo_map['io_wbytes']], + info[pinfo_map['io_count_others']], + info[pinfo_map['io_bytes_others']], + ) + return pio(*ret) + + @wrap_exceptions + def status(self): + suspended = cext.proc_is_suspended(self.pid) + if suspended: + return _common.STATUS_STOPPED + else: + return _common.STATUS_RUNNING + + @wrap_exceptions + def cpu_affinity_get(self): + def from_bitmask(x): + return [i for i in range(64) if (1 << i) & x] + + bitmask = cext.proc_cpu_affinity_get(self.pid) + return from_bitmask(bitmask) + + @wrap_exceptions + def cpu_affinity_set(self, value): + def to_bitmask(ls): + if not ls: + raise ValueError("invalid argument %r" % ls) + out = 0 + for b in ls: + out |= 2**b + return out + + # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER + # is returned for an invalid CPU but this seems not to be true, + # therefore we check CPUs validy beforehand. + allcpus = list(range(len(per_cpu_times()))) + for cpu in value: + if cpu not in allcpus: + if not isinstance(cpu, (int, long)): + raise TypeError( + "invalid CPU %r; an integer is required" % cpu + ) + else: + raise ValueError("invalid CPU %r" % cpu) + + bitmask = to_bitmask(value) + cext.proc_cpu_affinity_set(self.pid, bitmask) + + @wrap_exceptions + def num_handles(self): + try: + return cext.proc_num_handles(self.pid) + except OSError as err: + if is_permission_err(err): + return self._proc_info()[pinfo_map['num_handles']] + raise + + @wrap_exceptions + def num_ctx_switches(self): + ctx_switches = self._proc_info()[pinfo_map['ctx_switches']] + # only voluntary ctx switches are supported + return _common.pctxsw(ctx_switches, 0) diff --git a/venv/lib/python3.10/site-packages/psutil/tests/test_aix.py b/venv/lib/python3.10/site-packages/psutil/tests/test_aix.py new file mode 100644 index 0000000000000000000000000000000000000000..e7e0c8aa516e0dd4bea91d981a6bd0c25e53b6b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/tests/test_aix.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2009, Giampaolo Rodola' +# Copyright (c) 2017, Arnon Yaari +# All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""AIX specific tests.""" + +import re +import unittest + +import psutil +from psutil import AIX +from psutil.tests import PsutilTestCase +from psutil.tests import sh + + +@unittest.skipIf(not AIX, "AIX only") +class AIXSpecificTestCase(PsutilTestCase): + def test_virtual_memory(self): + out = sh('/usr/bin/svmon -O unit=KB') + re_pattern = r"memory\s*" + for field in ("size inuse free pin virtual available mmode").split(): + re_pattern += r"(?P<%s>\S+)\s+" % (field,) + matchobj = re.search(re_pattern, out) + + self.assertIsNotNone( + matchobj, "svmon command returned unexpected output" + ) + + KB = 1024 + total = int(matchobj.group("size")) * KB + available = int(matchobj.group("available")) * KB + used = int(matchobj.group("inuse")) * KB + free = int(matchobj.group("free")) * KB + + psutil_result = psutil.virtual_memory() + + # TOLERANCE_SYS_MEM from psutil.tests is not enough. For some reason + # we're seeing differences of ~1.2 MB. 2 MB is still a good tolerance + # when compared to GBs. + TOLERANCE_SYS_MEM = 2 * KB * KB # 2 MB + self.assertEqual(psutil_result.total, total) + self.assertAlmostEqual( + psutil_result.used, used, delta=TOLERANCE_SYS_MEM + ) + self.assertAlmostEqual( + psutil_result.available, available, delta=TOLERANCE_SYS_MEM + ) + self.assertAlmostEqual( + psutil_result.free, free, delta=TOLERANCE_SYS_MEM + ) + + def test_swap_memory(self): + out = sh('/usr/sbin/lsps -a') + # From the man page, "The size is given in megabytes" so we assume + # we'll always have 'MB' in the result + # TODO maybe try to use "swap -l" to check "used" too, but its units + # are not guaranteed to be "MB" so parsing may not be consistent. + matchobj = re.search( + r"(?P<space>\S+)\s+" + r"(?P<vol>\S+)\s+" + r"(?P<vg>\S+)\s+" + r"(?P<size>\d+)MB", + out, + ) + + self.assertIsNotNone( + matchobj, "lsps command returned unexpected output" + ) + + total_mb = int(matchobj.group("size")) + MB = 1024**2 + psutil_result = psutil.swap_memory() + # we divide our result by MB instead of multiplying the lsps value by + # MB because lsps may round down, so we round down too + self.assertEqual(int(psutil_result.total / MB), total_mb) + + def test_cpu_stats(self): + out = sh('/usr/bin/mpstat -a') + + re_pattern = r"ALL\s*" + for field in ( + "min maj mpcs mpcr dev soft dec ph cs ics bound rq " + "push S3pull S3grd S0rd S1rd S2rd S3rd S4rd S5rd " + "sysc" + ).split(): + re_pattern += r"(?P<%s>\S+)\s+" % (field,) + matchobj = re.search(re_pattern, out) + + self.assertIsNotNone( + matchobj, "mpstat command returned unexpected output" + ) + + # numbers are usually in the millions so 1000 is ok for tolerance + CPU_STATS_TOLERANCE = 1000 + psutil_result = psutil.cpu_stats() + self.assertAlmostEqual( + psutil_result.ctx_switches, + int(matchobj.group("cs")), + delta=CPU_STATS_TOLERANCE, + ) + self.assertAlmostEqual( + psutil_result.syscalls, + int(matchobj.group("sysc")), + delta=CPU_STATS_TOLERANCE, + ) + self.assertAlmostEqual( + psutil_result.interrupts, + int(matchobj.group("dev")), + delta=CPU_STATS_TOLERANCE, + ) + self.assertAlmostEqual( + psutil_result.soft_interrupts, + int(matchobj.group("soft")), + delta=CPU_STATS_TOLERANCE, + ) + + def test_cpu_count_logical(self): + out = sh('/usr/bin/mpstat -a') + mpstat_lcpu = int(re.search(r"lcpu=(\d+)", out).group(1)) + psutil_lcpu = psutil.cpu_count(logical=True) + self.assertEqual(mpstat_lcpu, psutil_lcpu) + + def test_net_if_addrs_names(self): + out = sh('/etc/ifconfig -l') + 
ifconfig_names = set(out.split()) + psutil_names = set(psutil.net_if_addrs().keys()) + self.assertSetEqual(ifconfig_names, psutil_names) + + +if __name__ == '__main__': + from psutil.tests.runner import run_from_name + + run_from_name(__file__) diff --git a/venv/lib/python3.10/site-packages/psutil/tests/test_bsd.py b/venv/lib/python3.10/site-packages/psutil/tests/test_bsd.py new file mode 100644 index 0000000000000000000000000000000000000000..7b502bcb8020884bb3a95ded8340ee20c920362e --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/tests/test_bsd.py @@ -0,0 +1,632 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# TODO: (FreeBSD) add test for comparing connections with 'sockstat' cmd. + + +"""Tests specific to all BSD platforms.""" + + +import datetime +import os +import re +import time +import unittest + +import psutil +from psutil import BSD +from psutil import FREEBSD +from psutil import NETBSD +from psutil import OPENBSD +from psutil.tests import HAS_BATTERY +from psutil.tests import TOLERANCE_SYS_MEM +from psutil.tests import PsutilTestCase +from psutil.tests import retry_on_failure +from psutil.tests import sh +from psutil.tests import spawn_testproc +from psutil.tests import terminate +from psutil.tests import which + + +if BSD: + from psutil._psutil_posix import getpagesize + + PAGESIZE = getpagesize() + # muse requires root privileges + MUSE_AVAILABLE = os.getuid() == 0 and which('muse') +else: + PAGESIZE = None + MUSE_AVAILABLE = False + + +def sysctl(cmdline): + """Expects a sysctl command with an argument and parse the result + returning only the value of interest. + """ + result = sh("sysctl " + cmdline) + if FREEBSD: + result = result[result.find(": ") + 2 :] + elif OPENBSD or NETBSD: + result = result[result.find("=") + 1 :] + try: + return int(result) + except ValueError: + return result + + +def muse(field): + """Thin wrapper around 'muse' cmdline utility.""" + out = sh('muse') + for line in out.split('\n'): + if line.startswith(field): + break + else: + raise ValueError("line not found") + return int(line.split()[1]) + + +# ===================================================================== +# --- All BSD* +# ===================================================================== + + +@unittest.skipIf(not BSD, "BSD only") +class BSDTestCase(PsutilTestCase): + """Generic tests common to all BSD variants.""" + + @classmethod + def setUpClass(cls): + cls.pid = spawn_testproc().pid + + @classmethod + def tearDownClass(cls): + terminate(cls.pid) + + @unittest.skipIf(NETBSD, "-o lstart doesn't work on NETBSD") + def test_process_create_time(self): + output = sh("ps -o lstart -p %s" % self.pid) + start_ps = output.replace('STARTED', '').strip() + start_psutil = psutil.Process(self.pid).create_time() + start_psutil = time.strftime( + "%a %b %e %H:%M:%S %Y", time.localtime(start_psutil) + ) + self.assertEqual(start_ps, start_psutil) + + def test_disks(self): + # test psutil.disk_usage() and psutil.disk_partitions() + # against "df -a" + def df(path): + out = sh('df -k "%s"' % path).strip() + lines = out.split('\n') + lines.pop(0) + line = lines.pop(0) + dev, total, used, free = line.split()[:4] + if dev == 'none': + dev = '' + total = int(total) * 1024 + used = int(used) * 1024 + free = int(free) * 1024 + return dev, total, used, free + + for part in psutil.disk_partitions(all=False): + usage = 
psutil.disk_usage(part.mountpoint) + dev, total, used, free = df(part.mountpoint) + self.assertEqual(part.device, dev) + self.assertEqual(usage.total, total) + # 10 MB tolerance + if abs(usage.free - free) > 10 * 1024 * 1024: + raise self.fail("psutil=%s, df=%s" % (usage.free, free)) + if abs(usage.used - used) > 10 * 1024 * 1024: + raise self.fail("psutil=%s, df=%s" % (usage.used, used)) + + @unittest.skipIf(not which('sysctl'), "sysctl cmd not available") + def test_cpu_count_logical(self): + syst = sysctl("hw.ncpu") + self.assertEqual(psutil.cpu_count(logical=True), syst) + + @unittest.skipIf(not which('sysctl'), "sysctl cmd not available") + @unittest.skipIf(NETBSD, "skipped on NETBSD") # we check /proc/meminfo + def test_virtual_memory_total(self): + num = sysctl('hw.physmem') + self.assertEqual(num, psutil.virtual_memory().total) + + @unittest.skipIf(not which('ifconfig'), "ifconfig cmd not available") + def test_net_if_stats(self): + for name, stats in psutil.net_if_stats().items(): + try: + out = sh("ifconfig %s" % name) + except RuntimeError: + pass + else: + self.assertEqual(stats.isup, 'RUNNING' in out, msg=out) + if "mtu" in out: + self.assertEqual( + stats.mtu, int(re.findall(r'mtu (\d+)', out)[0]) + ) + + +# ===================================================================== +# --- FreeBSD +# ===================================================================== + + +@unittest.skipIf(not FREEBSD, "FREEBSD only") +class FreeBSDPsutilTestCase(PsutilTestCase): + @classmethod + def setUpClass(cls): + cls.pid = spawn_testproc().pid + + @classmethod + def tearDownClass(cls): + terminate(cls.pid) + + @retry_on_failure() + def test_memory_maps(self): + out = sh('procstat -v %s' % self.pid) + maps = psutil.Process(self.pid).memory_maps(grouped=False) + lines = out.split('\n')[1:] + while lines: + line = lines.pop() + fields = line.split() + _, start, stop, perms, res = fields[:5] + map = maps.pop() + self.assertEqual("%s-%s" % (start, stop), map.addr) + self.assertEqual(int(res), map.rss) + if not map.path.startswith('['): + self.assertEqual(fields[10], map.path) + + def test_exe(self): + out = sh('procstat -b %s' % self.pid) + self.assertEqual( + psutil.Process(self.pid).exe(), out.split('\n')[1].split()[-1] + ) + + def test_cmdline(self): + out = sh('procstat -c %s' % self.pid) + self.assertEqual( + ' '.join(psutil.Process(self.pid).cmdline()), + ' '.join(out.split('\n')[1].split()[2:]), + ) + + def test_uids_gids(self): + out = sh('procstat -s %s' % self.pid) + euid, ruid, suid, egid, rgid, sgid = out.split('\n')[1].split()[2:8] + p = psutil.Process(self.pid) + uids = p.uids() + gids = p.gids() + self.assertEqual(uids.real, int(ruid)) + self.assertEqual(uids.effective, int(euid)) + self.assertEqual(uids.saved, int(suid)) + self.assertEqual(gids.real, int(rgid)) + self.assertEqual(gids.effective, int(egid)) + self.assertEqual(gids.saved, int(sgid)) + + @retry_on_failure() + def test_ctx_switches(self): + tested = [] + out = sh('procstat -r %s' % self.pid) + p = psutil.Process(self.pid) + for line in out.split('\n'): + line = line.lower().strip() + if ' voluntary context' in line: + pstat_value = int(line.split()[-1]) + psutil_value = p.num_ctx_switches().voluntary + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + elif ' involuntary context' in line: + pstat_value = int(line.split()[-1]) + psutil_value = p.num_ctx_switches().involuntary + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + if len(tested) != 2: + raise RuntimeError("couldn't find 
matching lines in procstat output") + + @retry_on_failure() + def test_cpu_times(self): + tested = [] + out = sh('procstat -r %s' % self.pid) + p = psutil.Process(self.pid) + for line in out.split('\n'): + line = line.lower().strip() + if 'user time' in line: + pstat_value = float('0.' + line.split()[-1].split('.')[-1]) + psutil_value = p.cpu_times().user + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + elif 'system time' in line: + pstat_value = float('0.' + line.split()[-1].split('.')[-1]) + psutil_value = p.cpu_times().system + self.assertEqual(pstat_value, psutil_value) + tested.append(None) + if len(tested) != 2: + raise RuntimeError("couldn't find matching lines in procstat output") + + +@unittest.skipIf(not FREEBSD, "FREEBSD only") +class FreeBSDSystemTestCase(PsutilTestCase): + @staticmethod + def parse_swapinfo(): + # the last line is always the total + output = sh("swapinfo -k").splitlines()[-1] + parts = re.split(r'\s+', output) + + if not parts: + raise ValueError("Can't parse swapinfo: %s" % output) + + # the size is in 1k units, so multiply by 1024 + total, used, free = (int(p) * 1024 for p in parts[1:4]) + return total, used, free + + def test_cpu_frequency_against_sysctl(self): + # Currently only cpu 0's frequency is supported on FreeBSD. + # All other cores use the same frequency. + sensor = "dev.cpu.0.freq" + try: + sysctl_result = int(sysctl(sensor)) + except RuntimeError: + self.skipTest("frequencies not supported by kernel") + self.assertEqual(psutil.cpu_freq().current, sysctl_result) + + sensor = "dev.cpu.0.freq_levels" + sysctl_result = sysctl(sensor) + # sysctl returns a string made of "<freq>/<power>" pairs, e.g. + # "2400/95000 2200/87000 ..." + # Ordered highest available to lowest available. + max_freq = int(sysctl_result.split()[0].split("/")[0]) + min_freq = int(sysctl_result.split()[-1].split("/")[0]) + self.assertEqual(psutil.cpu_freq().max, max_freq) + self.assertEqual(psutil.cpu_freq().min, min_freq) + + # --- virtual_memory(); tests against sysctl + + @retry_on_failure() + def test_vmem_active(self): + syst = sysctl("vm.stats.vm.v_active_count") * PAGESIZE + self.assertAlmostEqual( + psutil.virtual_memory().active, syst, delta=TOLERANCE_SYS_MEM + ) + + @retry_on_failure() + def test_vmem_inactive(self): + syst = sysctl("vm.stats.vm.v_inactive_count") * PAGESIZE + self.assertAlmostEqual( + psutil.virtual_memory().inactive, syst, delta=TOLERANCE_SYS_MEM + ) + + @retry_on_failure() + def test_vmem_wired(self): + syst = sysctl("vm.stats.vm.v_wire_count") * PAGESIZE + self.assertAlmostEqual( + psutil.virtual_memory().wired, syst, delta=TOLERANCE_SYS_MEM + ) + + @retry_on_failure() + def test_vmem_cached(self): + syst = sysctl("vm.stats.vm.v_cache_count") * PAGESIZE + self.assertAlmostEqual( + psutil.virtual_memory().cached, syst, delta=TOLERANCE_SYS_MEM + ) + + @retry_on_failure() + def test_vmem_free(self): + syst = sysctl("vm.stats.vm.v_free_count") * PAGESIZE + self.assertAlmostEqual( + psutil.virtual_memory().free, syst, delta=TOLERANCE_SYS_MEM + ) + + @retry_on_failure() + def test_vmem_buffers(self): + syst = sysctl("vfs.bufspace") + self.assertAlmostEqual( + psutil.virtual_memory().buffers, syst, delta=TOLERANCE_SYS_MEM + ) + + # --- virtual_memory(); tests against muse + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + def test_muse_vmem_total(self): + num = muse('Total') + self.assertEqual(psutil.virtual_memory().total, num) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_on_failure() + def test_muse_vmem_active(self): + num = muse('Active') + 
self.assertAlmostEqual( + psutil.virtual_memory().active, num, delta=TOLERANCE_SYS_MEM + ) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_on_failure() + def test_muse_vmem_inactive(self): + num = muse('Inactive') + self.assertAlmostEqual( + psutil.virtual_memory().inactive, num, delta=TOLERANCE_SYS_MEM + ) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_on_failure() + def test_muse_vmem_wired(self): + num = muse('Wired') + self.assertAlmostEqual( + psutil.virtual_memory().wired, num, delta=TOLERANCE_SYS_MEM + ) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_on_failure() + def test_muse_vmem_cached(self): + num = muse('Cache') + self.assertAlmostEqual( + psutil.virtual_memory().cached, num, delta=TOLERANCE_SYS_MEM + ) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_on_failure() + def test_muse_vmem_free(self): + num = muse('Free') + self.assertAlmostEqual( + psutil.virtual_memory().free, num, delta=TOLERANCE_SYS_MEM + ) + + @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed") + @retry_on_failure() + def test_muse_vmem_buffers(self): + num = muse('Buffer') + self.assertAlmostEqual( + psutil.virtual_memory().buffers, num, delta=TOLERANCE_SYS_MEM + ) + + def test_cpu_stats_ctx_switches(self): + self.assertAlmostEqual( + psutil.cpu_stats().ctx_switches, + sysctl('vm.stats.sys.v_swtch'), + delta=1000, + ) + + def test_cpu_stats_interrupts(self): + self.assertAlmostEqual( + psutil.cpu_stats().interrupts, + sysctl('vm.stats.sys.v_intr'), + delta=1000, + ) + + def test_cpu_stats_soft_interrupts(self): + self.assertAlmostEqual( + psutil.cpu_stats().soft_interrupts, + sysctl('vm.stats.sys.v_soft'), + delta=1000, + ) + + @retry_on_failure() + def test_cpu_stats_syscalls(self): + # pretty high tolerance but it looks like it's OK. 
+ self.assertAlmostEqual( + psutil.cpu_stats().syscalls, + sysctl('vm.stats.sys.v_syscall'), + delta=200000, + ) + + # def test_cpu_stats_traps(self): + # self.assertAlmostEqual(psutil.cpu_stats().traps, + # sysctl('vm.stats.sys.v_trap'), delta=1000) + + # --- swap memory + + def test_swapmem_free(self): + total, used, free = self.parse_swapinfo() + self.assertAlmostEqual( + psutil.swap_memory().free, free, delta=TOLERANCE_SYS_MEM + ) + + def test_swapmem_used(self): + total, used, free = self.parse_swapinfo() + self.assertAlmostEqual( + psutil.swap_memory().used, used, delta=TOLERANCE_SYS_MEM + ) + + def test_swapmem_total(self): + total, used, free = self.parse_swapinfo() + self.assertAlmostEqual( + psutil.swap_memory().total, total, delta=TOLERANCE_SYS_MEM + ) + + # --- others + + def test_boot_time(self): + s = sysctl('kern.boottime') # the sysctl() helper prepends "sysctl " + s = s[s.find(" sec = ") + 7 :] + s = s[: s.find(',')] + btime = int(s) + self.assertEqual(btime, psutil.boot_time()) + + # --- sensors_battery + + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_sensors_battery(self): + def secs2hours(secs): + m, s = divmod(secs, 60) + h, m = divmod(m, 60) + return "%d:%02d" % (h, m) + + out = sh("acpiconf -i 0") + fields = dict( + [(x.split('\t')[0], x.split('\t')[-1]) for x in out.split("\n")] + ) + metrics = psutil.sensors_battery() + percent = int(fields['Remaining capacity:'].replace('%', '')) + remaining_time = fields['Remaining time:'] + self.assertEqual(metrics.percent, percent) + if remaining_time == 'unknown': + self.assertEqual(metrics.secsleft, psutil.POWER_TIME_UNLIMITED) + else: + self.assertEqual(secs2hours(metrics.secsleft), remaining_time) + + @unittest.skipIf(not HAS_BATTERY, "no battery") + def test_sensors_battery_against_sysctl(self): + self.assertEqual( + psutil.sensors_battery().percent, sysctl("hw.acpi.battery.life") + ) + self.assertEqual( + psutil.sensors_battery().power_plugged, + sysctl("hw.acpi.acline") == 1, + ) + secsleft = psutil.sensors_battery().secsleft + if secsleft < 0: + self.assertEqual(sysctl("hw.acpi.battery.time"), -1) + else: + self.assertEqual(secsleft, sysctl("hw.acpi.battery.time") * 60) + + @unittest.skipIf(HAS_BATTERY, "has battery") + def test_sensors_battery_no_battery(self): + # If no battery is present one of these calls is supposed + # to fail, see: + # https://github.com/giampaolo/psutil/issues/1074 + with self.assertRaises(RuntimeError): + sysctl("hw.acpi.battery.life") + sysctl("hw.acpi.battery.time") + sysctl("hw.acpi.acline") + self.assertIsNone(psutil.sensors_battery()) + + # --- sensors_temperatures + + def test_sensors_temperatures_against_sysctl(self): + num_cpus = psutil.cpu_count(True) + for cpu in range(num_cpus): + sensor = "dev.cpu.%s.temperature" % cpu + # sysctl returns a string in the format "46.0C" + try: + sysctl_result = int(float(sysctl(sensor)[:-1])) + except RuntimeError: + self.skipTest("temperatures not supported by kernel") + self.assertAlmostEqual( + psutil.sensors_temperatures()["coretemp"][cpu].current, + sysctl_result, + delta=10, + ) + + sensor = "dev.cpu.%s.coretemp.tjmax" % cpu + sysctl_result = int(float(sysctl(sensor)[:-1])) + self.assertEqual( + psutil.sensors_temperatures()["coretemp"][cpu].high, + sysctl_result, + ) + + +# ===================================================================== +# --- OpenBSD +# ===================================================================== + + +@unittest.skipIf(not OPENBSD, "OPENBSD only") +class OpenBSDTestCase(PsutilTestCase): + def test_boot_time(self): + s = 
sysctl('kern.boottime') + sys_bt = datetime.datetime.strptime(s, "%a %b %d %H:%M:%S %Y") + psutil_bt = datetime.datetime.fromtimestamp(psutil.boot_time()) + self.assertEqual(sys_bt, psutil_bt) + + +# ===================================================================== +# --- NetBSD +# ===================================================================== + + +@unittest.skipIf(not NETBSD, "NETBSD only") +class NetBSDTestCase(PsutilTestCase): + @staticmethod + def parse_meminfo(look_for): + with open('/proc/meminfo') as f: + for line in f: + if line.startswith(look_for): + return int(line.split()[1]) * 1024 + raise ValueError("can't find %s" % look_for) + + # --- virtual mem + + def test_vmem_total(self): + self.assertEqual( + psutil.virtual_memory().total, self.parse_meminfo("MemTotal:") + ) + + def test_vmem_free(self): + self.assertAlmostEqual( + psutil.virtual_memory().free, + self.parse_meminfo("MemFree:"), + delta=TOLERANCE_SYS_MEM, + ) + + def test_vmem_buffers(self): + self.assertAlmostEqual( + psutil.virtual_memory().buffers, + self.parse_meminfo("Buffers:"), + delta=TOLERANCE_SYS_MEM, + ) + + def test_vmem_shared(self): + self.assertAlmostEqual( + psutil.virtual_memory().shared, + self.parse_meminfo("MemShared:"), + delta=TOLERANCE_SYS_MEM, + ) + + def test_vmem_cached(self): + self.assertAlmostEqual( + psutil.virtual_memory().cached, + self.parse_meminfo("Cached:"), + delta=TOLERANCE_SYS_MEM, + ) + + # --- swap mem + + def test_swapmem_total(self): + self.assertAlmostEqual( + psutil.swap_memory().total, + self.parse_meminfo("SwapTotal:"), + delta=TOLERANCE_SYS_MEM, + ) + + def test_swapmem_free(self): + self.assertAlmostEqual( + psutil.swap_memory().free, + self.parse_meminfo("SwapFree:"), + delta=TOLERANCE_SYS_MEM, + ) + + def test_swapmem_used(self): + smem = psutil.swap_memory() + self.assertEqual(smem.used, smem.total - smem.free) + + # --- others + + def test_cpu_stats_interrupts(self): + with open('/proc/stat', 'rb') as f: + for line in f: + if line.startswith(b'intr'): + interrupts = int(line.split()[1]) + break + else: + raise ValueError("couldn't find line") + self.assertAlmostEqual( + psutil.cpu_stats().interrupts, interrupts, delta=1000 + ) + + def test_cpu_stats_ctx_switches(self): + with open('/proc/stat', 'rb') as f: + for line in f: + if line.startswith(b'ctxt'): + ctx_switches = int(line.split()[1]) + break + else: + raise ValueError("couldn't find line") + self.assertAlmostEqual( + psutil.cpu_stats().ctx_switches, ctx_switches, delta=1000 + ) + + +if __name__ == '__main__': + from psutil.tests.runner import run_from_name + + run_from_name(__file__) diff --git a/venv/lib/python3.10/site-packages/psutil/tests/test_posix.py b/venv/lib/python3.10/site-packages/psutil/tests/test_posix.py new file mode 100644 index 0000000000000000000000000000000000000000..53852cef8e467b22f96b313477e52e14e94d0a2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil/tests/test_posix.py @@ -0,0 +1,493 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""POSIX specific tests.""" + +import datetime +import errno +import os +import re +import subprocess +import time +import unittest + +import psutil +from psutil import AIX +from psutil import BSD +from psutil import LINUX +from psutil import MACOS +from psutil import OPENBSD +from psutil import POSIX +from psutil import SUNOS +from psutil.tests import HAS_NET_IO_COUNTERS +from psutil.tests import PYTHON_EXE +from psutil.tests import PsutilTestCase +from psutil.tests import mock +from psutil.tests import retry_on_failure +from psutil.tests import sh +from psutil.tests import skip_on_access_denied +from psutil.tests import spawn_testproc +from psutil.tests import terminate +from psutil.tests import which + + +if POSIX: + import mmap + import resource + + from psutil._psutil_posix import getpagesize + + +def ps(fmt, pid=None): + """Wrapper for calling the ps command with a little bit of cross-platform + support for a narrow range of features. + """ + + cmd = ['ps'] + + if LINUX: + cmd.append('--no-headers') + + if pid is not None: + cmd.extend(['-p', str(pid)]) + else: + if SUNOS or AIX: + cmd.append('-A') + else: + cmd.append('ax') + + if SUNOS: + fmt = fmt.replace("start", "stime") + + cmd.extend(['-o', fmt]) + + output = sh(cmd) + + output = output.splitlines() if LINUX else output.splitlines()[1:] + + all_output = [] + for line in output: + line = line.strip() + + try: + line = int(line) + except ValueError: + pass + + all_output.append(line) + + if pid is None: + return all_output + else: + return all_output[0] + + +# ps "-o" field names differ wildly between platforms. +# "comm" means "only executable name" but is not available on BSD platforms. +# "args" means "command with all its arguments", and is also not available +# on BSD platforms. +# "command" is like "args" on most platforms, but like "comm" on AIX, +# and not available on SUNOS. +# so for the executable name we can use "comm" on Solaris and split "command" +# on other platforms. +# to get the cmdline (with args) we have to use "args" on AIX and +# Solaris, and can use "command" on all others. 
+ + +def ps_name(pid): + field = "command" + if SUNOS: + field = "comm" + return ps(field, pid).split()[0] + + +def ps_args(pid): + field = "command" + if AIX or SUNOS: + field = "args" + out = ps(field, pid) + # observed on BSD + Github CI: '/usr/local/bin/python3 -E -O (python3.9)' + out = re.sub(r"\(python.*?\)$", "", out) + return out.strip() + + +def ps_rss(pid): + field = "rss" + if AIX: + field = "rssize" + return ps(field, pid) + + +def ps_vsz(pid): + field = "vsz" + if AIX: + field = "vsize" + return ps(field, pid) + + +@unittest.skipIf(not POSIX, "POSIX only") +class TestProcess(PsutilTestCase): + """Compare psutil results against 'ps' command line utility (mainly).""" + + @classmethod + def setUpClass(cls): + cls.pid = spawn_testproc( + [PYTHON_EXE, "-E", "-O"], stdin=subprocess.PIPE + ).pid + + @classmethod + def tearDownClass(cls): + terminate(cls.pid) + + def test_ppid(self): + ppid_ps = ps('ppid', self.pid) + ppid_psutil = psutil.Process(self.pid).ppid() + self.assertEqual(ppid_ps, ppid_psutil) + + def test_uid(self): + uid_ps = ps('uid', self.pid) + uid_psutil = psutil.Process(self.pid).uids().real + self.assertEqual(uid_ps, uid_psutil) + + def test_gid(self): + gid_ps = ps('rgid', self.pid) + gid_psutil = psutil.Process(self.pid).gids().real + self.assertEqual(gid_ps, gid_psutil) + + def test_username(self): + username_ps = ps('user', self.pid) + username_psutil = psutil.Process(self.pid).username() + self.assertEqual(username_ps, username_psutil) + + def test_username_no_resolution(self): + # Emulate a case where the system can't resolve the uid to + # a username in which case psutil is supposed to return + # the stringified uid. + p = psutil.Process() + with mock.patch("psutil.pwd.getpwuid", side_effect=KeyError) as fun: + self.assertEqual(p.username(), str(p.uids().real)) + assert fun.called + + @skip_on_access_denied() + @retry_on_failure() + def test_rss_memory(self): + # give python interpreter some time to properly initialize + # so that the results are the same + time.sleep(0.1) + rss_ps = ps_rss(self.pid) + rss_psutil = psutil.Process(self.pid).memory_info()[0] / 1024 + self.assertEqual(rss_ps, rss_psutil) + + @skip_on_access_denied() + @retry_on_failure() + def test_vsz_memory(self): + # give python interpreter some time to properly initialize + # so that the results are the same + time.sleep(0.1) + vsz_ps = ps_vsz(self.pid) + vsz_psutil = psutil.Process(self.pid).memory_info()[1] / 1024 + self.assertEqual(vsz_ps, vsz_psutil) + + def test_name(self): + name_ps = ps_name(self.pid) + # remove path if there is any, from the command + name_ps = os.path.basename(name_ps).lower() + name_psutil = psutil.Process(self.pid).name().lower() + # ...because of how we calculate PYTHON_EXE; on MACOS this may + # be "pythonX.Y". + name_ps = re.sub(r"\d.\d", "", name_ps) + name_psutil = re.sub(r"\d.\d", "", name_psutil) + # ...may also be "python.X" + name_ps = re.sub(r"\d", "", name_ps) + name_psutil = re.sub(r"\d", "", name_psutil) + self.assertEqual(name_ps, name_psutil) + + def test_name_long(self): + # On UNIX the kernel truncates the name to the first 15 + # characters. In such a case psutil tries to determine the + # full name from the cmdline. 
+ name = "long-program-name" + cmdline = ["long-program-name-extended", "foo", "bar"] + with mock.patch("psutil._psplatform.Process.name", return_value=name): + with mock.patch( + "psutil._psplatform.Process.cmdline", return_value=cmdline + ): + p = psutil.Process() + self.assertEqual(p.name(), "long-program-name-extended") + + def test_name_long_cmdline_ad_exc(self): + # Same as above but emulates a case where cmdline() raises + # AccessDenied in which case psutil is supposed to return + # the truncated name instead of crashing. + name = "long-program-name" + with mock.patch("psutil._psplatform.Process.name", return_value=name): + with mock.patch( + "psutil._psplatform.Process.cmdline", + side_effect=psutil.AccessDenied(0, ""), + ): + p = psutil.Process() + self.assertEqual(p.name(), "long-program-name") + + def test_name_long_cmdline_nsp_exc(self): + # Same as above but emulates a case where cmdline() raises NSP + # which is supposed to propagate. + name = "long-program-name" + with mock.patch("psutil._psplatform.Process.name", return_value=name): + with mock.patch( + "psutil._psplatform.Process.cmdline", + side_effect=psutil.NoSuchProcess(0, ""), + ): + p = psutil.Process() + self.assertRaises(psutil.NoSuchProcess, p.name) + + @unittest.skipIf(MACOS or BSD, 'ps -o start not available') + def test_create_time(self): + time_ps = ps('start', self.pid) + time_psutil = psutil.Process(self.pid).create_time() + time_psutil_tstamp = datetime.datetime.fromtimestamp( + time_psutil + ).strftime("%H:%M:%S") + # sometimes ps shows the time rounded up instead of down, so we check + # for both possible values + round_time_psutil = round(time_psutil) + round_time_psutil_tstamp = datetime.datetime.fromtimestamp( + round_time_psutil + ).strftime("%H:%M:%S") + self.assertIn(time_ps, [time_psutil_tstamp, round_time_psutil_tstamp]) + + def test_exe(self): + ps_pathname = ps_name(self.pid) + psutil_pathname = psutil.Process(self.pid).exe() + try: + self.assertEqual(ps_pathname, psutil_pathname) + except AssertionError: + # certain platforms such as BSD are more accurate returning: + # "/usr/local/bin/python2.7" + # ...instead of: + # "/usr/local/bin/python" + # We do not want to consider this difference in accuracy + # an error. + adjusted_ps_pathname = ps_pathname[: len(ps_pathname)] + self.assertEqual(ps_pathname, adjusted_ps_pathname) + + # On macOS the official python installer exposes a python wrapper that + # executes a python executable hidden inside an application bundle inside + # the Python framework. 
+ # There's a race condition between the ps call & the psutil call below + # depending on the completion of the execve call, so let's retry on failure + @retry_on_failure() + def test_cmdline(self): + ps_cmdline = ps_args(self.pid) + psutil_cmdline = " ".join(psutil.Process(self.pid).cmdline()) + self.assertEqual(ps_cmdline, psutil_cmdline) + + # On SUNOS "ps" reads niceness /proc/pid/psinfo which returns an + # incorrect value (20); the real deal is getpriority(2) which + # returns 0; psutil relies on it, see: + # https://github.com/giampaolo/psutil/issues/1082 + # AIX has the same issue + @unittest.skipIf(SUNOS, "not reliable on SUNOS") + @unittest.skipIf(AIX, "not reliable on AIX") + def test_nice(self): + ps_nice = ps('nice', self.pid) + psutil_nice = psutil.Process(self.pid).nice() + self.assertEqual(ps_nice, psutil_nice) + + +@unittest.skipIf(not POSIX, "POSIX only") +class TestSystemAPIs(PsutilTestCase): + """Test some system APIs.""" + + @retry_on_failure() + def test_pids(self): + # Note: this test might fail if the OS is starting/killing + # other processes in the meantime + pids_ps = sorted(ps("pid")) + pids_psutil = psutil.pids() + + # on MACOS and OPENBSD ps doesn't show pid 0 + if (MACOS or OPENBSD) and 0 not in pids_ps: + pids_ps.insert(0, 0) + + # There will often be one more process in pids_ps for ps itself + if len(pids_ps) - len(pids_psutil) > 1: + difference = [x for x in pids_psutil if x not in pids_ps] + [ + x for x in pids_ps if x not in pids_psutil + ] + raise self.fail("difference: " + str(difference)) + + # for some reason ifconfig -a does not report all interfaces + # returned by psutil + @unittest.skipIf(SUNOS, "unreliable on SUNOS") + @unittest.skipIf(not which('ifconfig'), "no ifconfig cmd") + @unittest.skipIf(not HAS_NET_IO_COUNTERS, "not supported") + def test_nic_names(self): + output = sh("ifconfig -a") + for nic in psutil.net_io_counters(pernic=True): + for line in output.split(): + if line.startswith(nic): + break + else: + raise self.fail( + "couldn't find %s nic in 'ifconfig -a' output\n%s" + % (nic, output) + ) + + # @unittest.skipIf(CI_TESTING and not psutil.users(), "unreliable on CI") + @retry_on_failure() + def test_users(self): + out = sh("who -u") + if not out.strip(): + raise self.skipTest("no users on this system") + lines = out.split('\n') + users = [x.split()[0] for x in lines] + terminals = [x.split()[1] for x in lines] + self.assertEqual(len(users), len(psutil.users())) + with self.subTest(psutil=psutil.users(), who=out): + for idx, u in enumerate(psutil.users()): + self.assertEqual(u.name, users[idx]) + self.assertEqual(u.terminal, terminals[idx]) + if u.pid is not None: # None on OpenBSD + psutil.Process(u.pid) + + @retry_on_failure() + def test_users_started(self): + out = sh("who -u") + if not out.strip(): + raise self.skipTest("no users on this system") + tstamp = None + # '2023-04-11 09:31' (Linux) + started = re.findall(r"\d\d\d\d-\d\d-\d\d \d\d:\d\d", out) + if started: + tstamp = "%Y-%m-%d %H:%M" + else: + # 'Apr 10 22:27' (macOS) + started = re.findall(r"[A-Z][a-z][a-z] \d\d \d\d:\d\d", out) + if started: + tstamp = "%b %d %H:%M" + else: + # 'Apr 10' + started = re.findall(r"[A-Z][a-z][a-z] \d\d", out) + if started: + tstamp = "%b %d" + else: + # 'apr 10' (sunOS) + started = re.findall(r"[a-z][a-z][a-z] \d\d", out) + if started: + tstamp = "%b %d" + started = [x.capitalize() for x in started] + + if not tstamp: + raise unittest.SkipTest( + "cannot interpret tstamp in who output\n%s" % out + ) + + with self.subTest(psutil=psutil.users(), 
who=out): + for idx, u in enumerate(psutil.users()): + psutil_value = datetime.datetime.fromtimestamp( + u.started + ).strftime(tstamp) + self.assertEqual(psutil_value, started[idx]) + + def test_pid_exists_let_raise(self): + # According to "man 2 kill" possible error values for kill + # are (EINVAL, EPERM, ESRCH). Test that any other errno + # results in an exception. + with mock.patch( + "psutil._psposix.os.kill", side_effect=OSError(errno.EBADF, "") + ) as m: + self.assertRaises(OSError, psutil._psposix.pid_exists, os.getpid()) + assert m.called + + def test_os_waitpid_let_raise(self): + # os.waitpid() is supposed to catch EINTR and ECHILD only. + # Test that any other errno results in an exception. + with mock.patch( + "psutil._psposix.os.waitpid", side_effect=OSError(errno.EBADF, "") + ) as m: + self.assertRaises(OSError, psutil._psposix.wait_pid, os.getpid()) + assert m.called + + def test_os_waitpid_eintr(self): + # os.waitpid() is supposed to "retry" on EINTR. + with mock.patch( + "psutil._psposix.os.waitpid", side_effect=OSError(errno.EINTR, "") + ) as m: + self.assertRaises( + psutil._psposix.TimeoutExpired, + psutil._psposix.wait_pid, + os.getpid(), + timeout=0.01, + ) + assert m.called + + def test_os_waitpid_bad_ret_status(self): + # Simulate os.waitpid() returning a bad status. + with mock.patch( + "psutil._psposix.os.waitpid", return_value=(1, -1) + ) as m: + self.assertRaises( + ValueError, psutil._psposix.wait_pid, os.getpid() + ) + assert m.called + + # AIX can return '-' in df output instead of numbers, e.g. for /proc + @unittest.skipIf(AIX, "unreliable on AIX") + @retry_on_failure() + def test_disk_usage(self): + def df(device): + try: + out = sh("df -k %s" % device).strip() + except RuntimeError as err: + if "device busy" in str(err).lower(): + raise self.skipTest("df returned EBUSY") + raise + line = out.split('\n')[1] + fields = line.split() + total = int(fields[1]) * 1024 + used = int(fields[2]) * 1024 + free = int(fields[3]) * 1024 + percent = float(fields[4].replace('%', '')) + return (total, used, free, percent) + + tolerance = 4 * 1024 * 1024 # 4MB + for part in psutil.disk_partitions(all=False): + usage = psutil.disk_usage(part.mountpoint) + try: + total, used, free, percent = df(part.device) + except RuntimeError as err: + # see: + # https://travis-ci.org/giampaolo/psutil/jobs/138338464 + # https://travis-ci.org/giampaolo/psutil/jobs/138343361 + err = str(err).lower() + if ( + "no such file or directory" in err + or "raw devices not supported" in err + or "permission denied" in err + ): + continue + raise + else: + self.assertAlmostEqual(usage.total, total, delta=tolerance) + self.assertAlmostEqual(usage.used, used, delta=tolerance) + self.assertAlmostEqual(usage.free, free, delta=tolerance) + self.assertAlmostEqual(usage.percent, percent, delta=1) + + +@unittest.skipIf(not POSIX, "POSIX only") +class TestMisc(PsutilTestCase): + def test_getpagesize(self): + pagesize = getpagesize() + self.assertGreater(pagesize, 0) + self.assertEqual(pagesize, resource.getpagesize()) + self.assertEqual(pagesize, mmap.PAGESIZE) + + +if __name__ == '__main__': + from psutil.tests.runner import run_from_name + + run_from_name(__file__)
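All three test modules added above close with the same __main__ hook, so each can also be run standalone through psutil's bundled runner. A hedged example of doing the same programmatically; run_from_name is invoked here exactly as the modules' __main__ blocks invoke it with __file__, and the path below is only a placeholder for wherever the module is installed.

# Run one of the vendored test modules via psutil's own runner, mirroring
# the modules' __main__ blocks. The path is illustrative.
from psutil.tests.runner import run_from_name

run_from_name("venv/lib/python3.10/site-packages/psutil/tests/test_posix.py")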